Compare commits
36 Commits
feat/finis
...
fix/check-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7f8d0b06ee | ||
|
|
3e6dc56196 | ||
|
|
4be5fedeec | ||
|
|
163e016759 | ||
|
|
b3a58ec321 | ||
|
|
0097ca80e2 | ||
|
|
d968df4612 | ||
|
|
2bd680361a | ||
|
|
cc676d4bef | ||
|
|
3b1155b538 | ||
|
|
03ff6e1ca6 | ||
|
|
706fac898a | ||
|
|
f5c144404d | ||
|
|
50a217a638 | ||
|
|
444c13e1e3 | ||
|
|
255b19d6ee | ||
|
|
f1f4831157 | ||
|
|
876f59d650 | ||
|
|
c23e88ecd1 | ||
|
|
284d0f99e1 | ||
|
|
13ac5d564a | ||
|
|
4620b71aee | ||
|
|
1b926178f1 | ||
|
|
5167c927be | ||
|
|
b18c64b725 | ||
|
|
7ce1590eaf | ||
|
|
77a9504f74 | ||
|
|
bf35902696 | ||
|
|
0d12b5fbc2 | ||
|
|
1746e8b21f | ||
|
|
0836eef1a6 | ||
|
|
d0bd10190d | ||
|
|
d8191bd4fb | ||
|
|
d15571c727 | ||
|
|
a2f67dddb6 | ||
|
|
8f00321a60 |
@@ -11,6 +11,7 @@
|
|||||||
"dist/**",
|
"dist/**",
|
||||||
"out/**",
|
"out/**",
|
||||||
"local/**",
|
"local/**",
|
||||||
|
"tests/**",
|
||||||
".yarn/**",
|
".yarn/**",
|
||||||
".gitignore",
|
".gitignore",
|
||||||
"scripts/cloudflare-worker.js",
|
"scripts/cloudflare-worker.js",
|
||||||
|
|||||||
10
CLAUDE.md
10
CLAUDE.md
@@ -12,7 +12,15 @@ This file provides guidance to AI coding assistants when working with code in th
|
|||||||
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
|
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
|
||||||
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
|
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
|
||||||
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
|
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
|
||||||
- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.
|
|
||||||
|
## Pull Request Workflow (CRITICAL)
|
||||||
|
|
||||||
|
When creating a Pull Request, you MUST:
|
||||||
|
|
||||||
|
1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
|
||||||
|
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
|
||||||
|
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
|
||||||
|
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
|
||||||
|
|
||||||
## Development Commands
|
## Development Commands
|
||||||
|
|
||||||
|
|||||||
@@ -134,56 +134,108 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
|
|||||||
releaseInfo:
|
releaseInfo:
|
||||||
releaseNotes: |
|
releaseNotes: |
|
||||||
<!--LANG:en-->
|
<!--LANG:en-->
|
||||||
What's New in v1.7.0-rc.3
|
A New Era of Intelligence with Cherry Studio 1.7.1
|
||||||
|
|
||||||
✨ New Features:
|
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
|
||||||
- Provider: Added Silicon provider support for Anthropic API compatibility
|
|
||||||
- Provider: AIHubMix support for nano banana
|
|
||||||
|
|
||||||
🐛 Bug Fixes:
|
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
|
||||||
- i18n: Clean up translation tags and untranslated strings
|
|
||||||
- Provider: Fixed Silicon provider code list
|
|
||||||
- Provider: Fixed Poe API reasoning parameters for GPT-5 and reasoning models
|
|
||||||
- Provider: Fixed duplicate /v1 in Anthropic API endpoints
|
|
||||||
- Provider: Fixed Azure provider handling in AI SDK integration
|
|
||||||
- Models: Added Claude Opus 4.5 pattern to THINKING_TOKEN_MAP
|
|
||||||
- Models: Improved Gemini reasoning and message handling
|
|
||||||
- Models: Fixed custom parameters for Gemini models
|
|
||||||
- Models: Fixed qwen-mt-flash text delta support
|
|
||||||
- Models: Fixed Groq verbosity setting
|
|
||||||
- UI: Fixed quota display and quota tips
|
|
||||||
- UI: Fixed web search button condition
|
|
||||||
- Settings: Fixed updateAssistantPreset reducer to properly update preset
|
|
||||||
- Settings: Respect enableMaxTokens setting when maxTokens is not configured
|
|
||||||
- SDK: Fixed header merging logic in AI SDK
|
|
||||||
|
|
||||||
⚡ Improvements:
|
This is what we've been building toward. And it's just the beginning.
|
||||||
- SDK: Upgraded @anthropic-ai/claude-agent-sdk to 0.1.53
|
|
||||||
|
🤖 Meet Agent
|
||||||
|
Imagine having a brilliant colleague who never sleeps. Give Agent a goal — write a report, analyze data, refactor code — and watch it work. It reasons through problems, breaks them into steps, calls the right tools, and adapts when things change.
|
||||||
|
|
||||||
|
- **Think → Plan → Act**: From goal to execution, fully autonomous
|
||||||
|
- **Deep Reasoning**: Multi-turn thinking that solves real problems
|
||||||
|
- **Tool Mastery**: File operations, web search, code execution, and more
|
||||||
|
- **Skill Plugins**: Extend with custom commands and capabilities
|
||||||
|
- **You Stay in Control**: Real-time approval for sensitive actions
|
||||||
|
- **Full Visibility**: Every thought, every decision, fully transparent
|
||||||
|
|
||||||
|
🌐 Expanding Ecosystem
|
||||||
|
- **New Providers**: HuggingFace, Mistral, CherryIN, AI Gateway, Intel OVMS, Didi MCP
|
||||||
|
- **New Models**: Claude 4.5 Haiku, DeepSeek v3.2, GLM-4.6, Doubao, Ling series
|
||||||
|
- **MCP Integration**: Alibaba Cloud, ModelScope, Higress, MCP.so, TokenFlux and more
|
||||||
|
|
||||||
|
📚 Smarter Knowledge Base
|
||||||
|
- **OpenMinerU**: Self-hosted document processing
|
||||||
|
- **Full-Text Search**: Find anything instantly across your notes
|
||||||
|
- **Enhanced Tool Selection**: Smarter configuration for better AI assistance
|
||||||
|
|
||||||
|
📝 Notes, Reimagined
|
||||||
|
- Full-text search with highlighted results
|
||||||
|
- AI-powered smart rename
|
||||||
|
- Export as image
|
||||||
|
- Auto-wrap for tables
|
||||||
|
|
||||||
|
🖼️ Image & OCR
|
||||||
|
- Intel OVMS painting capabilities
|
||||||
|
- Intel OpenVINO NPU-accelerated OCR
|
||||||
|
|
||||||
|
🌍 Now in 10+ Languages
|
||||||
|
- Added German support
|
||||||
|
- Enhanced internationalization
|
||||||
|
|
||||||
|
⚡ Faster & More Polished
|
||||||
|
- Electron 38 upgrade
|
||||||
|
- New MCP management interface
|
||||||
|
- Dozens of UI refinements
|
||||||
|
|
||||||
|
❤️ Fully Open Source
|
||||||
|
Commercial restrictions removed. Cherry Studio now follows standard AGPL v3 — free for teams of any size.
|
||||||
|
|
||||||
|
The Agent Era is here. We can't wait to see what you'll create.
|
||||||
|
|
||||||
<!--LANG:zh-CN-->
|
<!--LANG:zh-CN-->
|
||||||
v1.7.0-rc.3 更新内容
|
Cherry Studio 1.7.1:开启智能新纪元
|
||||||
|
|
||||||
✨ 新功能:
|
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
|
||||||
- 提供商:新增 Silicon 提供商对 Anthropic API 的兼容性支持
|
|
||||||
- 提供商:AIHubMix 支持 nano banana
|
|
||||||
|
|
||||||
🐛 问题修复:
|
多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
|
||||||
- 国际化:清理翻译标签和未翻译字符串
|
|
||||||
- 提供商:修复 Silicon 提供商代码列表
|
|
||||||
- 提供商:修复 Poe API 对 GPT-5 和推理模型的推理参数
|
|
||||||
- 提供商:修复 Anthropic API 端点重复 /v1 问题
|
|
||||||
- 提供商:修复 Azure 提供商在 AI SDK 集成中的处理
|
|
||||||
- 模型:Claude Opus 4.5 添加到 THINKING_TOKEN_MAP
|
|
||||||
- 模型:改进 Gemini 推理和消息处理
|
|
||||||
- 模型:修复 Gemini 模型自定义参数
|
|
||||||
- 模型:修复 qwen-mt-flash text delta 支持
|
|
||||||
- 模型:修复 Groq verbosity 设置
|
|
||||||
- 界面:修复配额显示和配额提示
|
|
||||||
- 界面:修复 Web 搜索按钮条件
|
|
||||||
- 设置:修复 updateAssistantPreset reducer 正确更新 preset
|
|
||||||
- 设置:尊重 enableMaxTokens 设置
|
|
||||||
- SDK:修复 AI SDK 中 header 合并逻辑
|
|
||||||
|
|
||||||
⚡ 改进:
|
这是我们一直在构建的未来。而这,仅仅是开始。
|
||||||
- SDK:升级 @anthropic-ai/claude-agent-sdk 到 0.1.53
|
|
||||||
|
🤖 认识 Agent
|
||||||
|
想象一位永不疲倦的得力伙伴。给 Agent 一个目标——撰写报告、分析数据、重构代码——然后看它工作。它会推理问题、拆解步骤、调用工具,并在情况变化时灵活应对。
|
||||||
|
|
||||||
|
- **思考 → 规划 → 行动**:从目标到执行,全程自主
|
||||||
|
- **深度推理**:多轮思考,解决真实问题
|
||||||
|
- **工具大师**:文件操作、网络搜索、代码执行,样样精通
|
||||||
|
- **技能插件**:自定义命令,无限扩展
|
||||||
|
- **你掌控全局**:敏感操作,实时审批
|
||||||
|
- **完全透明**:每一步思考,每一个决策,清晰可见
|
||||||
|
|
||||||
|
🌐 生态持续壮大
|
||||||
|
- **新增服务商**:Hugging Face、Mistral、Perplexity、SophNet、AI Gateway、Cerebras AI
|
||||||
|
- **新增模型**:Gemini 3、Gemini 3 Pro(支持图像预览)、GPT-5.1、Claude Opus 4.5
|
||||||
|
- **MCP 集成**:百炼、魔搭、Higress、MCP.so、TokenFlux 等平台
|
||||||
|
|
||||||
|
📚 更智能的知识库
|
||||||
|
- **OpenMinerU**:本地自部署文档处理
|
||||||
|
- **全文搜索**:笔记内容一搜即达
|
||||||
|
- **增强工具选择**:更智能的配置,更好的 AI 协助
|
||||||
|
|
||||||
|
📝 笔记,焕然一新
|
||||||
|
- 全文搜索,结果高亮
|
||||||
|
- AI 智能重命名
|
||||||
|
- 导出为图片
|
||||||
|
- 表格自动换行
|
||||||
|
|
||||||
|
🖼️ 图像与 OCR
|
||||||
|
- Intel OVMS 绘图能力
|
||||||
|
- Intel OpenVINO NPU 加速 OCR
|
||||||
|
|
||||||
|
🌍 支持 10+ 种语言
|
||||||
|
- 新增德语支持
|
||||||
|
- 全面增强国际化
|
||||||
|
|
||||||
|
⚡ 更快、更精致
|
||||||
|
- 升级 Electron 38
|
||||||
|
- 新的 MCP 管理界面
|
||||||
|
- 数十处 UI 细节打磨
|
||||||
|
|
||||||
|
❤️ 完全开源
|
||||||
|
商用限制已移除。Cherry Studio 现遵循标准 AGPL v3 协议——任意规模团队均可自由使用。
|
||||||
|
|
||||||
|
Agent 纪元已至。期待你的创造。
|
||||||
<!--LANG:END-->
|
<!--LANG:END-->
|
||||||
|
|||||||
@@ -58,6 +58,7 @@ export default defineConfig([
|
|||||||
'dist/**',
|
'dist/**',
|
||||||
'out/**',
|
'out/**',
|
||||||
'local/**',
|
'local/**',
|
||||||
|
'tests/**',
|
||||||
'.yarn/**',
|
'.yarn/**',
|
||||||
'.gitignore',
|
'.gitignore',
|
||||||
'scripts/cloudflare-worker.js',
|
'scripts/cloudflare-worker.js',
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "CherryStudio",
|
"name": "CherryStudio",
|
||||||
"version": "1.7.0-rc.3",
|
"version": "1.7.1",
|
||||||
"private": true,
|
"private": true,
|
||||||
"description": "A powerful AI assistant for producer.",
|
"description": "A powerful AI assistant for producer.",
|
||||||
"main": "./out/main/index.js",
|
"main": "./out/main/index.js",
|
||||||
@@ -62,6 +62,7 @@
|
|||||||
"test": "vitest run --silent",
|
"test": "vitest run --silent",
|
||||||
"test:main": "vitest run --project main",
|
"test:main": "vitest run --project main",
|
||||||
"test:renderer": "vitest run --project renderer",
|
"test:renderer": "vitest run --project renderer",
|
||||||
|
"test:aicore": "vitest run --project aiCore",
|
||||||
"test:update": "yarn test:renderer --update",
|
"test:update": "yarn test:renderer --update",
|
||||||
"test:coverage": "vitest run --coverage --silent",
|
"test:coverage": "vitest run --coverage --silent",
|
||||||
"test:ui": "vitest --ui",
|
"test:ui": "vitest --ui",
|
||||||
@@ -164,7 +165,7 @@
|
|||||||
"@modelcontextprotocol/sdk": "^1.17.5",
|
"@modelcontextprotocol/sdk": "^1.17.5",
|
||||||
"@mozilla/readability": "^0.6.0",
|
"@mozilla/readability": "^0.6.0",
|
||||||
"@notionhq/client": "^2.2.15",
|
"@notionhq/client": "^2.2.15",
|
||||||
"@openrouter/ai-sdk-provider": "^1.2.5",
|
"@openrouter/ai-sdk-provider": "^1.2.8",
|
||||||
"@opentelemetry/api": "^1.9.0",
|
"@opentelemetry/api": "^1.9.0",
|
||||||
"@opentelemetry/core": "2.0.0",
|
"@opentelemetry/core": "2.0.0",
|
||||||
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
|
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
|
||||||
@@ -172,7 +173,7 @@
|
|||||||
"@opentelemetry/sdk-trace-node": "^2.0.0",
|
"@opentelemetry/sdk-trace-node": "^2.0.0",
|
||||||
"@opentelemetry/sdk-trace-web": "^2.0.0",
|
"@opentelemetry/sdk-trace-web": "^2.0.0",
|
||||||
"@opeoginni/github-copilot-openai-compatible": "^0.1.21",
|
"@opeoginni/github-copilot-openai-compatible": "^0.1.21",
|
||||||
"@playwright/test": "^1.52.0",
|
"@playwright/test": "^1.55.1",
|
||||||
"@radix-ui/react-context-menu": "^2.2.16",
|
"@radix-ui/react-context-menu": "^2.2.16",
|
||||||
"@reduxjs/toolkit": "^2.2.5",
|
"@reduxjs/toolkit": "^2.2.5",
|
||||||
"@shikijs/markdown-it": "^3.12.0",
|
"@shikijs/markdown-it": "^3.12.0",
|
||||||
@@ -321,7 +322,6 @@
|
|||||||
"p-queue": "^8.1.0",
|
"p-queue": "^8.1.0",
|
||||||
"pdf-lib": "^1.17.1",
|
"pdf-lib": "^1.17.1",
|
||||||
"pdf-parse": "^1.1.1",
|
"pdf-parse": "^1.1.1",
|
||||||
"playwright": "^1.55.1",
|
|
||||||
"proxy-agent": "^6.5.0",
|
"proxy-agent": "^6.5.0",
|
||||||
"react": "^19.2.0",
|
"react": "^19.2.0",
|
||||||
"react-dom": "^19.2.0",
|
"react-dom": "^19.2.0",
|
||||||
|
|||||||
@@ -69,6 +69,7 @@ export interface CherryInProviderSettings {
|
|||||||
headers?: HeadersInput
|
headers?: HeadersInput
|
||||||
/**
|
/**
|
||||||
* Optional endpoint type to distinguish different endpoint behaviors.
|
* Optional endpoint type to distinguish different endpoint behaviors.
|
||||||
|
* "image-generation" is also openai endpoint, but specifically for image generation.
|
||||||
*/
|
*/
|
||||||
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
|
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,12 +3,13 @@
|
|||||||
* Provides realistic mock responses for all provider types
|
* Provides realistic mock responses for all provider types
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { jsonSchema, type ModelMessage, type Tool } from 'ai'
|
import type { ModelMessage, Tool } from 'ai'
|
||||||
|
import { jsonSchema } from 'ai'
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Standard test messages for all scenarios
|
* Standard test messages for all scenarios
|
||||||
*/
|
*/
|
||||||
export const testMessages = {
|
export const testMessages: Record<string, ModelMessage[]> = {
|
||||||
simple: [{ role: 'user' as const, content: 'Hello, how are you?' }],
|
simple: [{ role: 'user' as const, content: 'Hello, how are you?' }],
|
||||||
|
|
||||||
conversation: [
|
conversation: [
|
||||||
@@ -45,7 +46,7 @@ export const testMessages = {
|
|||||||
{ role: 'assistant' as const, content: '15 * 23 = 345' },
|
{ role: 'assistant' as const, content: '15 * 23 = 345' },
|
||||||
{ role: 'user' as const, content: 'Now divide that by 5' }
|
{ role: 'user' as const, content: 'Now divide that by 5' }
|
||||||
]
|
]
|
||||||
} satisfies Record<string, ModelMessage[]>
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Standard test tools for tool calling scenarios
|
* Standard test tools for tool calling scenarios
|
||||||
@@ -138,68 +139,17 @@ export const testTools: Record<string, Tool> = {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Mock streaming chunks for different providers
|
|
||||||
*/
|
|
||||||
export const mockStreamingChunks = {
|
|
||||||
text: [
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'Hello' },
|
|
||||||
{ type: 'text-delta' as const, textDelta: ', ' },
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'this ' },
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'is ' },
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'a ' },
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'test.' }
|
|
||||||
],
|
|
||||||
|
|
||||||
withToolCall: [
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'Let me check the weather for you.' },
|
|
||||||
{
|
|
||||||
type: 'tool-call-delta' as const,
|
|
||||||
toolCallType: 'function' as const,
|
|
||||||
toolCallId: 'call_123',
|
|
||||||
toolName: 'getWeather',
|
|
||||||
argsTextDelta: '{"location":'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
type: 'tool-call-delta' as const,
|
|
||||||
toolCallType: 'function' as const,
|
|
||||||
toolCallId: 'call_123',
|
|
||||||
toolName: 'getWeather',
|
|
||||||
argsTextDelta: ' "San Francisco, CA"}'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
type: 'tool-call' as const,
|
|
||||||
toolCallType: 'function' as const,
|
|
||||||
toolCallId: 'call_123',
|
|
||||||
toolName: 'getWeather',
|
|
||||||
args: { location: 'San Francisco, CA' }
|
|
||||||
}
|
|
||||||
],
|
|
||||||
|
|
||||||
withFinish: [
|
|
||||||
{ type: 'text-delta' as const, textDelta: 'Complete response.' },
|
|
||||||
{
|
|
||||||
type: 'finish' as const,
|
|
||||||
finishReason: 'stop' as const,
|
|
||||||
usage: {
|
|
||||||
promptTokens: 10,
|
|
||||||
completionTokens: 5,
|
|
||||||
totalTokens: 15
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Mock complete responses for non-streaming scenarios
|
* Mock complete responses for non-streaming scenarios
|
||||||
|
* Note: AI SDK v5 uses inputTokens/outputTokens instead of promptTokens/completionTokens
|
||||||
*/
|
*/
|
||||||
export const mockCompleteResponses = {
|
export const mockCompleteResponses = {
|
||||||
simple: {
|
simple: {
|
||||||
text: 'This is a simple response.',
|
text: 'This is a simple response.',
|
||||||
finishReason: 'stop' as const,
|
finishReason: 'stop' as const,
|
||||||
usage: {
|
usage: {
|
||||||
promptTokens: 15,
|
inputTokens: 15,
|
||||||
completionTokens: 8,
|
outputTokens: 8,
|
||||||
totalTokens: 23
|
totalTokens: 23
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -215,8 +165,8 @@ export const mockCompleteResponses = {
|
|||||||
],
|
],
|
||||||
finishReason: 'tool-calls' as const,
|
finishReason: 'tool-calls' as const,
|
||||||
usage: {
|
usage: {
|
||||||
promptTokens: 25,
|
inputTokens: 25,
|
||||||
completionTokens: 12,
|
outputTokens: 12,
|
||||||
totalTokens: 37
|
totalTokens: 37
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -225,14 +175,15 @@ export const mockCompleteResponses = {
|
|||||||
text: 'Response with warnings.',
|
text: 'Response with warnings.',
|
||||||
finishReason: 'stop' as const,
|
finishReason: 'stop' as const,
|
||||||
usage: {
|
usage: {
|
||||||
promptTokens: 10,
|
inputTokens: 10,
|
||||||
completionTokens: 5,
|
outputTokens: 5,
|
||||||
totalTokens: 15
|
totalTokens: 15
|
||||||
},
|
},
|
||||||
warnings: [
|
warnings: [
|
||||||
{
|
{
|
||||||
type: 'unsupported-setting' as const,
|
type: 'unsupported-setting' as const,
|
||||||
message: 'Temperature parameter not supported for this model'
|
setting: 'temperature',
|
||||||
|
details: 'Temperature parameter not supported for this model'
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -285,47 +236,3 @@ export const mockImageResponses = {
|
|||||||
warnings: []
|
warnings: []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Mock error responses
|
|
||||||
*/
|
|
||||||
export const mockErrors = {
|
|
||||||
invalidApiKey: {
|
|
||||||
name: 'APIError',
|
|
||||||
message: 'Invalid API key provided',
|
|
||||||
statusCode: 401
|
|
||||||
},
|
|
||||||
|
|
||||||
rateLimitExceeded: {
|
|
||||||
name: 'RateLimitError',
|
|
||||||
message: 'Rate limit exceeded. Please try again later.',
|
|
||||||
statusCode: 429,
|
|
||||||
headers: {
|
|
||||||
'retry-after': '60'
|
|
||||||
}
|
|
||||||
},
|
|
||||||
|
|
||||||
modelNotFound: {
|
|
||||||
name: 'ModelNotFoundError',
|
|
||||||
message: 'The requested model was not found',
|
|
||||||
statusCode: 404
|
|
||||||
},
|
|
||||||
|
|
||||||
contextLengthExceeded: {
|
|
||||||
name: 'ContextLengthError',
|
|
||||||
message: "This model's maximum context length is 4096 tokens",
|
|
||||||
statusCode: 400
|
|
||||||
},
|
|
||||||
|
|
||||||
timeout: {
|
|
||||||
name: 'TimeoutError',
|
|
||||||
message: 'Request timed out after 30000ms',
|
|
||||||
code: 'ETIMEDOUT'
|
|
||||||
},
|
|
||||||
|
|
||||||
networkError: {
|
|
||||||
name: 'NetworkError',
|
|
||||||
message: 'Network connection failed',
|
|
||||||
code: 'ECONNREFUSED'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
35
packages/aiCore/src/__tests__/mocks/ai-sdk-provider.ts
Normal file
35
packages/aiCore/src/__tests__/mocks/ai-sdk-provider.ts
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
/**
|
||||||
|
* Mock for @cherrystudio/ai-sdk-provider
|
||||||
|
* This mock is used in tests to avoid importing the actual package
|
||||||
|
*/
|
||||||
|
|
||||||
|
export type CherryInProviderSettings = {
|
||||||
|
apiKey?: string
|
||||||
|
baseURL?: string
|
||||||
|
}
|
||||||
|
|
||||||
|
// oxlint-disable-next-line no-unused-vars
|
||||||
|
export const createCherryIn = (_options?: CherryInProviderSettings) => ({
|
||||||
|
// oxlint-disable-next-line no-unused-vars
|
||||||
|
languageModel: (_modelId: string) => ({
|
||||||
|
specificationVersion: 'v1',
|
||||||
|
provider: 'cherryin',
|
||||||
|
modelId: 'mock-model',
|
||||||
|
doGenerate: async () => ({ text: 'mock response' }),
|
||||||
|
doStream: async () => ({ stream: (async function* () {})() })
|
||||||
|
}),
|
||||||
|
// oxlint-disable-next-line no-unused-vars
|
||||||
|
chat: (_modelId: string) => ({
|
||||||
|
specificationVersion: 'v1',
|
||||||
|
provider: 'cherryin-chat',
|
||||||
|
modelId: 'mock-model',
|
||||||
|
doGenerate: async () => ({ text: 'mock response' }),
|
||||||
|
doStream: async () => ({ stream: (async function* () {})() })
|
||||||
|
}),
|
||||||
|
// oxlint-disable-next-line no-unused-vars
|
||||||
|
textEmbeddingModel: (_modelId: string) => ({
|
||||||
|
specificationVersion: 'v1',
|
||||||
|
provider: 'cherryin',
|
||||||
|
modelId: 'mock-embedding-model'
|
||||||
|
})
|
||||||
|
})
|
||||||
9
packages/aiCore/src/__tests__/setup.ts
Normal file
9
packages/aiCore/src/__tests__/setup.ts
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
/**
|
||||||
|
* Vitest Setup File
|
||||||
|
* Global test configuration and mocks for @cherrystudio/ai-core package
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Mock Vite SSR helper to avoid Node environment errors
|
||||||
|
;(globalThis as any).__vite_ssr_exportName__ = (_name: string, value: any) => value
|
||||||
|
|
||||||
|
// Note: @cherrystudio/ai-sdk-provider is mocked via alias in vitest.config.ts
|
||||||
109
packages/aiCore/src/core/options/__tests__/factory.test.ts
Normal file
109
packages/aiCore/src/core/options/__tests__/factory.test.ts
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
import { describe, expect, it } from 'vitest'
|
||||||
|
|
||||||
|
import { createOpenAIOptions, createOpenRouterOptions, mergeProviderOptions } from '../factory'
|
||||||
|
|
||||||
|
describe('mergeProviderOptions', () => {
|
||||||
|
it('deep merges provider options for the same provider', () => {
|
||||||
|
const reasoningOptions = createOpenRouterOptions({
|
||||||
|
reasoning: {
|
||||||
|
enabled: true,
|
||||||
|
effort: 'medium'
|
||||||
|
}
|
||||||
|
})
|
||||||
|
const webSearchOptions = createOpenRouterOptions({
|
||||||
|
plugins: [{ id: 'web', max_results: 5 }]
|
||||||
|
})
|
||||||
|
|
||||||
|
const merged = mergeProviderOptions(reasoningOptions, webSearchOptions)
|
||||||
|
|
||||||
|
expect(merged.openrouter).toEqual({
|
||||||
|
reasoning: {
|
||||||
|
enabled: true,
|
||||||
|
effort: 'medium'
|
||||||
|
},
|
||||||
|
plugins: [{ id: 'web', max_results: 5 }]
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('preserves options from other providers while merging', () => {
|
||||||
|
const openRouter = createOpenRouterOptions({
|
||||||
|
reasoning: { enabled: true }
|
||||||
|
})
|
||||||
|
const openAI = createOpenAIOptions({
|
||||||
|
reasoningEffort: 'low'
|
||||||
|
})
|
||||||
|
const merged = mergeProviderOptions(openRouter, openAI)
|
||||||
|
|
||||||
|
expect(merged.openrouter).toEqual({ reasoning: { enabled: true } })
|
||||||
|
expect(merged.openai).toEqual({ reasoningEffort: 'low' })
|
||||||
|
})
|
||||||
|
|
||||||
|
it('overwrites primitive values with later values', () => {
|
||||||
|
const first = createOpenAIOptions({
|
||||||
|
reasoningEffort: 'low',
|
||||||
|
user: 'user-123'
|
||||||
|
})
|
||||||
|
const second = createOpenAIOptions({
|
||||||
|
reasoningEffort: 'high',
|
||||||
|
maxToolCalls: 5
|
||||||
|
})
|
||||||
|
|
||||||
|
const merged = mergeProviderOptions(first, second)
|
||||||
|
|
||||||
|
expect(merged.openai).toEqual({
|
||||||
|
reasoningEffort: 'high', // overwritten by second
|
||||||
|
user: 'user-123', // preserved from first
|
||||||
|
maxToolCalls: 5 // added from second
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('overwrites arrays with later values instead of merging', () => {
|
||||||
|
const first = createOpenRouterOptions({
|
||||||
|
models: ['gpt-4', 'gpt-3.5-turbo']
|
||||||
|
})
|
||||||
|
const second = createOpenRouterOptions({
|
||||||
|
models: ['claude-3-opus', 'claude-3-sonnet']
|
||||||
|
})
|
||||||
|
|
||||||
|
const merged = mergeProviderOptions(first, second)
|
||||||
|
|
||||||
|
// Array is completely replaced, not merged
|
||||||
|
expect(merged.openrouter?.models).toEqual(['claude-3-opus', 'claude-3-sonnet'])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('deeply merges nested objects while overwriting primitives', () => {
|
||||||
|
const first = createOpenRouterOptions({
|
||||||
|
reasoning: {
|
||||||
|
enabled: true,
|
||||||
|
effort: 'low'
|
||||||
|
},
|
||||||
|
user: 'user-123'
|
||||||
|
})
|
||||||
|
const second = createOpenRouterOptions({
|
||||||
|
reasoning: {
|
||||||
|
effort: 'high',
|
||||||
|
max_tokens: 500
|
||||||
|
},
|
||||||
|
user: 'user-456'
|
||||||
|
})
|
||||||
|
|
||||||
|
const merged = mergeProviderOptions(first, second)
|
||||||
|
|
||||||
|
expect(merged.openrouter).toEqual({
|
||||||
|
reasoning: {
|
||||||
|
enabled: true, // preserved from first
|
||||||
|
effort: 'high', // overwritten by second
|
||||||
|
max_tokens: 500 // added from second
|
||||||
|
},
|
||||||
|
user: 'user-456' // overwritten by second
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('replaces arrays instead of merging them', () => {
|
||||||
|
const first = createOpenRouterOptions({ plugins: [{ id: 'old' }] })
|
||||||
|
const second = createOpenRouterOptions({ plugins: [{ id: 'new' }] })
|
||||||
|
const merged = mergeProviderOptions(first, second)
|
||||||
|
// @ts-expect-error type-check for openrouter options is skipped. see function signature of createOpenRouterOptions
|
||||||
|
expect(merged.openrouter?.plugins).toEqual([{ id: 'new' }])
|
||||||
|
})
|
||||||
|
})
|
||||||
@@ -26,13 +26,65 @@ export function createGenericProviderOptions<T extends string>(
|
|||||||
return { [provider]: options } as Record<T, Record<string, any>>
|
return { [provider]: options } as Record<T, Record<string, any>>
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PlainObject = Record<string, any>
|
||||||
|
|
||||||
|
const isPlainObject = (value: unknown): value is PlainObject => {
|
||||||
|
return typeof value === 'object' && value !== null && !Array.isArray(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
function deepMergeObjects<T extends PlainObject>(target: T, source: PlainObject): T {
|
||||||
|
const result: PlainObject = { ...target }
|
||||||
|
Object.entries(source).forEach(([key, value]) => {
|
||||||
|
if (isPlainObject(value) && isPlainObject(result[key])) {
|
||||||
|
result[key] = deepMergeObjects(result[key], value)
|
||||||
|
} else {
|
||||||
|
result[key] = value
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return result as T
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 合并多个供应商的options
|
* Deep-merge multiple provider-specific options.
|
||||||
* @param optionsMap 包含多个供应商选项的对象
|
* Nested objects are recursively merged; primitive values are overwritten.
|
||||||
* @returns 合并后的TypedProviderOptions
|
*
|
||||||
|
* When the same key appears in multiple options:
|
||||||
|
* - If both values are plain objects: they are deeply merged (recursive merge)
|
||||||
|
* - If values are primitives/arrays: the later value overwrites the earlier one
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* mergeProviderOptions(
|
||||||
|
* { openrouter: { reasoning: { enabled: true, effort: 'low' }, user: 'user-123' } },
|
||||||
|
* { openrouter: { reasoning: { effort: 'high', max_tokens: 500 }, models: ['gpt-4'] } }
|
||||||
|
* )
|
||||||
|
* // Result: {
|
||||||
|
* // openrouter: {
|
||||||
|
* // reasoning: { enabled: true, effort: 'high', max_tokens: 500 },
|
||||||
|
* // user: 'user-123',
|
||||||
|
* // models: ['gpt-4']
|
||||||
|
* // }
|
||||||
|
* // }
|
||||||
|
*
|
||||||
|
* @param optionsMap Objects containing options for multiple providers
|
||||||
|
* @returns Fully merged TypedProviderOptions
|
||||||
*/
|
*/
|
||||||
export function mergeProviderOptions(...optionsMap: Partial<TypedProviderOptions>[]): TypedProviderOptions {
|
export function mergeProviderOptions(...optionsMap: Partial<TypedProviderOptions>[]): TypedProviderOptions {
|
||||||
return Object.assign({}, ...optionsMap)
|
return optionsMap.reduce<TypedProviderOptions>((acc, options) => {
|
||||||
|
if (!options) {
|
||||||
|
return acc
|
||||||
|
}
|
||||||
|
Object.entries(options).forEach(([providerId, providerOptions]) => {
|
||||||
|
if (!providerOptions) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if (acc[providerId]) {
|
||||||
|
acc[providerId] = deepMergeObjects(acc[providerId] as PlainObject, providerOptions as PlainObject)
|
||||||
|
} else {
|
||||||
|
acc[providerId] = providerOptions as any
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return acc
|
||||||
|
}, {} as TypedProviderOptions)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -19,15 +19,20 @@ describe('Provider Schemas', () => {
|
|||||||
expect(Array.isArray(baseProviders)).toBe(true)
|
expect(Array.isArray(baseProviders)).toBe(true)
|
||||||
expect(baseProviders.length).toBeGreaterThan(0)
|
expect(baseProviders.length).toBeGreaterThan(0)
|
||||||
|
|
||||||
|
// These are the actual base providers defined in schemas.ts
|
||||||
const expectedIds = [
|
const expectedIds = [
|
||||||
'openai',
|
'openai',
|
||||||
'openai-responses',
|
'openai-chat',
|
||||||
'openai-compatible',
|
'openai-compatible',
|
||||||
'anthropic',
|
'anthropic',
|
||||||
'google',
|
'google',
|
||||||
'xai',
|
'xai',
|
||||||
'azure',
|
'azure',
|
||||||
'deepseek'
|
'azure-responses',
|
||||||
|
'deepseek',
|
||||||
|
'openrouter',
|
||||||
|
'cherryin',
|
||||||
|
'cherryin-chat'
|
||||||
]
|
]
|
||||||
const actualIds = baseProviders.map((p) => p.id)
|
const actualIds = baseProviders.map((p) => p.id)
|
||||||
expectedIds.forEach((id) => {
|
expectedIds.forEach((id) => {
|
||||||
|
|||||||
@@ -232,11 +232,13 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
|
|
||||||
expect(pluginCallOrder).toEqual(['onRequestStart', 'transformParams', 'transformResult', 'onRequestEnd'])
|
expect(pluginCallOrder).toEqual(['onRequestStart', 'transformParams', 'transformResult', 'onRequestEnd'])
|
||||||
|
|
||||||
|
// transformParams receives params without model (model is handled separately)
|
||||||
|
// and context with core fields + dynamic fields (requestId, startTime, etc.)
|
||||||
expect(testPlugin.transformParams).toHaveBeenCalledWith(
|
expect(testPlugin.transformParams).toHaveBeenCalledWith(
|
||||||
{ prompt: 'A test image' },
|
expect.objectContaining({ prompt: 'A test image' }),
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
providerId: 'openai',
|
providerId: 'openai',
|
||||||
modelId: 'dall-e-3'
|
model: 'dall-e-3'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -273,11 +275,12 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
|
|
||||||
await executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })
|
await executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })
|
||||||
|
|
||||||
|
// resolveModel receives model id and context with core fields
|
||||||
expect(modelResolutionPlugin.resolveModel).toHaveBeenCalledWith(
|
expect(modelResolutionPlugin.resolveModel).toHaveBeenCalledWith(
|
||||||
'dall-e-3',
|
'dall-e-3',
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
providerId: 'openai',
|
providerId: 'openai',
|
||||||
modelId: 'dall-e-3'
|
model: 'dall-e-3'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -339,12 +342,11 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
.generateImage({ model: 'invalid-model', prompt: 'A test image' })
|
.generateImage({ model: 'invalid-model', prompt: 'A test image' })
|
||||||
.catch((error) => error)
|
.catch((error) => error)
|
||||||
|
|
||||||
expect(thrownError).toBeInstanceOf(ImageGenerationError)
|
// Error is thrown from pluginEngine directly as ImageModelResolutionError
|
||||||
expect(thrownError.message).toContain('Failed to generate image:')
|
expect(thrownError).toBeInstanceOf(ImageModelResolutionError)
|
||||||
|
expect(thrownError.message).toContain('Failed to resolve image model: invalid-model')
|
||||||
expect(thrownError.providerId).toBe('openai')
|
expect(thrownError.providerId).toBe('openai')
|
||||||
expect(thrownError.modelId).toBe('invalid-model')
|
expect(thrownError.modelId).toBe('invalid-model')
|
||||||
expect(thrownError.cause).toBeInstanceOf(ImageModelResolutionError)
|
|
||||||
expect(thrownError.cause.message).toContain('Failed to resolve image model: invalid-model')
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should handle ImageModelResolutionError without provider', async () => {
|
it('should handle ImageModelResolutionError without provider', async () => {
|
||||||
@@ -362,8 +364,9 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
const apiError = new Error('API request failed')
|
const apiError = new Error('API request failed')
|
||||||
vi.mocked(aiGenerateImage).mockRejectedValue(apiError)
|
vi.mocked(aiGenerateImage).mockRejectedValue(apiError)
|
||||||
|
|
||||||
|
// Error propagates directly from pluginEngine without wrapping
|
||||||
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
||||||
'Failed to generate image:'
|
'API request failed'
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -376,8 +379,9 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
vi.mocked(aiGenerateImage).mockRejectedValue(noImageError)
|
vi.mocked(aiGenerateImage).mockRejectedValue(noImageError)
|
||||||
vi.mocked(NoImageGeneratedError.isInstance).mockReturnValue(true)
|
vi.mocked(NoImageGeneratedError.isInstance).mockReturnValue(true)
|
||||||
|
|
||||||
|
// Error propagates directly from pluginEngine
|
||||||
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
||||||
'Failed to generate image:'
|
'No image generated'
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -398,15 +402,17 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
[errorPlugin]
|
[errorPlugin]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Error propagates directly from pluginEngine
|
||||||
await expect(executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
await expect(executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
|
||||||
'Failed to generate image:'
|
'Generation failed'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// onError receives the original error and context with core fields
|
||||||
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
||||||
error,
|
error,
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
providerId: 'openai',
|
providerId: 'openai',
|
||||||
modelId: 'dall-e-3'
|
model: 'dall-e-3'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
@@ -419,9 +425,10 @@ describe('RuntimeExecutor.generateImage', () => {
|
|||||||
const abortController = new AbortController()
|
const abortController = new AbortController()
|
||||||
setTimeout(() => abortController.abort(), 10)
|
setTimeout(() => abortController.abort(), 10)
|
||||||
|
|
||||||
|
// Error propagates directly from pluginEngine
|
||||||
await expect(
|
await expect(
|
||||||
executor.generateImage({ model: 'dall-e-3', prompt: 'A test image', abortSignal: abortController.signal })
|
executor.generateImage({ model: 'dall-e-3', prompt: 'A test image', abortSignal: abortController.signal })
|
||||||
).rejects.toThrow('Failed to generate image:')
|
).rejects.toThrow('Operation was aborted')
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -17,10 +17,14 @@ import type { AiPlugin } from '../../plugins'
|
|||||||
import { globalRegistryManagement } from '../../providers/RegistryManagement'
|
import { globalRegistryManagement } from '../../providers/RegistryManagement'
|
||||||
import { RuntimeExecutor } from '../executor'
|
import { RuntimeExecutor } from '../executor'
|
||||||
|
|
||||||
// Mock AI SDK
|
// Mock AI SDK - use importOriginal to keep jsonSchema and other non-mocked exports
|
||||||
vi.mock('ai', () => ({
|
vi.mock('ai', async (importOriginal) => {
|
||||||
generateText: vi.fn()
|
const actual = (await importOriginal()) as Record<string, unknown>
|
||||||
}))
|
return {
|
||||||
|
...actual,
|
||||||
|
generateText: vi.fn()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
vi.mock('../../providers/RegistryManagement', () => ({
|
vi.mock('../../providers/RegistryManagement', () => ({
|
||||||
globalRegistryManagement: {
|
globalRegistryManagement: {
|
||||||
@@ -409,11 +413,12 @@ describe('RuntimeExecutor.generateText', () => {
|
|||||||
})
|
})
|
||||||
).rejects.toThrow('Generation failed')
|
).rejects.toThrow('Generation failed')
|
||||||
|
|
||||||
|
// onError receives the original error and context with core fields
|
||||||
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
||||||
error,
|
error,
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
providerId: 'openai',
|
providerId: 'openai',
|
||||||
modelId: 'gpt-4'
|
model: 'gpt-4'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -11,10 +11,14 @@ import type { AiPlugin } from '../../plugins'
|
|||||||
import { globalRegistryManagement } from '../../providers/RegistryManagement'
|
import { globalRegistryManagement } from '../../providers/RegistryManagement'
|
||||||
import { RuntimeExecutor } from '../executor'
|
import { RuntimeExecutor } from '../executor'
|
||||||
|
|
||||||
// Mock AI SDK
|
// Mock AI SDK - use importOriginal to keep jsonSchema and other non-mocked exports
|
||||||
vi.mock('ai', () => ({
|
vi.mock('ai', async (importOriginal) => {
|
||||||
streamText: vi.fn()
|
const actual = (await importOriginal()) as Record<string, unknown>
|
||||||
}))
|
return {
|
||||||
|
...actual,
|
||||||
|
streamText: vi.fn()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
vi.mock('../../providers/RegistryManagement', () => ({
|
vi.mock('../../providers/RegistryManagement', () => ({
|
||||||
globalRegistryManagement: {
|
globalRegistryManagement: {
|
||||||
@@ -153,7 +157,7 @@ describe('RuntimeExecutor.streamText', () => {
|
|||||||
describe('Max Tokens Parameter', () => {
|
describe('Max Tokens Parameter', () => {
|
||||||
const maxTokensValues = [10, 50, 100, 500, 1000, 2000, 4000]
|
const maxTokensValues = [10, 50, 100, 500, 1000, 2000, 4000]
|
||||||
|
|
||||||
it.each(maxTokensValues)('should support maxTokens=%s', async (maxTokens) => {
|
it.each(maxTokensValues)('should support maxOutputTokens=%s', async (maxOutputTokens) => {
|
||||||
const mockStream = {
|
const mockStream = {
|
||||||
textStream: (async function* () {
|
textStream: (async function* () {
|
||||||
yield 'Response'
|
yield 'Response'
|
||||||
@@ -168,12 +172,13 @@ describe('RuntimeExecutor.streamText', () => {
|
|||||||
await executor.streamText({
|
await executor.streamText({
|
||||||
model: 'gpt-4',
|
model: 'gpt-4',
|
||||||
messages: testMessages.simple,
|
messages: testMessages.simple,
|
||||||
maxOutputTokens: maxTokens
|
maxOutputTokens
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Parameters are passed through without transformation
|
||||||
expect(streamText).toHaveBeenCalledWith(
|
expect(streamText).toHaveBeenCalledWith(
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
maxTokens
|
maxOutputTokens
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
@@ -513,11 +518,12 @@ describe('RuntimeExecutor.streamText', () => {
|
|||||||
})
|
})
|
||||||
).rejects.toThrow('Stream error')
|
).rejects.toThrow('Stream error')
|
||||||
|
|
||||||
|
// onError receives the original error and context with core fields
|
||||||
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
expect(errorPlugin.onError).toHaveBeenCalledWith(
|
||||||
error,
|
error,
|
||||||
expect.objectContaining({
|
expect.objectContaining({
|
||||||
providerId: 'openai',
|
providerId: 'openai',
|
||||||
modelId: 'gpt-4'
|
model: 'gpt-4'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -1,12 +1,20 @@
|
|||||||
|
import path from 'node:path'
|
||||||
|
import { fileURLToPath } from 'node:url'
|
||||||
|
|
||||||
import { defineConfig } from 'vitest/config'
|
import { defineConfig } from 'vitest/config'
|
||||||
|
|
||||||
|
const __dirname = path.dirname(fileURLToPath(import.meta.url))
|
||||||
|
|
||||||
export default defineConfig({
|
export default defineConfig({
|
||||||
test: {
|
test: {
|
||||||
globals: true
|
globals: true,
|
||||||
|
setupFiles: [path.resolve(__dirname, './src/__tests__/setup.ts')]
|
||||||
},
|
},
|
||||||
resolve: {
|
resolve: {
|
||||||
alias: {
|
alias: {
|
||||||
'@': './src'
|
'@': path.resolve(__dirname, './src'),
|
||||||
|
// Mock external packages that may not be available in test environment
|
||||||
|
'@cherrystudio/ai-sdk-provider': path.resolve(__dirname, './src/__tests__/mocks/ai-sdk-provider.ts')
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
esbuild: {
|
esbuild: {
|
||||||
|
|||||||
@@ -1,42 +1,64 @@
|
|||||||
import { defineConfig, devices } from '@playwright/test'
|
import { defineConfig } from '@playwright/test'
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* See https://playwright.dev/docs/test-configuration.
|
* Playwright configuration for Electron e2e testing.
|
||||||
|
* See https://playwright.dev/docs/test-configuration
|
||||||
*/
|
*/
|
||||||
export default defineConfig({
|
export default defineConfig({
|
||||||
// Look for test files, relative to this configuration file.
|
// Look for test files in the specs directory
|
||||||
testDir: './tests/e2e',
|
testDir: './tests/e2e/specs',
|
||||||
/* Run tests in files in parallel */
|
|
||||||
fullyParallel: true,
|
|
||||||
/* Fail the build on CI if you accidentally left test.only in the source code. */
|
|
||||||
forbidOnly: !!process.env.CI,
|
|
||||||
/* Retry on CI only */
|
|
||||||
retries: process.env.CI ? 2 : 0,
|
|
||||||
/* Opt out of parallel tests on CI. */
|
|
||||||
workers: process.env.CI ? 1 : undefined,
|
|
||||||
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
|
|
||||||
reporter: 'html',
|
|
||||||
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
|
|
||||||
use: {
|
|
||||||
/* Base URL to use in actions like `await page.goto('/')`. */
|
|
||||||
// baseURL: 'http://localhost:3000',
|
|
||||||
|
|
||||||
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
|
// Global timeout for each test
|
||||||
trace: 'on-first-retry'
|
timeout: 60000,
|
||||||
|
|
||||||
|
// Assertion timeout
|
||||||
|
expect: {
|
||||||
|
timeout: 10000
|
||||||
},
|
},
|
||||||
|
|
||||||
/* Configure projects for major browsers */
|
// Electron apps should run tests sequentially to avoid conflicts
|
||||||
|
fullyParallel: false,
|
||||||
|
workers: 1,
|
||||||
|
|
||||||
|
// Fail the build on CI if you accidentally left test.only in the source code
|
||||||
|
forbidOnly: !!process.env.CI,
|
||||||
|
|
||||||
|
// Retry on CI only
|
||||||
|
retries: process.env.CI ? 2 : 0,
|
||||||
|
|
||||||
|
// Reporter configuration
|
||||||
|
reporter: [['html', { outputFolder: 'playwright-report' }], ['list']],
|
||||||
|
|
||||||
|
// Global setup and teardown
|
||||||
|
globalSetup: './tests/e2e/global-setup.ts',
|
||||||
|
globalTeardown: './tests/e2e/global-teardown.ts',
|
||||||
|
|
||||||
|
// Output directory for test artifacts
|
||||||
|
outputDir: './test-results',
|
||||||
|
|
||||||
|
// Shared settings for all tests
|
||||||
|
use: {
|
||||||
|
// Collect trace when retrying the failed test
|
||||||
|
trace: 'retain-on-failure',
|
||||||
|
|
||||||
|
// Take screenshot only on failure
|
||||||
|
screenshot: 'only-on-failure',
|
||||||
|
|
||||||
|
// Record video only on failure
|
||||||
|
video: 'retain-on-failure',
|
||||||
|
|
||||||
|
// Action timeout
|
||||||
|
actionTimeout: 15000,
|
||||||
|
|
||||||
|
// Navigation timeout
|
||||||
|
navigationTimeout: 30000
|
||||||
|
},
|
||||||
|
|
||||||
|
// Single project for Electron testing
|
||||||
projects: [
|
projects: [
|
||||||
{
|
{
|
||||||
name: 'chromium',
|
name: 'electron',
|
||||||
use: { ...devices['Desktop Chrome'] }
|
testMatch: '**/*.spec.ts'
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
/* Run your local dev server before starting the tests */
|
|
||||||
// webServer: {
|
|
||||||
// command: 'npm run start',
|
|
||||||
// url: 'http://localhost:3000',
|
|
||||||
// reuseExistingServer: !process.env.CI,
|
|
||||||
// },
|
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -548,6 +548,17 @@ class CodeToolsService {
|
|||||||
logger.debug(`Environment variables:`, Object.keys(env))
|
logger.debug(`Environment variables:`, Object.keys(env))
|
||||||
logger.debug(`Options:`, options)
|
logger.debug(`Options:`, options)
|
||||||
|
|
||||||
|
// Validate directory exists before proceeding
|
||||||
|
if (!directory || !fs.existsSync(directory)) {
|
||||||
|
const errorMessage = `Directory does not exist: ${directory}`
|
||||||
|
logger.error(errorMessage)
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
message: errorMessage,
|
||||||
|
command: ''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const packageName = await this.getPackageName(cliTool)
|
const packageName = await this.getPackageName(cliTool)
|
||||||
const bunPath = await this.getBunPath()
|
const bunPath = await this.getBunPath()
|
||||||
const executableName = await this.getCliExecutableName(cliTool)
|
const executableName = await this.getCliExecutableName(cliTool)
|
||||||
@@ -709,6 +720,7 @@ class CodeToolsService {
|
|||||||
// Build bat file content, including debug information
|
// Build bat file content, including debug information
|
||||||
const batContent = [
|
const batContent = [
|
||||||
'@echo off',
|
'@echo off',
|
||||||
|
'chcp 65001 >nul 2>&1', // Switch to UTF-8 code page for international path support
|
||||||
`title ${cliTool} - Cherry Studio`, // Set window title in bat file
|
`title ${cliTool} - Cherry Studio`, // Set window title in bat file
|
||||||
'echo ================================================',
|
'echo ================================================',
|
||||||
'echo Cherry Studio CLI Tool Launcher',
|
'echo Cherry Studio CLI Tool Launcher',
|
||||||
|
|||||||
@@ -620,7 +620,7 @@ class McpService {
|
|||||||
tools.map((tool: SDKTool) => {
|
tools.map((tool: SDKTool) => {
|
||||||
const serverTool: MCPTool = {
|
const serverTool: MCPTool = {
|
||||||
...tool,
|
...tool,
|
||||||
id: buildFunctionCallToolName(server.name, tool.name),
|
id: buildFunctionCallToolName(server.name, tool.name, server.id),
|
||||||
serverId: server.id,
|
serverId: server.id,
|
||||||
serverName: server.name,
|
serverName: server.name,
|
||||||
type: 'mcp'
|
type: 'mcp'
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
// src/main/services/agents/services/claudecode/index.ts
|
// src/main/services/agents/services/claudecode/index.ts
|
||||||
import { EventEmitter } from 'node:events'
|
import { EventEmitter } from 'node:events'
|
||||||
import { createRequire } from 'node:module'
|
import { createRequire } from 'node:module'
|
||||||
|
import path from 'node:path'
|
||||||
|
|
||||||
import type {
|
import type {
|
||||||
CanUseTool,
|
CanUseTool,
|
||||||
@@ -121,7 +122,11 @@ class ClaudeCodeService implements AgentServiceInterface {
|
|||||||
// TODO: support set small model in UI
|
// TODO: support set small model in UI
|
||||||
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
|
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
|
||||||
ELECTRON_RUN_AS_NODE: '1',
|
ELECTRON_RUN_AS_NODE: '1',
|
||||||
ELECTRON_NO_ATTACH_CONSOLE: '1'
|
ELECTRON_NO_ATTACH_CONSOLE: '1',
|
||||||
|
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
|
||||||
|
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
|
||||||
|
// This prevents the SDK from using the user's home directory which may have encoding problems
|
||||||
|
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
|
||||||
}
|
}
|
||||||
|
|
||||||
const errorChunks: string[] = []
|
const errorChunks: string[] = []
|
||||||
|
|||||||
196
src/main/utils/__tests__/mcp.test.ts
Normal file
196
src/main/utils/__tests__/mcp.test.ts
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
import { describe, expect, it } from 'vitest'
|
||||||
|
|
||||||
|
import { buildFunctionCallToolName } from '../mcp'
|
||||||
|
|
||||||
|
describe('buildFunctionCallToolName', () => {
|
||||||
|
describe('basic functionality', () => {
|
||||||
|
it('should combine server name and tool name', () => {
|
||||||
|
const result = buildFunctionCallToolName('github', 'search_issues')
|
||||||
|
expect(result).toContain('github')
|
||||||
|
expect(result).toContain('search')
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should sanitize names by replacing dashes with underscores', () => {
|
||||||
|
const result = buildFunctionCallToolName('my-server', 'my-tool')
|
||||||
|
// Input dashes are replaced, but the separator between server and tool is a dash
|
||||||
|
expect(result).toBe('my_serv-my_tool')
|
||||||
|
expect(result).toContain('_')
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should handle empty server names gracefully', () => {
|
||||||
|
const result = buildFunctionCallToolName('', 'tool')
|
||||||
|
expect(result).toBeTruthy()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('uniqueness with serverId', () => {
|
||||||
|
it('should generate different IDs for same server name but different serverIds', () => {
|
||||||
|
const serverId1 = 'server-id-123456'
|
||||||
|
const serverId2 = 'server-id-789012'
|
||||||
|
const serverName = 'github'
|
||||||
|
const toolName = 'search_repos'
|
||||||
|
|
||||||
|
const result1 = buildFunctionCallToolName(serverName, toolName, serverId1)
|
||||||
|
const result2 = buildFunctionCallToolName(serverName, toolName, serverId2)
|
||||||
|
|
||||||
|
expect(result1).not.toBe(result2)
|
||||||
|
expect(result1).toContain('123456')
|
||||||
|
expect(result2).toContain('789012')
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should generate same ID when serverId is not provided', () => {
|
||||||
|
const serverName = 'github'
|
||||||
|
const toolName = 'search_repos'
|
||||||
|
|
||||||
|
const result1 = buildFunctionCallToolName(serverName, toolName)
|
||||||
|
const result2 = buildFunctionCallToolName(serverName, toolName)
|
||||||
|
|
||||||
|
expect(result1).toBe(result2)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should include serverId suffix when provided', () => {
|
||||||
|
const serverId = 'abc123def456'
|
||||||
|
const result = buildFunctionCallToolName('server', 'tool', serverId)
|
||||||
|
|
||||||
|
// Should include last 6 chars of serverId
|
||||||
|
expect(result).toContain('ef456')
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('character sanitization', () => {
|
||||||
|
it('should replace invalid characters with underscores', () => {
|
||||||
|
const result = buildFunctionCallToolName('test@server', 'tool#name')
|
||||||
|
expect(result).not.toMatch(/[@#]/)
|
||||||
|
expect(result).toMatch(/^[a-zA-Z0-9_-]+$/)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should ensure name starts with a letter', () => {
|
||||||
|
const result = buildFunctionCallToolName('123server', '456tool')
|
||||||
|
expect(result).toMatch(/^[a-zA-Z]/)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should handle consecutive underscores/dashes', () => {
|
||||||
|
const result = buildFunctionCallToolName('my--server', 'my__tool')
|
||||||
|
expect(result).not.toMatch(/[_-]{2,}/)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('length constraints', () => {
|
||||||
|
it('should truncate names longer than 63 characters', () => {
|
||||||
|
const longServerName = 'a'.repeat(50)
|
||||||
|
const longToolName = 'b'.repeat(50)
|
||||||
|
const result = buildFunctionCallToolName(longServerName, longToolName, 'id123456')
|
||||||
|
|
||||||
|
expect(result.length).toBeLessThanOrEqual(63)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should not end with underscore or dash after truncation', () => {
|
||||||
|
+      const longServerName = 'a'.repeat(50)
+      const longToolName = 'b'.repeat(50)
+      const result = buildFunctionCallToolName(longServerName, longToolName, 'id123456')
+
+      expect(result).not.toMatch(/[_-]$/)
+    })
+
+    it('should preserve serverId suffix even with long server/tool names', () => {
+      const longServerName = 'a'.repeat(50)
+      const longToolName = 'b'.repeat(50)
+      const serverId = 'server-id-xyz789'
+
+      const result = buildFunctionCallToolName(longServerName, longToolName, serverId)
+
+      // The suffix should be preserved and not truncated
+      expect(result).toContain('xyz789')
+      expect(result.length).toBeLessThanOrEqual(63)
+    })
+
+    it('should ensure two long-named servers with different IDs produce different results', () => {
+      const longServerName = 'a'.repeat(50)
+      const longToolName = 'b'.repeat(50)
+      const serverId1 = 'server-id-abc123'
+      const serverId2 = 'server-id-def456'
+
+      const result1 = buildFunctionCallToolName(longServerName, longToolName, serverId1)
+      const result2 = buildFunctionCallToolName(longServerName, longToolName, serverId2)
+
+      // Both should be within limit
+      expect(result1.length).toBeLessThanOrEqual(63)
+      expect(result2.length).toBeLessThanOrEqual(63)
+
+      // They should be different due to preserved suffix
+      expect(result1).not.toBe(result2)
+    })
+  })
+
+  describe('edge cases with serverId', () => {
+    it('should handle serverId with only non-alphanumeric characters', () => {
+      const serverId = '------' // All dashes
+      const result = buildFunctionCallToolName('server', 'tool', serverId)
+
+      // Should still produce a valid unique suffix via fallback hash
+      expect(result).toBeTruthy()
+      expect(result.length).toBeLessThanOrEqual(63)
+      expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
+      // Should have a suffix (underscore followed by something)
+      expect(result).toMatch(/_[a-z0-9]+$/)
+    })
+
+    it('should produce different results for different non-alphanumeric serverIds', () => {
+      const serverId1 = '------'
+      const serverId2 = '!!!!!!'
+
+      const result1 = buildFunctionCallToolName('server', 'tool', serverId1)
+      const result2 = buildFunctionCallToolName('server', 'tool', serverId2)
+
+      // Should be different because the hash fallback produces different values
+      expect(result1).not.toBe(result2)
+    })
+
+    it('should handle empty string serverId differently from undefined', () => {
+      const resultWithEmpty = buildFunctionCallToolName('server', 'tool', '')
+      const resultWithUndefined = buildFunctionCallToolName('server', 'tool', undefined)
+
+      // Empty string is falsy, so both should behave the same (no suffix)
+      expect(resultWithEmpty).toBe(resultWithUndefined)
+    })
+
+    it('should handle serverId with mixed alphanumeric and special chars', () => {
+      const serverId = 'ab@#cd' // Mixed chars, last 6 chars contain some alphanumeric
+      const result = buildFunctionCallToolName('server', 'tool', serverId)
+
+      // Should extract alphanumeric chars: 'abcd' from 'ab@#cd'
+      expect(result).toContain('abcd')
+    })
+  })
+
+  describe('real-world scenarios', () => {
+    it('should handle GitHub MCP server instances correctly', () => {
+      const serverName = 'github'
+      const toolName = 'search_repositories'
+
+      const githubComId = 'server-github-com-abc123'
+      const gheId = 'server-ghe-internal-xyz789'
+
+      const tool1 = buildFunctionCallToolName(serverName, toolName, githubComId)
+      const tool2 = buildFunctionCallToolName(serverName, toolName, gheId)
+
+      // Should be different
+      expect(tool1).not.toBe(tool2)
+
+      // Both should be valid identifiers
+      expect(tool1).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
+      expect(tool2).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
+
+      // Both should be <= 63 chars
+      expect(tool1.length).toBeLessThanOrEqual(63)
+      expect(tool2.length).toBeLessThanOrEqual(63)
+    })
+
+    it('should handle tool names that already include server name prefix', () => {
+      const result = buildFunctionCallToolName('github', 'github_search_repos')
+      expect(result).toBeTruthy()
+      // Should not double the server name
+      expect(result.split('github').length - 1).toBeLessThanOrEqual(2)
+    })
+  })
+})
@@ -1,7 +1,25 @@
-export function buildFunctionCallToolName(serverName: string, toolName: string) {
+export function buildFunctionCallToolName(serverName: string, toolName: string, serverId?: string) {
   const sanitizedServer = serverName.trim().replace(/-/g, '_')
   const sanitizedTool = toolName.trim().replace(/-/g, '_')
 
+  // Calculate suffix first to reserve space for it
+  // Suffix format: "_" + 6 alphanumeric chars = 7 chars total
+  let serverIdSuffix = ''
+  if (serverId) {
+    // Take the last 6 characters of the serverId for brevity
+    serverIdSuffix = serverId.slice(-6).replace(/[^a-zA-Z0-9]/g, '')
+
+    // Fallback: if suffix becomes empty (all non-alphanumeric chars), use a simple hash
+    if (!serverIdSuffix) {
+      const hash = serverId.split('').reduce((acc, char) => acc + char.charCodeAt(0), 0)
+      serverIdSuffix = hash.toString(36).slice(-6) || 'x'
+    }
+  }
+
+  // Reserve space for suffix when calculating max base name length
+  const SUFFIX_LENGTH = serverIdSuffix ? serverIdSuffix.length + 1 : 0 // +1 for underscore
+  const MAX_BASE_LENGTH = 63 - SUFFIX_LENGTH
+
   // Combine server name and tool name
   let name = sanitizedTool
   if (!sanitizedTool.includes(sanitizedServer.slice(0, 7))) {
@@ -20,9 +38,9 @@ export function buildFunctionCallToolName(serverName: string, toolName: string)
   // Remove consecutive underscores/dashes (optional improvement)
   name = name.replace(/[_-]{2,}/g, '_')
 
-  // Truncate to 63 characters maximum
-  if (name.length > 63) {
-    name = name.slice(0, 63)
+  // Truncate base name BEFORE adding suffix to ensure suffix is never cut off
+  if (name.length > MAX_BASE_LENGTH) {
+    name = name.slice(0, MAX_BASE_LENGTH)
   }
 
   // Handle edge case: ensure we still have a valid name if truncation left invalid chars at edges
@@ -30,5 +48,10 @@ export function buildFunctionCallToolName(serverName: string, toolName: string)
     name = name.slice(0, -1)
   }
 
+  // Now append the suffix - it will always fit within 63 chars
+  if (serverIdSuffix) {
+    name = `${name}_${serverIdSuffix}`
+  }
+
   return name
 }
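A quick illustration of the new naming behavior (not part of the diff; the names and resulting strings below are illustrative assumptions, only the general shape follows from the code above):

// With a serverId, the last 6 alphanumeric characters become a suffix, and the base name is
// truncated first so the full result stays within 63 characters.
buildFunctionCallToolName('github', 'search_repositories', 'server-github-com-abc123') // e.g. roughly 'github_search_repositories_abc123'
buildFunctionCallToolName('github', 'search_repositories', 'server-ghe-internal-xyz789') // e.g. roughly 'github_search_repositories_xyz789'
// Without a serverId the behavior is unchanged: no suffix is appended.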
@@ -212,8 +212,9 @@ export class ToolCallChunkHandler {
         description: toolName,
         type: 'builtin'
       } as BaseTool
-    } else if ((mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool)) {
+    } else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
       // For client-side executed MCP tools, keep the existing logic
+      // toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
       logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
       // mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
       // if (!mcpTool) {
@@ -27,6 +27,7 @@ import { buildAiSdkMiddlewares } from './middleware/AiSdkMiddlewareBuilder'
 import { buildPlugins } from './plugins/PluginBuilder'
 import { createAiSdkProvider } from './provider/factory'
 import {
+  adaptProvider,
   getActualProvider,
   isModernSdkSupported,
   prepareSpecialProviderConfig,
@@ -64,12 +65,11 @@ export default class ModernAiProvider {
  * - URL will be automatically formatted via `formatProviderApiHost`, adding version suffixes like `/v1`
  *
  * 2. When called with `(model, provider)`:
- *    - **Directly uses the provided provider WITHOUT going through `getActualProvider`**
- *    - **URL will NOT be automatically formatted, `/v1` suffix will NOT be added**
- *    - This is legacy behavior kept for backward compatibility
+ *    - The provided provider will be adapted via `adaptProvider`
+ *    - URL formatting behavior depends on the adapted result
  *
  * 3. When called with `(provider)`:
- *    - Directly uses the provider without requiring a model
+ *    - The provider will be adapted via `adaptProvider`
  *    - Used for operations that don't need a model (e.g., fetchModels)
  *
  * @example
@@ -77,7 +77,7 @@ export default class ModernAiProvider {
  * // Recommended: Auto-format URL
  * const ai = new ModernAiProvider(model)
  *
- * // Not recommended: Skip URL formatting (only for special cases)
+ * // Provider will be adapted
  * const ai = new ModernAiProvider(model, customProvider)
  *
  * // For operations that don't need a model
@@ -91,12 +91,12 @@ export default class ModernAiProvider {
     if (this.isModel(modelOrProvider)) {
       // The argument is a Model
       this.model = modelOrProvider
-      this.actualProvider = provider || getActualProvider(modelOrProvider)
+      this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider)
       // Only store the config; do not create the executor ahead of time
       this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
     } else {
       // The argument is a Provider
-      this.actualProvider = modelOrProvider
+      this.actualProvider = adaptProvider({ provider: modelOrProvider })
       // model is optional; some operations (such as fetchModels) do not need it
     }
 
@@ -120,9 +120,12 @@ export default class ModernAiProvider {
       throw new Error('Model is required for completions. Please use constructor with model parameter.')
     }
 
-    // Regenerate the config on every request so that API key rotation takes effect
-    this.config = providerToAiSdkConfig(this.actualProvider, this.model)
-    logger.debug('Generated provider config for completions', this.config)
+    // Config is now set in constructor, ApiService handles key rotation before passing provider
+    if (!this.config) {
+      // If config wasn't set in constructor (when provider only), generate it now
+      this.config = providerToAiSdkConfig(this.actualProvider, this.model!)
+    }
+    logger.debug('Using provider config for completions', this.config)
 
     // Check that config exists
     if (!this.config) {
@@ -405,6 +405,9 @@ export abstract class BaseApiClient<
       if (!param.name?.trim()) {
         return acc
       }
+      // Parse JSON type parameters (Legacy API clients)
+      // Related: src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx:133-148
+      // The UI stores JSON type params as strings, this function parses them before sending to API
       if (param.type === 'json') {
         const value = param.value as string
         if (value === 'undefined') {
@@ -46,6 +46,7 @@ import type {
   GeminiSdkRawOutput,
   GeminiSdkToolCall
 } from '@renderer/types/sdk'
+import { getTrailingApiVersion, withoutTrailingApiVersion } from '@renderer/utils'
 import { isToolUseModeFunction } from '@renderer/utils/assistant'
 import {
   geminiFunctionCallToMcpTool,
@@ -163,6 +164,10 @@ export class GeminiAPIClient extends BaseApiClient<
     return models
   }
 
+  override getBaseURL(): string {
+    return withoutTrailingApiVersion(super.getBaseURL())
+  }
+
   override async getSdkInstance() {
     if (this.sdkInstance) {
       return this.sdkInstance
@@ -188,6 +193,13 @@ export class GeminiAPIClient extends BaseApiClient<
     if (this.provider.isVertex) {
       return 'v1'
     }
 
+    // Extract trailing API version from the URL
+    const trailingVersion = getTrailingApiVersion(this.provider.apiHost || '')
+    if (trailingVersion) {
+      return trailingVersion
+    }
+
     return 'v1beta'
   }
@@ -7,7 +7,7 @@ import { isAwsBedrockProvider, isVertexProvider } from '@renderer/utils/provider
 // https://docs.claude.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking
 const INTERLEAVED_THINKING_HEADER = 'interleaved-thinking-2025-05-14'
 // https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window
-const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
+// const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
 // https://docs.cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/web-search
 const WEBSEARCH_HEADER = 'web-search-2025-03-05'
 
@@ -17,7 +17,7 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
   if (
     isClaude45ReasoningModel(model) &&
     isToolUseModeFunction(assistant) &&
-    !(isVertexProvider(provider) && isAwsBedrockProvider(provider))
+    !(isVertexProvider(provider) || isAwsBedrockProvider(provider))
   ) {
     anthropicHeaders.push(INTERLEAVED_THINKING_HEADER)
   }
@@ -25,7 +25,9 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
     if (isVertexProvider(provider) && assistant.enableWebSearch) {
       anthropicHeaders.push(WEBSEARCH_HEADER)
     }
-    anthropicHeaders.push(CONTEXT_100M_HEADER)
+    // We may add it by user preference in assistant.settings instead of always adding it.
+    // See #11540, #11397
+    // anthropicHeaders.push(CONTEXT_100M_HEADER)
   }
   return anthropicHeaders
 }
@@ -28,6 +28,7 @@ import { type Assistant, type MCPTool, type Provider } from '@renderer/types'
 import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
 import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
 import { replacePromptVariables } from '@renderer/utils/prompt'
+import { isAwsBedrockProvider } from '@renderer/utils/provider'
 import type { ModelMessage, Tool } from 'ai'
 import { stepCountIs } from 'ai'
 
@@ -175,7 +176,7 @@ export async function buildStreamTextParams(
 
   let headers: Record<string, string | undefined> = options.requestOptions?.headers ?? {}
 
-  if (isAnthropicModel(model)) {
+  if (isAnthropicModel(model) && !isAwsBedrockProvider(provider)) {
     const newBetaHeaders = { 'anthropic-beta': addAnthropicHeaders(assistant, model).join(',') }
     headers = combineHeaders(headers, newBetaHeaders)
   }
@@ -1,4 +1,4 @@
-import type { Provider } from '@renderer/types'
+import type { Model, Provider } from '@renderer/types'
 import { describe, expect, it, vi } from 'vitest'
 
 import { getAiSdkProviderId } from '../factory'
@@ -68,6 +68,18 @@ function createTestProvider(id: string, type: string): Provider {
   } as Provider
 }
 
+function createAzureProvider(id: string, apiVersion?: string, model?: string): Provider {
+  return {
+    id,
+    type: 'azure-openai',
+    name: `Azure Test ${id}`,
+    apiKey: 'azure-test-key',
+    apiHost: 'azure-test-host',
+    apiVersion,
+    models: [{ id: model || 'gpt-4' } as Model]
+  }
+}
+
 describe('Integrated Provider Registry', () => {
   describe('Provider ID Resolution', () => {
     it('should resolve openrouter provider correctly', () => {
@@ -111,6 +123,24 @@ describe('Integrated Provider Registry', () => {
       const result = getAiSdkProviderId(unknownProvider)
       expect(result).toBe('unknown-provider')
     })
+
+    it('should handle Azure OpenAI providers correctly', () => {
+      const azureProvider = createAzureProvider('azure-test', '2024-02-15', 'gpt-4o')
+      const result = getAiSdkProviderId(azureProvider)
+      expect(result).toBe('azure')
+    })
+
+    it('should handle Azure OpenAI providers response endpoint correctly', () => {
+      const azureProvider = createAzureProvider('azure-test', 'v1', 'gpt-4o')
+      const result = getAiSdkProviderId(azureProvider)
+      expect(result).toBe('azure-responses')
+    })
+
+    it('should handle Azure provider Claude Models', () => {
+      const provider = createTestProvider('azure-anthropic', 'anthropic')
+      const result = getAiSdkProviderId(provider)
+      expect(result).toBe('azure-anthropic')
+    })
   })
 
   describe('Backward Compatibility', () => {
@@ -29,32 +29,6 @@ import { azureAnthropicProviderCreator } from './config/azure-anthropic'
 import { COPILOT_DEFAULT_HEADERS } from './constants'
 import { getAiSdkProviderId } from './factory'
 
-/**
- * Get the next API key in rotation
- * Reuses the multi-key rotation logic from the legacy architecture
- */
-function getRotatedApiKey(provider: Provider): string {
-  const keys = provider.apiKey.split(',').map((key) => key.trim())
-  const keyName = `provider:${provider.id}:last_used_key`
-
-  if (keys.length === 1) {
-    return keys[0]
-  }
-
-  const lastUsedKey = window.keyv.get(keyName)
-  if (!lastUsedKey) {
-    window.keyv.set(keyName, keys[0])
-    return keys[0]
-  }
-
-  const currentIndex = keys.indexOf(lastUsedKey)
-  const nextIndex = (currentIndex + 1) % keys.length
-  const nextKey = keys[nextIndex]
-  window.keyv.set(keyName, nextKey)
-
-  return nextKey
-}
-
 /**
  * Handle conversion logic for special providers
  */
@@ -78,11 +52,13 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
 }
 
 /**
- * Mainly used to align the BaseURL format with the AI SDK
- * @param provider
- * @returns
+ * Format and normalize the API host URL for a provider.
+ * Handles provider-specific URL formatting rules (e.g., appending version paths, Azure formatting).
+ *
+ * @param provider - The provider whose API host is to be formatted.
+ * @returns A new provider instance with the formatted API host.
  */
-function formatProviderApiHost(provider: Provider): Provider {
+export function formatProviderApiHost(provider: Provider): Provider {
   const formatted = { ...provider }
   if (formatted.anthropicApiHost) {
     formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
@@ -114,18 +90,38 @@ function formatProviderApiHost(provider: Provider): Provider {
 }
 
 /**
- * Get the actual Provider configuration
- * Simplified version: the logic is split into small functions
+ * Retrieve the effective Provider configuration for the given model.
+ * Applies all necessary transformations (special-provider handling, URL formatting, etc.).
+ *
+ * @param model - The model whose provider is to be resolved.
+ * @returns A new Provider instance with all adaptations applied.
  */
 export function getActualProvider(model: Model): Provider {
   const baseProvider = getProviderByModel(model)
 
-  // Apply the various transformations in order
-  let actualProvider = cloneDeep(baseProvider)
-  actualProvider = handleSpecialProviders(model, actualProvider)
-  actualProvider = formatProviderApiHost(actualProvider)
-
-  return actualProvider
+  return adaptProvider({ provider: baseProvider, model })
+}
+
+/**
+ * Transforms a provider configuration by applying model-specific adaptations and normalizing its API host.
+ * The transformations are applied in the following order:
+ * 1. Model-specific provider handling (e.g., New-API, system providers, Azure OpenAI)
+ * 2. API host formatting (provider-specific URL normalization)
+ *
+ * @param provider - The base provider configuration to transform.
+ * @param model - The model associated with the provider; optional but required for special-provider handling.
+ * @returns A new Provider instance with all transformations applied.
+ */
+export function adaptProvider({ provider, model }: { provider: Provider; model?: Model }): Provider {
+  let adaptedProvider = cloneDeep(provider)
+
+  // Apply transformations in order
+  if (model) {
+    adaptedProvider = handleSpecialProviders(model, adaptedProvider)
+  }
+  adaptedProvider = formatProviderApiHost(adaptedProvider)
+
+  return adaptedProvider
 }
 
 /**
@@ -139,7 +135,7 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
   const { baseURL, endpoint } = routeToEndpoint(actualProvider.apiHost)
   const baseConfig = {
     baseURL: baseURL,
-    apiKey: getRotatedApiKey(actualProvider)
+    apiKey: actualProvider.apiKey
   }
 
   const isCopilotProvider = actualProvider.id === SystemProviderIds.copilot
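For orientation, the new `adaptProvider` helper can be exercised on its own; a minimal sketch (the `customProvider` and `model` values here are placeholders, not from the diff):

// Same pipeline that getActualProvider now delegates to:
// special-provider handling (only when a model is supplied), then API host formatting.
const adapted = adaptProvider({ provider: customProvider, model }) // with model-specific handling
const adaptedNoModel = adaptProvider({ provider: customProvider }) // URL formatting only
// ModernAiProvider's constructor now routes any explicitly provided provider through this same path.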
@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
       'gen_ai.usage.output_tokens'
     ]
 
-    const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
-    const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
+    const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
+    const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
 
     if (completionTokens !== undefined || promptTokens !== undefined) {
       const usage: TokenUsage = {
@@ -0,0 +1,53 @@
+import type { Span } from '@opentelemetry/api'
+import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
+import { describe, expect, it, vi } from 'vitest'
+
+import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'
+
+vi.mock('@logger', () => ({
+  loggerService: {
+    withContext: () => ({
+      debug: vi.fn(),
+      error: vi.fn(),
+      info: vi.fn(),
+      warn: vi.fn()
+    })
+  }
+}))
+
+describe('AiSdkSpanAdapter', () => {
+  const createMockSpan = (attributes: Record<string, unknown>): Span => {
+    const span = {
+      spanContext: () => ({
+        traceId: 'trace-id',
+        spanId: 'span-id'
+      }),
+      _attributes: attributes,
+      _events: [],
+      name: 'test span',
+      status: { code: SpanStatusCode.OK },
+      kind: SpanKind.CLIENT,
+      startTime: [0, 0] as [number, number],
+      endTime: [0, 1] as [number, number],
+      ended: true,
+      parentSpanId: '',
+      links: []
+    }
+    return span as unknown as Span
+  }
+
+  it('maps prompt and completion usage tokens to the correct fields', () => {
+    const attributes = {
+      'ai.usage.promptTokens': 321,
+      'ai.usage.completionTokens': 654
+    }
+
+    const span = createMockSpan(attributes)
+    const result = AiSdkSpanAdapter.convertToSpanEntity({ span })
+
+    expect(result.usage).toBeDefined()
+    expect(result.usage?.prompt_tokens).toBe(321)
+    expect(result.usage?.completion_tokens).toBe(654)
+    expect(result.usage?.total_tokens).toBe(975)
+  })
+})
@@ -71,10 +71,11 @@ describe('mcp utils', () => {
       const result = setupToolsConfig(mcpTools)
 
       expect(result).not.toBeUndefined()
-      expect(Object.keys(result!)).toEqual(['test-tool'])
-      expect(result!['test-tool']).toHaveProperty('description')
-      expect(result!['test-tool']).toHaveProperty('inputSchema')
-      expect(result!['test-tool']).toHaveProperty('execute')
+      // Tools are now keyed by id (which includes serverId suffix) for uniqueness
+      expect(Object.keys(result!)).toEqual(['test-tool-1'])
+      expect(result!['test-tool-1']).toHaveProperty('description')
+      expect(result!['test-tool-1']).toHaveProperty('inputSchema')
+      expect(result!['test-tool-1']).toHaveProperty('execute')
     })
 
     it('should handle multiple MCP tools', () => {
@@ -109,7 +110,8 @@ describe('mcp utils', () => {
 
       expect(result).not.toBeUndefined()
       expect(Object.keys(result!)).toHaveLength(2)
-      expect(Object.keys(result!)).toEqual(['tool1', 'tool2'])
+      // Tools are keyed by id for uniqueness
+      expect(Object.keys(result!)).toEqual(['tool1-id', 'tool2-id'])
     })
   })
 
@@ -135,9 +137,10 @@ describe('mcp utils', () => {
 
       const result = convertMcpToolsToAiSdkTools(mcpTools)
 
-      expect(Object.keys(result)).toEqual(['get-weather'])
+      // Tools are keyed by id for uniqueness when multiple server instances exist
+      expect(Object.keys(result)).toEqual(['get-weather-id'])
 
-      const tool = result['get-weather'] as Tool
+      const tool = result['get-weather-id'] as Tool
       expect(tool.description).toBe('Get weather information')
       expect(tool.inputSchema).toBeDefined()
       expect(typeof tool.execute).toBe('function')
@@ -160,8 +163,8 @@ describe('mcp utils', () => {
 
       const result = convertMcpToolsToAiSdkTools(mcpTools)
 
-      expect(Object.keys(result)).toEqual(['no-desc-tool'])
-      const tool = result['no-desc-tool'] as Tool
+      expect(Object.keys(result)).toEqual(['no-desc-tool-id'])
+      const tool = result['no-desc-tool-id'] as Tool
       expect(tool.description).toBe('Tool from test-server')
     })
 
@@ -202,13 +205,13 @@ describe('mcp utils', () => {
 
      const result = convertMcpToolsToAiSdkTools(mcpTools)
 
-      expect(Object.keys(result)).toEqual(['complex-tool'])
-      const tool = result['complex-tool'] as Tool
+      expect(Object.keys(result)).toEqual(['complex-tool-id'])
+      const tool = result['complex-tool-id'] as Tool
       expect(tool.inputSchema).toBeDefined()
       expect(typeof tool.execute).toBe('function')
     })
 
-    it('should preserve tool names with special characters', () => {
+    it('should preserve tool id with special characters', () => {
       const mcpTools: MCPTool[] = [
         {
           id: 'special-tool-id',
@@ -225,7 +228,8 @@ describe('mcp utils', () => {
       ]
 
       const result = convertMcpToolsToAiSdkTools(mcpTools)
-      expect(Object.keys(result)).toEqual(['tool_with-special.chars'])
+      // Tools are keyed by id for uniqueness
+      expect(Object.keys(result)).toEqual(['special-tool-id'])
     })
 
     it('should handle multiple tools with different schemas', () => {
@@ -276,10 +280,11 @@ describe('mcp utils', () => {
 
       const result = convertMcpToolsToAiSdkTools(mcpTools)
 
-      expect(Object.keys(result).sort()).toEqual(['boolean-tool', 'number-tool', 'string-tool'])
-      expect(result['string-tool']).toBeDefined()
-      expect(result['number-tool']).toBeDefined()
-      expect(result['boolean-tool']).toBeDefined()
+      // Tools are keyed by id for uniqueness
+      expect(Object.keys(result).sort()).toEqual(['boolean-tool-id', 'number-tool-id', 'string-tool-id'])
+      expect(result['string-tool-id']).toBeDefined()
+      expect(result['number-tool-id']).toBeDefined()
+      expect(result['boolean-tool-id']).toBeDefined()
     })
   })
 
@@ -310,7 +315,7 @@ describe('mcp utils', () => {
       ]
 
       const tools = convertMcpToolsToAiSdkTools(mcpTools)
-      const tool = tools['test-exec-tool'] as Tool
+      const tool = tools['test-exec-tool-id'] as Tool
       const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'test-call-123' })
 
       expect(requestToolConfirmation).toHaveBeenCalled()
@@ -343,7 +348,7 @@ describe('mcp utils', () => {
       ]
 
       const tools = convertMcpToolsToAiSdkTools(mcpTools)
-      const tool = tools['cancelled-tool'] as Tool
+      const tool = tools['cancelled-tool-id'] as Tool
       const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'cancel-call-123' })
 
       expect(requestToolConfirmation).toHaveBeenCalled()
@@ -385,7 +390,7 @@ describe('mcp utils', () => {
       ]
 
      const tools = convertMcpToolsToAiSdkTools(mcpTools)
-      const tool = tools['error-tool'] as Tool
+      const tool = tools['error-tool-id'] as Tool
 
       await expect(
         tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'error-call-123' })
@@ -421,7 +426,7 @@ describe('mcp utils', () => {
       ]
 
       const tools = convertMcpToolsToAiSdkTools(mcpTools)
-      const tool = tools['auto-approve-tool'] as Tool
+      const tool = tools['auto-approve-tool-id'] as Tool
       const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'auto-call-123' })
 
       expect(requestToolConfirmation).not.toHaveBeenCalled()
@@ -154,6 +154,10 @@ vi.mock('../websearch', () => ({
   getWebSearchParams: vi.fn(() => ({ enable_search: true }))
 }))
 
+vi.mock('../../prepareParams/header', () => ({
+  addAnthropicHeaders: vi.fn(() => ['context-1m-2025-08-07'])
+}))
+
 const ensureWindowApi = () => {
   const globalWindow = window as any
   globalWindow.api = globalWindow.api || {}
@@ -633,5 +637,64 @@ describe('options utils', () => {
       expect(result.providerOptions).toHaveProperty('anthropic')
     })
   })
+
+  describe('AWS Bedrock provider', () => {
+    const bedrockProvider = {
+      id: 'bedrock',
+      name: 'AWS Bedrock',
+      type: 'aws-bedrock',
+      apiKey: 'test-key',
+      apiHost: 'https://bedrock.us-east-1.amazonaws.com',
+      models: [] as Model[]
+    } as Provider
+
+    const bedrockModel: Model = {
+      id: 'anthropic.claude-sonnet-4-20250514-v1:0',
+      name: 'Claude Sonnet 4',
+      provider: 'bedrock'
+    } as Model
+
+    it('should build basic Bedrock options', () => {
+      const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
+        enableReasoning: false,
+        enableWebSearch: false,
+        enableGenerateImage: false
+      })
+
+      expect(result.providerOptions).toHaveProperty('bedrock')
+      expect(result.providerOptions.bedrock).toBeDefined()
+    })
+
+    it('should include anthropicBeta when Anthropic headers are needed', async () => {
+      const { addAnthropicHeaders } = await import('../../prepareParams/header')
+      vi.mocked(addAnthropicHeaders).mockReturnValue(['interleaved-thinking-2025-05-14', 'context-1m-2025-08-07'])
+
+      const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
+        enableReasoning: false,
+        enableWebSearch: false,
+        enableGenerateImage: false
+      })
+
+      expect(result.providerOptions.bedrock).toHaveProperty('anthropicBeta')
+      expect(result.providerOptions.bedrock.anthropicBeta).toEqual([
+        'interleaved-thinking-2025-05-14',
+        'context-1m-2025-08-07'
+      ])
+    })
+
+    it('should include reasoning parameters when enabled', () => {
+      const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
+        enableReasoning: true,
+        enableWebSearch: false,
+        enableGenerateImage: false
+      })
+
+      expect(result.providerOptions.bedrock).toHaveProperty('reasoningConfig')
+      expect(result.providerOptions.bedrock.reasoningConfig).toEqual({
+        type: 'enabled',
+        budgetTokens: 5000
+      })
+    })
+  })
 })
 })
@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
       expect(result).toEqual({})
     })
 
-    it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
+    it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
       const { isReasoningModel } = await import('@renderer/config/models')
 
       vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -161,6 +161,29 @@ describe('reasoning utils', () => {
         settings: {}
       } as Assistant
 
+      const result = getReasoningEffort(assistant, model)
+      expect(result).toEqual({})
+    })
+
+    it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
+      const { isReasoningModel } = await import('@renderer/config/models')
+
+      vi.mocked(isReasoningModel).mockReturnValue(true)
+
+      const model: Model = {
+        id: 'anthropic/claude-sonnet-4',
+        name: 'Claude Sonnet 4',
+        provider: SystemProviderIds.openrouter
+      } as Model
+
+      const assistant: Assistant = {
+        id: 'test',
+        name: 'Test',
+        settings: {
+          reasoning_effort: 'none'
+        }
+      } as Assistant
+
       const result = getReasoningEffort(assistant, model)
       expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
     })
@@ -269,7 +292,9 @@ describe('reasoning utils', () => {
       const assistant: Assistant = {
         id: 'test',
         name: 'Test',
-        settings: {}
+        settings: {
+          reasoning_effort: 'none'
+        }
       } as Assistant
 
       const result = getReasoningEffort(assistant, model)
@@ -28,7 +28,9 @@ export function convertMcpToolsToAiSdkTools(mcpTools: MCPTool[]): ToolSet {
   const tools: ToolSet = {}
 
   for (const mcpTool of mcpTools) {
-    tools[mcpTool.name] = tool({
+    // Use mcpTool.id (which includes serverId suffix) to ensure uniqueness
+    // when multiple instances of the same MCP server type are configured
+    tools[mcpTool.id] = tool({
       description: mcpTool.description || `Tool from ${mcpTool.serverName}`,
       inputSchema: jsonSchema(mcpTool.inputSchema as JSONSchema7),
       execute: async (params, { toolCallId }) => {
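A sketch of the practical effect (the tool ids below are hypothetical, following the suffixing scheme introduced above):

// Two MCP servers of the same type now yield distinct ToolSet keys because the id carries the serverId suffix:
// { 'search_repositories_abc123': Tool, 'search_repositories_xyz789': Tool }
// Keying by name would have collapsed both instances into a single 'search_repositories' entry.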
@@ -36,6 +36,7 @@ import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@rende
 import type { JSONValue } from 'ai'
 import { t } from 'i18next'
 
+import { addAnthropicHeaders } from '../prepareParams/header'
 import { getAiSdkProviderId } from '../provider/factory'
 import { buildGeminiGenerateImageParams } from './image'
 import {
@@ -469,6 +470,11 @@ function buildBedrockProviderOptions(
     }
   }
 
+  const betaHeaders = addAnthropicHeaders(assistant, model)
+  if (betaHeaders.length > 0) {
+    providerOptions.anthropicBeta = betaHeaders
+  }
+
   return providerOptions
 }
 
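Based on the accompanying tests, the Bedrock provider options now carry the Anthropic beta flags as a field rather than as an HTTP header; the resulting shape looks roughly like this (shown for illustration only):

// providerOptions.bedrock after this change, when Anthropic beta features apply:
// {
//   anthropicBeta: ['interleaved-thinking-2025-05-14', 'context-1m-2025-08-07'],
//   reasoningConfig: { type: 'enabled', budgetTokens: 5000 } // only when reasoning is enabled
// }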
@@ -16,10 +16,8 @@ import {
   isGPT5SeriesModel,
   isGPT51SeriesModel,
   isGrok4FastReasoningModel,
-  isGrokReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
-  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -64,30 +62,22 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   }
   const reasoningEffort = assistant?.settings?.reasoning_effort
 
-  // Handle undefined and 'none' reasoningEffort.
-  // TODO: They should be separated.
-  if (!reasoningEffort || reasoningEffort === 'none') {
+  // reasoningEffort is not set, no extra reasoning setting
+  // Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
+  // It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
+  if (!reasoningEffort) {
+    return {}
+  }
+
+  // Handle 'none' reasoningEffort. It's explicitly off.
+  if (reasoningEffort === 'none') {
     // openrouter: use reasoning
     if (model.provider === SystemProviderIds.openrouter) {
-      // Don't disable reasoning for Gemini models that support thinking tokens
-      if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
-        return {}
-      }
       // 'none' is not an available value for effort for now.
       // I think they should resolve this issue soon, so I'll just go ahead and use this value.
       if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
         return { reasoning: { effort: 'none' } }
       }
-      // Don't disable reasoning for models that require it
-      if (
-        isGrokReasoningModel(model) ||
-        isOpenAIReasoningModel(model) ||
-        isQwenAlwaysThinkModel(model) ||
-        model.id.includes('seed-oss') ||
-        model.id.includes('minimax-m2')
-      ) {
-        return {}
-      }
       return { reasoning: { enabled: false, exclude: true } }
     }
 
@@ -101,11 +91,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
       return { enable_thinking: false }
     }
 
-    // claude
-    if (isSupportedThinkingTokenClaudeModel(model)) {
-      return {}
-    }
-
     // gemini
     if (isSupportedThinkingTokenGeminiModel(model)) {
       if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -118,8 +103,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
       }
       }
     }
+    } else {
+      logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
+      return {}
     }
-    return {}
   }
 
   // use thinking, doubao, zhipu, etc.
@@ -139,6 +126,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   }
   }
 
+  logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
   return {}
 }
 
@@ -293,6 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   }
 
   // OpenRouter models, use reasoning
+  // FIXME: duplicated openrouter handling. remove one
   if (model.provider === SystemProviderIds.openrouter) {
     if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
       return {
@@ -684,6 +673,10 @@ export function getCustomParameters(assistant: Assistant): Record<string, any> {
     if (!param.name?.trim()) {
       return acc
     }
+    // Parse JSON type parameters
+    // Related: src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx:133-148
+    // The UI stores JSON type params as strings (e.g., '{"key":"value"}')
+    // This function parses them into objects before sending to the API
     if (param.type === 'json') {
       const value = param.value as string
       if (value === 'undefined') {
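The split between an unset effort and an explicit 'none' can be summarized with the OpenRouter case from the accompanying tests (assistant and model objects abbreviated; `openRouterModel` is a placeholder):

// reasoning_effort undefined → no override at all
getReasoningEffort({ settings: {} } as Assistant, openRouterModel) // {}
// reasoning_effort 'none' → reasoning is explicitly disabled
getReasoningEffort({ settings: { reasoning_effort: 'none' } } as Assistant, openRouterModel) // { reasoning: { enabled: false, exclude: true } }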
@@ -215,6 +215,10 @@
   border-top: none !important;
 }
 
+.ant-collapse-header-text {
+  overflow-x: hidden;
+}
+
 .ant-slider .ant-slider-handle::after {
   box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
 }
@@ -10,6 +10,7 @@ import {
 } from '@ant-design/icons'
 import { loggerService } from '@logger'
 import { download } from '@renderer/utils/download'
+import { convertImageToPng } from '@renderer/utils/image'
 import type { ImageProps as AntImageProps } from 'antd'
 import { Dropdown, Image as AntImage, Space } from 'antd'
 import { Base64 } from 'js-base64'
@@ -33,39 +34,38 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
   // Copy the image to the clipboard
   const handleCopyImage = async (src: string) => {
     try {
+      let blob: Blob
+
       if (src.startsWith('data:')) {
         // Handle base64-encoded images
         const match = src.match(/^data:(image\/\w+);base64,(.+)$/)
         if (!match) throw new Error('Invalid base64 image format')
         const mimeType = match[1]
         const byteArray = Base64.toUint8Array(match[2])
-        const blob = new Blob([byteArray], { type: mimeType })
-        await navigator.clipboard.write([new ClipboardItem({ [mimeType]: blob })])
+        blob = new Blob([byteArray], { type: mimeType })
       } else if (src.startsWith('file://')) {
         // Handle local file paths
         const bytes = await window.api.fs.read(src)
         const mimeType = mime.getType(src) || 'application/octet-stream'
-        const blob = new Blob([bytes], { type: mimeType })
-        await navigator.clipboard.write([
-          new ClipboardItem({
-            [mimeType]: blob
-          })
-        ])
+        blob = new Blob([bytes], { type: mimeType })
       } else {
         // Handle URL images
         const response = await fetch(src)
-        const blob = await response.blob()
-        await navigator.clipboard.write([
-          new ClipboardItem({
-            [blob.type]: blob
-          })
-        ])
+        blob = await response.blob()
       }
 
+      // Always convert to PNG for compatibility (the clipboard API does not support JPEG)
+      const pngBlob = await convertImageToPng(blob)
+
+      const item = new ClipboardItem({
+        'image/png': pngBlob
+      })
+      await navigator.clipboard.write([item])
+
       window.toast.success(t('message.copy.success'))
     } catch (error) {
-      logger.error('Failed to copy image:', error as Error)
+      const err = error as Error
+      logger.error(`Failed to copy image: ${err.message}`, { stack: err.stack })
       window.toast.error(t('message.copy.failed'))
     }
   }
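The diff only shows the call site of `convertImageToPng`; a canvas-based helper along these lines would satisfy it (a minimal sketch under assumptions, not the repository's actual implementation):

// Hypothetical sketch of a Blob-to-PNG converter (illustrative only):
async function convertImageToPngSketch(blob: Blob): Promise<Blob> {
  const bitmap = await createImageBitmap(blob)
  const canvas = document.createElement('canvas')
  canvas.width = bitmap.width
  canvas.height = bitmap.height
  canvas.getContext('2d')!.drawImage(bitmap, 0, 0)
  return new Promise((resolve, reject) =>
    canvas.toBlob((png) => (png ? resolve(png) : reject(new Error('toBlob failed'))), 'image/png')
  )
}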
@@ -57,7 +57,7 @@ const PopupContainer: React.FC<Props> = ({ model, apiFilter, modelFilter, showTa
   const [_searchText, setSearchText] = useState('')
   const searchText = useDeferredValue(_searchText)
   const { models, isLoading } = useApiModels(apiFilter)
-  const adaptedModels = models.map((model) => apiModelAdapter(model))
+  const adaptedModels = useMemo(() => models.map((model) => apiModelAdapter(model)), [models])
 
   // Currently selected model ID
   const currentModelId = model ? model.id : ''
@@ -309,11 +309,14 @@ describe('Ling Models', () => {
 describe('Claude & regional providers', () => {
   it('identifies claude 4.5 variants', () => {
     expect(isClaude45ReasoningModel(createModel({ id: 'claude-sonnet-4.5-preview' }))).toBe(true)
+    expect(isClaude4SeriesModel(createModel({ id: 'claude-sonnet-4-5@20250929' }))).toBe(true)
     expect(isClaude45ReasoningModel(createModel({ id: 'claude-3-sonnet' }))).toBe(false)
   })
 
   it('identifies claude 4 variants', () => {
     expect(isClaude4SeriesModel(createModel({ id: 'claude-opus-4' }))).toBe(true)
+    expect(isClaude4SeriesModel(createModel({ id: 'claude-sonnet-4@20250514' }))).toBe(true)
+    expect(isClaude4SeriesModel(createModel({ id: 'anthropic.claude-sonnet-4-20250514-v1:0' }))).toBe(true)
     expect(isClaude4SeriesModel(createModel({ id: 'claude-4.2-sonnet-variant' }))).toBe(false)
     expect(isClaude4SeriesModel(createModel({ id: 'claude-3-haiku' }))).toBe(false)
   })
|
|||||||
@@ -125,195 +125,371 @@ describe('model utils', () => {
     openAIWebSearchOnlyMock.mockReturnValue(false)
   })
 
+  describe('OpenAI model detection', () => {
+    describe('isOpenAILLMModel', () => {
+      it('returns false for undefined model', () => {
+        expect(isOpenAILLMModel(undefined as unknown as Model)).toBe(false)
+      })
+      it('returns false for image generation models', () => {
+        expect(isOpenAILLMModel(createModel({ id: 'gpt-4o-image' }))).toBe(false)
+      })
+      it('returns true for reasoning models', () => {
+        reasoningMock.mockReturnValueOnce(true)
+        expect(isOpenAILLMModel(createModel({ id: 'o1-preview' }))).toBe(true)
+      })
+      it('returns true for GPT-prefixed models', () => {
+        expect(isOpenAILLMModel(createModel({ id: 'GPT-5-turbo' }))).toBe(true)
+      })
+    })
+    describe('isOpenAIModel', () => {
+      it('detects models via GPT prefix', () => {
+        expect(isOpenAIModel(createModel({ id: 'gpt-4.1' }))).toBe(true)
+      })
+      it('detects models via reasoning support', () => {
+        reasoningMock.mockReturnValueOnce(true)
+        expect(isOpenAIModel(createModel({ id: 'o3' }))).toBe(true)
+      })
+    })
+    describe('isOpenAIChatCompletionOnlyModel', () => {
+      it('identifies chat-completion-only models', () => {
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o-search-preview' }))).toBe(true)
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'o1-mini' }))).toBe(true)
+      })
+      it('returns false for general models', () => {
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
+    })
+  })
 
+  describe('GPT-5 family detection', () => {
+    describe('isGPT5SeriesModel', () => {
+      it('returns true for GPT-5 models', () => {
+        expect(isGPT5SeriesModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
+      })
+      it('returns false for GPT-5.1 models', () => {
+        expect(isGPT5SeriesModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(false)
+      })
+    })
+    describe('isGPT51SeriesModel', () => {
+      it('returns true for GPT-5.1 models', () => {
+        expect(isGPT51SeriesModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
+      })
+    })
+    describe('isGPT5SeriesReasoningModel', () => {
+      it('returns true for GPT-5 reasoning models', () => {
+        expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5' }))).toBe(true)
+      })
+      it('returns false for gpt-5-chat', () => {
+        expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
+      })
+    })
+    describe('isGPT5ProModel', () => {
+      it('returns true for GPT-5 Pro models', () => {
+        expect(isGPT5ProModel(createModel({ id: 'gpt-5-pro' }))).toBe(true)
+      })
+      it('returns false for non-Pro GPT-5 models', () => {
+        expect(isGPT5ProModel(createModel({ id: 'gpt-5-preview' }))).toBe(false)
+      })
+    })
+  })
 
+  describe('Verbosity support', () => {
+    describe('isSupportVerbosityModel', () => {
+      it('returns true for GPT-5 models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5' }))).toBe(true)
+      })
+      it('returns false for GPT-5 chat models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
+      })
+      it('returns true for GPT-5.1 models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
+      })
+    })
+    describe('getModelSupportedVerbosity', () => {
+      it('returns only "high" for GPT-5 Pro models', () => {
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
+      })
+      it('returns all levels for non-Pro GPT-5 models', () => {
+        const previewModel = createModel({ id: 'gpt-5-preview' })
+        expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
+      })
+      it('returns all levels for GPT-5.1 models', () => {
+        const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
+        expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
+      })
+      it('returns only undefined for non-GPT-5 models', () => {
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))).toEqual([undefined])
+        expect(getModelSupportedVerbosity(createModel({ id: 'claude-3.5' }))).toEqual([undefined])
+      })
+      it('returns only undefined for undefiend/null input', () => {
+        expect(getModelSupportedVerbosity(undefined)).toEqual([undefined])
+        expect(getModelSupportedVerbosity(null)).toEqual([undefined])
+      })
+    })
+  })
 
+  describe('Flex service tier support', () => {
+    describe('isSupportFlexServiceTierModel', () => {
+      it('returns true for supported models', () => {
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o3' }))).toBe(true)
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o4-mini' }))).toBe(true)
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
+      })
+      it('returns false for unsupported models', () => {
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o3-mini' }))).toBe(false)
+      })
+    })
+    describe('isSupportedFlexServiceTier', () => {
+      it('returns false for non-flex models', () => {
+        expect(isSupportedFlexServiceTier(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
+    })
+  })
 
+  describe('Temperature and top-p support', () => {
+    describe('isNotSupportTemperatureAndTopP', () => {
+      it('returns true for reasoning models', () => {
+        const model = createModel({ id: 'o1' })
+        reasoningMock.mockReturnValue(true)
+        expect(isNotSupportTemperatureAndTopP(model)).toBe(true)
+      })
+      it('returns false for open weight models', () => {
+        const openWeight = createModel({ id: 'gpt-oss-debug' })
+        expect(isNotSupportTemperatureAndTopP(openWeight)).toBe(false)
+      })
+      it('returns true for chat-only models without reasoning', () => {
+        const chatOnly = createModel({ id: 'o1-preview' })
+        reasoningMock.mockReturnValue(false)
+        expect(isNotSupportTemperatureAndTopP(chatOnly)).toBe(true)
+      })
+      it('returns true for Qwen MT models', () => {
+        const qwenMt = createModel({ id: 'qwen-mt-large', provider: 'aliyun' })
+        expect(isNotSupportTemperatureAndTopP(qwenMt)).toBe(true)
+      })
+    })
+  })
 
+  describe('Text delta support', () => {
+    describe('isNotSupportTextDeltaModel', () => {
+      it('returns true for qwen-mt-turbo and qwen-mt-plus models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Turbo' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'QWEN-MT-PLUS' }))).toBe(true)
+      })
+      it('returns false for qwen-mt-flash and other models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-flash' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Flash' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-max' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen2.5-72b' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-vl-plus' }))).toBe(false)
+      })
+      it('returns false for non-qwen models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'claude-3.5' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'glm-4-plus' }))).toBe(false)
+      })
+      it('handles models with version suffixes', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo-1201' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus-0828' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo-0828' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus-latest' }))).toBe(false)
+      })
+    })
+  })
 
+  describe('Model provider detection', () => {
+    describe('isGemmaModel', () => {
+      it('detects Gemma models by ID', () => {
+        expect(isGemmaModel(createModel({ id: 'Gemma-3-27B' }))).toBe(true)
+      })
+      it('detects Gemma models by group', () => {
+        expect(isGemmaModel(createModel({ group: 'Gemma' }))).toBe(true)
+      })
+      it('returns false for non-Gemma models', () => {
+        expect(isGemmaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
+    })
+    describe('isGeminiModel', () => {
+      it('detects Gemini models', () => {
+        expect(isGeminiModel(createModel({ id: 'Gemini-2.0' }))).toBe(true)
+      })
+    })
+    describe('isZhipuModel', () => {
+      it('detects Zhipu models by provider', () => {
+        expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
+      })
+      it('returns false for non-Zhipu models', () => {
+        expect(isZhipuModel(createModel({ provider: 'openai' }))).toBe(false)
+      })
+    })
+    describe('isAnthropicModel', () => {
+      it('detects Anthropic models', () => {
+        expect(isAnthropicModel(createModel({ id: 'claude-3.5' }))).toBe(true)
+      })
+    })
+    describe('isQwenMTModel', () => {
+      it('detects Qwen MT models', () => {
+        expect(isQwenMTModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
+      })
+    })
+    describe('isOpenAIOpenWeightModel', () => {
+      it('detects OpenAI open weight models', () => {
+        expect(isOpenAIOpenWeightModel(createModel({ id: 'gpt-oss-free' }))).toBe(true)
+      })
+    })
+  })
 
+  describe('System message support', () => {
+    describe('isNotSupportSystemMessageModel', () => {
+      it('returns true for models that do not support system messages', () => {
+        expect(isNotSupportSystemMessageModel(createModel({ id: 'gemma-moe' }))).toBe(true)
+      })
+    })
+  })
 
+  describe('Model grouping', () => {
+    describe('groupQwenModels', () => {
+      it('groups qwen models by prefix', () => {
+        const qwen = createModel({ id: 'Qwen-7B', provider: 'qwen', name: 'Qwen-7B' })
+        const qwenOmni = createModel({ id: 'qwen2.5-omni', name: 'qwen2.5-omni' })
+        const other = createModel({ id: 'deepseek-v3', group: 'DeepSeek' })
+
+        const grouped = groupQwenModels([qwen, qwenOmni, other])
+        expect(Object.keys(grouped)).toContain('qwen-7b')
+        expect(Object.keys(grouped)).toContain('qwen2.5')
+        expect(grouped.DeepSeek).toContain(other)
+      })
+    })
+  })
 
+  describe('Vision and image generation', () => {
+    describe('isVisionModels', () => {
+      it('returns true when all models support vision', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        expect(isVisionModels(models)).toBe(true)
+      })
+      it('returns false when some models do not support vision', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        visionMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
+        expect(isVisionModels(models)).toBe(false)
+      })
+    })
+    describe('isGenerateImageModels', () => {
+      it('returns true when all models support image generation', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        expect(isGenerateImageModels(models)).toBe(true)
+      })
+      it('returns false when some models do not support image generation', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        generateImageMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
+        expect(isGenerateImageModels(models)).toBe(false)
+      })
+    })
+  })
 
+  describe('Model filtering', () => {
+    describe('isSupportedModel', () => {
+      it('filters supported OpenAI catalog entries', () => {
+        expect(isSupportedModel({ id: 'gpt-4', object: 'model' } as any)).toBe(true)
+      })
+      it('filters unsupported OpenAI catalog entries', () => {
+        expect(isSupportedModel({ id: 'tts-1', object: 'model' } as any)).toBe(false)
+      })
+    })
+    describe('agentModelFilter', () => {
+      it('returns true for regular models', () => {
+        expect(agentModelFilter(createModel())).toBe(true)
+      })
+      it('filters out embedding models', () => {
+        embeddingMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'text-embedding' }))).toBe(false)
+      })
+      it('filters out rerank models', () => {
+        embeddingMock.mockReturnValue(false)
+        rerankMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'rerank' }))).toBe(false)
+      })
+      it('filters out text-to-image models', () => {
+        rerankMock.mockReturnValue(false)
+        textToImageMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'gpt-image-1' }))).toBe(false)
+      })
+    })
+  })
 
+  describe('Temperature limits', () => {
+    describe('isMaxTemperatureOneModel', () => {
+      it('returns true for Zhipu models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'glm-4' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'GLM-4-Plus' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'glm-3-turbo' }))).toBe(true)
+      })
+      it('returns true for Anthropic models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'claude-3.5-sonnet' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'Claude-3-opus' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'claude-2.1' }))).toBe(true)
+      })
+      it('returns true for Moonshot models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'moonshot-1.0' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'kimi-k2-thinking' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'Moonshot-Pro' }))).toBe(true)
+      })
+      it('returns false for other models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'qwen-max' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gemini-pro' }))).toBe(false)
+      })
+    })
+  })
 })
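The refactored tests above build fixtures with a local createModel factory and gate behaviour through mocks such as reasoningMock, visionMock, generateImageMock, embeddingMock, rerankMock and textToImageMock; none of those helpers are shown in this hunk. A minimal sketch of the kind of factory the tests assume (the defaults here are assumptions, not the project's actual helper):

// Hypothetical fixture factory mirroring how the spec builds Model objects.
// The real helper is defined earlier in the test file and may differ.
const createModel = (overrides: Partial<Model> = {}): Model =>
  ({
    id: 'gpt-4o',
    name: 'gpt-4o',
    provider: 'openai',
    group: 'OpenAI',
    ...overrides
  }) as Model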
@@ -396,7 +396,11 @@ export function isClaude45ReasoningModel(model: Model): boolean {
 export function isClaude4SeriesModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id, '/')
-  const regex = /claude-(sonnet|opus|haiku)-4(?:[.-]\d+)?(?:-[\w-]+)?$/i
+  // Supports various formats including:
+  // - Direct API: claude-sonnet-4, claude-opus-4-20250514
+  // - GCP Vertex AI: claude-sonnet-4@20250514
+  // - AWS Bedrock: anthropic.claude-sonnet-4-20250514-v1:0
+  const regex = /claude-(sonnet|opus|haiku)-4(?:[.-]\d+)?(?:[@\-:][\w\-:]+)?$/i
   return regex.test(modelId)
 }
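For reference, the widened pattern can be checked directly against the ID formats listed in the new comment (illustrative only, not part of the change):

// Sanity check of the new Claude 4 pattern against the documented formats.
const claude4 = /claude-(sonnet|opus|haiku)-4(?:[.-]\d+)?(?:[@\-:][\w\-:]+)?$/i
claude4.test('claude-sonnet-4') // true (direct API)
claude4.test('claude-opus-4-20250514') // true (direct API, dated)
claude4.test('claude-sonnet-4@20250514') // true (GCP Vertex AI)
claude4.test('anthropic.claude-sonnet-4-20250514-v1:0') // true (AWS Bedrock)
claude4.test('claude-3-haiku') // false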
@@ -456,16 +460,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
 }
 
 export const isDeepSeekHybridInferenceModel = (model: Model) => {
-  const modelId = getLowerBaseModelName(model.id)
-  // deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
-  // openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
-  // Matches: "deepseek-v3" followed by ".digit" or "-digit".
-  // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
-  // until the end of the string.
-  // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
-  // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
-  // TODO: move to utils and add test cases
-  return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
+  const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
+    const modelId = getLowerBaseModelName(model.id)
+    // deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
+    // openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
+    // Matches: "deepseek-v3" followed by ".digit" or "-digit".
+    // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
+    // until the end of the string.
+    // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
+    // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
+    // TODO: move to utils and add test cases
+    return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
+  })
+  return idResult || nameResult
 }
 
 export const isLingReasoningModel = (model?: Model): boolean => {
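The behaviour described in the comments can be illustrated directly against the regex; these call results are derived from the pattern itself and are not part of the diff:

const dsHybrid = /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/
dsHybrid.test('deepseek-v3.1') // true
dsHybrid.test('deepseek-v3-1') // true
dsHybrid.test('deepseek-v3.1-alpha') // true
dsHybrid.test('deepseek-v3.123') // false, no separator after the first digit
dsHybrid.test('deepseek-v3.x') // false, 'x' is not a digit
// OpenRouter-style IDs are caught by the explicit substring check instead:
'deepseek/deepseek-chat-v3.1'.includes('deepseek-chat-v3.1') // true

Wrapping the check in withModelIdAndNameAsId means a provider that only exposes the marker in the model name (rather than the id) is still detected, which is also why the separate name-based call is removed from isReasoningModel in the next hunk.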
@@ -519,7 +526,6 @@ export function isReasoningModel(model?: Model): boolean {
     REASONING_REGEX.test(model.name) ||
     isSupportedThinkingTokenDoubaoModel(model) ||
     isDeepSeekHybridInferenceModel(model) ||
-    isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
     false
   )
 }
@@ -4,7 +4,14 @@ import { type Model, SystemProviderIds } from '@renderer/types'
 import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'
 
-import { isOpenAIChatCompletionOnlyModel, isOpenAIOpenWeightModel, isOpenAIReasoningModel } from './openai'
+import {
+  isGPT5ProModel,
+  isGPT5SeriesModel,
+  isGPT51SeriesModel,
+  isOpenAIChatCompletionOnlyModel,
+  isOpenAIOpenWeightModel,
+  isOpenAIReasoningModel
+} from './openai'
 import { isQwenMTModel } from './qwen'
 import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
 export const NOT_SUPPORTED_REGEX = /(?:^tts|whisper|speech)/i
@@ -123,21 +130,46 @@ export const isNotSupportSystemMessageModel = (model: Model): boolean => {
   return isQwenMTModel(model) || isGemmaModel(model)
 }
 
-// GPT-5 verbosity configuration
+// Verbosity settings is only supported by GPT-5 and newer models
+// Specifically, GPT-5 and GPT-5.1 for now
 // gpt-5-pro only supports 'high', other GPT-5 models support all levels
-export const MODEL_SUPPORTED_VERBOSITY: Record<string, ValidOpenAIVerbosity[]> = {
-  'gpt-5-pro': ['high'],
-  default: ['low', 'medium', 'high']
-} as const
+const MODEL_SUPPORTED_VERBOSITY: readonly {
+  readonly validator: (model: Model) => boolean
+  readonly values: readonly ValidOpenAIVerbosity[]
+}[] = [
+  // gpt-5-pro
+  { validator: isGPT5ProModel, values: ['high'] },
+  // gpt-5 except gpt-5-pro
+  {
+    validator: (model: Model) => isGPT5SeriesModel(model) && !isGPT5ProModel(model),
+    values: ['low', 'medium', 'high']
+  },
+  // gpt-5.1
+  { validator: isGPT51SeriesModel, values: ['low', 'medium', 'high'] }
+]
 
-export const getModelSupportedVerbosity = (model: Model): OpenAIVerbosity[] => {
-  const modelId = getLowerBaseModelName(model.id)
-  let supportedValues: ValidOpenAIVerbosity[]
-  if (modelId.includes('gpt-5-pro')) {
-    supportedValues = MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
-  } else {
-    supportedValues = MODEL_SUPPORTED_VERBOSITY.default
-  }
+/**
+ * Returns the list of supported verbosity levels for the given model.
+ * If the model is not recognized as a GPT-5 series model, only `undefined` is returned.
+ * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
+ * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
+ * @param model - The model to check
+ * @returns An array of supported verbosity levels, always including `undefined` as the first element
+ */
+export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
+  if (!model) {
+    return [undefined]
+  }
+
+  let supportedValues: ValidOpenAIVerbosity[] = []
+
+  for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
+    if (validator(model)) {
+      supportedValues = [...values]
+      break
+    }
+  }
+
   return [undefined, ...supportedValues]
 }
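Based on the validator table and the unit tests earlier in this comparison, the helper now resolves roughly as follows (illustrative call results; createModel is the test fixture factory):

getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' })) // [undefined, 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-5-preview' })) // [undefined, 'low', 'medium', 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-5.1-preview' })) // [undefined, 'low', 'medium', 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-4o' })) // [undefined]
getModelSupportedVerbosity(undefined) // [undefined]

The table replaces the previous string-keyed record, so adding a new family later only requires appending a validator/values pair rather than special-casing model IDs inside the function.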
@@ -1,5 +1,5 @@
 import { throttle } from 'lodash'
-import { useEffect, useRef } from 'react'
+import { useEffect, useMemo, useRef } from 'react'
 
 import { useTimer } from './useTimer'
 
@@ -12,13 +12,18 @@ import { useTimer } from './useTimer'
  */
 export default function useScrollPosition(key: string, throttleWait?: number) {
   const containerRef = useRef<HTMLDivElement>(null)
-  const scrollKey = `scroll:${key}`
+  const scrollKey = useMemo(() => `scroll:${key}`, [key])
+  const scrollKeyRef = useRef(scrollKey)
   const { setTimeoutTimer } = useTimer()
 
+  useEffect(() => {
+    scrollKeyRef.current = scrollKey
+  }, [scrollKey])
+
   const handleScroll = throttle(() => {
     const position = containerRef.current?.scrollTop ?? 0
     window.requestAnimationFrame(() => {
-      window.keyv.set(scrollKey, position)
+      window.keyv.set(scrollKeyRef.current, position)
     })
   }, throttleWait ?? 100)
 
@@ -28,5 +33,9 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
     setTimeoutTimer('scrollEffect', scroll, 50)
   }, [scrollKey, setTimeoutTimer])
 
+  useEffect(() => {
+    return () => handleScroll.cancel()
+  }, [handleScroll])
+
   return { containerRef, handleScroll }
 }
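A minimal consumer of the hook, to show what the change affects (component name and key are illustrative assumptions; the hook's contract is just a container ref plus an onScroll handler):

// Hypothetical usage: the saved position is keyed by 'topic-list',
// and the new scrollKeyRef keeps writes pointing at the latest key
// even while the throttled handler from a previous render is still pending.
const TopicList = () => {
  const { containerRef, handleScroll } = useScrollPosition('topic-list')
  return (
    <div ref={containerRef} onScroll={handleScroll} style={{ overflowY: 'auto' }}>
      {/* list items */}
    </div>
  )
}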
@@ -1,4 +1,4 @@
-import { useEffect, useRef } from 'react'
+import { useCallback, useEffect, useRef } from 'react'
 
 /**
  * 定时器管理 Hook,用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
@@ -43,10 +43,38 @@ export const useTimer = () => {
   const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
   const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())
 
+  /**
+   * 清除指定 key 的 setTimeout 定时器
+   * @param key - 定时器标识符
+   */
+  const clearTimeoutTimer = useCallback((key: string) => {
+    clearTimeout(timeoutMapRef.current.get(key))
+    timeoutMapRef.current.delete(key)
+  }, [])
+
+  /**
+   * 清除指定 key 的 setInterval 定时器
+   * @param key - 定时器标识符
+   */
+  const clearIntervalTimer = useCallback((key: string) => {
+    clearInterval(intervalMapRef.current.get(key))
+    intervalMapRef.current.delete(key)
+  }, [])
+
+  /**
+   * 清除所有定时器,包括 setTimeout 和 setInterval
+   */
+  const clearAllTimers = useCallback(() => {
+    timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
+    intervalMapRef.current.forEach((timer) => clearInterval(timer))
+    timeoutMapRef.current.clear()
+    intervalMapRef.current.clear()
+  }, [])
+
   // 组件卸载时自动清理所有定时器
   useEffect(() => {
     return () => clearAllTimers()
-  }, [])
+  }, [clearAllTimers])
 
   /**
    * 设置一个 setTimeout 定时器
@@ -65,12 +93,15 @@ export const useTimer = () => {
    * cleanup();
    * ```
    */
-  const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
-    clearTimeout(timeoutMapRef.current.get(key))
-    const timer = setTimeout(...args)
-    timeoutMapRef.current.set(key, timer)
-    return () => clearTimeoutTimer(key)
-  }
+  const setTimeoutTimer = useCallback(
+    (key: string, ...args: Parameters<typeof setTimeout>) => {
+      clearTimeout(timeoutMapRef.current.get(key))
+      const timer = setTimeout(...args)
+      timeoutMapRef.current.set(key, timer)
+      return () => clearTimeoutTimer(key)
+    },
+    [clearTimeoutTimer]
+  )
 
   /**
    * 设置一个 setInterval 定时器
@@ -89,56 +120,31 @@ export const useTimer = () => {
    * cleanup();
    * ```
    */
-  const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
-    clearInterval(intervalMapRef.current.get(key))
-    const timer = setInterval(...args)
-    intervalMapRef.current.set(key, timer)
-    return () => clearIntervalTimer(key)
-  }
-
-  /**
-   * 清除指定 key 的 setTimeout 定时器
-   * @param key - 定时器标识符
-   */
-  const clearTimeoutTimer = (key: string) => {
-    clearTimeout(timeoutMapRef.current.get(key))
-    timeoutMapRef.current.delete(key)
-  }
-
-  /**
-   * 清除指定 key 的 setInterval 定时器
-   * @param key - 定时器标识符
-   */
-  const clearIntervalTimer = (key: string) => {
-    clearInterval(intervalMapRef.current.get(key))
-    intervalMapRef.current.delete(key)
-  }
-
+  const setIntervalTimer = useCallback(
+    (key: string, ...args: Parameters<typeof setInterval>) => {
+      clearInterval(intervalMapRef.current.get(key))
+      const timer = setInterval(...args)
+      intervalMapRef.current.set(key, timer)
+      return () => clearIntervalTimer(key)
+    },
+    [clearIntervalTimer]
+  )
 
   /**
    * 清除所有 setTimeout 定时器
    */
-  const clearAllTimeoutTimers = () => {
+  const clearAllTimeoutTimers = useCallback(() => {
     timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
     timeoutMapRef.current.clear()
-  }
+  }, [])
 
   /**
    * 清除所有 setInterval 定时器
    */
-  const clearAllIntervalTimers = () => {
+  const clearAllIntervalTimers = useCallback(() => {
     intervalMapRef.current.forEach((timer) => clearInterval(timer))
     intervalMapRef.current.clear()
-  }
-
-  /**
-   * 清除所有定时器,包括 setTimeout 和 setInterval
-   */
-  const clearAllTimers = () => {
-    timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
-    intervalMapRef.current.forEach((timer) => clearInterval(timer))
-    timeoutMapRef.current.clear()
-    intervalMapRef.current.clear()
-  }
+  }, [])
 
   return {
     setTimeoutTimer,
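With every helper wrapped in useCallback, the functions are now referentially stable, so consumers can list them in dependency arrays without retriggering effects. A small usage sketch (the 'autosave' key and saveDraft callback are illustrative, not from the codebase):

const { setTimeoutTimer } = useTimer()

useEffect(() => {
  // Re-arming the same key replaces the previous timeout for that key.
  const cleanup = setTimeoutTimer('autosave', () => saveDraft(), 1_000)
  return cleanup
}, [setTimeoutTimer])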
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Tool",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Image Generation",
+        "image-generation": "Image Generation (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "ending with # forces use of input address"
+        "tip": "Add # at the end to disable the automatically appended API version."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "工具",
@@ -1208,7 +1209,7 @@
-        "image-generation": "图片生成",
+        "image-generation": "图像生成 (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "# 结尾强制使用输入地址"
+        "tip": "在末尾添加 # 以禁用自动附加的API版本。"
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "工具",
@@ -1208,7 +1209,7 @@
-        "image-generation": "圖片生成",
+        "image-generation": "圖像生成 (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "# 結尾強制使用輸入位址"
+        "tip": "在末尾添加 # 以停用自動附加的 API 版本。"
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Werkzeug",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Bildgenerierung",
+        "image-generation": "Bilderzeugung (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "# am Ende erzwingt die Verwendung der Eingabe-Adresse"
+        "tip": "Fügen Sie am Ende ein # hinzu, um die automatisch angehängte API-Version zu deaktivieren."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Εργαλείο",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Δημιουργία Εικόνας",
+        "image-generation": "Δημιουργία Εικόνων (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "#τέλος ενδεχόμενη χρήση της εισαγωγής διευθύνσεως"
+        "tip": "Προσθέστε το σύμβολο # στο τέλος για να απενεργοποιήσετε την αυτόματα προστιθέμενη έκδοση API."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Herramienta",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Generación de imágenes",
+        "image-generation": "Generación de Imágenes (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "forzar uso de dirección de entrada con # al final"
+        "tip": "Añada # al final para deshabilitar la versión de la API que se añade automáticamente."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Outil",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Génération d'images",
+        "image-generation": "Génération d'images (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "forcer l'utilisation de l'adresse d'entrée si terminé par #"
+        "tip": "Ajoutez # à la fin pour désactiver la version d'API ajoutée automatiquement."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "ツール",
@@ -1208,7 +1209,7 @@
-        "image-generation": "画像生成",
+        "image-generation": "画像生成 (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "#で終わる場合、入力されたアドレスを強制的に使用します"
+        "tip": "自動的に付加されるAPIバージョンを無効にするには、末尾に#を追加します。"
@@ -16,7 +16,7 @@
-      "title": "删除代理"
+      "title": "Excluir Agente"
@@ -111,7 +111,7 @@
-        "bypassPermissions": "忽略检查 de permissão",
+        "bypassPermissions": "Ignorar verificações de permissão",
@@ -150,7 +150,7 @@
-      "uninstall": "插件 desinstalado com sucesso"
+      "uninstall": "Plugin desinstalado com sucesso"
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Ferramenta",
@@ -1134,7 +1135,7 @@
-      "error": "错误",
+      "error": "Erro",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Geração de Imagem",
+        "image-generation": "Geração de Imagens (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "e forçar o uso do endereço original quando terminar com '#'"
+        "tip": "Adicione # no final para desativar a versão da API adicionada automaticamente."
@@ -280,6 +280,7 @@
+      "toolPendingFallback": "Инструмент",
@@ -1208,7 +1209,7 @@
-        "image-generation": "Изображение",
+        "image-generation": "Генерация изображений (OpenAI)",
@@ -4371,7 +4372,7 @@
-        "tip": "заканчивая на # принудительно использует введенный адрес"
+        "tip": "Добавьте # в конце, чтобы отключить автоматически добавляемую версию API."
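Every locale now describes the same behaviour for the API host field: without a trailing #, the client appends its default API version segment; with a trailing #, the address is used exactly as typed. For example (host values are illustrative):

https://api.example.com/my-proxy    may have the default version path appended automatically
https://api.example.com/my-proxy/#  used as-is, no version segment appended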
@@ -9,6 +9,7 @@ import { getModel } from '@renderer/hooks/useModel'
 import { useSettings } from '@renderer/hooks/useSettings'
 import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
 import { useTimer } from '@renderer/hooks/useTimer'
+import { CacheService } from '@renderer/services/CacheService'
 import { pauseTrace } from '@renderer/services/SpanManagerService'
 import { estimateUserPromptUsage } from '@renderer/services/TokenService'
 import { useAppDispatch, useAppSelector } from '@renderer/store'
@@ -41,19 +42,10 @@ import { getInputbarConfig } from './registry'
 import { TopicType } from './types'
 
 const logger = loggerService.withContext('AgentSessionInputbar')
-const agentSessionDraftCache = new Map<string, string>()
-
-const readDraftFromCache = (key: string): string => {
-  return agentSessionDraftCache.get(key) ?? ''
-}
-
-const writeDraftToCache = (key: string, value: string) => {
-  if (!value) {
-    agentSessionDraftCache.delete(key)
-  } else {
-    agentSessionDraftCache.set(key, value)
-  }
-}
+
+const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
+
+const getAgentDraftCacheKey = (agentId: string) => `agent-session-draft-${agentId}`
 
 type Props = {
   agentId: string
@@ -170,16 +162,15 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
   const scope = TopicType.Session
   const config = getInputbarConfig(scope)
 
-  // Use shared hooks for text and textarea management
-  const initialDraft = useMemo(() => readDraftFromCache(agentId), [agentId])
-  const persistDraft = useCallback((next: string) => writeDraftToCache(agentId, next), [agentId])
+  // Use shared hooks for text and textarea management with draft persistence
+  const draftCacheKey = getAgentDraftCacheKey(agentId)
   const {
     text,
     setText,
     isEmpty: inputEmpty
   } = useInputText({
-    initialValue: initialDraft,
-    onChange: persistDraft
+    initialValue: CacheService.get<string>(draftCacheKey) ?? '',
+    onChange: (value) => CacheService.set(draftCacheKey, value, DRAFT_CACHE_TTL)
   })
   const {
     textareaRef,
@@ -431,6 +422,7 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
         })
       )
 
+      // Clear text after successful send (draft is cleared automatically via onChange)
      setText('')
      setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
     } catch (error) {
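The two hunks above swap the module-level `Map` draft store for `CacheService`-backed persistence keyed per agent and expiring after 24 hours. A minimal sketch of the same pattern, assuming only a cache with `get<T>(key)` / `set(key, value, ttlMs)` semantics like the `CacheService` calls shown in the diff; helper names other than `DRAFT_CACHE_TTL` and `getAgentDraftCacheKey` are illustrative, not part of the change:

```ts
// Sketch only: a TTL-backed draft store shaped like the one the diff introduces.
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours

const getAgentDraftCacheKey = (agentId: string) => `agent-session-draft-${agentId}`

// Assumed cache contract (mirrors how CacheService is called in the hunks above).
interface TtlCache {
  get<T>(key: string): T | undefined
  set(key: string, value: unknown, ttlMs: number): void
}

// Read the persisted draft; empty string when nothing is cached or it expired.
export const readDraft = (cache: TtlCache, agentId: string): string =>
  cache.get<string>(getAgentDraftCacheKey(agentId)) ?? ''

// Persist every change; the TTL keeps abandoned drafts from living forever.
export const writeDraft = (cache: TtlCache, agentId: string, value: string): void =>
  cache.set(getAgentDraftCacheKey(agentId), value, DRAFT_CACHE_TTL)
```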
@@ -14,7 +14,6 @@ import { useInputText } from '@renderer/hooks/useInputText'
 import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
 import { useSettings } from '@renderer/hooks/useSettings'
 import { useShortcut } from '@renderer/hooks/useShortcuts'
-import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
 import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
 import { useTimer } from '@renderer/hooks/useTimer'
 import {
@@ -24,6 +23,7 @@ import {
   useInputbarToolsState
 } from '@renderer/pages/home/Inputbar/context/InputbarToolsProvider'
 import { getDefaultTopic } from '@renderer/services/AssistantService'
+import { CacheService } from '@renderer/services/CacheService'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
 import FileManager from '@renderer/services/FileManager'
 import { checkRateLimit, getUserMessage } from '@renderer/services/MessagesService'
@@ -39,7 +39,7 @@ import { getSendMessageShortcutLabel } from '@renderer/utils/input'
 import { documentExts, imageExts, textExts } from '@shared/config/constant'
 import { debounce } from 'lodash'
 import type { FC } from 'react'
-import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
+import React, { useCallback, useEffect, useEffectEvent, useMemo, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 
 import { InputbarCore } from './components/InputbarCore'
@@ -51,6 +51,17 @@ import TokenCount from './TokenCount'
 
 const logger = loggerService.withContext('Inputbar')
 
+const INPUTBAR_DRAFT_CACHE_KEY = 'inputbar-draft'
+const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
+
+const getMentionedModelsCacheKey = (assistantId: string) => `inputbar-mentioned-models-${assistantId}`
+
+const getValidatedCachedModels = (assistantId: string): Model[] => {
+  const cached = CacheService.get<Model[]>(getMentionedModelsCacheKey(assistantId))
+  if (!Array.isArray(cached)) return []
+  return cached.filter((model) => model?.id && model?.name)
+}
+
 interface Props {
   assistant: Assistant
   setActiveTopic: (topic: Topic) => void
@@ -80,16 +91,18 @@ const Inputbar: FC<Props> = ({ assistant: initialAssistant, setActiveTopic, topi
     toggleExpanded: () => {}
   })
 
+  const [initialMentionedModels] = useState(() => getValidatedCachedModels(initialAssistant.id))
+
   const initialState = useMemo(
     () => ({
       files: [] as FileType[],
-      mentionedModels: [] as Model[],
+      mentionedModels: initialMentionedModels,
       selectedKnowledgeBases: initialAssistant.knowledge_bases ?? [],
       isExpanded: false,
      couldAddImageFile: false,
       extensions: [] as string[]
     }),
-    [initialAssistant.knowledge_bases]
+    [initialMentionedModels, initialAssistant.knowledge_bases]
   )
 
   return (
@@ -121,7 +134,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
   const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
   const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()
 
-  const { text, setText } = useInputText()
+  const { text, setText } = useInputText({
+    initialValue: CacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
+    onChange: (value) => CacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
+  })
   const {
     textareaRef,
     resize: resizeTextArea,
@@ -133,7 +149,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     minHeight: 30
   })
 
-  const showKnowledgeIcon = useSidebarIconShow('knowledge')
   const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
   const { sendMessageShortcut, showInputEstimatedTokens, enableQuickPanelTriggers } = useSettings()
   const [estimateTokenCount, setEstimateTokenCount] = useState(0)
@@ -190,6 +205,15 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     setCouldAddImageFile(canAddImageFile)
   }, [canAddImageFile, setCouldAddImageFile])
 
+  const onUnmount = useEffectEvent((id: string) => {
+    CacheService.set(getMentionedModelsCacheKey(id), mentionedModels, DRAFT_CACHE_TTL)
+  })
+
+  useEffect(() => {
+    return () => onUnmount(assistant.id)
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [assistant.id])
+
   const placeholderText = enableQuickPanelTriggers
     ? t('chat.input.placeholder', { key: getSendMessageShortcutLabel(sendMessageShortcut) })
     : t('chat.input.placeholder_without_triggers', {
@@ -381,9 +405,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     focusTextarea
   ])
 
+  // TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
   useEffect(() => {
-    setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
-  }, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
+    setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
+  }, [assistant.knowledge_bases, setSelectedKnowledgeBases])
 
   useEffect(() => {
     // Disable web search if model doesn't support it
@@ -156,11 +156,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
 
   const setText = useCallback<React.Dispatch<React.SetStateAction<string>>>(
     (value) => {
-      if (typeof value === 'function') {
-        onTextChange(value(textRef.current))
-      } else {
-        onTextChange(value)
-      }
+      const newText = typeof value === 'function' ? value(textRef.current) : value
+      onTextChange(newText)
     },
     [onTextChange]
   )
@@ -1,5 +1,4 @@
 import { useAssistant } from '@renderer/hooks/useAssistant'
-import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
 import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
 import type { KnowledgeBase } from '@renderer/types'
 import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
@@ -30,7 +29,6 @@ const knowledgeBaseTool = defineTool({
   render: function KnowledgeBaseToolRender(context) {
     const { assistant, state, actions, quickPanel } = context
 
-    const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
     const { updateAssistant } = useAssistant(assistant.id)
 
     const handleSelect = useCallback(
@@ -41,10 +39,6 @@ const knowledgeBaseTool = defineTool({
       [updateAssistant, actions]
     )
 
-    if (!knowledgeSidebarEnabled) {
-      return null
-    }
-
     return (
       <KnowledgeBaseButton
         quickPanel={quickPanel}
@@ -102,10 +102,12 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
   )
 }
 
+const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)
+
 const ThinkingTimeSeconds = memo(
   ({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
     const { t } = useTranslation()
-    const [displayTime, setDisplayTime] = useState(blockThinkingTime)
+    const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))
 
     const timer = useRef<NodeJS.Timeout | null>(null)
 
@@ -121,7 +123,7 @@ const ThinkingTimeSeconds = memo(
         clearInterval(timer.current)
         timer.current = null
       }
-      setDisplayTime(blockThinkingTime)
+      setDisplayTime(normalizeThinkingTime(blockThinkingTime))
     }
 
     return () => {
@@ -132,10 +134,10 @@ const ThinkingTimeSeconds = memo(
       }
     }, [isThinking, blockThinkingTime])
 
-    const thinkingTimeSeconds = useMemo(
-      () => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
-      [displayTime]
-    )
+    const thinkingTimeSeconds = useMemo(() => {
+      const safeTime = normalizeThinkingTime(displayTime)
+      return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
+    }, [displayTime])
 
     return isThinking
       ? t('chat.thinking', {
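The `normalizeThinkingTime` guard matters because `thinking_millsec` can arrive as `undefined`, `NaN`, or `Infinity`, and `toFixed(1)` would otherwise render a garbage label. A small standalone sketch of the behavior the new test exercises, mirroring the expressions in the diff:

```ts
// Sketch: clamp non-finite thinking times to 0 before formatting for display.
const normalizeThinkingTime = (value?: number) =>
  typeof value === 'number' && Number.isFinite(value) ? value : 0

const formatSeconds = (ms?: number) => {
  const safe = normalizeThinkingTime(ms)
  // Values under a second are floored to 100 ms so the label never reads "0.0s".
  return ((safe < 1000 ? 100 : safe) / 1000).toFixed(1) + 's'
}

console.log(formatSeconds(2500)) // "2.5s"
console.log(formatSeconds(undefined)) // "0.1s"
console.log(formatSeconds(Number.POSITIVE_INFINITY)) // "0.1s"
```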
@@ -255,6 +255,20 @@ describe('ThinkingBlock', () => {
       unmount()
     })
   })
+
+  it('should clamp invalid thinking times to a safe default', () => {
+    const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]
+
+    testCases.forEach((thinking_millsec) => {
+      const block = createThinkingBlock({
+        thinking_millsec: thinking_millsec as any,
+        status: MessageBlockStatus.SUCCESS
+      })
+      const { unmount } = renderThinkingBlock(block)
+      expect(getThinkingTimeText()).toHaveTextContent('0.1s')
+      unmount()
+    })
+  })
 })
 
 describe('collapse behavior', () => {
@@ -10,6 +10,7 @@ import { useSettings } from '@renderer/hooks/useSettings'
 import { useTimer } from '@renderer/hooks/useTimer'
 import type { RootState } from '@renderer/store'
 // import { selectCurrentTopicId } from '@renderer/store/newMessage'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { Button, Drawer, Tooltip } from 'antd'
 import type { FC } from 'react'
 import { useCallback, useEffect, useRef, useState } from 'react'
@@ -118,7 +119,8 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
   }
 
   const scrollToMessage = (element: HTMLElement) => {
-    element.scrollIntoView({ behavior: 'smooth', block: 'start' })
+    // Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
+    scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
   }
 
   const scrollToTop = () => {
@@ -15,6 +15,7 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
 import type { Assistant, Topic } from '@renderer/types'
 import type { Message, MessageBlock } from '@renderer/types/newMessage'
 import { classNames, cn } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
 import { Divider } from 'antd'
 import type { Dispatch, FC, SetStateAction } from 'react'
@@ -79,9 +80,10 @@ const MessageItem: FC<Props> = ({
 
   useEffect(() => {
     if (isEditing && messageContainerRef.current) {
-      messageContainerRef.current.scrollIntoView({
+      scrollIntoView(messageContainerRef.current, {
         behavior: 'smooth',
-        block: 'center'
+        block: 'center',
+        container: 'nearest'
       })
     }
   }, [isEditing])
@@ -124,7 +126,7 @@ const MessageItem: FC<Props> = ({
   const messageHighlightHandler = useCallback(
     (highlight: boolean = true) => {
       if (messageContainerRef.current) {
-        messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
+        scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
         if (highlight) {
           setTimeoutTimer(
             'messageHighlightHandler',
@@ -12,6 +12,7 @@ import { newMessagesActions } from '@renderer/store/newMessage'
 // import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
 import type { Message } from '@renderer/types/newMessage'
 import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { getMainTextContent } from '@renderer/utils/messageUtils/find'
 import { Avatar } from 'antd'
 import { CircleChevronDown } from 'lucide-react'
@@ -119,7 +120,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
       () => {
         const messageElement = document.getElementById(`message-${message.id}`)
         if (messageElement) {
-          messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
+          scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
         }
       },
       100
@@ -141,7 +142,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
         return
       }
 
-      messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
+      scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
     },
     [setSelectedMessage]
   )
@@ -10,6 +10,7 @@ import type { MultiModelMessageStyle } from '@renderer/store/settings'
 import type { Topic } from '@renderer/types'
 import type { Message } from '@renderer/types/newMessage'
 import { classNames } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { Popover } from 'antd'
 import type { ComponentProps } from 'react'
 import { memo, useCallback, useEffect, useMemo, useState } from 'react'
@@ -73,7 +74,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
       () => {
         const messageElement = document.getElementById(`message-${message.id}`)
         if (messageElement) {
-          messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
+          scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
         }
       },
       200
@@ -132,7 +133,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
         setSelectedMessage(message)
       } else {
         // 直接滚动
-        element.scrollIntoView({ behavior: 'smooth', block: 'start' })
+        scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
       }
     }
   }
@@ -3,6 +3,7 @@ import type { RootState } from '@renderer/store'
 import { messageBlocksSelectors } from '@renderer/store/messageBlock'
 import type { Message } from '@renderer/types/newMessage'
 import { MessageBlockType } from '@renderer/types/newMessage'
+import { scrollIntoView } from '@renderer/utils/dom'
 import type { FC } from 'react'
 import React, { useMemo, useRef } from 'react'
 import { useSelector } from 'react-redux'
@@ -72,10 +73,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
     const parent = messageOutlineContainerRef.current?.parentElement
     const messageContentContainer = parent?.querySelector('.message-content-container')
     if (messageContentContainer) {
-      const headingElement = messageContentContainer.querySelector(`#${id}`)
+      const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
       if (headingElement) {
         const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
-        headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
+        scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
       }
     }
   }
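All of the scroll call sites above now go through the shared `scrollIntoView` helper from `@renderer/utils/dom`, whose implementation is not part of this diff. One plausible shape, assuming it simply forwards an extended options object (including the Chromium-only `container` key mentioned in the ChatNavigation comment) to the native API:

```ts
// Sketch only: the real helper may differ; this just shows the assumed contract.
type ExtendedScrollOptions = ScrollIntoViewOptions & { container?: 'nearest' | 'all' }

export function scrollIntoView(element: HTMLElement, options: ExtendedScrollOptions): void {
  // Engines that do not understand the `container` member ignore unknown
  // dictionary keys, so passing the extended options through is safe.
  element.scrollIntoView(options as ScrollIntoViewOptions)
}
```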
@@ -76,7 +76,7 @@ export function BashOutputTool({
   input,
   output
 }: {
-  input: BashOutputToolInput
+  input?: BashOutputToolInput
   output?: BashOutputToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   const parsedOutput = parseBashOutput(output)
@@ -144,7 +144,7 @@ export function BashOutputTool({
         label="Bash Output"
         params={
           <div className="flex items-center gap-2">
-            <Tag className="py-0 font-mono text-xs">{input.bash_id}</Tag>
+            <Tag className="py-0 font-mono text-xs">{input?.bash_id}</Tag>
             {statusConfig && (
               <Tag
                 color={statusConfig.color}
@@ -5,24 +5,20 @@ import { Terminal } from 'lucide-react'
 import { ToolTitle } from './GenericTools'
 import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'
 
-const MAX_TAG_LENGTH = 100
-
 export function BashTool({
   input,
   output
 }: {
-  input: BashToolInputType
+  input?: BashToolInputType
   output?: BashToolOutputType
 }): NonNullable<CollapseProps['items']>[number] {
   // 如果有输出,计算输出行数
   const outputLines = output ? output.split('\n').length : 0
 
-  // 处理命令字符串的截断
-  const command = input.command
-  const needsTruncate = command.length > MAX_TAG_LENGTH
-  const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command
+  // 处理命令字符串,添加空值检查
+  const command = input?.command ?? ''
 
-  const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
+  const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>
 
   return {
     key: 'tool',
@@ -31,19 +27,15 @@ export function BashTool({
         <ToolTitle
           icon={<Terminal className="h-4 w-4" />}
           label="Bash"
-          params={input.description}
+          params={input?.description}
           stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
         />
-        <div className="mt-1">
-          {needsTruncate ? (
-            <Popover
-              content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
-              trigger="hover">
-              {tagContent}
-            </Popover>
-          ) : (
-            tagContent
-          )}
+        <div className="mt-1 max-w-full">
+          <Popover
+            content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
+            trigger="hover">
+            {tagContent}
+          </Popover>
         </div>
       </>
     ),
@@ -32,19 +32,19 @@ export function EditTool({
   input,
   output
 }: {
-  input: EditToolInput
+  input?: EditToolInput
   output?: EditToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   return {
     key: AgentToolsType.Edit,
-    label: <ToolTitle icon={<FileEdit className="h-4 w-4" />} label="Edit" params={input.file_path} />,
+    label: <ToolTitle icon={<FileEdit className="h-4 w-4" />} label="Edit" params={input?.file_path} />,
     children: (
       <>
         {/* Diff View */}
         {/* Old Content */}
-        {renderCodeBlock(input.old_string, 'old')}
+        {renderCodeBlock(input?.old_string ?? '', 'old')}
         {/* New Content */}
-        {renderCodeBlock(input.new_string, 'new')}
+        {renderCodeBlock(input?.new_string ?? '', 'new')}
         {/* Output */}
         {output}
       </>
@@ -10,18 +10,19 @@ export function ExitPlanModeTool({
   input,
   output
 }: {
-  input: ExitPlanModeToolInput
+  input?: ExitPlanModeToolInput
   output?: ExitPlanModeToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
+  const plan = input?.plan ?? ''
   return {
     key: AgentToolsType.ExitPlanMode,
     label: (
       <ToolTitle
         icon={<DoorOpen className="h-4 w-4" />}
         label="ExitPlanMode"
-        stats={`${input.plan.split('\n\n').length} plans`}
+        stats={`${plan.split('\n\n').length} plans`}
       />
     ),
-    children: <ReactMarkdown>{input.plan + '\n\n' + (output ?? '')}</ReactMarkdown>
+    children: <ReactMarkdown>{plan + '\n\n' + (output ?? '')}</ReactMarkdown>
   }
 }
@@ -18,9 +18,9 @@ export function ToolTitle({
 }) {
   return (
     <div className={`flex items-center gap-1 ${className}`}>
-      {icon}
-      {label && <span className="font-medium text-sm">{label}</span>}
-      {params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>}
+      {icon && <span className="flex flex-shrink-0">{icon}</span>}
+      {label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
+      {params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
       {stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
     </div>
   )
@@ -8,7 +8,7 @@ export function GlobTool({
   input,
   output
 }: {
-  input: GlobToolInputType
+  input?: GlobToolInputType
   output?: GlobToolOutputType
 }): NonNullable<CollapseProps['items']>[number] {
   // 如果有输出,计算文件数量
@@ -20,7 +20,7 @@ export function GlobTool({
       <ToolTitle
         icon={<FolderSearch className="h-4 w-4" />}
         label="Glob"
-        params={input.pattern}
+        params={input?.pattern}
        stats={output ? `${lineCount} ${lineCount === 1 ? 'file' : 'files'}` : undefined}
       />
     ),
@@ -8,7 +8,7 @@ export function GrepTool({
   input,
   output
 }: {
-  input: GrepToolInput
+  input?: GrepToolInput
   output?: GrepToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   // 如果有输出,计算结果行数
@@ -22,8 +22,8 @@ export function GrepTool({
         label="Grep"
         params={
           <>
-            {input.pattern}
-            {input.output_mode && <span className="ml-1">({input.output_mode})</span>}
+            {input?.pattern}
+            {input?.output_mode && <span className="ml-1">({input.output_mode})</span>}
           </>
         }
         stats={output ? `${resultLines} ${resultLines === 1 ? 'line' : 'lines'}` : undefined}
@@ -9,18 +9,19 @@ import { AgentToolsType } from './types'
 export function MultiEditTool({
   input
 }: {
-  input: MultiEditToolInput
+  input?: MultiEditToolInput
   output?: MultiEditToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
+  const edits = Array.isArray(input?.edits) ? input.edits : []
   return {
     key: AgentToolsType.MultiEdit,
-    label: <ToolTitle icon={<FileText className="h-4 w-4" />} label="MultiEdit" params={input.file_path} />,
+    label: <ToolTitle icon={<FileText className="h-4 w-4" />} label="MultiEdit" params={input?.file_path} />,
     children: (
       <div>
-        {input.edits.map((edit, index) => (
+        {edits.map((edit, index) => (
           <div key={index}>
-            {renderCodeBlock(edit.old_string, 'old')}
-            {renderCodeBlock(edit.new_string, 'new')}
+            {renderCodeBlock(edit.old_string ?? '', 'old')}
+            {renderCodeBlock(edit.new_string ?? '', 'new')}
           </div>
         ))}
       </div>
@@ -11,7 +11,7 @@ export function NotebookEditTool({
   input,
   output
 }: {
-  input: NotebookEditToolInput
+  input?: NotebookEditToolInput
   output?: NotebookEditToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   return {
@@ -20,10 +20,10 @@ export function NotebookEditTool({
       <>
         <ToolTitle icon={<FileText className="h-4 w-4" />} label="NotebookEdit" />
         <Tag className="mt-1" color="blue">
-          {input.notebook_path}{' '}
+          {input?.notebook_path}{' '}
         </Tag>
       </>
     ),
-    children: <ReactMarkdown>{output}</ReactMarkdown>
+    children: <ReactMarkdown>{output ?? ''}</ReactMarkdown>
   }
 }
@@ -46,7 +46,7 @@ export function ReadTool({
   input,
   output
 }: {
-  input: ReadToolInputType
+  input?: ReadToolInputType
   output?: ReadToolOutputType
 }): NonNullable<CollapseProps['items']>[number] {
   const outputString = normalizeOutputString(output)
@@ -58,7 +58,7 @@ export function ReadTool({
       <ToolTitle
         icon={<FileText className="h-4 w-4" />}
         label="Read File"
-        params={input.file_path.split('/').pop()}
+        params={input?.file_path?.split('/').pop()}
         stats={stats ? `${stats.lineCount} lines, ${stats.formatSize(stats.fileSize)}` : undefined}
       />
     ),
@@ -8,7 +8,7 @@ export function SearchTool({
   input,
   output
 }: {
-  input: SearchToolInputType
+  input?: SearchToolInputType
   output?: SearchToolOutputType
 }): NonNullable<CollapseProps['items']>[number] {
   // 如果有输出,计算结果数量
@@ -20,13 +20,13 @@ export function SearchTool({
       <ToolTitle
         icon={<Search className="h-4 w-4" />}
         label="Search"
-        params={`"${input}"`}
+        params={input ? `"${input}"` : undefined}
         stats={output ? `${resultCount} ${resultCount === 1 ? 'result' : 'results'}` : undefined}
       />
     ),
     children: (
       <div>
-        <StringInputTool input={input} label="Search Query" />
+        {input && <StringInputTool input={input} label="Search Query" />}
         {output && (
           <div>
             <StringOutputTool output={output} label="Search Results" textColor="text-yellow-600 dark:text-yellow-400" />
@@ -8,12 +8,12 @@ export function SkillTool({
   input,
   output
 }: {
-  input: SkillToolInput
+  input?: SkillToolInput
   output?: SkillToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   return {
     key: 'tool',
-    label: <ToolTitle icon={<PencilRuler className="h-4 w-4" />} label="Skill" params={input.command} />,
+    label: <ToolTitle icon={<PencilRuler className="h-4 w-4" />} label="Skill" params={input?.command} />,
     children: <div>{output}</div>
   }
 }
@@ -9,19 +9,20 @@ export function TaskTool({
   input,
   output
 }: {
-  input: TaskToolInputType
+  input?: TaskToolInputType
   output?: TaskToolOutputType
 }): NonNullable<CollapseProps['items']>[number] {
   return {
     key: 'tool',
-    label: <ToolTitle icon={<Bot className="h-4 w-4" />} label="Task" params={input.description} />,
+    label: <ToolTitle icon={<Bot className="h-4 w-4" />} label="Task" params={input?.description} />,
     children: (
       <div>
-        {output?.map((item) => (
-          <div key={item.type}>
-            <div>{item.type === 'text' ? <Markdown>{item.text}</Markdown> : item.text}</div>
-          </div>
-        ))}
+        {Array.isArray(output) &&
+          output.map((item) => (
+            <div key={item.type}>
+              <div>{item.type === 'text' ? <Markdown>{item.text}</Markdown> : item.text}</div>
+            </div>
+          ))}
       </div>
     )
 }
@@ -38,9 +38,10 @@ const getStatusConfig = (status: TodoItem['status']) => {
 export function TodoWriteTool({
   input
 }: {
-  input: TodoWriteToolInputType
+  input?: TodoWriteToolInputType
 }): NonNullable<CollapseProps['items']>[number] {
-  const doneCount = input.todos.filter((todo) => todo.status === 'completed').length
+  const todos = Array.isArray(input?.todos) ? input.todos : []
+  const doneCount = todos.filter((todo) => todo.status === 'completed').length
 
   return {
     key: AgentToolsType.TodoWrite,
@@ -49,12 +50,12 @@ export function TodoWriteTool({
         icon={<ListTodo className="h-4 w-4" />}
         label="Todo Write"
         params={`${doneCount} Done`}
-        stats={`${input.todos.length} ${input.todos.length === 1 ? 'item' : 'items'}`}
+        stats={`${todos.length} ${todos.length === 1 ? 'item' : 'items'}`}
       />
     ),
     children: (
       <div className="space-y-3">
-        {input.todos.map((todo, index) => {
+        {todos.map((todo, index) => {
           const statusConfig = getStatusConfig(todo.status)
           return (
             <div key={index}>
@@ -8,12 +8,12 @@ export function WebFetchTool({
   input,
   output
 }: {
-  input: WebFetchToolInput
+  input?: WebFetchToolInput
   output?: WebFetchToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   return {
     key: 'tool',
-    label: <ToolTitle icon={<Globe className="h-4 w-4" />} label="Web Fetch" params={input.url} />,
+    label: <ToolTitle icon={<Globe className="h-4 w-4" />} label="Web Fetch" params={input?.url} />,
     children: <div>{output}</div>
   }
 }
@@ -8,7 +8,7 @@ export function WebSearchTool({
   input,
   output
 }: {
-  input: WebSearchToolInput
+  input?: WebSearchToolInput
   output?: WebSearchToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   // 如果有输出,计算结果数量
@@ -20,7 +20,7 @@ export function WebSearchTool({
       <ToolTitle
         icon={<Globe className="h-4 w-4" />}
         label="Web Search"
-        params={input.query}
+        params={input?.query}
         stats={output ? `${resultCount} ${resultCount === 1 ? 'result' : 'results'}` : undefined}
       />
     ),
@@ -7,12 +7,12 @@ import type { WriteToolInput, WriteToolOutput } from './types'
 export function WriteTool({
   input
 }: {
-  input: WriteToolInput
+  input?: WriteToolInput
   output?: WriteToolOutput
 }): NonNullable<CollapseProps['items']>[number] {
   return {
     key: 'tool',
-    label: <ToolTitle icon={<FileText className="h-4 w-4" />} label="Write" params={input.file_path} />,
-    children: <div>{input.content}</div>
+    label: <ToolTitle icon={<FileText className="h-4 w-4" />} label="Write" params={input?.file_path} />,
+    children: <div>{input?.content}</div>
   }
 }
@@ -1,7 +1,10 @@
 import { loggerService } from '@logger'
+import { useAppSelector } from '@renderer/store'
+import { selectPendingPermission } from '@renderer/store/toolPermissions'
 import type { NormalToolResponse } from '@renderer/types'
 import type { CollapseProps } from 'antd'
-import { Collapse } from 'antd'
+import { Collapse, Spin } from 'antd'
+import { useTranslation } from 'react-i18next'
 
 // 导出所有类型
 export * from './types'
@@ -83,17 +86,41 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
 // 统一的组件渲染入口
 export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
   const { arguments: args, response, tool, status } = toolResponse
-  logger.info('Rendering agent tool response', {
+  logger.debug('Rendering agent tool response', {
     tool: tool,
     arguments: args,
+    status,
     response
   })
 
+  const pendingPermission = useAppSelector((state) =>
+    selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
+  )
+
   if (status === 'pending') {
-    return <ToolPermissionRequestCard toolResponse={toolResponse} />
+    if (pendingPermission) {
+      return <ToolPermissionRequestCard toolResponse={toolResponse} />
+    }
+    return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
   }
 
   return (
     <ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
   )
 }
+
+function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
+  const { t } = useTranslation()
+  const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
+  const detail = description?.trim() || t('agent.toolPermission.executing')
+
+  return (
+    <div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
+      <Spin size="small" />
+      <div className="flex flex-col gap-1">
+        <span className="font-semibold text-default-700 text-sm">{label}</span>
+        <span className="text-default-500 text-xs">{detail}</span>
+      </div>
+    </div>
+  )
+}
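The pending branch above only shows the permission card when the Redux store actually holds a pending request for that tool call, and falls back to a lightweight spinner otherwise. A hedged sketch of a selector with the shape the component relies on; the real `toolPermissions` slice layout is an assumption here, not part of the diff:

```ts
// Sketch: look up a pending permission request by tool call id.
interface PendingPermission {
  toolCallId: string
  requestedAt: number
}

interface ToolPermissionsState {
  pending: Record<string, PendingPermission | undefined>
}

export const selectPendingPermission = (
  state: ToolPermissionsState,
  toolCallId: string
): PendingPermission | undefined => state.pending[toolCallId]
```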
@@ -9,7 +9,7 @@ import {
   DEFAULT_TEMPERATURE,
   MAX_CONTEXT_COUNT
 } from '@renderer/config/constant'
-import { isOpenAIModel } from '@renderer/config/models'
+import { isOpenAIModel, isSupportVerbosityModel } from '@renderer/config/models'
 import { UNKNOWN } from '@renderer/config/translate'
 import { useCodeStyle } from '@renderer/context/CodeStyleProvider'
 import { useTheme } from '@renderer/context/ThemeProvider'
@@ -56,7 +56,7 @@ import type { Assistant, AssistantSettings, CodeStyleVarious, MathEngine } from
 import { isGroqSystemProvider, ThemeMode } from '@renderer/types'
 import { modalConfirm } from '@renderer/utils'
 import { getSendMessageShortcutLabel } from '@renderer/utils/input'
-import { isSupportServiceTierProvider } from '@renderer/utils/provider'
+import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider'
 import { Button, Col, InputNumber, Row, Slider, Switch } from 'antd'
 import { Settings2 } from 'lucide-react'
 import type { FC } from 'react'
@@ -183,7 +183,10 @@ const SettingsTab: FC<Props> = (props) => {
 
   const model = assistant.model || getDefaultModel()
 
-  const showOpenAiSettings = isOpenAIModel(model) || isSupportServiceTierProvider(provider)
+  const showOpenAiSettings =
+    isOpenAIModel(model) ||
+    isSupportServiceTierProvider(provider) ||
+    (isSupportVerbosityModel(model) && isSupportVerbosityProvider(provider))
 
   return (
     <Container className="settings-tab">
@@ -404,11 +404,11 @@ const UpdateNotesWrapper = styled.div`
   margin: 8px 0;
   background-color: var(--color-bg-2);
   border-radius: 6px;
+  color: var(--color-text-2);
+  font-size: 14px;
 
   p {
     margin: 0;
-    color: var(--color-text-2);
-    font-size: 14px;
   }
 `
 
@@ -135,12 +135,18 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
             <Input
               value={typeof param.value === 'string' ? param.value : JSON.stringify(param.value, null, 2)}
               onChange={(e) => {
-                try {
-                  const jsonValue = JSON.parse(e.target.value)
-                  onUpdateCustomParameter(index, 'value', jsonValue)
-                } catch {
-                  onUpdateCustomParameter(index, 'value', e.target.value)
-                }
+                // For JSON type parameters, always store the value as a STRING
+                //
+                // Data Flow:
+                // 1. UI stores: { name: "config", value: '{"key":"value"}', type: "json" } ← STRING format
+                // 2. API parses: getCustomParameters() in src/renderer/src/aiCore/utils/reasoning.ts:687-696
+                //    calls JSON.parse() to convert string to object
+                // 3. Request sends: The parsed object is sent to the AI provider
+                //
+                // Previously this code was parsing JSON here and storing
+                // the object directly, which caused getCustomParameters() to fail when trying
+                // to JSON.parse() an already-parsed object.
+                onUpdateCustomParameter(index, 'value', e.target.value)
               }}
             />
           )
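The comment block added above explains why a "json"-typed custom parameter must stay a string in the UI layer. A condensed sketch of that store-as-string / parse-at-request-time split; only the `getCustomParameters` location comes from the diff comment, the rest is illustrative:

```ts
// Sketch of the intended data flow for "json"-typed custom parameters.
type CustomParameter = { name: string; value: string; type: 'json' | 'string' }

// UI layer: keep whatever the user typed as a raw string (no JSON.parse here).
const onEditorChange = (param: CustomParameter, nextText: string): CustomParameter => ({
  ...param,
  value: nextText
})

// Request layer (getCustomParameters in aiCore/utils/reasoning.ts, per the comment):
// parse once, just before the request payload is assembled.
const toRequestValue = (param: CustomParameter): unknown => {
  if (param.type !== 'json') return param.value
  try {
    return JSON.parse(param.value)
  } catch {
    return param.value // fall back to the raw string if it is not valid JSON
  }
}
```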
@@ -1,8 +1,10 @@
|
|||||||
|
import { adaptProvider } from '@renderer/aiCore/provider/providerConfig'
|
||||||
import OpenAIAlert from '@renderer/components/Alert/OpenAIAlert'
|
import OpenAIAlert from '@renderer/components/Alert/OpenAIAlert'
|
||||||
import { LoadingIcon } from '@renderer/components/Icons'
|
import { LoadingIcon } from '@renderer/components/Icons'
|
||||||
import { HStack } from '@renderer/components/Layout'
|
import { HStack } from '@renderer/components/Layout'
|
||||||
import { ApiKeyListPopup } from '@renderer/components/Popups/ApiKeyListPopup'
|
import { ApiKeyListPopup } from '@renderer/components/Popups/ApiKeyListPopup'
|
||||||
import Selector from '@renderer/components/Selector'
|
import Selector from '@renderer/components/Selector'
|
||||||
|
import { HelpTooltip } from '@renderer/components/TooltipIcons'
|
||||||
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
|
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
|
||||||
import { PROVIDER_URLS } from '@renderer/config/providers'
|
import { PROVIDER_URLS } from '@renderer/config/providers'
|
||||||
import { useTheme } from '@renderer/context/ThemeProvider'
|
import { useTheme } from '@renderer/context/ThemeProvider'
|
||||||
@@ -19,14 +21,7 @@ import type { SystemProviderId } from '@renderer/types'
|
|||||||
import { isSystemProvider, isSystemProviderId, SystemProviderIds } from '@renderer/types'
|
import { isSystemProvider, isSystemProviderId, SystemProviderIds } from '@renderer/types'
|
||||||
import type { ApiKeyConnectivity } from '@renderer/types/healthCheck'
|
import type { ApiKeyConnectivity } from '@renderer/types/healthCheck'
|
||||||
import { HealthStatus } from '@renderer/types/healthCheck'
|
import { HealthStatus } from '@renderer/types/healthCheck'
|
||||||
import {
|
import { formatApiHost, formatApiKeys, getFancyProviderName, validateApiHost } from '@renderer/utils'
|
||||||
formatApiHost,
|
|
||||||
formatApiKeys,
|
|
||||||
formatAzureOpenAIApiHost,
|
|
||||||
formatVertexApiHost,
|
|
||||||
getFancyProviderName,
|
|
||||||
validateApiHost
|
|
||||||
 } from '@renderer/utils'
 import { formatErrorMessage } from '@renderer/utils/error'
 import {
   isAIGatewayProvider,
@@ -36,7 +31,6 @@ import {
   isNewApiProvider,
   isOpenAICompatibleProvider,
   isOpenAIProvider,
-  isSupportAPIVersionProvider,
   isVertexProvider
 } from '@renderer/utils/provider'
 import { Button, Divider, Flex, Input, Select, Space, Switch, Tooltip } from 'antd'
@@ -281,12 +275,10 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
   }, [configuredApiHost, apiHost])

   const hostPreview = () => {
-    if (apiHost.endsWith('#')) {
-      return apiHost.replace('#', '')
-    }
+    const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost

     if (isOpenAICompatibleProvider(provider)) {
-      return formatApiHost(apiHost, isSupportAPIVersionProvider(provider)) + '/chat/completions'
+      return formattedApiHost + '/chat/completions'
     }

     if (isAzureOpenAIProvider(provider)) {
@@ -294,29 +286,26 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
       const path = !['preview', 'v1'].includes(apiVersion)
         ? `/v1/chat/completion?apiVersion=v1`
         : `/v1/responses?apiVersion=v1`
-      return formatAzureOpenAIApiHost(apiHost) + path
+      return formattedApiHost + path
     }

     if (isAnthropicProvider(provider)) {
-      // AI SDK uses the baseURL with /v1, then appends /messages
-      // formatApiHost adds /v1 automatically if not present
-      const normalizedHost = formatApiHost(apiHost)
-      return normalizedHost + '/messages'
+      return formattedApiHost + '/messages'
     }

     if (isGeminiProvider(provider)) {
-      return formatApiHost(apiHost, true, 'v1beta') + '/models'
+      return formattedApiHost + '/models'
     }
     if (isOpenAIProvider(provider)) {
-      return formatApiHost(apiHost) + '/responses'
+      return formattedApiHost + '/responses'
     }
     if (isVertexProvider(provider)) {
-      return formatVertexApiHost(provider) + '/publishers/google'
+      return formattedApiHost + '/publishers/google'
     }
     if (isAIGatewayProvider(provider)) {
-      return formatApiHost(apiHost) + '/language-model'
+      return formattedApiHost + '/language-model'
     }
-    return formatApiHost(apiHost)
+    return formattedApiHost
   }

   // API key connectivity check status indicator; currently only shown on failure
@@ -494,16 +483,21 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
         {!isDmxapi && (
           <>
             <SettingSubtitle style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between' }}>
-              <Tooltip title={hostSelectorTooltip} mouseEnterDelay={0.3}>
-                <Selector
-                  size={14}
-                  value={activeHostField}
-                  onChange={(value) => setActiveHostField(value as HostField)}
-                  options={hostSelectorOptions}
-                  style={{ paddingLeft: 1, fontWeight: 'bold' }}
-                  placement="bottomLeft"
-                />
-              </Tooltip>
+              <div className="flex items-center gap-1">
+                <Tooltip title={hostSelectorTooltip} mouseEnterDelay={0.3}>
+                  <div>
+                    <Selector
+                      size={14}
+                      value={activeHostField}
+                      onChange={(value) => setActiveHostField(value as HostField)}
+                      options={hostSelectorOptions}
+                      style={{ paddingLeft: 1, fontWeight: 'bold' }}
+                      placement="bottomLeft"
+                    />
+                  </div>
+                </Tooltip>
+                <HelpTooltip title={t('settings.provider.api.url.tip')}></HelpTooltip>
+              </div>
               <div style={{ display: 'flex', alignItems: 'center', gap: 4 }}>
                 <Button
                   type="text"
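The hostPreview() rework above derives a single formattedApiHost once (via adaptProvider) instead of calling formatApiHost in every branch. The convention it relies on, per the formatApiHost changes later in this diff: a host ending in '#' is used verbatim (with the '#' stripped), otherwise a default API version such as '/v1' may be appended. A minimal illustrative sketch of that idea — buildHostPreview is an assumption for illustration only and ignores the already-has-a-version case that formatApiHost/adaptProvider handle:

// Illustrative sketch (not from this PR): mirrors the trailing-'#' convention —
// '#' means "use the host verbatim", otherwise append the default '/v1' version.
function buildHostPreview(rawHost: string, pathSuffix: string): string {
  const trimmed = rawHost.trim().replace(/\/$/, '')
  const formatted = trimmed.endsWith('#') ? trimmed.slice(0, -1) : `${trimmed}/v1`
  return formatted + pathSuffix
}

buildHostPreview('https://api.openai.com', '/chat/completions')
// -> 'https://api.openai.com/v1/chat/completions'
buildHostPreview('https://my-proxy.example.com/openai#', '/chat/completions')
// -> 'https://my-proxy.example.com/openai/chat/completions'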
@@ -8,11 +8,11 @@ import { isDedicatedImageGenerationModel, isEmbeddingModel, isFunctionCallingMod
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
-import type { FetchChatCompletionParams } from '@renderer/types'
 import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
+import { type FetchChatCompletionParams, isSystemProvider } from '@renderer/types'
 import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
 import { type Chunk, ChunkType } from '@renderer/types/chunk'
-import type { Message } from '@renderer/types/newMessage'
+import type { Message, ResponseError } from '@renderer/types/newMessage'
 import type { SdkModel } from '@renderer/types/sdk'
 import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
 import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
@@ -22,7 +22,8 @@ import { purifyMarkdownImages } from '@renderer/utils/markdown'
 import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
 import { findFileBlocks, getMainTextContent } from '@renderer/utils/messageUtils/find'
 import { containsSupportedVariables, replacePromptVariables } from '@renderer/utils/prompt'
-import { isEmpty, takeRight } from 'lodash'
+import { NOT_SUPPORT_API_KEY_PROVIDERS } from '@renderer/utils/provider'
+import { cloneDeep, isEmpty, takeRight } from 'lodash'

 import type { ModernAiProviderConfig } from '../aiCore/index_new'
 import AiProviderNew from '../aiCore/index_new'
@@ -43,6 +44,8 @@ import {
 // } from './MessagesService'
 // import WebSearchService from './WebSearchService'

+// FIXME: Too much duplicated logic here; needs refactoring
+
 const logger = loggerService.withContext('ApiService')

 export async function fetchMcpTools(assistant: Assistant) {
@@ -95,7 +98,15 @@ export async function fetchChatCompletion({
     modelId: assistant.model?.id,
     modelName: assistant.model?.name
   })
-  const AI = new AiProviderNew(assistant.model || getDefaultModel())
+  // Get base provider and apply API key rotation
+  const baseProvider = getProviderByModel(assistant.model || getDefaultModel())
+  const providerWithRotatedKey = {
+    ...cloneDeep(baseProvider),
+    apiKey: getRotatedApiKey(baseProvider)
+  }
+
+  const AI = new AiProviderNew(assistant.model || getDefaultModel(), providerWithRotatedKey)
   const provider = AI.getActualProvider()

   const mcpTools: MCPTool[] = []
@@ -172,7 +183,13 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
     return null
   }

-  const AI = new AiProviderNew(model)
+  // Apply API key rotation
+  const providerWithRotatedKey = {
+    ...cloneDeep(provider),
+    apiKey: getRotatedApiKey(provider)
+  }
+
+  const AI = new AiProviderNew(model, providerWithRotatedKey)

   const topicId = messages?.find((message) => message.topicId)?.topicId || ''

@@ -271,7 +288,13 @@ export async function fetchNoteSummary({ content, assistant }: { content: string
     return null
   }

-  const AI = new AiProviderNew(model)
+  // Apply API key rotation
+  const providerWithRotatedKey = {
+    ...cloneDeep(provider),
+    apiKey: getRotatedApiKey(provider)
+  }
+
+  const AI = new AiProviderNew(model, providerWithRotatedKey)

   // only 2000 char and no images
   const truncatedContent = content.substring(0, 2000)
@@ -359,7 +382,13 @@ export async function fetchGenerate({
     return ''
   }

-  const AI = new AiProviderNew(model)
+  // Apply API key rotation
+  const providerWithRotatedKey = {
+    ...cloneDeep(provider),
+    apiKey: getRotatedApiKey(provider)
+  }
+
+  const AI = new AiProviderNew(model, providerWithRotatedKey)

   const assistant = getDefaultAssistant()
   assistant.model = model
@@ -404,28 +433,44 @@

 export function hasApiKey(provider: Provider) {
   if (!provider) return false
-  if (['ollama', 'lmstudio', 'vertexai', 'cherryai'].includes(provider.id)) return true
+  if (isSystemProvider(provider) && NOT_SUPPORT_API_KEY_PROVIDERS.includes(provider.id)) return true
   return !isEmpty(provider.apiKey)
 }

 /**
- * Get the first available embedding model from enabled providers
+ * Gets the rotated API key
+ * Reuses the multi-key round-robin logic from the legacy architecture
  */
-// function getFirstEmbeddingModel() {
-//   const providers = store.getState().llm.providers.filter((p) => p.enabled)
+function getRotatedApiKey(provider: Provider): string {
+  const keys = provider.apiKey.split(',').map((key) => key.trim())
+  const keyName = `provider:${provider.id}:last_used_key`

-//   for (const provider of providers) {
-//     const embeddingModel = provider.models.find((model) => isEmbeddingModel(model))
-//     if (embeddingModel) {
-//       return embeddingModel
-//     }
-//   }
+  if (keys.length === 1) {
+    return keys[0]
+  }

-//   return undefined
-// }
+  const lastUsedKey = window.keyv.get(keyName)
+  if (!lastUsedKey) {
+    window.keyv.set(keyName, keys[0])
+    return keys[0]
+  }
+
+  const currentIndex = keys.indexOf(lastUsedKey)
+  const nextIndex = (currentIndex + 1) % keys.length
+  const nextKey = keys[nextIndex]
+  window.keyv.set(keyName, nextKey)
+
+  return nextKey
+}

 export async function fetchModels(provider: Provider): Promise<SdkModel[]> {
-  const AI = new AiProviderNew(provider)
+  // Apply API key rotation
+  const providerWithRotatedKey = {
+    ...cloneDeep(provider),
+    apiKey: getRotatedApiKey(provider)
+  }
+
+  const AI = new AiProviderNew(providerWithRotatedKey)

   try {
     return await AI.models()
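The new getRotatedApiKey above implements a simple round-robin over a comma-separated key list, persisting the last-used key per provider in window.keyv. A minimal sketch of the same selection logic, using an in-memory Map in place of window.keyv — the Map and pickNextApiKey are illustrative assumptions, not code from this PR:

// Round-robin over comma-separated API keys; the Map stands in for window.keyv.
const lastUsedKeyByProvider = new Map<string, string>()

function pickNextApiKey(providerId: string, apiKeyField: string): string {
  const keys = apiKeyField.split(',').map((k) => k.trim())
  if (keys.length === 1) return keys[0]

  const lastUsed = lastUsedKeyByProvider.get(providerId)
  // indexOf returns -1 when nothing was stored yet, so the first pick is keys[0].
  const nextKey = keys[(keys.indexOf(lastUsed ?? '') + 1) % keys.length]
  lastUsedKeyByProvider.set(providerId, nextKey)
  return nextKey
}

// pickNextApiKey('openai', 'k1, k2, k3') -> 'k1', then 'k2', then 'k3', then 'k1' again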
@@ -435,12 +480,7 @@ export async function fetchModels(provider: Provider): Promise<SdkModel[]> {
 }

 export function checkApiProvider(provider: Provider): void {
-  if (
-    provider.id !== 'ollama' &&
-    provider.id !== 'lmstudio' &&
-    provider.type !== 'vertexai' &&
-    provider.id !== 'copilot'
-  ) {
+  if (isSystemProvider(provider) && !NOT_SUPPORT_API_KEY_PROVIDERS.includes(provider.id)) {
     if (!provider.apiKey) {
       window.toast.error(i18n.t('message.error.enter.api.label'))
       throw new Error(i18n.t('message.error.enter.api.label'))
@@ -461,8 +501,7 @@ export function checkApiProvider(provider: Provider): void {
 export async function checkApi(provider: Provider, model: Model, timeout = 15000): Promise<void> {
   checkApiProvider(provider)

-  // Don't pass in provider parameter. We need auto-format URL
-  const ai = new AiProviderNew(model)
+  const ai = new AiProviderNew(model, provider)

   const assistant = getDefaultAssistant()
   assistant.model = model
@@ -476,7 +515,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
   } else {
     const abortId = uuid()
     const signal = readyToAbort(abortId)
-    let chunkError
+    let streamError: ResponseError | undefined
     const params: StreamTextParams = {
       system: assistant.prompt,
       prompt: 'hi',
@@ -495,19 +534,18 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
       callType: 'check',
       onChunk: (chunk: Chunk) => {
         if (chunk.type === ChunkType.ERROR) {
-          chunkError = chunk.error
+          streamError = chunk.error
         } else {
           abortCompletion(abortId)
         }
       }
     }

-    // Try streaming check
     try {
       await ai.completions(model.id, params, config)
     } catch (e) {
-      if (!isAbortError(e) && !isAbortError(chunkError)) {
-        throw e
+      if (!isAbortError(e) && !isAbortError(streamError)) {
+        throw streamError ?? e
       }
     }
   }
@@ -216,7 +216,7 @@ const assistantsSlice = createSlice({
       if (agent.id === action.payload.assistantId) {
         for (const key in settings) {
           if (!agent.settings) {
-            agent.settings = DEFAULT_ASSISTANT_SETTINGS
+            agent.settings = { ...DEFAULT_ASSISTANT_SETTINGS }
           }
           agent.settings[key] = settings[key]
         }
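The one-line change above swaps a direct assignment of DEFAULT_ASSISTANT_SETTINGS for a shallow copy. The likely motivation (an assumption, but a common pitfall): assigning the shared constant makes every agent alias the same object, so the subsequent `agent.settings[key] = settings[key]` write would mutate the defaults for everyone. A tiny standalone illustration with a hypothetical settings shape:

const DEFAULT_SETTINGS = { temperature: 1, topP: 1 } // hypothetical shape

// Direct assignment aliases the shared default object:
const agentA = { settings: DEFAULT_SETTINGS }
agentA.settings.temperature = 0.2
console.log(DEFAULT_SETTINGS.temperature) // 0.2 — the shared "default" was mutated

// Spreading creates a fresh copy, so per-agent edits stay local:
const agentB = { settings: { ...DEFAULT_SETTINGS } }
agentB.settings.topP = 0.9
console.log(DEFAULT_SETTINGS.topP) // still 1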
@@ -239,6 +239,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio

 export type ModelTag = Exclude<ModelType, 'text'> | 'free'

+// "image-generation" is also an openai endpoint, but specifically for image generation.
 export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'

 export type ModelPricing = {
@@ -234,6 +234,7 @@ export interface Response {
   error?: ResponseError
 }

+// FIXME: Weak type safety. It may be a specific class instance which inherits Error in runtime.
 export type ResponseError = Record<string, any>

 export interface MessageInputBaseParams {
@@ -7,11 +7,14 @@ import {
   formatApiKeys,
   formatAzureOpenAIApiHost,
   formatVertexApiHost,
+  getTrailingApiVersion,
   hasAPIVersion,
   maskApiKey,
   routeToEndpoint,
   splitApiKeyString,
-  validateApiHost
+  validateApiHost,
+  withoutTrailingApiVersion,
+  withoutTrailingSharp
 } from '../api'

 vi.mock('@renderer/store', () => {
@@ -79,6 +82,27 @@ describe('api', () => {
     it('keeps host untouched when api version unsupported', () => {
       expect(formatApiHost('https://api.example.com', false)).toBe('https://api.example.com')
     })
+
+    it('removes trailing # and does not append api version when host ends with #', () => {
+      expect(formatApiHost('https://api.example.com#')).toBe('https://api.example.com')
+      expect(formatApiHost('http://localhost:5173/#')).toBe('http://localhost:5173/')
+      expect(formatApiHost(' https://api.openai.com/# ')).toBe('https://api.openai.com/')
+    })
+
+    it('handles trailing # with custom api version settings', () => {
+      expect(formatApiHost('https://api.example.com#', true, 'v2')).toBe('https://api.example.com')
+      expect(formatApiHost('https://api.example.com#', false, 'v2')).toBe('https://api.example.com')
+    })
+
+    it('handles host with both trailing # and existing api version', () => {
+      expect(formatApiHost('https://api.example.com/v2#')).toBe('https://api.example.com/v2')
+      expect(formatApiHost('https://api.example.com/v3beta#')).toBe('https://api.example.com/v3beta')
+    })
+
+    it('trims whitespace before processing trailing #', () => {
+      expect(formatApiHost(' https://api.example.com# ')).toBe('https://api.example.com')
+      expect(formatApiHost('\thttps://api.example.com#\n')).toBe('https://api.example.com')
+    })
   })

   describe('hasAPIVersion', () => {
@@ -316,4 +340,142 @@ describe('api', () => {
       )
     })
   })
+
+  describe('getTrailingApiVersion', () => {
+    it('extracts trailing API version from URL', () => {
+      expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1')
+      expect(getTrailingApiVersion('https://api.example.com/v2')).toBe('v2')
+    })
+
+    it('extracts trailing API version with alpha/beta suffix', () => {
+      expect(getTrailingApiVersion('https://api.example.com/v2alpha')).toBe('v2alpha')
+      expect(getTrailingApiVersion('https://api.example.com/v3beta')).toBe('v3beta')
+    })
+
+    it('extracts trailing API version with trailing slash', () => {
+      expect(getTrailingApiVersion('https://api.example.com/v1/')).toBe('v1')
+      expect(getTrailingApiVersion('https://api.example.com/v2beta/')).toBe('v2beta')
+    })
+
+    it('returns undefined when API version is in the middle of path', () => {
+      expect(getTrailingApiVersion('https://api.example.com/v1/chat')).toBeUndefined()
+      expect(getTrailingApiVersion('https://api.example.com/v1/completions')).toBeUndefined()
+    })
+
+    it('returns undefined when no trailing version exists', () => {
+      expect(getTrailingApiVersion('https://api.example.com')).toBeUndefined()
+      expect(getTrailingApiVersion('https://api.example.com/api')).toBeUndefined()
+    })
+
+    it('extracts trailing version from complex URLs', () => {
+      expect(getTrailingApiVersion('https://api.example.com/service/v1')).toBe('v1')
+      expect(getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/google-ai-studio/v1beta')).toBe('v1beta')
+    })
+
+    it('only extracts the trailing version when multiple versions exist', () => {
+      expect(getTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe('v2')
+      expect(
+        getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxxxxx/google-ai-studio/google-ai-studio/v1beta')
+      ).toBe('v1beta')
+    })
+
+    it('returns undefined for empty string', () => {
+      expect(getTrailingApiVersion('')).toBeUndefined()
+    })
+  })
+
+  describe('withoutTrailingApiVersion', () => {
+    it('removes trailing API version from URL', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/v1')).toBe('https://api.example.com')
+      expect(withoutTrailingApiVersion('https://api.example.com/v2')).toBe('https://api.example.com')
+    })
+
+    it('removes trailing API version with alpha/beta suffix', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/v2alpha')).toBe('https://api.example.com')
+      expect(withoutTrailingApiVersion('https://api.example.com/v3beta')).toBe('https://api.example.com')
+    })
+
+    it('removes trailing API version with trailing slash', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/v1/')).toBe('https://api.example.com')
+      expect(withoutTrailingApiVersion('https://api.example.com/v2beta/')).toBe('https://api.example.com')
+    })
+
+    it('does not remove API version in the middle of path', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/v1/chat')).toBe('https://api.example.com/v1/chat')
+      expect(withoutTrailingApiVersion('https://api.example.com/v1/completions')).toBe(
+        'https://api.example.com/v1/completions'
+      )
+    })
+
+    it('returns URL unchanged when no trailing version exists', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com')).toBe('https://api.example.com')
+      expect(withoutTrailingApiVersion('https://api.example.com/api')).toBe('https://api.example.com/api')
+    })
+
+    it('handles complex URLs with version at the end', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/service/v1')).toBe('https://api.example.com/service')
+    })
+
+    it('handles URLs with multiple versions but only removes the trailing one', () => {
+      expect(withoutTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe(
+        'https://api.example.com/v1/service'
+      )
+    })
+
+    it('returns empty string unchanged', () => {
+      expect(withoutTrailingApiVersion('')).toBe('')
+    })
+  })
+
+  describe('withoutTrailingSharp', () => {
+    it('removes trailing # from URL', () => {
+      expect(withoutTrailingSharp('https://api.example.com#')).toBe('https://api.example.com')
+      expect(withoutTrailingSharp('http://localhost:3000#')).toBe('http://localhost:3000')
+    })
+
+    it('returns URL unchanged when no trailing #', () => {
+      expect(withoutTrailingSharp('https://api.example.com')).toBe('https://api.example.com')
+      expect(withoutTrailingSharp('http://localhost:3000')).toBe('http://localhost:3000')
+    })
+
+    it('handles URLs with multiple # characters but only removes trailing one', () => {
+      expect(withoutTrailingSharp('https://api.example.com#path#')).toBe('https://api.example.com#path')
+    })
+
+    it('handles URLs with # in the middle (not trailing)', () => {
+      expect(withoutTrailingSharp('https://api.example.com#section/path')).toBe('https://api.example.com#section/path')
+      expect(withoutTrailingSharp('https://api.example.com/v1/chat/completions#')).toBe(
+        'https://api.example.com/v1/chat/completions'
+      )
+    })
+
+    it('handles empty string', () => {
+      expect(withoutTrailingSharp('')).toBe('')
+    })
+
+    it('handles single character #', () => {
+      expect(withoutTrailingSharp('#')).toBe('')
+    })
+
+    it('preserves whitespace around the URL (pure function)', () => {
+      expect(withoutTrailingSharp(' https://api.example.com# ')).toBe(' https://api.example.com# ')
+      expect(withoutTrailingSharp('\thttps://api.example.com#\n')).toBe('\thttps://api.example.com#\n')
+    })
+
+    it('only removes exact trailing # character', () => {
+      expect(withoutTrailingSharp('https://api.example.com# ')).toBe('https://api.example.com# ')
+      expect(withoutTrailingSharp(' https://api.example.com#')).toBe(' https://api.example.com')
+      expect(withoutTrailingSharp('https://api.example.com#\t')).toBe('https://api.example.com#\t')
+    })
+
+    it('handles URLs ending with multiple # characters', () => {
+      expect(withoutTrailingSharp('https://api.example.com##')).toBe('https://api.example.com#')
+      expect(withoutTrailingSharp('https://api.example.com###')).toBe('https://api.example.com##')
+    })
+
+    it('preserves URL with trailing # and other content', () => {
+      expect(withoutTrailingSharp('https://api.example.com/v1#')).toBe('https://api.example.com/v1')
+      expect(withoutTrailingSharp('https://api.example.com/v2beta#')).toBe('https://api.example.com/v2beta')
+    })
+  })
 })
@@ -12,6 +12,19 @@ export function formatApiKeys(value: string): string {
   return value.replaceAll(',', ',').replaceAll('\n', ',')
 }

+/**
+ * Matches a version segment in a path that starts with `/v<number>` and optionally
+ * continues with `alpha` or `beta`. The segment may be followed by `/` or the end
+ * of the string (useful for cases like `/v3alpha/resources`).
+ */
+const VERSION_REGEX_PATTERN = '\\/v\\d+(?:alpha|beta)?(?=\\/|$)'
+
+/**
+ * Matches an API version at the end of a URL (with optional trailing slash).
+ * Used to detect and extract versions only from the trailing position.
+ */
+const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i
+
 /**
  * Determines whether the host's path contains a version-like segment (e.g. /v1, /v2beta),
  *
@@ -21,16 +34,14 @@ export function formatApiKeys(value: string): string {
 export function hasAPIVersion(host?: string): boolean {
   if (!host) return false

-  // Match a path segment starting with `/v<number>`, optionally followed by `alpha` or `beta`;
-  // the segment may be followed by `/` or the end of the string (e.g. `/v3alpha/resources`).
-  const versionRegex = /\/v\d+(?:alpha|beta)?(?=\/|$)/i
+  const regex = new RegExp(VERSION_REGEX_PATTERN, 'i')

   try {
     const url = new URL(host)
-    return versionRegex.test(url.pathname)
+    return regex.test(url.pathname)
   } catch {
     // If it cannot be parsed as a full URL, test it directly as a path
-    return versionRegex.test(host)
+    return regex.test(host)
   }
 }

@@ -51,32 +62,52 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
   return url.replace(/\/$/, '') as T
 }

+/**
+ * Removes the trailing '#' from a URL string if it exists.
+ *
+ * @template T - The string type to preserve type safety
+ * @param {T} url - The URL string to process
+ * @returns {T} The URL string without a trailing '#'
+ *
+ * @example
+ * ```ts
+ * withoutTrailingSharp('https://example.com#') // 'https://example.com'
+ * withoutTrailingSharp('https://example.com') // 'https://example.com'
+ * ```
+ */
+export function withoutTrailingSharp<T extends string>(url: T): T {
+  return url.replace(/#$/, '') as T
+}
+
 /**
  * Formats an API host URL by normalizing it and optionally appending an API version.
  *
  * @param host - The API host URL to format. Leading/trailing whitespace will be trimmed and trailing slashes removed.
- * @param isSupportedAPIVerion - Whether the API version is supported. Defaults to `true`.
+ * @param supportApiVersion - Whether the API version is supported. Defaults to `true`.
  * @param apiVersion - The API version to append if needed. Defaults to `'v1'`.
  *
  * @returns The formatted API host URL. If the host is empty after normalization, returns an empty string.
- * If the host ends with '#', API version is not supported, or the host already contains a version, returns the normalized host as-is.
+ * If the host ends with '#', API version is not supported, or the host already contains a version, returns the normalized host with trailing '#' removed.
  * Otherwise, returns the host with the API version appended.
  *
  * @example
  * formatApiHost('https://api.example.com/') // Returns 'https://api.example.com/v1'
- * formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#'
+ * formatApiHost('https://api.example.com#') // Returns 'https://api.example.com'
 * formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2'
 */
-export function formatApiHost(host?: string, isSupportedAPIVerion: boolean = true, apiVersion: string = 'v1'): string {
+export function formatApiHost(host?: string, supportApiVersion: boolean = true, apiVersion: string = 'v1'): string {
   const normalizedHost = withoutTrailingSlash(trim(host))
   if (!normalizedHost) {
     return ''
   }

-  if (normalizedHost.endsWith('#') || !isSupportedAPIVerion || hasAPIVersion(normalizedHost)) {
-    return normalizedHost
+  const shouldAppendApiVersion = !(normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost))
+
+  if (shouldAppendApiVersion) {
+    return `${normalizedHost}/${apiVersion}`
+  } else {
+    return withoutTrailingSharp(normalizedHost)
   }
-  return `${normalizedHost}/${apiVersion}`
 }

 /**
@@ -213,3 +244,50 @@ export function splitApiKeyString(keyStr: string): string[] {
     .map((k) => k.replace(/\\,/g, ','))
     .filter((k) => k)
 }
+
+/**
+ * Extracts the trailing API version segment from a URL path.
+ *
+ * This function extracts API version patterns (e.g., `v1`, `v2beta`) from the end of a URL.
+ * Only versions at the end of the path are extracted, not versions in the middle.
+ * The returned version string does not include leading or trailing slashes.
+ *
+ * @param {string} url - The URL string to parse.
+ * @returns {string | undefined} The trailing API version found (e.g., 'v1', 'v2beta'), or undefined if none found.
+ *
+ * @example
+ * getTrailingApiVersion('https://api.example.com/v1') // 'v1'
+ * getTrailingApiVersion('https://api.example.com/v2beta/') // 'v2beta'
+ * getTrailingApiVersion('https://api.example.com/v1/chat') // undefined (version not at end)
+ * getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/v1beta') // 'v1beta'
+ * getTrailingApiVersion('https://api.example.com') // undefined
+ */
+export function getTrailingApiVersion(url: string): string | undefined {
+  const match = url.match(TRAILING_VERSION_REGEX)
+
+  if (match) {
+    // Extract version without leading slash and trailing slash
+    return match[0].replace(/^\//, '').replace(/\/$/, '')
+  }
+
+  return undefined
+}
+
+/**
+ * Removes the trailing API version segment from a URL path.
+ *
+ * This function removes API version patterns (e.g., `/v1`, `/v2beta`) from the end of a URL.
+ * Only versions at the end of the path are removed, not versions in the middle.
+ *
+ * @param {string} url - The URL string to process.
+ * @returns {string} The URL with the trailing API version removed, or the original URL if no trailing version found.
+ *
+ * @example
+ * withoutTrailingApiVersion('https://api.example.com/v1') // 'https://api.example.com'
+ * withoutTrailingApiVersion('https://api.example.com/v2beta/') // 'https://api.example.com'
+ * withoutTrailingApiVersion('https://api.example.com/v1/chat') // 'https://api.example.com/v1/chat' (no change)
+ * withoutTrailingApiVersion('https://api.example.com') // 'https://api.example.com'
+ */
+export function withoutTrailingApiVersion(url: string): string {
+  return url.replace(TRAILING_VERSION_REGEX, '')
+}
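Taken together with the tests earlier in this diff, the helpers above give the following behavior (a compact usage summary; the import path is an assumption for illustration):

import { formatApiHost, getTrailingApiVersion, withoutTrailingApiVersion, withoutTrailingSharp } from '@renderer/utils/api' // path assumed

formatApiHost('https://api.example.com')     // 'https://api.example.com/v1'  (default version appended)
formatApiHost('https://api.example.com#')    // 'https://api.example.com'     (trailing '#': use host as-is)
formatApiHost('https://api.example.com/v2')  // 'https://api.example.com/v2'  (already versioned)
getTrailingApiVersion('https://x.example/v1beta')  // 'v1beta'
withoutTrailingApiVersion('https://x.example/v1')  // 'https://x.example'
withoutTrailingSharp('https://x.example#')         // 'https://x.example'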
@@ -1,3 +1,15 @@
+import { loggerService } from '@logger'
+
+const logger = loggerService.withContext('utils/dom')
+
+interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
+  /**
+   * @see https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView#container
+   * @see https://github.com/microsoft/TypeScript/issues/62803
+   */
+  container?: 'all' | 'nearest'
+}
+
 /**
  * Simple wrapper for scrollIntoView with common default options.
  * Provides a unified interface with sensible defaults.
@@ -5,7 +17,12 @@
  * @param element - The target element to scroll into view
  * @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' }
  */
-export function scrollIntoView(element: HTMLElement, options?: ScrollIntoViewOptions): void {
+export function scrollIntoView(element: HTMLElement, options?: ChromiumScrollIntoViewOptions): void {
+  if (!element) {
+    logger.warn('[scrollIntoView] Unexpected falsy element. Do nothing as fallback.')
+    return
+  }
+
   const defaultOptions: ScrollIntoViewOptions = {
     behavior: 'smooth',
     block: 'center',
@@ -566,3 +566,54 @@ export const makeSvgSizeAdaptive = (element: Element): Element => {

   return element
 }
+
+/**
+ * Converts an image Blob into a PNG-format Blob
+ * @param blob The original image Blob
+ * @returns Promise<Blob> The converted PNG Blob
+ */
+export const convertImageToPng = async (blob: Blob): Promise<Blob> => {
+  if (blob.type === 'image/png') {
+    return blob
+  }
+
+  return new Promise((resolve, reject) => {
+    const img = new Image()
+    const url = URL.createObjectURL(blob)
+
+    img.onload = () => {
+      try {
+        const canvas = document.createElement('canvas')
+        canvas.width = img.width
+        canvas.height = img.height
+        const ctx = canvas.getContext('2d')
+
+        if (!ctx) {
+          URL.revokeObjectURL(url)
+          reject(new Error('Failed to get canvas context'))
+          return
+        }
+
+        ctx.drawImage(img, 0, 0)
+        canvas.toBlob((pngBlob) => {
+          URL.revokeObjectURL(url)
+          if (pngBlob) {
+            resolve(pngBlob)
+          } else {
+            reject(new Error('Failed to convert image to png'))
+          }
+        }, 'image/png')
+      } catch (error) {
+        URL.revokeObjectURL(url)
+        reject(error)
+      }
+    }
+
+    img.onerror = () => {
+      URL.revokeObjectURL(url)
+      reject(new Error('Failed to load image for conversion'))
+    }
+
+    img.src = url
+  })
+}
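A hypothetical usage of the new convertImageToPng (not part of this PR): normalizing an arbitrary image blob to PNG before handing it to an API that only accepts PNG, such as building a ClipboardItem.

// Illustrative only — the fetch source and clipboard write are assumptions, not code from this diff.
async function copyImageAsPng(imageUrl: string): Promise<void> {
  const source = await fetch(imageUrl).then((res) => res.blob()) // may be JPEG/WebP/…
  const png = await convertImageToPng(source) // helper added above; passes PNG through unchanged
  await navigator.clipboard.write([new ClipboardItem({ 'image/png': png })])
}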
@@ -90,7 +90,8 @@ export function openAIToolsToMcpTool(
     return undefined
   }
   const tools = mcpTools.filter((mcpTool) => {
-    return mcpTool.id === toolName || mcpTool.name === toolName
+    // toolName is mcpTool.id (registered with id as function name)
+    return mcpTool.id === toolName
   })
   if (tools.length > 1) {
     logger.warn(`Multiple MCP Tools found for tool call: ${toolName}`)
@@ -71,15 +71,21 @@ export const isSupportEnableThinkingProvider = (provider: Provider) => {
   )
 }

-const NOT_SUPPORT_SERVICE_TIER_PROVIDERS = ['github', 'copilot', 'cerebras'] as const satisfies SystemProviderId[]
+const SUPPORT_SERVICE_TIER_PROVIDERS = [
+  SystemProviderIds.openai,
+  SystemProviderIds['azure-openai'],
+  SystemProviderIds.groq
+  // TODO: waiting for upstream support for aws-bedrock
+]

 /**
- * Determines whether the provider supports the service_tier setting. Only for OpenAI API.
+ * Determines whether the provider supports the service_tier setting
  */
 export const isSupportServiceTierProvider = (provider: Provider) => {
   return (
     provider.apiOptions?.isSupportServiceTier === true ||
-    (isSystemProvider(provider) && !NOT_SUPPORT_SERVICE_TIER_PROVIDERS.some((pid) => pid === provider.id))
+    provider.type === 'azure-openai' ||
+    (isSystemProvider(provider) && SUPPORT_SERVICE_TIER_PROVIDERS.some((pid) => pid === provider.id))
   )
 }

@@ -102,6 +108,7 @@ const SUPPORT_URL_CONTEXT_PROVIDER_TYPES = [
   'gemini',
   'vertexai',
   'anthropic',
+  'azure-openai',
   'new-api'
 ] as const satisfies ProviderType[]

@@ -176,3 +183,11 @@ export const isSupportAPIVersionProvider = (provider: Provider) => {
   }
   return provider.apiOptions?.isNotSupportAPIVersion !== false
 }
+
+export const NOT_SUPPORT_API_KEY_PROVIDERS: readonly SystemProviderId[] = [
+  'ollama',
+  'lmstudio',
+  'vertexai',
+  'aws-bedrock',
+  'copilot'
+]
Some files were not shown because too many files have changed in this diff.