Compare commits

..

1 Commits

Author SHA1 Message Date
suyao
22ed2b605e feat: Add MCP UI Demo server and integrate UI rendering capabilities 2025-11-24 12:41:03 +08:00
212 changed files with 2699 additions and 6811 deletions

View File

@@ -11,7 +11,6 @@
"dist/**",
"out/**",
"local/**",
"tests/**",
".yarn/**",
".gitignore",
"scripts/cloudflare-worker.js",

View File

@@ -1,8 +1,8 @@
diff --git a/dist/index.js b/dist/index.js
index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
index dc7b74ba55337c491cdf1ab3e39ca68cc4187884..ace8c90591288e42c2957e93c9bf7984f1b22444 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -472,7 +472,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
@@ -12,10 +12,10 @@ index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
index 8390439c38cb7eaeb52080862cd6f4c58509e67c..a7647f2e11700dff7e1c8d4ae8f99d3637010733 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -478,7 +478,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {

View File

@@ -1,5 +1,5 @@
diff --git a/dist/index.js b/dist/index.js
index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
index 7481f3b3511078068d87d03855b568b20bb86971..8ac5ec28d2f7ad1b3b0d3f8da945c75674e59637 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(

View File

@@ -1,8 +1,8 @@
diff --git a/sdk.mjs b/sdk.mjs
index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d02dcc628f 100755
index 8cc6aaf0b25bcdf3c579ec95cde12d419fcb2a71..3b3b8beaea5ad2bbac26a15f792058306d0b059f 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
@@ -6213,7 +6213,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
}
// ../src/transport/ProcessTransport.ts
@@ -11,20 +11,16 @@ index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d0
import { createInterface } from "readline";
// ../src/utils/fsOperations.ts
@@ -6619,18 +6619,11 @@ class ProcessTransport {
@@ -6505,14 +6505,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}
- const isNative = isNativeBinary(pathToClaudeCodeExecutable);
- const spawnCommand = isNative ? pathToClaudeCodeExecutable : executable;
- const spawnArgs = isNative ? [...executableArgs, ...args] : [...executableArgs, pathToClaudeCodeExecutable, ...args];
- const spawnMessage = isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`;
- logForSdkDebugging(spawnMessage);
- if (stderr) {
- stderr(spawnMessage);
- }
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || stderr ? "pipe" : "ignore";
- this.logForDebugging(isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`);
+ this.logForDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
const stderrMode = env.DEBUG || stderr ? "pipe" : "ignore";
- this.child = spawn(spawnCommand, spawnArgs, {
+ this.child = fork(pathToClaudeCodeExecutable, args, {
cwd,

View File

@@ -10,18 +10,8 @@ This file provides guidance to AI coding assistants when working with code in th
- **Log centrally**: Route all logging through `loggerService` with the right context—no `console.log`.
- **Research via subagent**: Lean on `subagent` for external docs, APIs, news, and references.
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
## Pull Request Workflow (CRITICAL)
When creating a Pull Request, you MUST:
1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
## Development Commands
- **Install**: `yarn install` - Install all project dependencies

View File

@@ -1,4 +1,4 @@
[中文](docs/zh/guides/contributing.md) | [English](CONTRIBUTING.md)
[中文](docs/CONTRIBUTING.zh.md) | [English](CONTRIBUTING.md)
# Cherry Studio Contributor Guide
@@ -32,7 +32,7 @@ To help you get familiar with the codebase, we recommend tackling issues tagged
### Testing
Features without tests are considered non-existent. To ensure code is truly effective, relevant processes should be covered by unit tests and functional tests. Therefore, when considering contributions, please also consider testability. All tests can be run locally without dependency on CI. Please refer to the "Testing" section in the [Developer Guide](docs/zh/guides/development.md).
Features without tests are considered non-existent. To ensure code is truly effective, relevant processes should be covered by unit tests and functional tests. Therefore, when considering contributions, please also consider testability. All tests can be run locally without dependency on CI. Please refer to the "Testing" section in the [Developer Guide](docs/dev.md).
### Automated Testing for Pull Requests
@@ -60,7 +60,7 @@ Maintainers are here to help you implement your use case within a reasonable tim
### Participating in the Test Plan
The Test Plan aims to provide users with a more stable application experience and faster iteration speed. For details, please refer to the [Test Plan](docs/en/guides/test-plan.md).
The Test Plan aims to provide users with a more stable application experience and faster iteration speed. For details, please refer to the [Test Plan](docs/testplan-en.md).
### Other Suggestions

View File

@@ -34,7 +34,7 @@
</a>
</h1>
<p align="center">English | <a href="./docs/zh/README.md">中文</a> | <a href="https://cherry-ai.com">Official Site</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/en-us">Documents</a> | <a href="./docs/en/guides/development.md">Development</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">Feedback</a><br></p>
<p align="center">English | <a href="./docs/README.zh.md">中文</a> | <a href="https://cherry-ai.com">Official Site</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/en-us">Documents</a> | <a href="./docs/dev.md">Development</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">Feedback</a><br></p>
<div align="center">
@@ -67,7 +67,7 @@ Cherry Studio is a desktop client that supports multiple LLM providers, availabl
👏 Join [Telegram Group](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ Group(575014769)](https://qm.qq.com/q/lo0D4qVZKi)
❤️ Like Cherry Studio? Give it a star 🌟 or [Sponsor](docs/zh/guides/sponsor.md) to support the development!
❤️ Like Cherry Studio? Give it a star 🌟 or [Sponsor](docs/sponsor.md) to support the development!
# 🌠 Screenshot
@@ -175,7 +175,7 @@ We welcome contributions to Cherry Studio! Here are some ways you can contribute
6. **Community Engagement**: Join discussions and help users.
7. **Promote Usage**: Spread the word about Cherry Studio.
Refer to the [Branching Strategy](docs/en/guides/branching-strategy.md) for contribution guidelines
Refer to the [Branching Strategy](docs/branching-strategy-en.md) for contribution guidelines
## Getting Started

View File

@@ -1,6 +1,6 @@
# Cherry Studio 贡献者指南
[**English**](../../../CONTRIBUTING.md) | **中文**
[**English**](../CONTRIBUTING.md) | [**中文**](CONTRIBUTING.zh.md)
欢迎来到 Cherry Studio 的贡献者社区!我们致力于将 Cherry Studio 打造成一个长期提供价值的项目,并希望邀请更多的开发者加入我们的行列。无论您是经验丰富的开发者还是刚刚起步的初学者,您的贡献都将帮助我们更好地服务用户,提升软件质量。
@@ -24,7 +24,7 @@
## 开始之前
请确保阅读了[行为准则](../../../CODE_OF_CONDUCT.md)和[LICENSE](../../../LICENSE)。
请确保阅读了[行为准则](../CODE_OF_CONDUCT.md)和[LICENSE](../LICENSE)。
## 开始贡献
@@ -32,7 +32,7 @@
### 测试
未经测试的功能等同于不存在。为确保代码真正有效,应通过单元测试和功能测试覆盖相关流程。因此,在考虑贡献时,也请考虑可测试性。所有测试均可本地运行,无需依赖 CI。请参阅[开发者指南](./development.md#test)中的"Test"部分。
未经测试的功能等同于不存在。为确保代码真正有效,应通过单元测试和功能测试覆盖相关流程。因此,在考虑贡献时,也请考虑可测试性。所有测试均可本地运行,无需依赖 CI。请参阅[开发者指南](dev.md#test)中的Test部分。
### 拉取请求的自动化测试
@@ -60,11 +60,11 @@ git commit --signoff -m "Your commit message"
### 获取代码审查/合并
维护者在此帮助您在合理时间内实现您的用例。他们会尽力在合理时间内审查您的代码并提供建设性反馈。但如果您在审查过程中受阻,或认为您的 Pull Request 未得到应有的关注,请通过 Issue 中的评论或者[社群](../README.md#-community)联系我们
维护者在此帮助您在合理时间内实现您的用例。他们会尽力在合理时间内审查您的代码并提供建设性反馈。但如果您在审查过程中受阻,或认为您的 Pull Request 未得到应有的关注,请通过 Issue 中的评论或者[社群](README.zh.md#-community)联系我们
### 参与测试计划
测试计划旨在为用户提供更稳定的应用体验和更快的迭代速度,详细情况请参阅[测试计划](./test-plan.md)。
测试计划旨在为用户提供更稳定的应用体验和更快的迭代速度,详细情况请参阅[测试计划](testplan-zh.md)。
### 其他建议

View File

@@ -1,81 +0,0 @@
# Cherry Studio Documentation / 文档
This directory contains the project documentation in multiple languages.
本目录包含多语言项目文档。
---
## Languages / 语言
- **[中文文档](./zh/README.md)** - Chinese Documentation
- **English Documentation** - See sections below
---
## English Documentation
### Guides
| Document | Description |
|----------|-------------|
| [Development Setup](./en/guides/development.md) | Development environment setup |
| [Branching Strategy](./en/guides/branching-strategy.md) | Git branching workflow |
| [i18n Guide](./en/guides/i18n.md) | Internationalization guide |
| [Logging Guide](./en/guides/logging.md) | How to use the logger service |
| [Test Plan](./en/guides/test-plan.md) | Test plan and release channels |
### References
| Document | Description |
|----------|-------------|
| [App Upgrade Config](./en/references/app-upgrade.md) | Application upgrade configuration |
| [CodeBlockView Component](./en/references/components/code-block-view.md) | Code block view component |
| [Image Preview Components](./en/references/components/image-preview.md) | Image preview components |
---
## 中文文档
### 指南 (Guides)
| 文档 | 说明 |
|------|------|
| [开发环境设置](./zh/guides/development.md) | 开发环境配置 |
| [贡献指南](./zh/guides/contributing.md) | 如何贡献代码 |
| [分支策略](./zh/guides/branching-strategy.md) | Git 分支工作流 |
| [测试计划](./zh/guides/test-plan.md) | 测试计划和发布通道 |
| [国际化指南](./zh/guides/i18n.md) | 国际化开发指南 |
| [日志使用指南](./zh/guides/logging.md) | 如何使用日志服务 |
| [中间件开发](./zh/guides/middleware.md) | 如何编写中间件 |
| [记忆功能](./zh/guides/memory.md) | 记忆功能使用指南 |
| [赞助信息](./zh/guides/sponsor.md) | 赞助相关信息 |
### 参考 (References)
| 文档 | 说明 |
|------|------|
| [消息系统](./zh/references/message-system.md) | 消息系统架构和 API |
| [数据库结构](./zh/references/database.md) | 数据库表结构 |
| [服务](./zh/references/services.md) | 服务层文档 (KnowledgeService) |
| [代码执行](./zh/references/code-execution.md) | 代码执行功能 |
| [应用升级配置](./zh/references/app-upgrade.md) | 应用升级配置 |
| [CodeBlockView 组件](./zh/references/components/code-block-view.md) | 代码块视图组件 |
| [图像预览组件](./zh/references/components/image-preview.md) | 图像预览组件 |
---
## Missing Translations / 缺少翻译
The following documents are only available in Chinese and need English translations:
以下文档仅有中文版本,需要英文翻译:
- `guides/contributing.md`
- `guides/memory.md`
- `guides/middleware.md`
- `guides/sponsor.md`
- `references/message-system.md`
- `references/database.md`
- `references/services.md`
- `references/code-execution.md`

View File

@@ -34,7 +34,7 @@
</a>
</h1>
<p align="center">
<a href="https://github.com/CherryHQ/cherry-studio">English</a> | 中文 | <a href="https://cherry-ai.com">官方网站</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/zh-cn">文档</a> | <a href="./guides/development.md">开发</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">反馈</a><br>
<a href="https://github.com/CherryHQ/cherry-studio">English</a> | 中文 | <a href="https://cherry-ai.com">官方网站</a> | <a href="https://docs.cherry-ai.com/cherry-studio-wen-dang/zh-cn">文档</a> | <a href="./dev.md">开发</a> | <a href="https://github.com/CherryHQ/cherry-studio/issues">反馈</a><br>
</p>
<!-- 题头徽章组合 -->
@@ -70,7 +70,7 @@ Cherry Studio 是一款支持多个大语言模型LLM服务商的桌面客
👏 欢迎加入 [Telegram 群组](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ群(575014769)](https://qm.qq.com/q/lo0D4qVZKi)
❤️ 喜欢 Cherry Studio? 点亮小星星 🌟 或 [赞助开发者](./guides/sponsor.md)! ❤️
❤️ 喜欢 Cherry Studio? 点亮小星星 🌟 或 [赞助开发者](sponsor.md)! ❤️
# 📖 使用教程
@@ -181,7 +181,7 @@ https://docs.cherry-ai.com
6. **社区参与**:加入讨论并帮助用户
7. **推广使用**:宣传 Cherry Studio
参考[分支策略](./guides/branching-strategy.md)了解贡献指南
参考[分支策略](branching-strategy-zh.md)了解贡献指南
## 入门
@@ -190,7 +190,7 @@ https://docs.cherry-ai.com
3. **提交更改**:提交并推送您的更改
4. **打开 Pull Request**:描述您的更改和原因
有关更详细的指南,请参阅我们的 [贡献指南](./guides/contributing.md)
有关更详细的指南,请参阅我们的 [贡献指南](CONTRIBUTING.zh.md)
感谢您的支持和贡献!

View File

@@ -16,7 +16,7 @@ Cherry Studio implements a structured branching strategy to maintain code qualit
- Only accepts documentation updates and bug fixes
- Thoroughly tested before production deployment
For details about the `testplan` branch used in the Test Plan, please refer to the [Test Plan](./test-plan.md).
For details about the `testplan` branch used in the Test Plan, please refer to the [Test Plan](testplan-en.md).
## Contributing Branches

View File

@@ -16,7 +16,7 @@ Cherry Studio 采用结构化的分支策略来维护代码质量并简化开发
- 只接受文档更新和 bug 修复
- 经过完整测试后可以发布到生产环境
关于测试计划所使用的`testplan`分支,请查阅[测试计划](./test-plan.md)。
关于测试计划所使用的`testplan`分支,请查阅[测试计划](testplan-zh.md)。
## 贡献分支

View File

Before

Width:  |  Height:  |  Size: 150 KiB

After

Width:  |  Height:  |  Size: 150 KiB

View File

Before

Width:  |  Height:  |  Size: 40 KiB

After

Width:  |  Height:  |  Size: 40 KiB

View File

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

View File

@@ -85,7 +85,7 @@ Main responsibilities:
- **SvgPreview**: SVG image preview
- **GraphvizPreview**: Graphviz diagram preview
All special view components share a common architecture for consistent user experience and functionality. For detailed information about these components and their implementation, see [Image Preview Components Documentation](./image-preview.md).
All special view components share a common architecture for consistent user experience and functionality. For detailed information about these components and their implementation, see [Image Preview Components Documentation](./ImagePreview-en.md).
#### StatusBar

View File

@@ -85,7 +85,7 @@ graph TD
- **SvgPreview**: SVG 图像预览
- **GraphvizPreview**: Graphviz 图表预览
所有特殊视图组件共享通用架构,以确保一致的用户体验和功能。有关这些组件及其实现的详细信息,请参阅[图像预览组件文档](./image-preview.md)。
所有特殊视图组件共享通用架构,以确保一致的用户体验和功能。有关这些组件及其实现的详细信息,请参阅 [图像预览组件文档](./ImagePreview-zh.md)。
#### StatusBar 状态栏

View File

@@ -192,4 +192,4 @@ Image Preview Components integrate seamlessly with CodeBlockView:
- Shared state management
- Responsive layout adaptation
For more information about the overall CodeBlockView architecture, see [CodeBlockView Documentation](./code-block-view.md).
For more information about the overall CodeBlockView architecture, see [CodeBlockView Documentation](./CodeBlockView-en.md).

View File

@@ -192,4 +192,4 @@ const { containerRef, error, isLoading, triggerRender, cancelRender, clearError,
- 共享状态管理
- 响应式布局适应
有关整体 CodeBlockView 架构的更多信息,请参阅 [CodeBlockView 文档](./code-block-view.md)。
有关整体 CodeBlockView 架构的更多信息,请参阅 [CodeBlockView 文档](./CodeBlockView-zh.md)。

View File

@@ -0,0 +1,3 @@
# 消息的生命周期
![image](./message-lifecycle.png)

View File

@@ -0,0 +1,11 @@
# 数据库设置字段
此文档包含部分字段的数据类型说明。
## 字段
| 字段名 | 类型 | 说明 |
| ------------------------------ | ------------------------------ | ------------ |
| `translate:target:language` | `LanguageCode` | 翻译目标语言 |
| `translate:source:language` | `LanguageCode` | 翻译源语言 |
| `translate:bidirectional:pair` | `[LanguageCode, LanguageCode]` | 双向翻译对 |

View File

@@ -1,24 +1,6 @@
# 数据库参考文档
# `translate_languages` 表技术文档
本文档介绍 Cherry Studio 的数据库结构,包括设置字段和翻译语言表。
---
## 设置字段 (settings)
此部分包含设置相关字段的数据类型说明。
### 翻译相关字段
| 字段名 | 类型 | 说明 |
| ------------------------------ | ------------------------------ | ------------ |
| `translate:target:language` | `LanguageCode` | 翻译目标语言 |
| `translate:source:language` | `LanguageCode` | 翻译源语言 |
| `translate:bidirectional:pair` | `[LanguageCode, LanguageCode]` | 双向翻译对 |
---
## 翻译语言表 (translate_languages)
## 📄 概述
`translate_languages` 记录用户自定义的的语言类型(`Language`)。

View File

@@ -18,11 +18,11 @@ The plugin has already been configured in the project — simply install it to g
### Demo
![demo-1](../../assets/images/i18n/demo-1.png)
![demo-1](./.assets.how-to-i18n/demo-1.png)
![demo-2](../../assets/images/i18n/demo-2.png)
![demo-2](./.assets.how-to-i18n/demo-2.png)
![demo-3](../../assets/images/i18n/demo-3.png)
![demo-3](./.assets.how-to-i18n/demo-3.png)
## i18n Conventions

View File

@@ -15,11 +15,11 @@ i18n ally是一个强大的VSCode插件它能在开发阶段提供实时反
### 效果展示
![demo-1](../../assets/images/i18n/demo-1.png)
![demo-1](./.assets.how-to-i18n/demo-1.png)
![demo-2](../../assets/images/i18n/demo-2.png)
![demo-2](./.assets.how-to-i18n/demo-2.png)
![demo-3](../../assets/images/i18n/demo-3.png)
![demo-3](./.assets.how-to-i18n/demo-3.png)
## i18n 约定

View File

@@ -0,0 +1,127 @@
# messageBlock.ts 使用指南
该文件定义了用于管理应用程序中所有 `MessageBlock` 实体的 Redux Slice。它使用 Redux Toolkit 的 `createSlice``createEntityAdapter` 来高效地处理规范化的状态,并提供了一系列 actions 和 selectors 用于与消息块数据交互。
## 核心目标
- **状态管理**: 集中管理所有 `MessageBlock` 的状态。`MessageBlock` 代表消息中的不同内容单元(如文本、代码、图片、引用等)。
- **规范化**: 使用 `createEntityAdapter``MessageBlock` 数据存储在规范化的结构中(`{ ids: [], entities: {} }`),这有助于提高性能和简化更新逻辑。
- **可预测性**: 提供明确的 actions 来修改状态,并通过 selectors 安全地访问状态。
## 关键概念
- **Slice (`createSlice`)**: Redux Toolkit 的核心 API用于创建包含 reducer 逻辑、action creators 和初始状态的 Redux 模块。
- **Entity Adapter (`createEntityAdapter`)**: Redux Toolkit 提供的工具,用于简化对规范化数据的 CRUD创建、读取、更新、删除操作。它会自动生成 reducer 函数和 selectors。
- **Selectors**: 用于从 Redux store 中派生和计算数据的函数。Selectors 可以被记忆化memoized以提高性能。
## State 结构
`messageBlocks` slice 的状态结构由 `createEntityAdapter` 定义,大致如下:
```typescript
{
ids: string[]; // 存储所有 MessageBlock ID 的有序列表
entities: { [id: string]: MessageBlock }; // 按 ID 存储 MessageBlock 对象的字典
loadingState: 'idle' | 'loading' | 'succeeded' | 'failed'; // (可选) 其他状态,如加载状态
error: string | null; // (可选) 错误信息
}
```
## Actions
该 slice 导出以下 actions (由 `createSlice``createEntityAdapter` 自动生成或自定义)
- **`upsertOneBlock(payload: MessageBlock)`**:
- 添加一个新的 `MessageBlock` 或更新一个已存在的 `MessageBlock`。如果 payload 中的 `id` 已存在,则执行更新;否则执行插入。
- **`upsertManyBlocks(payload: MessageBlock[])`**:
- 添加或更新多个 `MessageBlock`。常用于批量加载数据(例如,加载一个 Topic 的所有消息块)。
- **`removeOneBlock(payload: string)`**:
- 根据提供的 `id` (payload) 移除单个 `MessageBlock`
- **`removeManyBlocks(payload: string[])`**:
- 根据提供的 `id` 数组 (payload) 移除多个 `MessageBlock`。常用于删除消息或清空 Topic 时清理相关的块。
- **`removeAllBlocks()`**:
- 移除 state 中的所有 `MessageBlock` 实体。
- **`updateOneBlock(payload: { id: string; changes: Partial<MessageBlock> })`**:
- 更新一个已存在的 `MessageBlock``payload` 需要包含块的 `id` 和一个包含要更改的字段的 `changes` 对象。
- **`setMessageBlocksLoading(payload: 'idle' | 'loading')`**:
- (自定义) 设置 `loadingState` 属性。
- **`setMessageBlocksError(payload: string)`**:
- (自定义) 设置 `loadingState``'failed'` 并记录错误信息。
**使用示例 (在 Thunk 或其他 Dispatch 的地方):**
```typescript
import { upsertOneBlock, removeManyBlocks, updateOneBlock } from './messageBlock'
import store from './store' // 假设这是你的 Redux store 实例
// 添加或更新一个块
const newBlock: MessageBlock = {
/* ... block data ... */
}
store.dispatch(upsertOneBlock(newBlock))
// 更新一个块的内容
store.dispatch(updateOneBlock({ id: blockId, changes: { content: 'New content' } }))
// 删除多个块
const blockIdsToRemove = ['id1', 'id2']
store.dispatch(removeManyBlocks(blockIdsToRemove))
```
## Selectors
该 slice 导出由 `createEntityAdapter` 生成的基础 selectors并通过 `messageBlocksSelectors` 对象访问:
- **`messageBlocksSelectors.selectIds(state: RootState): string[]`**: 返回包含所有块 ID 的数组。
- **`messageBlocksSelectors.selectEntities(state: RootState): { [id: string]: MessageBlock }`**: 返回块 ID 到块对象的映射字典。
- **`messageBlocksSelectors.selectAll(state: RootState): MessageBlock[]`**: 返回包含所有块对象的数组。
- **`messageBlocksSelectors.selectTotal(state: RootState): number`**: 返回块的总数。
- **`messageBlocksSelectors.selectById(state: RootState, id: string): MessageBlock | undefined`**: 根据 ID 返回单个块对象,如果找不到则返回 `undefined`
**此外,还提供了一个自定义的、记忆化的 selector**
- **`selectFormattedCitationsByBlockId(state: RootState, blockId: string | undefined): Citation[]`**:
- 接收一个 `blockId`
- 如果该 ID 对应的块是 `CITATION` 类型,则提取并格式化其包含的引用信息(来自网页搜索、知识库等),进行去重和重新编号,最后返回一个 `Citation[]` 数组,用于在 UI 中显示。
- 如果块不存在或类型不匹配,返回空数组 `[]`
- 这个 selector 封装了处理不同引用来源Gemini, OpenAI, OpenRouter, Zhipu 等)的复杂逻辑。
**使用示例 (在 React 组件或 `useSelector` 中):**
```typescript
import { useSelector } from 'react-redux'
import { messageBlocksSelectors, selectFormattedCitationsByBlockId } from './messageBlock'
import type { RootState } from './store'
// 获取所有块
const allBlocks = useSelector(messageBlocksSelectors.selectAll)
// 获取特定 ID 的块
const specificBlock = useSelector((state: RootState) => messageBlocksSelectors.selectById(state, someBlockId))
// 获取特定引用块格式化后的引用列表
const formattedCitations = useSelector((state: RootState) => selectFormattedCitationsByBlockId(state, citationBlockId))
// 在组件中使用引用数据
// {formattedCitations.map(citation => ...)}
```
## 集成
`messageBlock.ts` slice 通常与 `messageThunk.ts` 中的 Thunks 紧密协作。Thunks 负责处理异步逻辑(如 API 调用、数据库操作),并在需要时 dispatch `messageBlock` slice 的 actions 来更新状态。例如,当 `messageThunk` 接收到流式响应时,它会 dispatch `upsertOneBlock``updateOneBlock` 来实时更新对应的 `MessageBlock`。同样,删除消息的 Thunk 会 dispatch `removeManyBlocks`
理解 `messageBlock.ts` 的职责是管理**状态本身**,而 `messageThunk.ts` 负责**触发状态变更**的异步流程,这对于维护清晰的应用架构至关重要。

View File

@@ -0,0 +1,105 @@
# messageThunk.ts 使用指南
该文件包含用于管理应用程序中消息流、处理助手交互以及同步 Redux 状态与 IndexedDB 数据库的核心 Thunk Action Creators。主要围绕 `Message``MessageBlock` 对象进行操作。
## 核心功能
1. **发送/接收消息**: 处理用户消息的发送,触发助手响应,并流式处理返回的数据,将其解析为不同的 `MessageBlock`
2. **状态管理**: 确保 Redux store 中的消息和消息块状态与 IndexedDB 中的持久化数据保持一致。
3. **消息操作**: 提供删除、重发、重新生成、编辑后重发、追加响应、克隆等消息生命周期管理功能。
4. **Block 处理**: 动态创建、更新和保存各种类型的 `MessageBlock`(文本、思考过程、工具调用、引用、图片、错误、翻译等)。
## 主要 Thunks
以下是一些关键的 Thunk 函数及其用途:
1. **`sendMessage(userMessage, userMessageBlocks, assistant, topicId)`**
- **用途**: 发送一条新的用户消息。
- **流程**:
- 保存用户消息 (`userMessage`) 及其块 (`userMessageBlocks`) 到 Redux 和 DB。
- 检查 `@mentions` 以确定是单模型响应还是多模型响应。
- 创建助手消息(们)的存根 (Stub)。
- 将存根添加到 Redux 和 DB。
- 将核心处理逻辑 `fetchAndProcessAssistantResponseImpl` 添加到该 `topicId` 的队列中以获取实际响应。
- **Block 相关**: 主要处理用户消息的初始 `MessageBlock` 保存。
2. **`fetchAndProcessAssistantResponseImpl(dispatch, getState, topicId, assistant, assistantMessage)`**
- **用途**: (内部函数) 获取并处理单个助手响应的核心逻辑,被 `sendMessage`, `resend...`, `regenerate...`, `append...` 等调用。
- **流程**:
- 设置 Topic 加载状态。
- 准备上下文消息。
- 调用 `fetchChatCompletion` API 服务。
- 使用 `createStreamProcessor` 处理流式响应。
- 通过各种回调 (`onTextChunk`, `onThinkingChunk`, `onToolCallComplete`, `onImageGenerated`, `onError`, `onComplete` 等) 处理不同类型的事件。
- **Block 相关**:
- 根据流事件创建初始 `UNKNOWN` 块。
- 实时创建和更新 `MAIN_TEXT``THINKING` 块,使用 `throttledBlockUpdate``throttledBlockDbUpdate` 进行节流更新。
- 创建 `TOOL`, `CITATION`, `IMAGE`, `ERROR` 等类型的块。
- 在事件完成时(如 `onTextComplete`, `onToolCallComplete`)将块状态标记为 `SUCCESS``ERROR`,并使用 `saveUpdatedBlockToDB` 保存最终状态。
- 使用 `handleBlockTransition` 管理非流式块(如 `TOOL`, `CITATION`)的添加和状态更新。
3. **`loadTopicMessagesThunk(topicId, forceReload)`**
- **用途**: 从数据库加载指定主题的所有消息及其关联的 `MessageBlock`
- **流程**:
- 从 DB 获取 `Topic` 及其 `messages` 列表。
- 根据消息 ID 列表从 DB 获取所有相关的 `MessageBlock`
- 使用 `upsertManyBlocks` 将块更新到 Redux。
- 将消息更新到 Redux。
- **Block 相关**: 负责将持久化的 `MessageBlock` 加载到 Redux 状态。
4. **删除 Thunks**
- `deleteSingleMessageThunk(topicId, messageId)`: 删除单个消息及其所有 `MessageBlock`
- `deleteMessageGroupThunk(topicId, askId)`: 删除一个用户消息及其所有相关的助手响应消息和它们的所有 `MessageBlock`
- `clearTopicMessagesThunk(topicId)`: 清空主题下的所有消息及其所有 `MessageBlock`
- **Block 相关**: 从 Redux 和 DB 中移除指定的 `MessageBlock`
5. **重发/重新生成 Thunks**
- `resendMessageThunk(topicId, userMessageToResend, assistant)`: 重发用户消息。会重置(清空 Block 并标记为 PENDING所有与该用户消息关联的助手响应然后重新请求生成。
- `resendUserMessageWithEditThunk(topicId, originalMessage, mainTextBlockId, editedContent, assistant)`: 用户编辑消息内容后重发。先更新用户消息的 `MAIN_TEXT` 块内容,然后调用 `resendMessageThunk`
- `regenerateAssistantResponseThunk(topicId, assistantMessageToRegenerate, assistant)`: 重新生成单个助手响应。重置该助手消息(清空 Block 并标记为 PENDING然后重新请求生成。
- **Block 相关**: 删除旧的 `MessageBlock`,并在重新生成过程中创建新的 `MessageBlock`
6. **`appendAssistantResponseThunk(topicId, existingAssistantMessageId, newModel, assistant)`**
- **用途**: 在已有的对话上下文中,针对同一个用户问题,使用新选择的模型追加一个新的助手响应。
- **流程**:
- 找到现有助手消息以获取原始 `askId`
- 创建使用 `newModel` 的新助手消息存根(使用相同的 `askId`)。
- 添加新存根到 Redux 和 DB。
-`fetchAndProcessAssistantResponseImpl` 添加到队列以生成新响应。
- **Block 相关**: 为新的助手响应创建全新的 `MessageBlock`
7. **`cloneMessagesToNewTopicThunk(sourceTopicId, branchPointIndex, newTopic)`**
- **用途**: 将源主题的部分消息(及其 Block克隆到一个**已存在**的新主题中。
- **流程**:
- 复制指定索引前的消息。
- 为所有克隆的消息和 Block 生成新的 UUID。
- 正确映射克隆消息之间的 `askId` 关系。
- 复制 `MessageBlock` 内容,更新其 `messageId` 指向新的消息 ID。
- 更新文件引用计数(如果 Block 是文件或图片)。
- 将克隆的消息和 Block 保存到新主题的 Redux 状态和 DB 中。
- **Block 相关**: 创建 `MessageBlock` 的副本,并更新其 ID 和 `messageId`
8. **`initiateTranslationThunk(messageId, topicId, targetLanguage, sourceBlockId?, sourceLanguage?)`**
- **用途**: 为指定消息启动翻译流程,创建一个初始的 `TRANSLATION` 类型的 `MessageBlock`
- **流程**:
- 创建一个状态为 `STREAMING``TranslationMessageBlock`
- 将其添加到 Redux 和 DB。
- 更新原消息的 `blocks` 列表以包含新的翻译块 ID。
- **Block 相关**: 创建并保存一个占位的 `TranslationMessageBlock`。实际翻译内容的获取和填充需要后续步骤。
## 内部机制和注意事项
- **数据库交互**: 通过 `saveMessageAndBlocksToDB`, `updateExistingMessageAndBlocksInDB`, `saveUpdatesToDB`, `saveUpdatedBlockToDB`, `throttledBlockDbUpdate` 等辅助函数与 IndexedDB (`db`) 交互,确保数据持久化。
- **状态同步**: Thunks 负责协调 Redux Store 和 IndexedDB 之间的数据一致性。
- **队列 (`getTopicQueue`)**: 使用 `AsyncQueue` 确保对同一主题的操作(尤其是 API 请求)按顺序执行,避免竞态条件。
- **节流 (`throttle`)**: 对流式响应中频繁的 Block 更新(文本、思考)使用 `lodash.throttle` 优化性能,减少 Redux dispatch 和 DB 写入次数。
- **错误处理**: `fetchAndProcessAssistantResponseImpl` 内的回调函数(特别是 `onError`)处理流处理和 API 调用中可能出现的错误,并创建 `ERROR` 类型的 `MessageBlock`
开发者在使用这些 Thunks 时,通常需要提供 `dispatch`, `getState` (由 Redux Thunk 中间件注入),以及如 `topicId`, `assistant` 配置对象, 相关的 `Message``MessageBlock` 对象/ID 等参数。理解每个 Thunk 的职责和它如何影响消息及块的状态至关重要。

View File

@@ -0,0 +1,156 @@
# useMessageOperations.ts 使用指南
该文件定义了一个名为 `useMessageOperations` 的自定义 React Hook。这个 Hook 的主要目的是为 React 组件提供一个便捷的接口用于执行与特定主题Topic相关的各种消息操作。它封装了调用 Redux Thunks (`messageThunk.ts`) 和 Actions (`newMessage.ts`, `messageBlock.ts`) 的逻辑,简化了组件与消息数据交互的代码。
## 核心目标
- **封装**: 将复杂的消息操作逻辑(如删除、重发、重新生成、编辑、翻译等)封装在易于使用的函数中。
- **简化**: 让组件可以直接调用这些操作函数,而无需直接与 Redux `dispatch` 或 Thunks 交互。
- **上下文关联**: 所有操作都与传入的 `topic` 对象相关联,确保操作作用于正确的主题。
## 如何使用
在你的 React 函数组件中,导入并调用 `useMessageOperations` Hook并传入当前活动的 `Topic` 对象。
```typescript
import React from 'react';
import { useMessageOperations } from '@renderer/hooks/useMessageOperations';
import type { Topic, Message, Assistant, Model } from '@renderer/types';
interface MyComponentProps {
currentTopic: Topic;
currentAssistant: Assistant;
}
function MyComponent({ currentTopic, currentAssistant }: MyComponentProps) {
const {
deleteMessage,
resendMessage,
regenerateAssistantMessage,
appendAssistantResponse,
getTranslationUpdater,
createTopicBranch,
// ... 其他操作函数
} = useMessageOperations(currentTopic);
const handleDelete = (messageId: string) => {
deleteMessage(messageId);
};
const handleResend = (message: Message) => {
resendMessage(message, currentAssistant);
};
const handleAppend = (existingMsg: Message, newModel: Model) => {
appendAssistantResponse(existingMsg, newModel, currentAssistant);
}
// ... 在组件中使用其他操作函数
return (
<div>
{/* Component UI */}
<button onClick={() => handleDelete('some-message-id')}>Delete Message</button>
{/* ... */}
</div>
);
}
```
## 返回值
`useMessageOperations(topic)` Hook 返回一个包含以下函数和值的对象:
- **`deleteMessage(id: string)`**:
- 删除指定 `id` 的单个消息。
- 内部调用 `deleteSingleMessageThunk`
- **`deleteGroupMessages(askId: string)`**:
- 删除与指定 `askId` 相关联的一组消息(通常是用户提问及其所有助手回答)。
- 内部调用 `deleteMessageGroupThunk`
- **`editMessage(messageId: string, updates: Partial<Message>)`**:
- 更新指定 `messageId` 的消息的部分属性。
- **注意**: 目前主要用于更新 Redux 状态
- 内部调用 `newMessagesActions.updateMessage`
- **`resendMessage(message: Message, assistant: Assistant)`**:
- 重新发送指定的用户消息 (`message`),这将触发其所有关联助手响应的重新生成。
- 内部调用 `resendMessageThunk`
- **`resendUserMessageWithEdit(message: Message, editedContent: string, assistant: Assistant)`**:
- 在用户消息的主要文本块被编辑后,重新发送该消息。
- 会先查找消息的 `MAIN_TEXT` 块 ID然后调用 `resendUserMessageWithEditThunk`
- **`clearTopicMessages(_topicId?: string)`**:
- 清除当前主题(或可选的指定 `_topicId`)下的所有消息。
- 内部调用 `clearTopicMessagesThunk`
- **`createNewContext()`**:
- 发出一个全局事件 (`EVENT_NAMES.NEW_CONTEXT`),通常用于通知 UI 清空显示,准备新的上下文。不直接修改 Redux 状态。
- **`displayCount`**:
- (非操作函数) 从 Redux store 中获取当前的 `displayCount` 值。
- **`pauseMessages()`**:
- 尝试中止当前主题中正在进行的消息生成(状态为 `processing``pending`)。
- 通过查找相关的 `askId` 并调用 `abortCompletion` 来实现。
- 同时会 dispatch `setTopicLoading` action 将加载状态设为 `false`
- **`resumeMessage(message: Message, assistant: Assistant)`**:
- 恢复/重新发送一个用户消息。目前实现为直接调用 `resendMessage`
- **`regenerateAssistantMessage(message: Message, assistant: Assistant)`**:
- 重新生成指定的**助手**消息 (`message`) 的响应。
- 内部调用 `regenerateAssistantResponseThunk`
- **`appendAssistantResponse(existingAssistantMessage: Message, newModel: Model, assistant: Assistant)`**:
- 针对 `existingAssistantMessage` 所回复的**同一用户提问**,使用 `newModel` 追加一个新的助手响应。
- 内部调用 `appendAssistantResponseThunk`
- **`getTranslationUpdater(messageId: string, targetLanguage: string, sourceBlockId?: string, sourceLanguage?: string)`**:
- **用途**: 获取一个用于逐步更新翻译块内容的函数。
- **流程**:
1. 内部调用 `initiateTranslationThunk` 来创建或获取一个 `TRANSLATION` 类型的 `MessageBlock`,并获取其 `blockId`
2. 返回一个**异步更新函数**。
- **返回的更新函数 `(accumulatedText: string, isComplete?: boolean) => void`**:
- 接收累积的翻译文本和完成状态。
- 调用 `updateOneBlock` 更新 Redux 中的翻译块内容和状态 (`STREAMING``SUCCESS`)。
- 调用 `throttledBlockDbUpdate` 将更新(节流地)保存到数据库。
- 如果初始化失败Thunk 返回 `undefined`),则此函数返回 `null`
- **`createTopicBranch(sourceTopicId: string, branchPointIndex: number, newTopic: Topic)`**:
- 创建一个主题分支,将 `sourceTopicId` 主题中 `branchPointIndex` 索引之前的消息克隆到 `newTopic` 中。
- **注意**: `newTopic` 对象必须是调用此函数**之前**已经创建并添加到 Redux 和数据库中的。
- 内部调用 `cloneMessagesToNewTopicThunk`
## 依赖
- **`topic: Topic`**: 必须传入当前操作上下文的主题对象。Hook 返回的操作函数将始终作用于这个主题的 `topic.id`
- **Redux `dispatch`**: Hook 内部使用 `useAppDispatch` 获取 `dispatch` 函数来调用 actions 和 thunks。
## 相关 Hooks
在同一文件中还定义了两个辅助 Hook
- **`useTopicMessages(topic: Topic)`**:
- 使用 `selectMessagesForTopic` selector 来获取并返回指定主题的消息列表。
- **`useTopicLoading(topic: Topic)`**:
- 使用 `selectNewTopicLoading` selector 来获取并返回指定主题的加载状态。
这些 Hook 可以与 `useMessageOperations` 结合使用,方便地在组件中获取消息数据、加载状态,并执行相关操作。

View File

Before

Width:  |  Height:  |  Size: 563 KiB

After

Width:  |  Height:  |  Size: 563 KiB

View File

@@ -19,7 +19,7 @@ Users are welcome to submit issues or provide feedback through other channels fo
### Participating in the Test Plan
Developers should submit `PRs` according to the [Contributor Guide](../../CONTRIBUTING.md) (and ensure the target branch is `main`). The repository maintainers will evaluate whether the `PR` should be included in the Test Plan based on factors such as the impact of the feature on the application, its importance, and whether broader testing is needed.
Developers should submit `PRs` according to the [Contributor Guide](../CONTRIBUTING.md) (and ensure the target branch is `main`). The repository maintainers will evaluate whether the `PR` should be included in the Test Plan based on factors such as the impact of the feature on the application, its importance, and whether broader testing is needed.
If the `PR` is added to the Test Plan, the repository maintainers will:

View File

@@ -19,7 +19,7 @@
### 参与测试计划
开发者按照[贡献者指南](./contributing.md)要求正常提交`PR`并注意提交target为`main`)。仓库维护者会综合考虑(例如该功能对应用的影响程度,功能的重要性,是否需要更广泛的测试等),决定该`PR`是否应加入测试计划。
开发者按照[贡献者指南](CONTRIBUTING.zh.md)要求正常提交`PR`并注意提交target为`main`)。仓库维护者会综合考虑(例如该功能对应用的影响程度,功能的重要性,是否需要更广泛的测试等),决定该`PR`是否应加入测试计划。
若该`PR`加入测试计划,仓库维护者会做如下操作:

View File

@@ -1,73 +0,0 @@
# 🖥️ Develop
## IDE Setup
- Editor: [Cursor](https://www.cursor.com/), etc. Any VS Code compatible editor.
- Linter: [ESLint](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint)
- Formatter: [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome)
## Project Setup
### Install
```bash
yarn
```
### Development
### Setup Node.js
Download and install [Node.js v22.x.x](https://nodejs.org/en/download)
### Setup Yarn
```bash
corepack enable
corepack prepare yarn@4.9.1 --activate
```
### Install Dependencies
```bash
yarn install
```
### ENV
```bash
copy .env.example .env
```
### Start
```bash
yarn dev
```
### Debug
```bash
yarn debug
```
Then input chrome://inspect in browser
### Test
```bash
yarn test
```
### Build
```bash
# For windows
$ yarn build:win
# For macOS
$ yarn build:mac
# For Linux
$ yarn build:linux
```

View File

@@ -1,404 +0,0 @@
# 消息系统
本文档介绍 Cherry Studio 的消息系统架构,包括消息生命周期、状态管理和操作接口。
## 消息的生命周期
![消息生命周期](../../assets/images/message-lifecycle.png)
---
# messageBlock.ts 使用指南
该文件定义了用于管理应用程序中所有 `MessageBlock` 实体的 Redux Slice。它使用 Redux Toolkit 的 `createSlice` 和 `createEntityAdapter` 来高效地处理规范化的状态,并提供了一系列 actions 和 selectors 用于与消息块数据交互。
## 核心目标
- **状态管理**: 集中管理所有 `MessageBlock` 的状态。`MessageBlock` 代表消息中的不同内容单元(如文本、代码、图片、引用等)。
- **规范化**: 使用 `createEntityAdapter``MessageBlock` 数据存储在规范化的结构中(`{ ids: [], entities: {} }`),这有助于提高性能和简化更新逻辑。
- **可预测性**: 提供明确的 actions 来修改状态,并通过 selectors 安全地访问状态。
## 关键概念
- **Slice (`createSlice`)**: Redux Toolkit 的核心 API用于创建包含 reducer 逻辑、action creators 和初始状态的 Redux 模块。
- **Entity Adapter (`createEntityAdapter`)**: Redux Toolkit 提供的工具,用于简化对规范化数据的 CRUD创建、读取、更新、删除操作。它会自动生成 reducer 函数和 selectors。
- **Selectors**: 用于从 Redux store 中派生和计算数据的函数。Selectors 可以被记忆化memoized以提高性能。
## State 结构
`messageBlocks` slice 的状态结构由 `createEntityAdapter` 定义,大致如下:
```typescript
{
ids: string[]; // 存储所有 MessageBlock ID 的有序列表
entities: { [id: string]: MessageBlock }; // 按 ID 存储 MessageBlock 对象的字典
loadingState: 'idle' | 'loading' | 'succeeded' | 'failed'; // (可选) 其他状态,如加载状态
error: string | null; // (可选) 错误信息
}
```
## Actions
该 slice 导出以下 actions (由 `createSlice` 和 `createEntityAdapter` 自动生成或自定义)
- **`upsertOneBlock(payload: MessageBlock)`**:
- 添加一个新的 `MessageBlock` 或更新一个已存在的 `MessageBlock`。如果 payload 中的 `id` 已存在,则执行更新;否则执行插入。
- **`upsertManyBlocks(payload: MessageBlock[])`**:
- 添加或更新多个 `MessageBlock`。常用于批量加载数据(例如,加载一个 Topic 的所有消息块)。
- **`removeOneBlock(payload: string)`**:
- 根据提供的 `id` (payload) 移除单个 `MessageBlock`
- **`removeManyBlocks(payload: string[])`**:
- 根据提供的 `id` 数组 (payload) 移除多个 `MessageBlock`。常用于删除消息或清空 Topic 时清理相关的块。
- **`removeAllBlocks()`**:
- 移除 state 中的所有 `MessageBlock` 实体。
- **`updateOneBlock(payload: { id: string; changes: Partial<MessageBlock> })`**:
- 更新一个已存在的 `MessageBlock``payload` 需要包含块的 `id` 和一个包含要更改的字段的 `changes` 对象。
- **`setMessageBlocksLoading(payload: 'idle' | 'loading')`**:
- (自定义) 设置 `loadingState` 属性。
- **`setMessageBlocksError(payload: string)`**:
- (自定义) 设置 `loadingState` 为 `'failed'` 并记录错误信息。
**使用示例 (在 Thunk 或其他 Dispatch 的地方):**
```typescript
import { upsertOneBlock, removeManyBlocks, updateOneBlock } from './messageBlock'
import store from './store' // 假设这是你的 Redux store 实例
// 添加或更新一个块
const newBlock: MessageBlock = {
/* ... block data ... */
}
store.dispatch(upsertOneBlock(newBlock))
// 更新一个块的内容
store.dispatch(updateOneBlock({ id: blockId, changes: { content: 'New content' } }))
// 删除多个块
const blockIdsToRemove = ['id1', 'id2']
store.dispatch(removeManyBlocks(blockIdsToRemove))
```
## Selectors
该 slice 导出由 `createEntityAdapter` 生成的基础 selectors并通过 `messageBlocksSelectors` 对象访问:
- **`messageBlocksSelectors.selectIds(state: RootState): string[]`**: 返回包含所有块 ID 的数组。
- **`messageBlocksSelectors.selectEntities(state: RootState): { [id: string]: MessageBlock }`**: 返回块 ID 到块对象的映射字典。
- **`messageBlocksSelectors.selectAll(state: RootState): MessageBlock[]`**: 返回包含所有块对象的数组。
- **`messageBlocksSelectors.selectTotal(state: RootState): number`**: 返回块的总数。
- **`messageBlocksSelectors.selectById(state: RootState, id: string): MessageBlock | undefined`**: 根据 ID 返回单个块对象,如果找不到则返回 `undefined`
**此外,还提供了一个自定义的、记忆化的 selector**
- **`selectFormattedCitationsByBlockId(state: RootState, blockId: string | undefined): Citation[]`**:
- 接收一个 `blockId`
- 如果该 ID 对应的块是 `CITATION` 类型,则提取并格式化其包含的引用信息(来自网页搜索、知识库等),进行去重和重新编号,最后返回一个 `Citation[]` 数组,用于在 UI 中显示。
- 如果块不存在或类型不匹配,返回空数组 `[]`
- 这个 selector 封装了处理不同引用来源Gemini, OpenAI, OpenRouter, Zhipu 等)的复杂逻辑。
**使用示例 (在 React 组件或 `useSelector` 中):**
```typescript
import { useSelector } from 'react-redux'
import { messageBlocksSelectors, selectFormattedCitationsByBlockId } from './messageBlock'
import type { RootState } from './store'
// 获取所有块
const allBlocks = useSelector(messageBlocksSelectors.selectAll)
// 获取特定 ID 的块
const specificBlock = useSelector((state: RootState) => messageBlocksSelectors.selectById(state, someBlockId))
// 获取特定引用块格式化后的引用列表
const formattedCitations = useSelector((state: RootState) => selectFormattedCitationsByBlockId(state, citationBlockId))
// 在组件中使用引用数据
// {formattedCitations.map(citation => ...)}
```
## 集成
`messageBlock.ts` slice 通常与 `messageThunk.ts` 中的 Thunks 紧密协作。Thunks 负责处理异步逻辑(如 API 调用、数据库操作),并在需要时 dispatch `messageBlock` slice 的 actions 来更新状态。例如,当 `messageThunk` 接收到流式响应时,它会 dispatch `upsertOneBlock` 或 `updateOneBlock` 来实时更新对应的 `MessageBlock`。同样,删除消息的 Thunk 会 dispatch `removeManyBlocks`
理解 `messageBlock.ts` 的职责是管理**状态本身**,而 `messageThunk.ts` 负责**触发状态变更**的异步流程,这对于维护清晰的应用架构至关重要。
---
# messageThunk.ts 使用指南
该文件包含用于管理应用程序中消息流、处理助手交互以及同步 Redux 状态与 IndexedDB 数据库的核心 Thunk Action Creators。主要围绕 `Message``MessageBlock` 对象进行操作。
## 核心功能
1. **发送/接收消息**: 处理用户消息的发送,触发助手响应,并流式处理返回的数据,将其解析为不同的 `MessageBlock`
2. **状态管理**: 确保 Redux store 中的消息和消息块状态与 IndexedDB 中的持久化数据保持一致。
3. **消息操作**: 提供删除、重发、重新生成、编辑后重发、追加响应、克隆等消息生命周期管理功能。
4. **Block 处理**: 动态创建、更新和保存各种类型的 `MessageBlock`(文本、思考过程、工具调用、引用、图片、错误、翻译等)。
## 主要 Thunks
以下是一些关键的 Thunk 函数及其用途:
1. **`sendMessage(userMessage, userMessageBlocks, assistant, topicId)`**
- **用途**: 发送一条新的用户消息。
- **流程**:
- 保存用户消息 (`userMessage`) 及其块 (`userMessageBlocks`) 到 Redux 和 DB。
- 检查 `@mentions` 以确定是单模型响应还是多模型响应。
- 创建助手消息(们)的存根 (Stub)。
- 将存根添加到 Redux 和 DB。
- 将核心处理逻辑 `fetchAndProcessAssistantResponseImpl` 添加到该 `topicId` 的队列中以获取实际响应。
- **Block 相关**: 主要处理用户消息的初始 `MessageBlock` 保存。
2. **`fetchAndProcessAssistantResponseImpl(dispatch, getState, topicId, assistant, assistantMessage)`**
- **用途**: (内部函数) 获取并处理单个助手响应的核心逻辑,被 `sendMessage`, `resend...`, `regenerate...`, `append...` 等调用。
- **流程**:
- 设置 Topic 加载状态。
- 准备上下文消息。
- 调用 `fetchChatCompletion` API 服务。
- 使用 `createStreamProcessor` 处理流式响应。
- 通过各种回调 (`onTextChunk`, `onThinkingChunk`, `onToolCallComplete`, `onImageGenerated`, `onError`, `onComplete` 等) 处理不同类型的事件。
- **Block 相关**:
- 根据流事件创建初始 `UNKNOWN` 块。
- 实时创建和更新 `MAIN_TEXT` 和 `THINKING` 块,使用 `throttledBlockUpdate` 和 `throttledBlockDbUpdate` 进行节流更新。
- 创建 `TOOL`, `CITATION`, `IMAGE`, `ERROR` 等类型的块。
- 在事件完成时(如 `onTextComplete`, `onToolCallComplete`)将块状态标记为 `SUCCESS` 或 `ERROR`,并使用 `saveUpdatedBlockToDB` 保存最终状态。
- 使用 `handleBlockTransition` 管理非流式块(如 `TOOL`, `CITATION`)的添加和状态更新。
3. **`loadTopicMessagesThunk(topicId, forceReload)`**
- **用途**: 从数据库加载指定主题的所有消息及其关联的 `MessageBlock`
- **流程**:
- 从 DB 获取 `Topic` 及其 `messages` 列表。
- 根据消息 ID 列表从 DB 获取所有相关的 `MessageBlock`
- 使用 `upsertManyBlocks` 将块更新到 Redux。
- 将消息更新到 Redux。
- **Block 相关**: 负责将持久化的 `MessageBlock` 加载到 Redux 状态。
4. **删除 Thunks**
- `deleteSingleMessageThunk(topicId, messageId)`: 删除单个消息及其所有 `MessageBlock`
- `deleteMessageGroupThunk(topicId, askId)`: 删除一个用户消息及其所有相关的助手响应消息和它们的所有 `MessageBlock`
- `clearTopicMessagesThunk(topicId)`: 清空主题下的所有消息及其所有 `MessageBlock`
- **Block 相关**: 从 Redux 和 DB 中移除指定的 `MessageBlock`
5. **重发/重新生成 Thunks**
- `resendMessageThunk(topicId, userMessageToResend, assistant)`: 重发用户消息。会重置(清空 Block 并标记为 PENDING所有与该用户消息关联的助手响应然后重新请求生成。
- `resendUserMessageWithEditThunk(topicId, originalMessage, mainTextBlockId, editedContent, assistant)`: 用户编辑消息内容后重发。先更新用户消息的 `MAIN_TEXT` 块内容,然后调用 `resendMessageThunk`
- `regenerateAssistantResponseThunk(topicId, assistantMessageToRegenerate, assistant)`: 重新生成单个助手响应。重置该助手消息(清空 Block 并标记为 PENDING然后重新请求生成。
- **Block 相关**: 删除旧的 `MessageBlock`,并在重新生成过程中创建新的 `MessageBlock`
6. **`appendAssistantResponseThunk(topicId, existingAssistantMessageId, newModel, assistant)`**
- **用途**: 在已有的对话上下文中,针对同一个用户问题,使用新选择的模型追加一个新的助手响应。
- **流程**:
- 找到现有助手消息以获取原始 `askId`
- 创建使用 `newModel` 的新助手消息存根(使用相同的 `askId`)。
- 添加新存根到 Redux 和 DB。
-`fetchAndProcessAssistantResponseImpl` 添加到队列以生成新响应。
- **Block 相关**: 为新的助手响应创建全新的 `MessageBlock`
7. **`cloneMessagesToNewTopicThunk(sourceTopicId, branchPointIndex, newTopic)`**
- **用途**: 将源主题的部分消息(及其 Block克隆到一个**已存在**的新主题中。
- **流程**:
- 复制指定索引前的消息。
- 为所有克隆的消息和 Block 生成新的 UUID。
- 正确映射克隆消息之间的 `askId` 关系。
- 复制 `MessageBlock` 内容,更新其 `messageId` 指向新的消息 ID。
- 更新文件引用计数(如果 Block 是文件或图片)。
- 将克隆的消息和 Block 保存到新主题的 Redux 状态和 DB 中。
- **Block 相关**: 创建 `MessageBlock` 的副本,并更新其 ID 和 `messageId`
8. **`initiateTranslationThunk(messageId, topicId, targetLanguage, sourceBlockId?, sourceLanguage?)`**
- **用途**: 为指定消息启动翻译流程,创建一个初始的 `TRANSLATION` 类型的 `MessageBlock`
- **流程**:
- 创建一个状态为 `STREAMING` 的 `TranslationMessageBlock`
- 将其添加到 Redux 和 DB。
- 更新原消息的 `blocks` 列表以包含新的翻译块 ID。
- **Block 相关**: 创建并保存一个占位的 `TranslationMessageBlock`。实际翻译内容的获取和填充需要后续步骤。
## 内部机制和注意事项
- **数据库交互**: 通过 `saveMessageAndBlocksToDB`, `updateExistingMessageAndBlocksInDB`, `saveUpdatesToDB`, `saveUpdatedBlockToDB`, `throttledBlockDbUpdate` 等辅助函数与 IndexedDB (`db`) 交互,确保数据持久化。
- **状态同步**: Thunks 负责协调 Redux Store 和 IndexedDB 之间的数据一致性。
- **队列 (`getTopicQueue`)**: 使用 `AsyncQueue` 确保对同一主题的操作(尤其是 API 请求)按顺序执行,避免竞态条件。
- **节流 (`throttle`)**: 对流式响应中频繁的 Block 更新(文本、思考)使用 `lodash.throttle` 优化性能,减少 Redux dispatch 和 DB 写入次数。
- **错误处理**: `fetchAndProcessAssistantResponseImpl` 内的回调函数(特别是 `onError`)处理流处理和 API 调用中可能出现的错误,并创建 `ERROR` 类型的 `MessageBlock`
开发者在使用这些 Thunks 时,通常需要提供 `dispatch`, `getState` (由 Redux Thunk 中间件注入),以及如 `topicId`, `assistant` 配置对象, 相关的 `Message``MessageBlock` 对象/ID 等参数。理解每个 Thunk 的职责和它如何影响消息及块的状态至关重要。
---
# useMessageOperations.ts 使用指南
该文件定义了一个名为 `useMessageOperations` 的自定义 React Hook。这个 Hook 的主要目的是为 React 组件提供一个便捷的接口用于执行与特定主题Topic相关的各种消息操作。它封装了调用 Redux Thunks (`messageThunk.ts`) 和 Actions (`newMessage.ts`, `messageBlock.ts`) 的逻辑,简化了组件与消息数据交互的代码。
## 核心目标
- **封装**: 将复杂的消息操作逻辑(如删除、重发、重新生成、编辑、翻译等)封装在易于使用的函数中。
- **简化**: 让组件可以直接调用这些操作函数,而无需直接与 Redux `dispatch` 或 Thunks 交互。
- **上下文关联**: 所有操作都与传入的 `topic` 对象相关联,确保操作作用于正确的主题。
## 如何使用
在你的 React 函数组件中,导入并调用 `useMessageOperations` Hook并传入当前活动的 `Topic` 对象。
```typescript
import React from 'react';
import { useMessageOperations } from '@renderer/hooks/useMessageOperations';
import type { Topic, Message, Assistant, Model } from '@renderer/types';
interface MyComponentProps {
currentTopic: Topic;
currentAssistant: Assistant;
}
function MyComponent({ currentTopic, currentAssistant }: MyComponentProps) {
const {
deleteMessage,
resendMessage,
regenerateAssistantMessage,
appendAssistantResponse,
getTranslationUpdater,
createTopicBranch,
// ... 其他操作函数
} = useMessageOperations(currentTopic);
const handleDelete = (messageId: string) => {
deleteMessage(messageId);
};
const handleResend = (message: Message) => {
resendMessage(message, currentAssistant);
};
const handleAppend = (existingMsg: Message, newModel: Model) => {
appendAssistantResponse(existingMsg, newModel, currentAssistant);
}
// ... 在组件中使用其他操作函数
return (
<div>
{/* Component UI */}
<button onClick={() => handleDelete('some-message-id')}>Delete Message</button>
{/* ... */}
</div>
);
}
```
## 返回值
`useMessageOperations(topic)` Hook 返回一个包含以下函数和值的对象:
- **`deleteMessage(id: string)`**:
- 删除指定 `id` 的单个消息。
- 内部调用 `deleteSingleMessageThunk`
- **`deleteGroupMessages(askId: string)`**:
- 删除与指定 `askId` 相关联的一组消息(通常是用户提问及其所有助手回答)。
- 内部调用 `deleteMessageGroupThunk`
- **`editMessage(messageId: string, updates: Partial<Message>)`**:
- 更新指定 `messageId` 的消息的部分属性。
- **注意**: 目前主要用于更新 Redux 状态
- 内部调用 `newMessagesActions.updateMessage`
- **`resendMessage(message: Message, assistant: Assistant)`**:
- 重新发送指定的用户消息 (`message`),这将触发其所有关联助手响应的重新生成。
- 内部调用 `resendMessageThunk`
- **`resendUserMessageWithEdit(message: Message, editedContent: string, assistant: Assistant)`**:
- 在用户消息的主要文本块被编辑后,重新发送该消息。
- 会先查找消息的 `MAIN_TEXT` 块 ID然后调用 `resendUserMessageWithEditThunk`
- **`clearTopicMessages(_topicId?: string)`**:
- 清除当前主题(或可选的指定 `_topicId`)下的所有消息。
- 内部调用 `clearTopicMessagesThunk`
- **`createNewContext()`**:
- 发出一个全局事件 (`EVENT_NAMES.NEW_CONTEXT`),通常用于通知 UI 清空显示,准备新的上下文。不直接修改 Redux 状态。
- **`displayCount`**:
- (非操作函数) 从 Redux store 中获取当前的 `displayCount` 值。
- **`pauseMessages()`**:
- 尝试中止当前主题中正在进行的消息生成(状态为 `processing` 或 `pending`)。
- 通过查找相关的 `askId` 并调用 `abortCompletion` 来实现。
- 同时会 dispatch `setTopicLoading` action 将加载状态设为 `false`
- **`resumeMessage(message: Message, assistant: Assistant)`**:
- 恢复/重新发送一个用户消息。目前实现为直接调用 `resendMessage`
- **`regenerateAssistantMessage(message: Message, assistant: Assistant)`**:
- 重新生成指定的**助手**消息 (`message`) 的响应。
- 内部调用 `regenerateAssistantResponseThunk`
- **`appendAssistantResponse(existingAssistantMessage: Message, newModel: Model, assistant: Assistant)`**:
- 针对 `existingAssistantMessage` 所回复的**同一用户提问**,使用 `newModel` 追加一个新的助手响应。
- 内部调用 `appendAssistantResponseThunk`
- **`getTranslationUpdater(messageId: string, targetLanguage: string, sourceBlockId?: string, sourceLanguage?: string)`**:
- **用途**: 获取一个用于逐步更新翻译块内容的函数。
- **流程**:
1. 内部调用 `initiateTranslationThunk` 来创建或获取一个 `TRANSLATION` 类型的 `MessageBlock`,并获取其 `blockId`
2. 返回一个**异步更新函数**。
- **返回的更新函数 `(accumulatedText: string, isComplete?: boolean) => void`**:
- 接收累积的翻译文本和完成状态。
- 调用 `updateOneBlock` 更新 Redux 中的翻译块内容和状态 (`STREAMING` 或 `SUCCESS`)。
- 调用 `throttledBlockDbUpdate` 将更新(节流地)保存到数据库。
- 如果初始化失败Thunk 返回 `undefined`),则此函数返回 `null`
- **`createTopicBranch(sourceTopicId: string, branchPointIndex: number, newTopic: Topic)`**:
- 创建一个主题分支,将 `sourceTopicId` 主题中 `branchPointIndex` 索引之前的消息克隆到 `newTopic` 中。
- **注意**: `newTopic` 对象必须是调用此函数**之前**已经创建并添加到 Redux 和数据库中的。
- 内部调用 `cloneMessagesToNewTopicThunk`
## 依赖
- **`topic: Topic`**: 必须传入当前操作上下文的主题对象。Hook 返回的操作函数将始终作用于这个主题的 `topic.id`
- **Redux `dispatch`**: Hook 内部使用 `useAppDispatch` 获取 `dispatch` 函数来调用 actions 和 thunks。
## 相关 Hooks
在同一文件中还定义了两个辅助 Hook
- **`useTopicMessages(topic: Topic)`**:
- 使用 `selectMessagesForTopic` selector 来获取并返回指定主题的消息列表。
- **`useTopicLoading(topic: Topic)`**:
- 使用 `selectNewTopicLoading` selector 来获取并返回指定主题的加载状态。
这些 Hook 可以与 `useMessageOperations` 结合使用,方便地在组件中获取消息数据、加载状态,并执行相关操作。

View File

@@ -134,108 +134,58 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
A New Era of Intelligence with Cherry Studio 1.7.1
What's New in v1.7.0-rc.1
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
🎉 MAJOR NEW FEATURE: AI Agents
- Create and manage custom AI agents with specialized tools and permissions
- Dedicated agent sessions with persistent SQLite storage, separate from regular chats
- Real-time tool approval system - review and approve agent actions dynamically
- MCP (Model Context Protocol) integration for connecting external tools
- Slash commands support for quick agent interactions
- OpenAI-compatible REST API for agent access
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
✨ New Features:
- AI Providers: Added support for Hugging Face, Mistral, Perplexity, and SophNet
- Knowledge Base: OpenMinerU document preprocessor, full-text search in notes, enhanced tool selection
- Image & OCR: Intel OVMS painting provider and Intel OpenVINO (NPU) OCR support
- MCP Management: Redesigned interface with dual-column layout for easier management
- Languages: Added German language support
This is what we've been building toward. And it's just the beginning.
⚡ Improvements:
- Upgraded to Electron 38.7.0
- Enhanced system shutdown handling and automatic update checks
- Improved proxy bypass rules
🤖 Meet Agent
Imagine having a brilliant colleague who never sleeps. Give Agent a goal — write a report, analyze data, refactor code — and watch it work. It reasons through problems, breaks them into steps, calls the right tools, and adapts when things change.
- **Think → Plan → Act**: From goal to execution, fully autonomous
- **Deep Reasoning**: Multi-turn thinking that solves real problems
- **Tool Mastery**: File operations, web search, code execution, and more
- **Skill Plugins**: Extend with custom commands and capabilities
- **You Stay in Control**: Real-time approval for sensitive actions
- **Full Visibility**: Every thought, every decision, fully transparent
🌐 Expanding Ecosystem
- **New Providers**: HuggingFace, Mistral, CherryIN, AI Gateway, Intel OVMS, Didi MCP
- **New Models**: Claude 4.5 Haiku, DeepSeek v3.2, GLM-4.6, Doubao, Ling series
- **MCP Integration**: Alibaba Cloud, ModelScope, Higress, MCP.so, TokenFlux and more
📚 Smarter Knowledge Base
- **OpenMinerU**: Self-hosted document processing
- **Full-Text Search**: Find anything instantly across your notes
- **Enhanced Tool Selection**: Smarter configuration for better AI assistance
📝 Notes, Reimagined
- Full-text search with highlighted results
- AI-powered smart rename
- Export as image
- Auto-wrap for tables
🖼️ Image & OCR
- Intel OVMS painting capabilities
- Intel OpenVINO NPU-accelerated OCR
🌍 Now in 10+ Languages
- Added German support
- Enhanced internationalization
⚡ Faster & More Polished
- Electron 38 upgrade
- New MCP management interface
- Dozens of UI refinements
❤️ Fully Open Source
Commercial restrictions removed. Cherry Studio now follows standard AGPL v3 — free for teams of any size.
The Agent Era is here. We can't wait to see what you'll create.
🐛 Important Bug Fixes:
- Fixed streaming response issues across multiple AI providers
- Fixed session list scrolling problems
- Fixed knowledge base deletion errors
<!--LANG:zh-CN-->
Cherry Studio 1.7.1:开启智能新纪元
v1.7.0-rc.1 新特性
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent能够自主思考、规划和行动的 AI。
🎉 重大更新AI Agent 智能体系统
- 创建和管理专属 AI Agent配置专用工具和权限
- 独立的 Agent 会话,使用 SQLite 持久化存储,与普通聊天分离
- 实时工具审批系统 - 动态审查和批准 Agent 操作
- MCP模型上下文协议集成连接外部工具
- 支持斜杠命令快速交互
- 兼容 OpenAI 的 REST API 访问
多年来AI 助手一直是被动的——等待你的指令回应你的问题。Agent 改变了这一切。现在AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
✨ 新功能:
- AI 提供商:新增 Hugging Face、Mistral、Perplexity 和 SophNet 支持
- 知识库OpenMinerU 文档预处理器、笔记全文搜索、增强的工具选择
- 图像与 OCRIntel OVMS 绘图提供商和 Intel OpenVINO (NPU) OCR 支持
- MCP 管理:重构管理界面,采用双列布局,更加方便管理
- 语言:新增德语支持
这是我们一直在构建的未来。而这,仅仅是开始。
⚡ 改进:
- 升级到 Electron 38.7.0
- 增强的系统关机处理和自动更新检查
- 改进的代理绕过规则
🤖 认识 Agent
想象一位永不疲倦的得力伙伴。给 Agent 一个目标——撰写报告、分析数据、重构代码——然后看它工作。它会推理问题、拆解步骤、调用工具,并在情况变化时灵活应对。
- **思考 → 规划 → 行动**:从目标到执行,全程自主
- **深度推理**:多轮思考,解决真实问题
- **工具大师**:文件操作、网络搜索、代码执行,样样精通
- **技能插件**:自定义命令,无限扩展
- **你掌控全局**:敏感操作,实时审批
- **完全透明**:每一步思考,每一个决策,清晰可见
🌐 生态持续壮大
- **新增服务商**Hugging Face、Mistral、Perplexity、SophNet、AI Gateway、Cerebras AI
- **新增模型**Gemini 3、Gemini 3 Pro支持图像预览、GPT-5.1、Claude Opus 4.5
- **MCP 集成**百炼、魔搭、Higress、MCP.so、TokenFlux 等平台
📚 更智能的知识库
- **OpenMinerU**:本地自部署文档处理
- **全文搜索**:笔记内容一搜即达
- **增强工具选择**:更智能的配置,更好的 AI 协助
📝 笔记,焕然一新
- 全文搜索,结果高亮
- AI 智能重命名
- 导出为图片
- 表格自动换行
🖼️ 图像与 OCR
- Intel OVMS 绘图能力
- Intel OpenVINO NPU 加速 OCR
🌍 支持 10+ 种语言
- 新增德语支持
- 全面增强国际化
⚡ 更快、更精致
- 升级 Electron 38
- 新的 MCP 管理界面
- 数十处 UI 细节打磨
❤️ 完全开源
商用限制已移除。Cherry Studio 现遵循标准 AGPL v3 协议——任意规模团队均可自由使用。
Agent 纪元已至。期待你的创造。
🐛 重要修复:
- 修复多个 AI 提供商的流式响应问题
- 修复会话列表滚动问题
- 修复知识库删除错误
<!--LANG:END-->

View File

@@ -58,7 +58,6 @@ export default defineConfig([
'dist/**',
'out/**',
'local/**',
'tests/**',
'.yarn/**',
'.gitignore',
'scripts/cloudflare-worker.js',

View File

@@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "1.7.1",
"version": "1.7.0-rc.1",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
@@ -62,7 +62,6 @@
"test": "vitest run --silent",
"test:main": "vitest run --project main",
"test:renderer": "vitest run --project renderer",
"test:aicore": "vitest run --project aiCore",
"test:update": "yarn test:renderer --update",
"test:coverage": "vitest run --coverage --silent",
"test:ui": "vitest --ui",
@@ -81,7 +80,7 @@
"release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
},
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.53#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch",
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.30#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.30-b50a299674.patch",
"@libsql/client": "0.14.0",
"@libsql/win32-x64-msvc": "^0.4.7",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
@@ -110,15 +109,15 @@
"@agentic/exa": "^7.3.3",
"@agentic/searxng": "^7.3.3",
"@agentic/tavily": "^7.3.3",
"@ai-sdk/amazon-bedrock": "^3.0.61",
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/amazon-bedrock": "^3.0.56",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.15",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
"@ai-sdk/google-vertex": "^3.0.79",
"@ai-sdk/gateway": "^2.0.13",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/google-vertex": "^3.0.72",
"@ai-sdk/huggingface": "^0.0.10",
"@ai-sdk/mistral": "^2.0.24",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/perplexity": "^2.0.20",
"@ai-sdk/test-server": "^0.0.1",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -161,19 +160,20 @@
"@langchain/community": "^1.0.0",
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@mcp-ui/client": "^5.14.1",
"@mistralai/mistralai": "^1.7.5",
"@modelcontextprotocol/sdk": "^1.17.5",
"@mozilla/readability": "^0.6.0",
"@notionhq/client": "^2.2.15",
"@openrouter/ai-sdk-provider": "^1.2.8",
"@openrouter/ai-sdk-provider": "^1.2.5",
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "2.0.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
"@opentelemetry/sdk-trace-base": "^2.0.0",
"@opentelemetry/sdk-trace-node": "^2.0.0",
"@opentelemetry/sdk-trace-web": "^2.0.0",
"@opeoginni/github-copilot-openai-compatible": "^0.1.21",
"@playwright/test": "^1.55.1",
"@opeoginni/github-copilot-openai-compatible": "0.1.21",
"@playwright/test": "^1.52.0",
"@radix-ui/react-context-menu": "^2.2.16",
"@reduxjs/toolkit": "^2.2.5",
"@shikijs/markdown-it": "^3.12.0",
@@ -218,8 +218,8 @@
"@types/mime-types": "^3",
"@types/node": "^22.17.1",
"@types/pako": "^1.0.2",
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",
"@types/react": "^19.0.12",
"@types/react-dom": "^19.0.4",
"@types/react-infinite-scroll-component": "^5.0.0",
"@types/react-transition-group": "^4.4.12",
"@types/react-window": "^1",
@@ -322,6 +322,7 @@
"p-queue": "^8.1.0",
"pdf-lib": "^1.17.1",
"pdf-parse": "^1.1.1",
"playwright": "^1.55.1",
"proxy-agent": "^6.5.0",
"react": "^19.2.0",
"react-dom": "^19.2.0",
@@ -412,9 +413,12 @@
"@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
"@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/openai@npm:2.0.64": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/google@npm:2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai@npm:2.0.71": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/openai-compatible@npm:1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/openai-compatible@npm:^1.0.19": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {

View File

@@ -69,7 +69,6 @@ export interface CherryInProviderSettings {
headers?: HeadersInput
/**
* Optional endpoint type to distinguish different endpoint behaviors.
* "image-generation" is also openai endpoint, but specifically for image generation.
*/
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
}

View File

@@ -39,13 +39,13 @@
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/azure": "^2.0.74",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/azure": "^2.0.73",
"@ai-sdk/deepseek": "^1.0.29",
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17",
"@ai-sdk/xai": "^2.0.36",
"@ai-sdk/xai": "^2.0.34",
"zod": "^4.1.5"
},
"devDependencies": {

View File

@@ -3,13 +3,12 @@
* Provides realistic mock responses for all provider types
*/
import type { ModelMessage, Tool } from 'ai'
import { jsonSchema } from 'ai'
import { jsonSchema, type ModelMessage, type Tool } from 'ai'
/**
* Standard test messages for all scenarios
*/
export const testMessages: Record<string, ModelMessage[]> = {
export const testMessages = {
simple: [{ role: 'user' as const, content: 'Hello, how are you?' }],
conversation: [
@@ -46,7 +45,7 @@ export const testMessages: Record<string, ModelMessage[]> = {
{ role: 'assistant' as const, content: '15 * 23 = 345' },
{ role: 'user' as const, content: 'Now divide that by 5' }
]
}
} satisfies Record<string, ModelMessage[]>
/**
* Standard test tools for tool calling scenarios
@@ -139,17 +138,68 @@ export const testTools: Record<string, Tool> = {
}
}
/**
* Mock streaming chunks for different providers
*/
export const mockStreamingChunks = {
text: [
{ type: 'text-delta' as const, textDelta: 'Hello' },
{ type: 'text-delta' as const, textDelta: ', ' },
{ type: 'text-delta' as const, textDelta: 'this ' },
{ type: 'text-delta' as const, textDelta: 'is ' },
{ type: 'text-delta' as const, textDelta: 'a ' },
{ type: 'text-delta' as const, textDelta: 'test.' }
],
withToolCall: [
{ type: 'text-delta' as const, textDelta: 'Let me check the weather for you.' },
{
type: 'tool-call-delta' as const,
toolCallType: 'function' as const,
toolCallId: 'call_123',
toolName: 'getWeather',
argsTextDelta: '{"location":'
},
{
type: 'tool-call-delta' as const,
toolCallType: 'function' as const,
toolCallId: 'call_123',
toolName: 'getWeather',
argsTextDelta: ' "San Francisco, CA"}'
},
{
type: 'tool-call' as const,
toolCallType: 'function' as const,
toolCallId: 'call_123',
toolName: 'getWeather',
args: { location: 'San Francisco, CA' }
}
],
withFinish: [
{ type: 'text-delta' as const, textDelta: 'Complete response.' },
{
type: 'finish' as const,
finishReason: 'stop' as const,
usage: {
promptTokens: 10,
completionTokens: 5,
totalTokens: 15
}
}
]
}
/**
* Mock complete responses for non-streaming scenarios
* Note: AI SDK v5 uses inputTokens/outputTokens instead of promptTokens/completionTokens
*/
export const mockCompleteResponses = {
simple: {
text: 'This is a simple response.',
finishReason: 'stop' as const,
usage: {
inputTokens: 15,
outputTokens: 8,
promptTokens: 15,
completionTokens: 8,
totalTokens: 23
}
},
@@ -165,8 +215,8 @@ export const mockCompleteResponses = {
],
finishReason: 'tool-calls' as const,
usage: {
inputTokens: 25,
outputTokens: 12,
promptTokens: 25,
completionTokens: 12,
totalTokens: 37
}
},
@@ -175,15 +225,14 @@ export const mockCompleteResponses = {
text: 'Response with warnings.',
finishReason: 'stop' as const,
usage: {
inputTokens: 10,
outputTokens: 5,
promptTokens: 10,
completionTokens: 5,
totalTokens: 15
},
warnings: [
{
type: 'unsupported-setting' as const,
setting: 'temperature',
details: 'Temperature parameter not supported for this model'
message: 'Temperature parameter not supported for this model'
}
]
}
@@ -236,3 +285,47 @@ export const mockImageResponses = {
warnings: []
}
}
/**
* Mock error responses
*/
export const mockErrors = {
invalidApiKey: {
name: 'APIError',
message: 'Invalid API key provided',
statusCode: 401
},
rateLimitExceeded: {
name: 'RateLimitError',
message: 'Rate limit exceeded. Please try again later.',
statusCode: 429,
headers: {
'retry-after': '60'
}
},
modelNotFound: {
name: 'ModelNotFoundError',
message: 'The requested model was not found',
statusCode: 404
},
contextLengthExceeded: {
name: 'ContextLengthError',
message: "This model's maximum context length is 4096 tokens",
statusCode: 400
},
timeout: {
name: 'TimeoutError',
message: 'Request timed out after 30000ms',
code: 'ETIMEDOUT'
},
networkError: {
name: 'NetworkError',
message: 'Network connection failed',
code: 'ECONNREFUSED'
}
}

View File

@@ -1,35 +0,0 @@
/**
 * Mock for @cherrystudio/ai-sdk-provider
 * This mock is used in tests to avoid importing the actual package
 */
export type CherryInProviderSettings = {
  apiKey?: string
  baseURL?: string
}

// Builds the minimal language-model stub shared by `languageModel` and `chat`:
// canned generate/stream results, differing only in the reported provider id.
const makeLanguageModelStub = (provider: string) => ({
  specificationVersion: 'v1',
  provider,
  modelId: 'mock-model',
  doGenerate: async () => ({ text: 'mock response' }),
  doStream: async () => ({ stream: (async function* () {})() })
})

// Factory mirroring the real provider's surface (languageModel / chat /
// textEmbeddingModel); all settings are accepted but ignored.
// oxlint-disable-next-line no-unused-vars
export const createCherryIn = (_options?: CherryInProviderSettings) => ({
  // oxlint-disable-next-line no-unused-vars
  languageModel: (_modelId: string) => makeLanguageModelStub('cherryin'),
  // oxlint-disable-next-line no-unused-vars
  chat: (_modelId: string) => makeLanguageModelStub('cherryin-chat'),
  // oxlint-disable-next-line no-unused-vars
  textEmbeddingModel: (_modelId: string) => ({
    specificationVersion: 'v1',
    provider: 'cherryin',
    modelId: 'mock-embedding-model'
  })
})

View File

@@ -1,9 +0,0 @@
/**
 * Vitest Setup File
 * Global test configuration and mocks for @cherrystudio/ai-core package
 */

// Mock Vite's SSR export helper so importing SSR-transformed modules does not
// crash in a plain Node environment: the stub simply passes the value through.
const globalScope = globalThis as any
globalScope.__vite_ssr_exportName__ = (_name: string, value: any) => value

// Note: @cherrystudio/ai-sdk-provider is mocked via alias in vitest.config.ts

View File

@@ -1,109 +0,0 @@
import { describe, expect, it } from 'vitest'
import { createOpenAIOptions, createOpenRouterOptions, mergeProviderOptions } from '../factory'
// Unit tests for mergeProviderOptions: verifies deep-merge semantics —
// nested objects merge recursively, primitives and arrays are overwritten
// by later values, and options for unrelated providers are preserved.
describe('mergeProviderOptions', () => {
  it('deep merges provider options for the same provider', () => {
    const reasoningOptions = createOpenRouterOptions({
      reasoning: {
        enabled: true,
        effort: 'medium'
      }
    })
    const webSearchOptions = createOpenRouterOptions({
      plugins: [{ id: 'web', max_results: 5 }]
    })
    const merged = mergeProviderOptions(reasoningOptions, webSearchOptions)
    // Disjoint keys under the same provider are both kept.
    expect(merged.openrouter).toEqual({
      reasoning: {
        enabled: true,
        effort: 'medium'
      },
      plugins: [{ id: 'web', max_results: 5 }]
    })
  })

  it('preserves options from other providers while merging', () => {
    const openRouter = createOpenRouterOptions({
      reasoning: { enabled: true }
    })
    const openAI = createOpenAIOptions({
      reasoningEffort: 'low'
    })
    const merged = mergeProviderOptions(openRouter, openAI)
    // Different provider namespaces never clobber each other.
    expect(merged.openrouter).toEqual({ reasoning: { enabled: true } })
    expect(merged.openai).toEqual({ reasoningEffort: 'low' })
  })

  it('overwrites primitive values with later values', () => {
    const first = createOpenAIOptions({
      reasoningEffort: 'low',
      user: 'user-123'
    })
    const second = createOpenAIOptions({
      reasoningEffort: 'high',
      maxToolCalls: 5
    })
    const merged = mergeProviderOptions(first, second)
    expect(merged.openai).toEqual({
      reasoningEffort: 'high', // overwritten by second
      user: 'user-123', // preserved from first
      maxToolCalls: 5 // added from second
    })
  })

  it('overwrites arrays with later values instead of merging', () => {
    const first = createOpenRouterOptions({
      models: ['gpt-4', 'gpt-3.5-turbo']
    })
    const second = createOpenRouterOptions({
      models: ['claude-3-opus', 'claude-3-sonnet']
    })
    const merged = mergeProviderOptions(first, second)
    // Array is completely replaced, not merged
    expect(merged.openrouter?.models).toEqual(['claude-3-opus', 'claude-3-sonnet'])
  })

  it('deeply merges nested objects while overwriting primitives', () => {
    const first = createOpenRouterOptions({
      reasoning: {
        enabled: true,
        effort: 'low'
      },
      user: 'user-123'
    })
    const second = createOpenRouterOptions({
      reasoning: {
        effort: 'high',
        max_tokens: 500
      },
      user: 'user-456'
    })
    const merged = mergeProviderOptions(first, second)
    expect(merged.openrouter).toEqual({
      reasoning: {
        enabled: true, // preserved from first
        effort: 'high', // overwritten by second
        max_tokens: 500 // added from second
      },
      user: 'user-456' // overwritten by second
    })
  })

  it('replaces arrays instead of merging them', () => {
    const first = createOpenRouterOptions({ plugins: [{ id: 'old' }] })
    const second = createOpenRouterOptions({ plugins: [{ id: 'new' }] })
    const merged = mergeProviderOptions(first, second)
    // @ts-expect-error type-check for openrouter options is skipped. see function signature of createOpenRouterOptions
    expect(merged.openrouter?.plugins).toEqual([{ id: 'new' }])
  })
})

View File

@@ -26,65 +26,13 @@ export function createGenericProviderOptions<T extends string>(
return { [provider]: options } as Record<T, Record<string, any>>
}
type PlainObject = Record<string, any>

// True only for non-null objects that are not arrays, i.e. values that can
// participate in a key-by-key merge.
const isPlainObject = (value: unknown): value is PlainObject => {
  return typeof value === 'object' && value !== null && !Array.isArray(value)
}

/**
 * Recursively merge `source` into a shallow copy of `target`.
 * Keys whose values are plain objects on both sides are merged key-by-key;
 * everything else (primitives, arrays, functions) is overwritten by `source`.
 * Neither input is mutated.
 */
function deepMergeObjects<T extends PlainObject>(target: T, source: PlainObject): T {
  const merged: PlainObject = { ...target }
  for (const key of Object.keys(source)) {
    const incoming = source[key]
    merged[key] = isPlainObject(incoming) && isPlainObject(merged[key]) ? deepMergeObjects(merged[key], incoming) : incoming
  }
  return merged as T
}
/**
 * Deep-merge multiple provider-specific options into a single map.
 *
 * When the same key appears in multiple option objects:
 * - If both values are plain objects: they are merged recursively.
 * - Otherwise (primitives, arrays): the later value overwrites the earlier one.
 *
 * @example
 * mergeProviderOptions(
 *   { openrouter: { reasoning: { enabled: true, effort: 'low' }, user: 'user-123' } },
 *   { openrouter: { reasoning: { effort: 'high', max_tokens: 500 }, models: ['gpt-4'] } }
 * )
 * // Result: {
 * //   openrouter: {
 * //     reasoning: { enabled: true, effort: 'high', max_tokens: 500 },
 * //     user: 'user-123',
 * //     models: ['gpt-4']
 * //   }
 * // }
 *
 * @param optionsMap Objects each containing options for one or more providers
 * @returns The fully merged TypedProviderOptions
 */
export function mergeProviderOptions(...optionsMap: Partial<TypedProviderOptions>[]): TypedProviderOptions {
  return optionsMap.reduce<TypedProviderOptions>((acc, options) => {
    // Skip undefined/null entries so callers can pass optional option sets.
    if (!options) {
      return acc
    }
    Object.entries(options).forEach(([providerId, providerOptions]) => {
      if (!providerOptions) {
        return
      }
      if (acc[providerId]) {
        // Same provider seen before: merge its options recursively.
        acc[providerId] = deepMergeObjects(acc[providerId] as PlainObject, providerOptions as PlainObject)
      } else {
        acc[providerId] = providerOptions as any
      }
    })
    return acc
  }, {} as TypedProviderOptions)
}
/**

View File

@@ -19,20 +19,15 @@ describe('Provider Schemas', () => {
expect(Array.isArray(baseProviders)).toBe(true)
expect(baseProviders.length).toBeGreaterThan(0)
// These are the actual base providers defined in schemas.ts
const expectedIds = [
'openai',
'openai-chat',
'openai-responses',
'openai-compatible',
'anthropic',
'google',
'xai',
'azure',
'azure-responses',
'deepseek',
'openrouter',
'cherryin',
'cherryin-chat'
'deepseek'
]
const actualIds = baseProviders.map((p) => p.id)
expectedIds.forEach((id) => {

View File

@@ -232,13 +232,11 @@ describe('RuntimeExecutor.generateImage', () => {
expect(pluginCallOrder).toEqual(['onRequestStart', 'transformParams', 'transformResult', 'onRequestEnd'])
// transformParams receives params without model (model is handled separately)
// and context with core fields + dynamic fields (requestId, startTime, etc.)
expect(testPlugin.transformParams).toHaveBeenCalledWith(
expect.objectContaining({ prompt: 'A test image' }),
{ prompt: 'A test image' },
expect.objectContaining({
providerId: 'openai',
model: 'dall-e-3'
modelId: 'dall-e-3'
})
)
@@ -275,12 +273,11 @@ describe('RuntimeExecutor.generateImage', () => {
await executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })
// resolveModel receives model id and context with core fields
expect(modelResolutionPlugin.resolveModel).toHaveBeenCalledWith(
'dall-e-3',
expect.objectContaining({
providerId: 'openai',
model: 'dall-e-3'
modelId: 'dall-e-3'
})
)
@@ -342,11 +339,12 @@ describe('RuntimeExecutor.generateImage', () => {
.generateImage({ model: 'invalid-model', prompt: 'A test image' })
.catch((error) => error)
// Error is thrown from pluginEngine directly as ImageModelResolutionError
expect(thrownError).toBeInstanceOf(ImageModelResolutionError)
expect(thrownError.message).toContain('Failed to resolve image model: invalid-model')
expect(thrownError).toBeInstanceOf(ImageGenerationError)
expect(thrownError.message).toContain('Failed to generate image:')
expect(thrownError.providerId).toBe('openai')
expect(thrownError.modelId).toBe('invalid-model')
expect(thrownError.cause).toBeInstanceOf(ImageModelResolutionError)
expect(thrownError.cause.message).toContain('Failed to resolve image model: invalid-model')
})
it('should handle ImageModelResolutionError without provider', async () => {
@@ -364,9 +362,8 @@ describe('RuntimeExecutor.generateImage', () => {
const apiError = new Error('API request failed')
vi.mocked(aiGenerateImage).mockRejectedValue(apiError)
// Error propagates directly from pluginEngine without wrapping
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
'API request failed'
'Failed to generate image:'
)
})
@@ -379,9 +376,8 @@ describe('RuntimeExecutor.generateImage', () => {
vi.mocked(aiGenerateImage).mockRejectedValue(noImageError)
vi.mocked(NoImageGeneratedError.isInstance).mockReturnValue(true)
// Error propagates directly from pluginEngine
await expect(executor.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
'No image generated'
'Failed to generate image:'
)
})
@@ -402,17 +398,15 @@ describe('RuntimeExecutor.generateImage', () => {
[errorPlugin]
)
// Error propagates directly from pluginEngine
await expect(executorWithPlugin.generateImage({ model: 'dall-e-3', prompt: 'A test image' })).rejects.toThrow(
'Generation failed'
'Failed to generate image:'
)
// onError receives the original error and context with core fields
expect(errorPlugin.onError).toHaveBeenCalledWith(
error,
expect.objectContaining({
providerId: 'openai',
model: 'dall-e-3'
modelId: 'dall-e-3'
})
)
})
@@ -425,10 +419,9 @@ describe('RuntimeExecutor.generateImage', () => {
const abortController = new AbortController()
setTimeout(() => abortController.abort(), 10)
// Error propagates directly from pluginEngine
await expect(
executor.generateImage({ model: 'dall-e-3', prompt: 'A test image', abortSignal: abortController.signal })
).rejects.toThrow('Operation was aborted')
).rejects.toThrow('Failed to generate image:')
})
})

View File

@@ -17,14 +17,10 @@ import type { AiPlugin } from '../../plugins'
import { globalRegistryManagement } from '../../providers/RegistryManagement'
import { RuntimeExecutor } from '../executor'
// Mock AI SDK - use importOriginal to keep jsonSchema and other non-mocked exports
vi.mock('ai', async (importOriginal) => {
const actual = (await importOriginal()) as Record<string, unknown>
return {
...actual,
generateText: vi.fn()
}
})
// Mock AI SDK
vi.mock('ai', () => ({
generateText: vi.fn()
}))
vi.mock('../../providers/RegistryManagement', () => ({
globalRegistryManagement: {
@@ -413,12 +409,11 @@ describe('RuntimeExecutor.generateText', () => {
})
).rejects.toThrow('Generation failed')
// onError receives the original error and context with core fields
expect(errorPlugin.onError).toHaveBeenCalledWith(
error,
expect.objectContaining({
providerId: 'openai',
model: 'gpt-4'
modelId: 'gpt-4'
})
)
})

View File

@@ -11,14 +11,10 @@ import type { AiPlugin } from '../../plugins'
import { globalRegistryManagement } from '../../providers/RegistryManagement'
import { RuntimeExecutor } from '../executor'
// Mock AI SDK - use importOriginal to keep jsonSchema and other non-mocked exports
vi.mock('ai', async (importOriginal) => {
const actual = (await importOriginal()) as Record<string, unknown>
return {
...actual,
streamText: vi.fn()
}
})
// Mock AI SDK
vi.mock('ai', () => ({
streamText: vi.fn()
}))
vi.mock('../../providers/RegistryManagement', () => ({
globalRegistryManagement: {
@@ -157,7 +153,7 @@ describe('RuntimeExecutor.streamText', () => {
describe('Max Tokens Parameter', () => {
const maxTokensValues = [10, 50, 100, 500, 1000, 2000, 4000]
it.each(maxTokensValues)('should support maxOutputTokens=%s', async (maxOutputTokens) => {
it.each(maxTokensValues)('should support maxTokens=%s', async (maxTokens) => {
const mockStream = {
textStream: (async function* () {
yield 'Response'
@@ -172,13 +168,12 @@ describe('RuntimeExecutor.streamText', () => {
await executor.streamText({
model: 'gpt-4',
messages: testMessages.simple,
maxOutputTokens
maxOutputTokens: maxTokens
})
// Parameters are passed through without transformation
expect(streamText).toHaveBeenCalledWith(
expect.objectContaining({
maxOutputTokens
maxTokens
})
)
})
@@ -518,12 +513,11 @@ describe('RuntimeExecutor.streamText', () => {
})
).rejects.toThrow('Stream error')
// onError receives the original error and context with core fields
expect(errorPlugin.onError).toHaveBeenCalledWith(
error,
expect.objectContaining({
providerId: 'openai',
model: 'gpt-4'
modelId: 'gpt-4'
})
)
})

View File

@@ -1,20 +1,12 @@
import path from 'node:path'
import { fileURLToPath } from 'node:url'
import { defineConfig } from 'vitest/config'
const __dirname = path.dirname(fileURLToPath(import.meta.url))
export default defineConfig({
test: {
globals: true,
setupFiles: [path.resolve(__dirname, './src/__tests__/setup.ts')]
globals: true
},
resolve: {
alias: {
'@': path.resolve(__dirname, './src'),
// Mock external packages that may not be available in test environment
'@cherrystudio/ai-sdk-provider': path.resolve(__dirname, './src/__tests__/mocks/ai-sdk-provider.ts')
'@': './src'
}
},
esbuild: {

View File

@@ -88,16 +88,11 @@ export function getSdkClient(
}
})
}
let baseURL =
const baseURL =
provider.type === 'anthropic'
? provider.apiHost
: (provider.anthropicApiHost && provider.anthropicApiHost.trim()) || provider.apiHost
// Anthropic SDK automatically appends /v1 to all endpoints (like /v1/messages, /v1/models)
// We need to strip api version from baseURL to avoid duplication (e.g., /v3/v1/models)
// formatProviderApiHost adds /v1 for AI SDK compatibility, but Anthropic SDK needs it removed
baseURL = baseURL.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '')
logger.debug('Anthropic API baseURL', { baseURL, providerId: provider.id })
if (provider.id === 'aihubmix') {

View File

@@ -1,48 +0,0 @@
/**
 * @fileoverview Shared provider configuration for Claude Code and Anthropic API compatibility
 *
 * This module defines which models from specific providers support the Anthropic API endpoint.
 * Used by both the Code Tools page and the Anthropic SDK client.
 */

/**
 * Silicon provider models that support Anthropic API endpoint.
 * These models can be used with Claude Code via the Anthropic-compatible API.
 *
 * @see https://docs.siliconflow.cn/cn/api-reference/chat-completions/messages
 */
export const SILICON_ANTHROPIC_COMPATIBLE_MODELS: readonly string[] = [
  // DeepSeek V3.1 series
  'Pro/deepseek-ai/DeepSeek-V3.1-Terminus',
  'deepseek-ai/DeepSeek-V3.1',
  'Pro/deepseek-ai/DeepSeek-V3.1',
  // DeepSeek V3 series
  'deepseek-ai/DeepSeek-V3',
  'Pro/deepseek-ai/DeepSeek-V3',
  // Moonshot/Kimi series
  'moonshotai/Kimi-K2-Instruct-0905',
  'Pro/moonshotai/Kimi-K2-Instruct-0905',
  'moonshotai/Kimi-Dev-72B',
  // Baidu ERNIE
  'baidu/ERNIE-4.5-300B-A47B'
]

// Set view of the list above, for O(1) membership checks.
const compatibleModelIds: ReadonlySet<string> = new Set(SILICON_ANTHROPIC_COMPATIBLE_MODELS)

/**
 * Checks if a model ID is compatible with Anthropic API on Silicon provider.
 *
 * @param modelId - The model ID to check
 * @returns true if the model supports Anthropic API endpoint
 */
export function isSiliconAnthropicCompatibleModel(modelId: string): boolean {
  return compatibleModelIds.has(modelId)
}

/**
 * Silicon provider's Anthropic API host URL.
 */
export const SILICON_ANTHROPIC_API_HOST = 'https://api.siliconflow.cn'

View File

@@ -1,64 +1,42 @@
import { defineConfig } from '@playwright/test'
import { defineConfig, devices } from '@playwright/test'
/**
* Playwright configuration for Electron e2e testing.
* See https://playwright.dev/docs/test-configuration
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
// Look for test files in the specs directory
testDir: './tests/e2e/specs',
// Global timeout for each test
timeout: 60000,
// Assertion timeout
expect: {
timeout: 10000
},
// Electron apps should run tests sequentially to avoid conflicts
fullyParallel: false,
workers: 1,
// Fail the build on CI if you accidentally left test.only in the source code
// Look for test files, relative to this configuration file.
testDir: './tests/e2e',
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
// Retry on CI only
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
// Reporter configuration
reporter: [['html', { outputFolder: 'playwright-report' }], ['list']],
// Global setup and teardown
globalSetup: './tests/e2e/global-setup.ts',
globalTeardown: './tests/e2e/global-teardown.ts',
// Output directory for test artifacts
outputDir: './test-results',
// Shared settings for all tests
/* Opt out of parallel tests on CI. */
workers: process.env.CI ? 1 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
// Collect trace when retrying the failed test
trace: 'retain-on-failure',
/* Base URL to use in actions like `await page.goto('/')`. */
// baseURL: 'http://localhost:3000',
// Take screenshot only on failure
screenshot: 'only-on-failure',
// Record video only on failure
video: 'retain-on-failure',
// Action timeout
actionTimeout: 15000,
// Navigation timeout
navigationTimeout: 30000
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: 'on-first-retry'
},
// Single project for Electron testing
/* Configure projects for major browsers */
projects: [
{
name: 'electron',
testMatch: '**/*.spec.ts'
name: 'chromium',
use: { ...devices['Desktop Chrome'] }
}
]
/* Run your local dev server before starting the tests */
// webServer: {
// command: 'npm run start',
// url: 'http://localhost:3000',
// reuseExistingServer: !process.env.CI,
// },
})

View File

@@ -1,7 +1,6 @@
import { CacheService } from '@main/services/CacheService'
import { loggerService } from '@main/services/LoggerService'
import { reduxService } from '@main/services/ReduxService'
import { isSiliconAnthropicCompatibleModel } from '@shared/config/providers'
import type { ApiModel, Model, Provider } from '@types'
const logger = loggerService.withContext('ApiServerUtils')
@@ -288,8 +287,6 @@ export const getProviderAnthropicModelChecker = (providerId: string): ((m: Model
return (m: Model) => m.endpoint_type === 'anthropic'
case 'aihubmix':
return (m: Model) => m.id.includes('claude')
case 'silicon':
return (m: Model) => isSiliconAnthropicCompatibleModel(m.id)
default:
// allow all models when checker not configured
return () => true

View File

@@ -8,6 +8,7 @@ import DiDiMcpServer from './didi-mcp'
import DifyKnowledgeServer from './dify-knowledge'
import FetchServer from './fetch'
import FileSystemServer from './filesystem'
import MCPUIDemoServer from './mcp-ui-demo'
import MemoryServer from './memory'
import PythonServer from './python'
import ThinkingServer from './sequentialthinking'
@@ -48,6 +49,9 @@ export function createInMemoryMCPServer(
const apiKey = envs.DIDI_API_KEY
return new DiDiMcpServer(apiKey).server
}
case BuiltinMCPServerNames.mcpUIDemo: {
return new MCPUIDemoServer().server
}
default:
throw new Error(`Unknown in-memory MCP server: ${name}`)
}

View File

@@ -0,0 +1,433 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
// Module-level MCP server instance for the UI demo.
// NOTE(review): this singleton is shared by every MCPUIDemoServer wrapper
// constructed below.
const server = new Server(
  {
    name: 'mcp-ui-demo',
    version: '1.0.0'
  },
  {
    capabilities: {
      // Only tool capabilities are advertised; no resources or prompts.
      tools: {}
    }
  }
)
// HTML templates for different UIs
// Static "hello world" page: a centered heading on a purple gradient.
// Returns a trimmed, self-contained HTML document for embedding as a
// ui:// resource (see the show_hello_ui tool handler).
const getHelloWorldUI = () =>
  `
<!DOCTYPE html>
<html>
<head>
<style>
body {
font-family: system-ui, -apple-system, sans-serif;
padding: 20px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
min-height: 200px;
display: flex;
align-items: center;
justify-content: center;
margin: 0;
}
.container {
text-align: center;
}
h1 {
font-size: 2.5em;
margin-bottom: 10px;
text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
}
p {
font-size: 1.2em;
opacity: 0.9;
}
</style>
</head>
<body>
<div class="container">
<h1>🎉 Hello from MCP UI!</h1>
<p>This is a simple MCP UI Resource rendered in Cherry Studio</p>
</div>
</body>
</html>
`.trim()
// Interactive demo page: buttons that message the host frame via
// window.parent.postMessage ('tool' and 'link' payloads) and a listener for
// 'ui-message-response' replies. Returned as a trimmed HTML document string.
const getInteractiveUI = () =>
  `
<!DOCTYPE html>
<html>
<head>
<style>
body {
font-family: system-ui, -apple-system, sans-serif;
padding: 20px;
background: #f5f5f5;
margin: 0;
}
.container {
max-width: 600px;
margin: 0 auto;
background: white;
padding: 30px;
border-radius: 12px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
h2 {
color: #333;
margin-bottom: 20px;
}
button {
background: #667eea;
color: white;
border: none;
padding: 12px 24px;
border-radius: 6px;
cursor: pointer;
font-size: 16px;
margin: 5px;
transition: background 0.2s;
}
button:hover {
background: #5568d3;
}
#output {
margin-top: 20px;
padding: 15px;
background: #f8f9fa;
border-radius: 6px;
border: 1px solid #e0e0e0;
min-height: 50px;
}
.info {
color: #666;
font-size: 14px;
margin-top: 15px;
}
</style>
</head>
<body>
<div class="container">
<h2>Interactive MCP UI Demo</h2>
<p>Click the buttons to interact with MCP tools:</p>
<button onclick="callEchoTool()">Call Echo Tool</button>
<button onclick="getTimestamp()">Get Timestamp</button>
<button onclick="openLink()">Open External Link</button>
<div id="output"></div>
<div class="info">
This UI can communicate with the host application through postMessage API.
</div>
</div>
<script>
function callEchoTool() {
const output = document.getElementById('output');
output.innerHTML = '<p style="color: #0066cc;">Calling echo tool...</p>';
window.parent.postMessage({
type: 'tool',
payload: {
toolName: 'demo_echo',
params: {
message: 'Hello from MCP UI! Time: ' + new Date().toLocaleTimeString()
}
}
}, '*');
}
function getTimestamp() {
const output = document.getElementById('output');
const now = new Date();
output.innerHTML = \`
<p style="color: #00aa00;">
<strong>Current Timestamp:</strong><br/>
\${now.toLocaleString()}<br/>
Unix: \${Math.floor(now.getTime() / 1000)}
</p>
\`;
}
function openLink() {
window.parent.postMessage({
type: 'link',
payload: {
url: 'https://github.com/idosal/mcp-ui'
}
}, '*');
}
// Listen for responses
window.addEventListener('message', (event) => {
if (event.data.type === 'ui-message-response') {
const output = document.getElementById('output');
const { response, error } = event.data.payload;
if (error) {
output.innerHTML = \`<p style="color: #cc0000;">Error: \${error}</p>\`;
} else {
output.innerHTML = \`<p style="color: #00aa00;">Response: \${JSON.stringify(response, null, 2)}</p>\`;
}
}
});
</script>
</body>
</html>
`.trim()
// Form demo page: name/email/message inputs whose submission sends a
// 'notify' postMessage to the host frame and renders the captured data
// locally. Returned as a trimmed HTML document string.
const getFormUI = () =>
  `
<!DOCTYPE html>
<html>
<head>
<style>
body {
font-family: system-ui, -apple-system, sans-serif;
padding: 20px;
background: #f5f5f5;
margin: 0;
}
.container {
max-width: 500px;
margin: 0 auto;
background: white;
padding: 30px;
border-radius: 12px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
h2 {
color: #333;
margin-bottom: 20px;
}
.form-group {
margin-bottom: 15px;
}
label {
display: block;
margin-bottom: 5px;
color: #555;
font-weight: 500;
}
input, textarea {
width: 100%;
padding: 10px;
border: 1px solid #ddd;
border-radius: 6px;
font-size: 14px;
box-sizing: border-box;
}
textarea {
min-height: 100px;
resize: vertical;
}
button {
background: #667eea;
color: white;
border: none;
padding: 12px 24px;
border-radius: 6px;
cursor: pointer;
font-size: 16px;
width: 100%;
margin-top: 10px;
}
button:hover {
background: #5568d3;
}
#result {
margin-top: 20px;
padding: 15px;
background: #f8f9fa;
border-radius: 6px;
display: none;
}
</style>
</head>
<body>
<div class="container">
<h2>📝 Form UI Demo</h2>
<form id="demoForm" onsubmit="handleSubmit(event)">
<div class="form-group">
<label for="name">Name:</label>
<input type="text" id="name" name="name" required placeholder="Enter your name">
</div>
<div class="form-group">
<label for="email">Email:</label>
<input type="email" id="email" name="email" required placeholder="your@email.com">
</div>
<div class="form-group">
<label for="message">Message:</label>
<textarea id="message" name="message" required placeholder="Enter your message here..."></textarea>
</div>
<button type="submit">Submit Form</button>
</form>
<div id="result"></div>
</div>
<script>
function handleSubmit(event) {
event.preventDefault();
const formData = new FormData(event.target);
const data = Object.fromEntries(formData.entries());
const result = document.getElementById('result');
result.style.display = 'block';
result.innerHTML = '<p style="color: #0066cc;">Submitting form...</p>';
// Send form data to host
window.parent.postMessage({
type: 'notify',
payload: {
message: 'Form submitted with data: ' + JSON.stringify(data)
}
}, '*');
// Display result
result.innerHTML = \`
<p style="color: #00aa00;"><strong>Form Submitted!</strong></p>
<pre style="background: white; padding: 10px; border-radius: 4px; overflow-x: auto;">\${JSON.stringify(data, null, 2)}</pre>
\`;
}
</script>
</body>
</html>
`.trim()
// List available tools
// Advertises the demo's four tools: a plain echo tool plus three tools
// whose results carry embedded HTML UI resources (hello / interactive / form).
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: 'demo_echo',
        description: 'Echo back the message sent from UI',
        inputSchema: {
          type: 'object',
          properties: {
            message: {
              type: 'string',
              description: 'Message to echo back'
            }
          },
          required: ['message']
        }
      },
      // The show_* tools take no arguments; their schemas are empty objects.
      {
        name: 'show_hello_ui',
        description: 'Display a simple hello world UI with gradient background',
        inputSchema: {
          type: 'object',
          properties: {}
        }
      },
      {
        name: 'show_interactive_ui',
        description:
          'Display an interactive UI demo with buttons for calling tools, getting timestamps, and opening links',
        inputSchema: {
          type: 'object',
          properties: {}
        }
      },
      {
        name: 'show_form_ui',
        description: 'Display a form UI demo with input fields for name, email, and message',
        inputSchema: {
          type: 'object',
          properties: {}
        }
      }
    ]
  }
})
// Handle tool calls
// `demo_echo` returns plain JSON text; the three `show_*_ui` tools return a
// serialized MCP-UI resource payload (a ui:// URI plus an inline HTML document).
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params

  if (name === 'demo_echo') {
    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            // Echo the caller-supplied message back, with a server timestamp.
            echo: args?.message || 'No message provided',
            timestamp: new Date().toISOString()
          })
        }
      ]
    }
  }

  // All UI tools share an identical response shape; table-drive them instead
  // of repeating the same JSON.stringify block three times.
  const uiResources: Record<string, { uri: string; render: () => string }> = {
    show_hello_ui: { uri: 'ui://demo/hello', render: getHelloWorldUI },
    show_interactive_ui: { uri: 'ui://demo/interactive', render: getInteractiveUI },
    show_form_ui: { uri: 'ui://demo/form', render: getFormUI }
  }

  const uiResource = uiResources[name]
  if (uiResource) {
    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            type: 'resource',
            resource: {
              uri: uiResource.uri,
              mimeType: 'text/html',
              text: uiResource.render()
            }
          })
        }
      ]
    }
  }

  throw new Error(`Unknown tool: ${name}`)
})
/**
 * Thin wrapper exposing the demo MCP server, matching the shape the
 * in-memory MCP server factory expects (a `server` property).
 *
 * NOTE(review): `server` is the module-level singleton above, so every
 * MCPUIDemoServer instance shares the same underlying Server object.
 */
class MCPUIDemoServer {
  // The shared Server instance configured with the demo's tool handlers.
  public server: Server

  constructor() {
    this.server = server
  }
}

export default MCPUIDemoServer

View File

@@ -548,17 +548,6 @@ class CodeToolsService {
logger.debug(`Environment variables:`, Object.keys(env))
logger.debug(`Options:`, options)
// Validate directory exists before proceeding
if (!directory || !fs.existsSync(directory)) {
const errorMessage = `Directory does not exist: ${directory}`
logger.error(errorMessage)
return {
success: false,
message: errorMessage,
command: ''
}
}
const packageName = await this.getPackageName(cliTool)
const bunPath = await this.getBunPath()
const executableName = await this.getCliExecutableName(cliTool)
@@ -720,7 +709,6 @@ class CodeToolsService {
// Build bat file content, including debug information
const batContent = [
'@echo off',
'chcp 65001 >nul 2>&1', // Switch to UTF-8 code page for international path support
`title ${cliTool} - Cherry Studio`, // Set window title in bat file
'echo ================================================',
'echo Cherry Studio CLI Tool Launcher',

View File

@@ -620,7 +620,7 @@ class McpService {
tools.map((tool: SDKTool) => {
const serverTool: MCPTool = {
...tool,
id: buildFunctionCallToolName(server.name, tool.name, server.id),
id: buildFunctionCallToolName(server.name, tool.name),
serverId: server.id,
serverName: server.name,
type: 'mcp'

View File

@@ -1,7 +1,6 @@
// src/main/services/agents/services/claudecode/index.ts
import { EventEmitter } from 'node:events'
import { createRequire } from 'node:module'
import path from 'node:path'
import type {
CanUseTool,
@@ -122,11 +121,7 @@ class ClaudeCodeService implements AgentServiceInterface {
// TODO: support set small model in UI
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
ELECTRON_RUN_AS_NODE: '1',
ELECTRON_NO_ATTACH_CONSOLE: '1',
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
// This prevents the SDK from using the user's home directory which may have encoding problems
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
ELECTRON_NO_ATTACH_CONSOLE: '1'
}
const errorChunks: string[] = []

View File

@@ -1,196 +0,0 @@
import { describe, expect, it } from 'vitest'
import { buildFunctionCallToolName } from '../mcp'
describe('buildFunctionCallToolName', () => {
describe('basic functionality', () => {
it('should combine server name and tool name', () => {
const result = buildFunctionCallToolName('github', 'search_issues')
expect(result).toContain('github')
expect(result).toContain('search')
})
it('should sanitize names by replacing dashes with underscores', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool')
// Input dashes are replaced, but the separator between server and tool is a dash
expect(result).toBe('my_serv-my_tool')
expect(result).toContain('_')
})
it('should handle empty server names gracefully', () => {
const result = buildFunctionCallToolName('', 'tool')
expect(result).toBeTruthy()
})
})
describe('uniqueness with serverId', () => {
it('should generate different IDs for same server name but different serverIds', () => {
const serverId1 = 'server-id-123456'
const serverId2 = 'server-id-789012'
const serverName = 'github'
const toolName = 'search_repos'
const result1 = buildFunctionCallToolName(serverName, toolName, serverId1)
const result2 = buildFunctionCallToolName(serverName, toolName, serverId2)
expect(result1).not.toBe(result2)
expect(result1).toContain('123456')
expect(result2).toContain('789012')
})
it('should generate same ID when serverId is not provided', () => {
const serverName = 'github'
const toolName = 'search_repos'
const result1 = buildFunctionCallToolName(serverName, toolName)
const result2 = buildFunctionCallToolName(serverName, toolName)
expect(result1).toBe(result2)
})
it('should include serverId suffix when provided', () => {
const serverId = 'abc123def456'
const result = buildFunctionCallToolName('server', 'tool', serverId)
// Should include last 6 chars of serverId
expect(result).toContain('ef456')
})
})
describe('character sanitization', () => {
it('should replace invalid characters with underscores', () => {
const result = buildFunctionCallToolName('test@server', 'tool#name')
expect(result).not.toMatch(/[@#]/)
expect(result).toMatch(/^[a-zA-Z0-9_-]+$/)
})
it('should ensure name starts with a letter', () => {
const result = buildFunctionCallToolName('123server', '456tool')
expect(result).toMatch(/^[a-zA-Z]/)
})
it('should handle consecutive underscores/dashes', () => {
const result = buildFunctionCallToolName('my--server', 'my__tool')
expect(result).not.toMatch(/[_-]{2,}/)
})
})
describe('length constraints', () => {
  // Shared oversized fixtures: 50 + 50 chars guarantees the 63-char cap is hit.
  const oversizedServer = 'a'.repeat(50)
  const oversizedTool = 'b'.repeat(50)

  it('should truncate names longer than 63 characters', () => {
    const name = buildFunctionCallToolName(oversizedServer, oversizedTool, 'id123456')
    expect(name.length).toBeLessThanOrEqual(63)
  })

  // Truncation must not leave a dangling separator at the end.
  it('should not end with underscore or dash after truncation', () => {
    const name = buildFunctionCallToolName(oversizedServer, oversizedTool, 'id123456')
    expect(name).not.toMatch(/[_-]$/)
  })

  it('should preserve serverId suffix even with long server/tool names', () => {
    const name = buildFunctionCallToolName(oversizedServer, oversizedTool, 'server-id-xyz789')
    // The suffix should be preserved and not truncated
    expect(name).toContain('xyz789')
    expect(name.length).toBeLessThanOrEqual(63)
  })

  it('should ensure two long-named servers with different IDs produce different results', () => {
    const firstName = buildFunctionCallToolName(oversizedServer, oversizedTool, 'server-id-abc123')
    const secondName = buildFunctionCallToolName(oversizedServer, oversizedTool, 'server-id-def456')
    // Both should be within limit
    expect(firstName.length).toBeLessThanOrEqual(63)
    expect(secondName.length).toBeLessThanOrEqual(63)
    // They should be different due to preserved suffix
    expect(firstName).not.toBe(secondName)
  })
})
describe('edge cases with serverId', () => {
  // A serverId with no alphanumerics must still yield a valid, suffixed name
  // via the hash fallback rather than an empty suffix.
  it('should handle serverId with only non-alphanumeric characters', () => {
    const name = buildFunctionCallToolName('server', 'tool', '------') // All dashes
    expect(name).toBeTruthy()
    expect(name.length).toBeLessThanOrEqual(63)
    expect(name).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
    // Should have a suffix (underscore followed by something)
    expect(name).toMatch(/_[a-z0-9]+$/)
  })

  it('should produce different results for different non-alphanumeric serverIds', () => {
    const dashesResult = buildFunctionCallToolName('server', 'tool', '------')
    const bangsResult = buildFunctionCallToolName('server', 'tool', '!!!!!!')
    // Should be different because the hash fallback produces different values
    expect(dashesResult).not.toBe(bangsResult)
  })

  it('should handle empty string serverId differently from undefined', () => {
    const emptyIdResult = buildFunctionCallToolName('server', 'tool', '')
    const noIdResult = buildFunctionCallToolName('server', 'tool', undefined)
    // Empty string is falsy, so both should behave the same (no suffix)
    expect(emptyIdResult).toBe(noIdResult)
  })

  it('should handle serverId with mixed alphanumeric and special chars', () => {
    // Mixed chars, last 6 chars contain some alphanumeric
    const name = buildFunctionCallToolName('server', 'tool', 'ab@#cd')
    // Should extract alphanumeric chars: 'abcd' from 'ab@#cd'
    expect(name).toContain('abcd')
  })
})
describe('real-world scenarios', () => {
  // Simulates two GitHub MCP server instances (github.com + GHE) exposing
  // the same tool: the generated names must stay distinct and valid.
  it('should handle GitHub MCP server instances correctly', () => {
    const publicInstance = buildFunctionCallToolName('github', 'search_repositories', 'server-github-com-abc123')
    const enterpriseInstance = buildFunctionCallToolName('github', 'search_repositories', 'server-ghe-internal-xyz789')

    // Should be different
    expect(publicInstance).not.toBe(enterpriseInstance)
    // Both should be valid identifiers
    expect(publicInstance).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
    expect(enterpriseInstance).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
    // Both should be <= 63 chars
    expect(publicInstance.length).toBeLessThanOrEqual(63)
    expect(enterpriseInstance.length).toBeLessThanOrEqual(63)
  })

  // Guards against "github_github_..." style duplication when the tool name
  // already embeds the server name.
  it('should handle tool names that already include server name prefix', () => {
    const name = buildFunctionCallToolName('github', 'github_search_repos')
    expect(name).toBeTruthy()
    // Should not double the server name
    expect(name.split('github').length - 1).toBeLessThanOrEqual(2)
  })
})
})

View File

@@ -1,25 +1,7 @@
export function buildFunctionCallToolName(serverName: string, toolName: string, serverId?: string) {
export function buildFunctionCallToolName(serverName: string, toolName: string) {
const sanitizedServer = serverName.trim().replace(/-/g, '_')
const sanitizedTool = toolName.trim().replace(/-/g, '_')
// Calculate suffix first to reserve space for it
// Suffix format: "_" + 6 alphanumeric chars = 7 chars total
let serverIdSuffix = ''
if (serverId) {
// Take the last 6 characters of the serverId for brevity
serverIdSuffix = serverId.slice(-6).replace(/[^a-zA-Z0-9]/g, '')
// Fallback: if suffix becomes empty (all non-alphanumeric chars), use a simple hash
if (!serverIdSuffix) {
const hash = serverId.split('').reduce((acc, char) => acc + char.charCodeAt(0), 0)
serverIdSuffix = hash.toString(36).slice(-6) || 'x'
}
}
// Reserve space for suffix when calculating max base name length
const SUFFIX_LENGTH = serverIdSuffix ? serverIdSuffix.length + 1 : 0 // +1 for underscore
const MAX_BASE_LENGTH = 63 - SUFFIX_LENGTH
// Combine server name and tool name
let name = sanitizedTool
if (!sanitizedTool.includes(sanitizedServer.slice(0, 7))) {
@@ -38,9 +20,9 @@ export function buildFunctionCallToolName(serverName: string, toolName: string,
// Remove consecutive underscores/dashes (optional improvement)
name = name.replace(/[_-]{2,}/g, '_')
// Truncate base name BEFORE adding suffix to ensure suffix is never cut off
if (name.length > MAX_BASE_LENGTH) {
name = name.slice(0, MAX_BASE_LENGTH)
// Truncate to 63 characters maximum
if (name.length > 63) {
name = name.slice(0, 63)
}
// Handle edge case: ensure we still have a valid name if truncation left invalid chars at edges
@@ -48,10 +30,5 @@ export function buildFunctionCallToolName(serverName: string, toolName: string,
name = name.slice(0, -1)
}
// Now append the suffix - it will always fit within 63 chars
if (serverIdSuffix) {
name = `${name}_${serverIdSuffix}`
}
return name
}

View File

@@ -386,13 +386,14 @@ export class AiSdkToChunkAdapter {
case 'error':
this.onChunk({
type: ChunkType.ERROR,
error: AISDKError.isInstance(chunk.error)
? chunk.error
: new ProviderSpecificError({
message: formatErrorMessage(chunk.error),
provider: 'unknown',
cause: chunk.error
})
error:
chunk.error instanceof AISDKError
? chunk.error
: new ProviderSpecificError({
message: formatErrorMessage(chunk.error),
provider: 'unknown',
cause: chunk.error
})
})
break

View File

@@ -212,9 +212,8 @@ export class ToolCallChunkHandler {
description: toolName,
type: 'builtin'
} as BaseTool
} else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
} else if ((mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool)) {
// 如果是客户端执行的 MCP 工具,沿用现有逻辑
// toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
// mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
// if (!mcpTool) {

View File

@@ -27,7 +27,6 @@ import { buildAiSdkMiddlewares } from './middleware/AiSdkMiddlewareBuilder'
import { buildPlugins } from './plugins/PluginBuilder'
import { createAiSdkProvider } from './provider/factory'
import {
adaptProvider,
getActualProvider,
isModernSdkSupported,
prepareSpecialProviderConfig,
@@ -51,39 +50,7 @@ export default class ModernAiProvider {
private model?: Model
private localProvider: Awaited<AiSdkProvider> | null = null
/**
* Constructor for ModernAiProvider
*
* @param modelOrProvider - Model or Provider object
* @param provider - Optional Provider object (only used when first param is Model)
*
* @remarks
* **Important behavior notes**:
*
* 1. When called with `(model)`:
* - Calls `getActualProvider(model)` to retrieve and format the provider
* - URL will be automatically formatted via `formatProviderApiHost`, adding version suffixes like `/v1`
*
* 2. When called with `(model, provider)`:
* - The provided provider will be adapted via `adaptProvider`
* - URL formatting behavior depends on the adapted result
*
* 3. When called with `(provider)`:
* - The provider will be adapted via `adaptProvider`
* - Used for operations that don't need a model (e.g., fetchModels)
*
* @example
* ```typescript
* // Recommended: Auto-format URL
* const ai = new ModernAiProvider(model)
*
* // Provider will be adapted
* const ai = new ModernAiProvider(model, customProvider)
*
* // For operations that don't need a model
* const ai = new ModernAiProvider(provider)
* ```
*/
// 构造函数重载签名
constructor(model: Model, provider?: Provider)
constructor(provider: Provider)
constructor(modelOrProvider: Model | Provider, provider?: Provider)
@@ -91,12 +58,12 @@ export default class ModernAiProvider {
if (this.isModel(modelOrProvider)) {
// 传入的是 Model
this.model = modelOrProvider
this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider)
this.actualProvider = provider || getActualProvider(modelOrProvider)
// 只保存配置不预先创建executor
this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
} else {
// 传入的是 Provider
this.actualProvider = adaptProvider({ provider: modelOrProvider })
this.actualProvider = modelOrProvider
// model为可选某些操作如fetchModels不需要model
}
@@ -189,7 +156,7 @@ export default class ModernAiProvider {
config: ModernAiProviderConfig
): Promise<CompletionsResult> {
// ai-gateway不是image/generation 端点所以就先不走legacy了
if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds['ai-gateway']) {
if (config.isImageGenerationEndpoint && config.provider!.id !== SystemProviderIds['ai-gateway']) {
// 使用 legacy 实现处理图像生成(支持图片编辑等高级功能)
if (!config.uiMessages) {
throw new Error('uiMessages is required for image generation endpoint')
@@ -355,10 +322,10 @@ export default class ModernAiProvider {
}
}
// /**
// * 使用现代化 AI SDK 的图像生成实现,支持流式输出
// * @deprecated 已改为使用 legacy 实现以支持图片编辑等高级功能
// */
/**
* 使用现代化 AI SDK 的图像生成实现,支持流式输出
* @deprecated 已改为使用 legacy 实现以支持图片编辑等高级功能
*/
/*
private async modernImageGeneration(
model: ImageModel,

View File

@@ -405,9 +405,6 @@ export abstract class BaseApiClient<
if (!param.name?.trim()) {
return acc
}
// Parse JSON type parameters (Legacy API clients)
// Related: src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx:133-148
// The UI stores JSON type params as strings, this function parses them before sending to API
if (param.type === 'json') {
const value = param.value as string
if (value === 'undefined') {

View File

@@ -46,7 +46,6 @@ import type {
GeminiSdkRawOutput,
GeminiSdkToolCall
} from '@renderer/types/sdk'
import { getTrailingApiVersion, withoutTrailingApiVersion } from '@renderer/utils'
import { isToolUseModeFunction } from '@renderer/utils/assistant'
import {
geminiFunctionCallToMcpTool,
@@ -164,10 +163,6 @@ export class GeminiAPIClient extends BaseApiClient<
return models
}
override getBaseURL(): string {
return withoutTrailingApiVersion(super.getBaseURL())
}
override async getSdkInstance() {
if (this.sdkInstance) {
return this.sdkInstance
@@ -193,13 +188,6 @@ export class GeminiAPIClient extends BaseApiClient<
if (this.provider.isVertex) {
return 'v1'
}
// Extract trailing API version from the URL
const trailingVersion = getTrailingApiVersion(this.provider.apiHost || '')
if (trailingVersion) {
return trailingVersion
}
return 'v1beta'
}

View File

@@ -11,8 +11,10 @@ import {
findTokenLimit,
GEMINI_FLASH_MODEL_REGEX,
getThinkModelType,
isClaudeReasoningModel,
isDeepSeekHybridInferenceModel,
isDoubaoThinkingAutoModel,
isGeminiReasoningModel,
isGPT5SeriesModel,
isGrokReasoningModel,
isNotSupportSystemMessageModel,
@@ -649,6 +651,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
logger.warn('No user message. Some providers may not support.')
}
// poe 需要通过用户消息传递 reasoningEffort
const reasoningEffort = this.getReasoningEffort(assistant, model)
const lastUserMsg = userMessages.findLast((m) => m.role === 'user')
@@ -659,6 +662,22 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
lastUserMsg.content = processPostsuffixQwen3Model(currentContent, qwenThinkModeEnabled)
}
if (this.provider.id === SystemProviderIds.poe) {
// 如果以后 poe 支持 reasoning_effort 参数了,可以删掉这部分
let suffix = ''
if (isGPT5SeriesModel(model) && reasoningEffort.reasoning_effort) {
suffix = ` --reasoning_effort ${reasoningEffort.reasoning_effort}`
} else if (isClaudeReasoningModel(model) && reasoningEffort.thinking?.budget_tokens) {
suffix = ` --thinking_budget ${reasoningEffort.thinking.budget_tokens}`
} else if (isGeminiReasoningModel(model) && reasoningEffort.extra_body?.google?.thinking_config) {
suffix = ` --thinking_budget ${reasoningEffort.extra_body.google.thinking_config.thinking_budget}`
}
// FIXME: poe 不支持多个text part上传文本文件的时候用的不是file part而是text part因此会出问题
// 临时解决方案是强制poe用string content但是其实poe部分支持array
if (typeof lastUserMsg.content === 'string') {
lastUserMsg.content += suffix
}
}
}
// 4. 最终请求消息

View File

@@ -1,6 +1,6 @@
import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
import { loggerService } from '@logger'
import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
import { isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
import type { MCPTool } from '@renderer/types'
import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
@@ -9,13 +9,11 @@ import type { LanguageModelMiddleware } from 'ai'
import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
import { isEmpty } from 'lodash'
import { getAiSdkProviderId } from '../provider/factory'
import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
import { noThinkMiddleware } from './noThinkMiddleware'
import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
import { toolChoiceMiddleware } from './toolChoiceMiddleware'
const logger = loggerService.withContext('AiSdkMiddlewareBuilder')
@@ -259,15 +257,6 @@ function addModelSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config: Ai
middleware: openrouterGenerateImageMiddleware()
})
}
if (isGemini3Model(config.model)) {
const aiSdkId = getAiSdkProviderId(config.provider)
builder.add({
name: 'skip-gemini3-thought-signature',
middleware: skipGeminiThoughtSignatureMiddleware(aiSdkId)
})
logger.debug('Added skip Gemini3 thought signature middleware')
}
}
/**

View File

@@ -1,36 +0,0 @@
import type { LanguageModelMiddleware } from 'ai'
/**
* skip Gemini Thought Signature Middleware
* 由于多模型客户端请求的复杂性(可以中途切换其他模型),这里选择通过中间件方式添加跳过所有 Gemini3 思考签名
* Due to the complexity of multi-model client requests (which can switch to other models mid-process),
* it was decided to add a skip for all Gemini3 thinking signatures via middleware.
* @param aiSdkId AI SDK Provider ID
* @returns LanguageModelMiddleware
*/
export function skipGeminiThoughtSignatureMiddleware(aiSdkId: string): LanguageModelMiddleware {
const MAGIC_STRING = 'skip_thought_signature_validator'
return {
middlewareVersion: 'v2',
transformParams: async ({ params }) => {
const transformedParams = { ...params }
// Process messages in prompt
if (transformedParams.prompt && Array.isArray(transformedParams.prompt)) {
transformedParams.prompt = transformedParams.prompt.map((message) => {
if (typeof message.content !== 'string') {
for (const part of message.content) {
const googleOptions = part?.providerOptions?.[aiSdkId]
if (googleOptions?.thoughtSignature) {
googleOptions.thoughtSignature = MAGIC_STRING
}
}
}
return message
})
}
return transformedParams
}
}
}

View File

@@ -180,10 +180,6 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start editing' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Here is the current preview' }]
@@ -221,7 +217,6 @@ describe('messageConverter', () => {
expect(result).toEqual([
{ role: 'system', content: 'fileid://reference' },
{ role: 'user', content: [{ type: 'text', text: 'Use this document as inspiration' }] },
{
role: 'assistant',
content: [{ type: 'text', text: 'Generated previews ready' }]

View File

@@ -7,7 +7,7 @@ import { isAwsBedrockProvider, isVertexProvider } from '@renderer/utils/provider
// https://docs.claude.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking
const INTERLEAVED_THINKING_HEADER = 'interleaved-thinking-2025-05-14'
// https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window
// const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
// https://docs.cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/web-search
const WEBSEARCH_HEADER = 'web-search-2025-03-05'
@@ -17,7 +17,7 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
if (
isClaude45ReasoningModel(model) &&
isToolUseModeFunction(assistant) &&
!(isVertexProvider(provider) || isAwsBedrockProvider(provider))
!(isVertexProvider(provider) && isAwsBedrockProvider(provider))
) {
anthropicHeaders.push(INTERLEAVED_THINKING_HEADER)
}
@@ -25,9 +25,7 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
if (isVertexProvider(provider) && assistant.enableWebSearch) {
anthropicHeaders.push(WEBSEARCH_HEADER)
}
// We may add it by user preference in assistant.settings instead of always adding it.
// See #11540, #11397
// anthropicHeaders.push(CONTEXT_100M_HEADER)
anthropicHeaders.push(CONTEXT_100M_HEADER)
}
return anthropicHeaders
}

View File

@@ -194,20 +194,20 @@ async function convertMessageToAssistantModelMessage(
* This function processes messages and transforms them into the format required by the SDK.
* It handles special cases for vision models and image enhancement models.
*
* @param messages - Array of messages to convert. Must contain at least 3 messages when using image enhancement models for special handling.
* @param messages - Array of messages to convert. Must contain at least 2 messages when using image enhancement models.
* @param model - The model configuration that determines conversion behavior
*
* @returns A promise that resolves to an array of SDK-compatible model messages
*
* @remarks
* For image enhancement models with 3+ messages:
* - Examines the last 2 messages to find an assistant message containing image blocks
* - If found, extracts images from the assistant message and appends them to the last user message content
* - Returns all converted messages (not just the last two) with the images merged into the user message
* - Typical pattern: [system?, assistant(image), user] -> [system?, assistant, user(image)]
* For image enhancement models with 2+ messages:
* - Expects the second-to-last message (index length-2) to be an assistant message containing image blocks
* - Expects the last message (index length-1) to be a user message
* - Extracts images from the assistant message and appends them to the user message content
* - Returns only the last two processed messages [assistantSdkMessage, userSdkMessage]
*
* For other models:
* - Returns all converted messages in order without special image handling
* - Returns all converted messages in order
*
* The function automatically detects vision model capabilities and adjusts conversion accordingly.
*/
@@ -220,25 +220,29 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage]))
}
// Special handling for image enhancement models
// Only merge images into the user message
// [system?, assistant(image), user] -> [system?, assistant, user(image)]
// Only keep the last two messages and merge images into the user message
// [system?, user, assistant, user]
if (isImageEnhancementModel(model) && messages.length >= 3) {
const needUpdatedMessages = messages.slice(-2)
const assistantMessage = needUpdatedMessages.find((m) => m.role === 'assistant')
const userSdkMessage = sdkMessages[sdkMessages.length - 1]
if (assistantMessage && userSdkMessage?.role === 'user') {
const imageBlocks = findImageBlocks(assistantMessage)
const imageParts = await convertImageBlockToImagePart(imageBlocks)
if (imageParts.length > 0) {
if (typeof userSdkMessage.content === 'string') {
userSdkMessage.content = [{ type: 'text', text: userSdkMessage.content }, ...imageParts]
} else if (Array.isArray(userSdkMessage.content)) {
userSdkMessage.content.push(...imageParts)
}
}
const needUpdatedSdkMessages = sdkMessages.slice(-2)
const assistantMessage = needUpdatedMessages.filter((m) => m.role === 'assistant')[0]
const assistantSdkMessage = needUpdatedSdkMessages.filter((m) => m.role === 'assistant')[0]
const userSdkMessage = needUpdatedSdkMessages.filter((m) => m.role === 'user')[0]
const systemSdkMessages = sdkMessages.filter((m) => m.role === 'system')
const imageBlocks = findImageBlocks(assistantMessage)
const imageParts = await convertImageBlockToImagePart(imageBlocks)
const parts: Array<TextPart | ImagePart | FilePart> = []
if (typeof userSdkMessage.content === 'string') {
parts.push({ type: 'text', text: userSdkMessage.content })
parts.push(...imageParts)
userSdkMessage.content = parts
} else {
userSdkMessage.content.push(...imageParts)
}
if (systemSdkMessages.length > 0) {
return [systemSdkMessages[0], assistantSdkMessage, userSdkMessage]
}
return [assistantSdkMessage, userSdkMessage]
}
return sdkMessages

View File

@@ -3,6 +3,7 @@
* 处理温度、TopP、超时等基础参数的获取逻辑
*/
import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import {
isClaude45ReasoningModel,
isClaudeReasoningModel,
@@ -72,19 +73,11 @@ export function getTimeout(model: Model): number {
export function getMaxTokens(assistant: Assistant, model: Model): number | undefined {
// NOTE: ai-sdk会把maxToken和budgetToken加起来
const assistantSettings = getAssistantSettings(assistant)
const enabledMaxTokens = assistantSettings.enableMaxTokens ?? false
let maxTokens = assistantSettings.maxTokens
// If user hasn't enabled enableMaxTokens, return undefined to let the API use its default value.
// Note: Anthropic API requires max_tokens, but that's handled by the Anthropic client with a fallback.
if (!enabledMaxTokens || maxTokens === undefined) {
return undefined
}
let { maxTokens = DEFAULT_MAX_TOKENS } = getAssistantSettings(assistant)
const provider = getProviderByModel(model)
if (isSupportedThinkingTokenClaudeModel(model) && ['anthropic', 'aws-bedrock'].includes(provider.type)) {
const { reasoning_effort: reasoningEffort } = assistantSettings
const { reasoning_effort: reasoningEffort } = getAssistantSettings(assistant)
const budget = getAnthropicThinkingBudget(maxTokens, reasoningEffort, model.id)
if (budget) {
maxTokens -= budget

View File

@@ -28,7 +28,6 @@ import { type Assistant, type MCPTool, type Provider } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
import { replacePromptVariables } from '@renderer/utils/prompt'
import { isAwsBedrockProvider } from '@renderer/utils/provider'
import type { ModelMessage, Tool } from 'ai'
import { stepCountIs } from 'ai'
@@ -107,7 +106,7 @@ export async function buildStreamTextParams(
searchWithTime: store.getState().websearch.searchWithTime
}
const { providerOptions, standardParams } = buildProviderOptions(assistant, model, provider, {
const providerOptions = buildProviderOptions(assistant, model, provider, {
enableReasoning,
enableWebSearch,
enableGenerateImage
@@ -176,22 +175,17 @@ export async function buildStreamTextParams(
let headers: Record<string, string | undefined> = options.requestOptions?.headers ?? {}
if (isAnthropicModel(model) && !isAwsBedrockProvider(provider)) {
if (isAnthropicModel(model)) {
const newBetaHeaders = { 'anthropic-beta': addAnthropicHeaders(assistant, model).join(',') }
headers = combineHeaders(headers, newBetaHeaders)
}
// 构建基础参数
// Note: standardParams (topK, frequencyPenalty, presencePenalty, stopSequences, seed)
// are extracted from custom parameters and passed directly to streamText()
// instead of being placed in providerOptions
const params: StreamTextParams = {
messages: sdkMessages,
maxOutputTokens: getMaxTokens(assistant, model),
temperature: getTemperature(assistant, model),
topP: getTopP(assistant, model),
// Include AI SDK standard params extracted from custom parameters
...standardParams,
abortSignal: options.requestOptions?.signal,
headers,
providerOptions,

View File

@@ -1,4 +1,4 @@
import type { Model, Provider } from '@renderer/types'
import type { Provider } from '@renderer/types'
import { describe, expect, it, vi } from 'vitest'
import { getAiSdkProviderId } from '../factory'
@@ -68,18 +68,6 @@ function createTestProvider(id: string, type: string): Provider {
} as Provider
}
function createAzureProvider(id: string, apiVersion?: string, model?: string): Provider {
return {
id,
type: 'azure-openai',
name: `Azure Test ${id}`,
apiKey: 'azure-test-key',
apiHost: 'azure-test-host',
apiVersion,
models: [{ id: model || 'gpt-4' } as Model]
}
}
describe('Integrated Provider Registry', () => {
describe('Provider ID Resolution', () => {
it('should resolve openrouter provider correctly', () => {
@@ -123,24 +111,6 @@ describe('Integrated Provider Registry', () => {
const result = getAiSdkProviderId(unknownProvider)
expect(result).toBe('unknown-provider')
})
it('should handle Azure OpenAI providers correctly', () => {
const azureProvider = createAzureProvider('azure-test', '2024-02-15', 'gpt-4o')
const result = getAiSdkProviderId(azureProvider)
expect(result).toBe('azure')
})
it('should handle Azure OpenAI providers response endpoint correctly', () => {
const azureProvider = createAzureProvider('azure-test', 'v1', 'gpt-4o')
const result = getAiSdkProviderId(azureProvider)
expect(result).toBe('azure-responses')
})
it('should handle Azure provider Claude Models', () => {
const provider = createTestProvider('azure-anthropic', 'anthropic')
const result = getAiSdkProviderId(provider)
expect(result).toBe('azure-anthropic')
})
})
describe('Backward Compatibility', () => {

View File

@@ -60,12 +60,8 @@ function tryResolveProviderId(identifier: string): ProviderId | null {
export function getAiSdkProviderId(provider: Provider): string {
// 1. 尝试解析provider.id
const resolvedFromId = tryResolveProviderId(provider.id)
if (isAzureOpenAIProvider(provider)) {
if (isAzureResponsesEndpoint(provider)) {
return 'azure-responses'
} else {
return 'azure'
}
if (isAzureOpenAIProvider(provider) && isAzureResponsesEndpoint(provider)) {
return 'azure-responses'
}
if (resolvedFromId) {
return resolvedFromId

View File

@@ -78,13 +78,11 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
}
/**
* Format and normalize the API host URL for a provider.
* Handles provider-specific URL formatting rules (e.g., appending version paths, Azure formatting).
*
* @param provider - The provider whose API host is to be formatted.
* @returns A new provider instance with the formatted API host.
* 主要用来对齐AISdk的BaseURL格式
* @param provider
* @returns
*/
export function formatProviderApiHost(provider: Provider): Provider {
function formatProviderApiHost(provider: Provider): Provider {
const formatted = { ...provider }
if (formatted.anthropicApiHost) {
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
@@ -92,7 +90,6 @@ export function formatProviderApiHost(provider: Provider): Provider {
if (isAnthropicProvider(provider)) {
const baseHost = formatted.anthropicApiHost || formatted.apiHost
// AI SDK needs /v1 in baseURL, Anthropic SDK will strip it in getSdkClient
formatted.apiHost = formatApiHost(baseHost)
if (!formatted.anthropicApiHost) {
formatted.anthropicApiHost = formatted.apiHost
@@ -116,38 +113,18 @@ export function formatProviderApiHost(provider: Provider): Provider {
}
/**
* Retrieve the effective Provider configuration for the given model.
* Applies all necessary transformations (special-provider handling, URL formatting, etc.).
*
* @param model - The model whose provider is to be resolved.
* @returns A new Provider instance with all adaptations applied.
* 获取实际的Provider配置
* 简化版:将逻辑分解为小函数
*/
export function getActualProvider(model: Model): Provider {
const baseProvider = getProviderByModel(model)
return adaptProvider({ provider: baseProvider, model })
}
// 按顺序处理各种转换
let actualProvider = cloneDeep(baseProvider)
actualProvider = handleSpecialProviders(model, actualProvider)
actualProvider = formatProviderApiHost(actualProvider)
/**
* Transforms a provider configuration by applying model-specific adaptations and normalizing its API host.
* The transformations are applied in the following order:
* 1. Model-specific provider handling (e.g., New-API, system providers, Azure OpenAI)
* 2. API host formatting (provider-specific URL normalization)
*
* @param provider - The base provider configuration to transform.
* @param model - The model associated with the provider; optional but required for special-provider handling.
* @returns A new Provider instance with all transformations applied.
*/
export function adaptProvider({ provider, model }: { provider: Provider; model?: Model }): Provider {
let adaptedProvider = cloneDeep(provider)
// Apply transformations in order
if (model) {
adaptedProvider = handleSpecialProviders(model, adaptedProvider)
}
adaptedProvider = formatProviderApiHost(adaptedProvider)
return adaptedProvider
return actualProvider
}
/**

View File

@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
'gen_ai.usage.output_tokens'
]
const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
if (completionTokens !== undefined || promptTokens !== undefined) {
const usage: TokenUsage = {

View File

@@ -1,53 +0,0 @@
import type { Span } from '@opentelemetry/api'
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
import { describe, expect, it, vi } from 'vitest'
import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'
vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
debug: vi.fn(),
error: vi.fn(),
info: vi.fn(),
warn: vi.fn()
})
}
}))
describe('AiSdkSpanAdapter', () => {
const createMockSpan = (attributes: Record<string, unknown>): Span => {
const span = {
spanContext: () => ({
traceId: 'trace-id',
spanId: 'span-id'
}),
_attributes: attributes,
_events: [],
name: 'test span',
status: { code: SpanStatusCode.OK },
kind: SpanKind.CLIENT,
startTime: [0, 0] as [number, number],
endTime: [0, 1] as [number, number],
ended: true,
parentSpanId: '',
links: []
}
return span as unknown as Span
}
it('maps prompt and completion usage tokens to the correct fields', () => {
const attributes = {
'ai.usage.promptTokens': 321,
'ai.usage.completionTokens': 654
}
const span = createMockSpan(attributes)
const result = AiSdkSpanAdapter.convertToSpanEntity({ span })
expect(result.usage).toBeDefined()
expect(result.usage?.prompt_tokens).toBe(321)
expect(result.usage?.completion_tokens).toBe(654)
expect(result.usage?.total_tokens).toBe(975)
})
})

View File

@@ -1,652 +0,0 @@
/**
* extractAiSdkStandardParams Unit Tests
* Tests for extracting AI SDK standard parameters from custom parameters
*/
import { describe, expect, it, vi } from 'vitest'
import { extractAiSdkStandardParams } from '../options'
// Mock logger to prevent errors
vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
debug: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
info: vi.fn()
})
}
}))
// Mock settings store
vi.mock('@renderer/store/settings', () => ({
default: (state = { settings: {} }) => state
}))
// Mock hooks to prevent uuid errors
vi.mock('@renderer/hooks/useSettings', () => ({
getStoreSetting: vi.fn(() => ({}))
}))
// Mock uuid to prevent errors
vi.mock('uuid', () => ({
v4: vi.fn(() => 'test-uuid')
}))
// Mock AssistantService to prevent uuid errors
vi.mock('@renderer/services/AssistantService', () => ({
getDefaultAssistant: vi.fn(() => ({
id: 'test-assistant',
name: 'Test Assistant',
settings: {}
})),
getDefaultTopic: vi.fn(() => ({
id: 'test-topic',
assistantId: 'test-assistant',
createdAt: new Date().toISOString()
}))
}))
// Mock provider service
vi.mock('@renderer/services/ProviderService', () => ({
getProviderById: vi.fn(() => ({
id: 'test-provider',
name: 'Test Provider'
}))
}))
// Mock config modules
vi.mock('@renderer/config/models', () => ({
isOpenAIModel: vi.fn(() => false),
isQwenMTModel: vi.fn(() => false),
isSupportFlexServiceTierModel: vi.fn(() => false),
isSupportVerbosityModel: vi.fn(() => false),
getModelSupportedVerbosity: vi.fn(() => [])
}))
vi.mock('@renderer/config/translate', () => ({
mapLanguageToQwenMTModel: vi.fn()
}))
vi.mock('@renderer/utils/provider', () => ({
isSupportServiceTierProvider: vi.fn(() => false),
isSupportVerbosityProvider: vi.fn(() => false)
}))
describe('extractAiSdkStandardParams', () => {
describe('Positive cases - Standard parameters extraction', () => {
it('should extract all AI SDK standard parameters', () => {
const customParams = {
maxOutputTokens: 1000,
temperature: 0.7,
topP: 0.9,
topK: 40,
presencePenalty: 0.5,
frequencyPenalty: 0.3,
stopSequences: ['STOP', 'END'],
seed: 42
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 1000,
temperature: 0.7,
topP: 0.9,
topK: 40,
presencePenalty: 0.5,
frequencyPenalty: 0.3,
stopSequences: ['STOP', 'END'],
seed: 42
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract single standard parameter', () => {
const customParams = {
temperature: 0.8
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.8
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract topK parameter', () => {
const customParams = {
topK: 50
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topK: 50
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract frequencyPenalty parameter', () => {
const customParams = {
frequencyPenalty: 0.6
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
frequencyPenalty: 0.6
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract presencePenalty parameter', () => {
const customParams = {
presencePenalty: 0.4
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
presencePenalty: 0.4
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract stopSequences parameter', () => {
const customParams = {
stopSequences: ['HALT', 'TERMINATE']
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
stopSequences: ['HALT', 'TERMINATE']
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract seed parameter', () => {
const customParams = {
seed: 12345
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
seed: 12345
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract maxOutputTokens parameter', () => {
const customParams = {
maxOutputTokens: 2048
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 2048
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract topP parameter', () => {
const customParams = {
topP: 0.95
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topP: 0.95
})
expect(result.providerParams).toStrictEqual({})
})
})
describe('Negative cases - Provider-specific parameters', () => {
it('should place all non-standard parameters in providerParams', () => {
const customParams = {
customParam: 'value',
anotherParam: 123,
thirdParam: true
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
customParam: 'value',
anotherParam: 123,
thirdParam: true
})
})
it('should place single provider-specific parameter in providerParams', () => {
const customParams = {
reasoningEffort: 'high'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
reasoningEffort: 'high'
})
})
it('should place model-specific parameter in providerParams', () => {
const customParams = {
thinking: { type: 'enabled', budgetTokens: 5000 }
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
thinking: { type: 'enabled', budgetTokens: 5000 }
})
})
it('should place serviceTier in providerParams', () => {
const customParams = {
serviceTier: 'auto'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
serviceTier: 'auto'
})
})
it('should place textVerbosity in providerParams', () => {
const customParams = {
textVerbosity: 'high'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
textVerbosity: 'high'
})
})
})
describe('Mixed parameters', () => {
it('should correctly separate mixed standard and provider-specific parameters', () => {
const customParams = {
temperature: 0.7,
topK: 40,
customParam: 'custom_value',
reasoningEffort: 'medium',
frequencyPenalty: 0.5,
seed: 999
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40,
frequencyPenalty: 0.5,
seed: 999
})
expect(result.providerParams).toStrictEqual({
customParam: 'custom_value',
reasoningEffort: 'medium'
})
})
it('should handle complex mixed parameters with nested objects', () => {
const customParams = {
topP: 0.9,
presencePenalty: 0.3,
thinking: { type: 'enabled', budgetTokens: 5000 },
stopSequences: ['STOP'],
serviceTier: 'auto',
maxOutputTokens: 4096
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topP: 0.9,
presencePenalty: 0.3,
stopSequences: ['STOP'],
maxOutputTokens: 4096
})
expect(result.providerParams).toStrictEqual({
thinking: { type: 'enabled', budgetTokens: 5000 },
serviceTier: 'auto'
})
})
it('should handle all standard params with some provider params', () => {
const customParams = {
maxOutputTokens: 2000,
temperature: 0.8,
topP: 0.95,
topK: 50,
presencePenalty: 0.6,
frequencyPenalty: 0.4,
stopSequences: ['END', 'DONE'],
seed: 777,
customApiParam: 'value',
anotherCustomParam: 123
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 2000,
temperature: 0.8,
topP: 0.95,
topK: 50,
presencePenalty: 0.6,
frequencyPenalty: 0.4,
stopSequences: ['END', 'DONE'],
seed: 777
})
expect(result.providerParams).toStrictEqual({
customApiParam: 'value',
anotherCustomParam: 123
})
})
})
describe('Edge cases', () => {
it('should handle empty object', () => {
const customParams = {}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({})
})
it('should handle zero values for numeric parameters', () => {
const customParams = {
temperature: 0,
topK: 0,
seed: 0
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0,
topK: 0,
seed: 0
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle negative values for numeric parameters', () => {
const customParams = {
presencePenalty: -0.5,
frequencyPenalty: -0.3,
seed: -1
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
presencePenalty: -0.5,
frequencyPenalty: -0.3,
seed: -1
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle empty arrays for stopSequences', () => {
const customParams = {
stopSequences: []
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
stopSequences: []
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle null values in mixed parameters', () => {
const customParams = {
temperature: 0.7,
customNull: null,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customNull: null
})
})
it('should handle undefined values in mixed parameters', () => {
const customParams = {
temperature: 0.7,
customUndefined: undefined,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customUndefined: undefined
})
})
it('should handle boolean values for standard parameters', () => {
const customParams = {
temperature: 0.7,
customBoolean: false,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customBoolean: false
})
})
it('should handle very large numeric values', () => {
const customParams = {
maxOutputTokens: 999999,
seed: 2147483647,
topK: 10000
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 999999,
seed: 2147483647,
topK: 10000
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle decimal values with high precision', () => {
const customParams = {
temperature: 0.123456789,
topP: 0.987654321,
presencePenalty: 0.111111111
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.123456789,
topP: 0.987654321,
presencePenalty: 0.111111111
})
expect(result.providerParams).toStrictEqual({})
})
})
describe('Case sensitivity', () => {
it('should NOT extract parameters with incorrect case - uppercase first letter', () => {
const customParams = {
Temperature: 0.7,
TopK: 40,
FrequencyPenalty: 0.5
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
Temperature: 0.7,
TopK: 40,
FrequencyPenalty: 0.5
})
})
it('should NOT extract parameters with incorrect case - all uppercase', () => {
const customParams = {
TEMPERATURE: 0.7,
TOPK: 40,
SEED: 42
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
TEMPERATURE: 0.7,
TOPK: 40,
SEED: 42
})
})
it('should NOT extract parameters with incorrect case - all lowercase', () => {
const customParams = {
maxoutputtokens: 1000,
frequencypenalty: 0.5,
stopsequences: ['STOP']
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
maxoutputtokens: 1000,
frequencypenalty: 0.5,
stopsequences: ['STOP']
})
})
it('should correctly extract exact case match while rejecting incorrect case', () => {
const customParams = {
temperature: 0.7,
Temperature: 0.8,
TEMPERATURE: 0.9,
topK: 40,
TopK: 50
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
Temperature: 0.8,
TEMPERATURE: 0.9,
TopK: 50
})
})
})
describe('Parameter name variations', () => {
it('should NOT extract similar but incorrect parameter names', () => {
const customParams = {
temp: 0.7, // should not match temperature
top_k: 40, // should not match topK
max_tokens: 1000, // should not match maxOutputTokens
freq_penalty: 0.5 // should not match frequencyPenalty
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
temp: 0.7,
top_k: 40,
max_tokens: 1000,
freq_penalty: 0.5
})
})
it('should NOT extract snake_case versions of standard parameters', () => {
const customParams = {
top_k: 40,
top_p: 0.9,
presence_penalty: 0.5,
frequency_penalty: 0.3,
stop_sequences: ['STOP'],
max_output_tokens: 1000
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
top_k: 40,
top_p: 0.9,
presence_penalty: 0.5,
frequency_penalty: 0.3,
stop_sequences: ['STOP'],
max_output_tokens: 1000
})
})
it('should extract exact camelCase parameters only', () => {
const customParams = {
topK: 40, // correct
top_k: 50, // incorrect
topP: 0.9, // correct
top_p: 0.8, // incorrect
frequencyPenalty: 0.5, // correct
frequency_penalty: 0.4 // incorrect
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topK: 40,
topP: 0.9,
frequencyPenalty: 0.5
})
expect(result.providerParams).toStrictEqual({
top_k: 50,
top_p: 0.8,
frequency_penalty: 0.4
})
})
})
})

View File

@@ -71,11 +71,10 @@ describe('mcp utils', () => {
const result = setupToolsConfig(mcpTools)
expect(result).not.toBeUndefined()
// Tools are now keyed by id (which includes serverId suffix) for uniqueness
expect(Object.keys(result!)).toEqual(['test-tool-1'])
expect(result!['test-tool-1']).toHaveProperty('description')
expect(result!['test-tool-1']).toHaveProperty('inputSchema')
expect(result!['test-tool-1']).toHaveProperty('execute')
expect(Object.keys(result!)).toEqual(['test-tool'])
expect(result!['test-tool']).toHaveProperty('description')
expect(result!['test-tool']).toHaveProperty('inputSchema')
expect(result!['test-tool']).toHaveProperty('execute')
})
it('should handle multiple MCP tools', () => {
@@ -110,8 +109,7 @@ describe('mcp utils', () => {
expect(result).not.toBeUndefined()
expect(Object.keys(result!)).toHaveLength(2)
// Tools are keyed by id for uniqueness
expect(Object.keys(result!)).toEqual(['tool1-id', 'tool2-id'])
expect(Object.keys(result!)).toEqual(['tool1', 'tool2'])
})
})
@@ -137,10 +135,9 @@ describe('mcp utils', () => {
const result = convertMcpToolsToAiSdkTools(mcpTools)
// Tools are keyed by id for uniqueness when multiple server instances exist
expect(Object.keys(result)).toEqual(['get-weather-id'])
expect(Object.keys(result)).toEqual(['get-weather'])
const tool = result['get-weather-id'] as Tool
const tool = result['get-weather'] as Tool
expect(tool.description).toBe('Get weather information')
expect(tool.inputSchema).toBeDefined()
expect(typeof tool.execute).toBe('function')
@@ -163,8 +160,8 @@ describe('mcp utils', () => {
const result = convertMcpToolsToAiSdkTools(mcpTools)
expect(Object.keys(result)).toEqual(['no-desc-tool-id'])
const tool = result['no-desc-tool-id'] as Tool
expect(Object.keys(result)).toEqual(['no-desc-tool'])
const tool = result['no-desc-tool'] as Tool
expect(tool.description).toBe('Tool from test-server')
})
@@ -205,13 +202,13 @@ describe('mcp utils', () => {
const result = convertMcpToolsToAiSdkTools(mcpTools)
expect(Object.keys(result)).toEqual(['complex-tool-id'])
const tool = result['complex-tool-id'] as Tool
expect(Object.keys(result)).toEqual(['complex-tool'])
const tool = result['complex-tool'] as Tool
expect(tool.inputSchema).toBeDefined()
expect(typeof tool.execute).toBe('function')
})
it('should preserve tool id with special characters', () => {
it('should preserve tool names with special characters', () => {
const mcpTools: MCPTool[] = [
{
id: 'special-tool-id',
@@ -228,8 +225,7 @@ describe('mcp utils', () => {
]
const result = convertMcpToolsToAiSdkTools(mcpTools)
// Tools are keyed by id for uniqueness
expect(Object.keys(result)).toEqual(['special-tool-id'])
expect(Object.keys(result)).toEqual(['tool_with-special.chars'])
})
it('should handle multiple tools with different schemas', () => {
@@ -280,11 +276,10 @@ describe('mcp utils', () => {
const result = convertMcpToolsToAiSdkTools(mcpTools)
// Tools are keyed by id for uniqueness
expect(Object.keys(result).sort()).toEqual(['boolean-tool-id', 'number-tool-id', 'string-tool-id'])
expect(result['string-tool-id']).toBeDefined()
expect(result['number-tool-id']).toBeDefined()
expect(result['boolean-tool-id']).toBeDefined()
expect(Object.keys(result).sort()).toEqual(['boolean-tool', 'number-tool', 'string-tool'])
expect(result['string-tool']).toBeDefined()
expect(result['number-tool']).toBeDefined()
expect(result['boolean-tool']).toBeDefined()
})
})
@@ -315,7 +310,7 @@ describe('mcp utils', () => {
]
const tools = convertMcpToolsToAiSdkTools(mcpTools)
const tool = tools['test-exec-tool-id'] as Tool
const tool = tools['test-exec-tool'] as Tool
const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'test-call-123' })
expect(requestToolConfirmation).toHaveBeenCalled()
@@ -348,7 +343,7 @@ describe('mcp utils', () => {
]
const tools = convertMcpToolsToAiSdkTools(mcpTools)
const tool = tools['cancelled-tool-id'] as Tool
const tool = tools['cancelled-tool'] as Tool
const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'cancel-call-123' })
expect(requestToolConfirmation).toHaveBeenCalled()
@@ -390,7 +385,7 @@ describe('mcp utils', () => {
]
const tools = convertMcpToolsToAiSdkTools(mcpTools)
const tool = tools['error-tool-id'] as Tool
const tool = tools['error-tool'] as Tool
await expect(
tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'error-call-123' })
@@ -426,7 +421,7 @@ describe('mcp utils', () => {
]
const tools = convertMcpToolsToAiSdkTools(mcpTools)
const tool = tools['auto-approve-tool-id'] as Tool
const tool = tools['auto-approve-tool'] as Tool
const result = await tool.execute!({}, { messages: [], abortSignal: undefined, toolCallId: 'auto-call-123' })
expect(requestToolConfirmation).not.toHaveBeenCalled()

View File

@@ -128,20 +128,7 @@ vi.mock('../reasoning', () => ({
reasoningConfig: { type: 'enabled', budgetTokens: 5000 }
})),
getReasoningEffort: vi.fn(() => ({ reasoningEffort: 'medium' })),
getCustomParameters: vi.fn(() => ({})),
extractAiSdkStandardParams: vi.fn((customParams: Record<string, any>) => {
const AI_SDK_STANDARD_PARAMS = ['topK', 'frequencyPenalty', 'presencePenalty', 'stopSequences', 'seed']
const standardParams: Record<string, any> = {}
const providerParams: Record<string, any> = {}
for (const [key, value] of Object.entries(customParams)) {
if (AI_SDK_STANDARD_PARAMS.includes(key)) {
standardParams[key] = value
} else {
providerParams[key] = value
}
}
return { standardParams, providerParams }
})
getCustomParameters: vi.fn(() => ({}))
}))
vi.mock('../image', () => ({
@@ -154,10 +141,6 @@ vi.mock('../websearch', () => ({
getWebSearchParams: vi.fn(() => ({ enable_search: true }))
}))
vi.mock('../../prepareParams/header', () => ({
addAnthropicHeaders: vi.fn(() => ['context-1m-2025-08-07'])
}))
const ensureWindowApi = () => {
const globalWindow = window as any
globalWindow.api = globalWindow.api || {}
@@ -201,9 +184,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toBeDefined()
expect(result.standardParams).toBeDefined()
expect(result).toHaveProperty('openai')
expect(result.openai).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -213,8 +195,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.openai).toHaveProperty('reasoningEffort')
expect(result.providerOptions.openai.reasoningEffort).toBe('medium')
expect(result.openai).toHaveProperty('reasoningEffort')
expect(result.openai.reasoningEffort).toBe('medium')
})
it('should include service tier when supported', () => {
@@ -229,8 +211,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.openai).toHaveProperty('serviceTier')
expect(result.providerOptions.openai.serviceTier).toBe(OpenAIServiceTiers.auto)
expect(result.openai).toHaveProperty('serviceTier')
expect(result.openai.serviceTier).toBe(OpenAIServiceTiers.auto)
})
})
@@ -257,8 +239,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions.anthropic).toBeDefined()
expect(result).toHaveProperty('anthropic')
expect(result.anthropic).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -268,8 +250,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.anthropic).toHaveProperty('thinking')
expect(result.providerOptions.anthropic.thinking).toEqual({
expect(result.anthropic).toHaveProperty('thinking')
expect(result.anthropic.thinking).toEqual({
type: 'enabled',
budgetTokens: 5000
})
@@ -300,8 +282,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions.google).toBeDefined()
expect(result).toHaveProperty('google')
expect(result.google).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -311,8 +293,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.google).toHaveProperty('thinkingConfig')
expect(result.providerOptions.google.thinkingConfig).toEqual({
expect(result.google).toHaveProperty('thinkingConfig')
expect(result.google.thinkingConfig).toEqual({
include_thoughts: true
})
})
@@ -324,8 +306,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.providerOptions.google).toHaveProperty('responseModalities')
expect(result.providerOptions.google.responseModalities).toEqual(['TEXT', 'IMAGE'])
expect(result.google).toHaveProperty('responseModalities')
expect(result.google.responseModalities).toEqual(['TEXT', 'IMAGE'])
})
})
@@ -353,8 +335,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('xai')
expect(result.providerOptions.xai).toBeDefined()
expect(result).toHaveProperty('xai')
expect(result.xai).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -364,8 +346,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.xai).toHaveProperty('reasoningEffort')
expect(result.providerOptions.xai.reasoningEffort).toBe('high')
expect(result.xai).toHaveProperty('reasoningEffort')
expect(result.xai.reasoningEffort).toBe('high')
})
})
@@ -392,8 +374,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('deepseek')
expect(result.providerOptions.deepseek).toBeDefined()
expect(result).toHaveProperty('deepseek')
expect(result.deepseek).toBeDefined()
})
})
@@ -420,8 +402,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('openrouter')
expect(result.providerOptions.openrouter).toBeDefined()
expect(result).toHaveProperty('openrouter')
expect(result.openrouter).toBeDefined()
})
it('should include web search parameters when enabled', () => {
@@ -431,12 +413,12 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions.openrouter).toHaveProperty('enable_search')
expect(result.openrouter).toHaveProperty('enable_search')
})
})
describe('Custom parameters', () => {
it('should merge custom provider-specific parameters', async () => {
it('should merge custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
@@ -461,88 +443,10 @@ describe('options utils', () => {
}
)
expect(result.providerOptions.openai).toHaveProperty('custom_param')
expect(result.providerOptions.openai.custom_param).toBe('custom_value')
expect(result.providerOptions.openai).toHaveProperty('another_param')
expect(result.providerOptions.openai.another_param).toBe(123)
})
it('should extract AI SDK standard params from custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
topK: 5,
frequencyPenalty: 0.5,
presencePenalty: 0.3,
seed: 42,
custom_param: 'custom_value'
})
const result = buildProviderOptions(
mockAssistant,
mockModel,
{
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com'
} as Provider,
{
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
}
)
// Standard params should be extracted and returned separately
expect(result.standardParams).toEqual({
topK: 5,
frequencyPenalty: 0.5,
presencePenalty: 0.3,
seed: 42
})
// Provider-specific params should still be in providerOptions
expect(result.providerOptions.google).toHaveProperty('custom_param')
expect(result.providerOptions.google.custom_param).toBe('custom_value')
// Standard params should NOT be in providerOptions
expect(result.providerOptions.google).not.toHaveProperty('topK')
expect(result.providerOptions.google).not.toHaveProperty('frequencyPenalty')
expect(result.providerOptions.google).not.toHaveProperty('presencePenalty')
expect(result.providerOptions.google).not.toHaveProperty('seed')
})
it('should handle stopSequences in custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
stopSequences: ['STOP', 'END'],
custom_param: 'value'
})
const result = buildProviderOptions(
mockAssistant,
mockModel,
{
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com'
} as Provider,
{
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
}
)
expect(result.standardParams).toEqual({
stopSequences: ['STOP', 'END']
})
expect(result.providerOptions.google).not.toHaveProperty('stopSequences')
expect(result.openai).toHaveProperty('custom_param')
expect(result.openai.custom_param).toBe('custom_value')
expect(result.openai).toHaveProperty('another_param')
expect(result.openai.another_param).toBe(123)
})
})
@@ -570,8 +474,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.providerOptions.google).toHaveProperty('thinkingConfig')
expect(result.providerOptions.google).toHaveProperty('responseModalities')
expect(result.google).toHaveProperty('thinkingConfig')
expect(result.google).toHaveProperty('responseModalities')
})
it('should handle all capabilities enabled', () => {
@@ -581,8 +485,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.providerOptions.google).toBeDefined()
expect(Object.keys(result.providerOptions.google).length).toBeGreaterThan(0)
expect(result.google).toBeDefined()
expect(Object.keys(result.google).length).toBeGreaterThan(0)
})
})
@@ -609,7 +513,7 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('google')
expect(result).toHaveProperty('google')
})
it('should map google-vertex-anthropic to anthropic', () => {
@@ -634,66 +538,7 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('anthropic')
})
})
describe('AWS Bedrock provider', () => {
const bedrockProvider = {
id: 'bedrock',
name: 'AWS Bedrock',
type: 'aws-bedrock',
apiKey: 'test-key',
apiHost: 'https://bedrock.us-east-1.amazonaws.com',
models: [] as Model[]
} as Provider
const bedrockModel: Model = {
id: 'anthropic.claude-sonnet-4-20250514-v1:0',
name: 'Claude Sonnet 4',
provider: 'bedrock'
} as Model
it('should build basic Bedrock options', () => {
const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('bedrock')
expect(result.providerOptions.bedrock).toBeDefined()
})
it('should include anthropicBeta when Anthropic headers are needed', async () => {
const { addAnthropicHeaders } = await import('../../prepareParams/header')
vi.mocked(addAnthropicHeaders).mockReturnValue(['interleaved-thinking-2025-05-14', 'context-1m-2025-08-07'])
const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions.bedrock).toHaveProperty('anthropicBeta')
expect(result.providerOptions.bedrock.anthropicBeta).toEqual([
'interleaved-thinking-2025-05-14',
'context-1m-2025-08-07'
])
})
it('should include reasoning parameters when enabled', () => {
const result = buildProviderOptions(mockAssistant, bedrockModel, bedrockProvider, {
enableReasoning: true,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions.bedrock).toHaveProperty('reasoningConfig')
expect(result.providerOptions.bedrock.reasoningConfig).toEqual({
type: 'enabled',
budgetTokens: 5000
})
expect(result).toHaveProperty('anthropic')
})
})
})

View File

@@ -1,288 +0,0 @@
import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
import { SystemProviderIds } from '@renderer/types'
import { describe, expect, it, vi } from 'vitest'
import { getReasoningEffort } from '../reasoning'
// Mock logger
vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
warn: vi.fn(),
info: vi.fn(),
error: vi.fn()
})
}
}))
vi.mock('@renderer/store/settings', () => ({
default: {},
settingsSlice: {
name: 'settings',
reducer: vi.fn(),
actions: {}
}
}))
vi.mock('@renderer/store/assistants', () => {
const mockAssistantsSlice = {
name: 'assistants',
reducer: vi.fn((state = { entities: {}, ids: [] }) => state),
actions: {
updateTopicUpdatedAt: vi.fn(() => ({ type: 'UPDATE_TOPIC_UPDATED_AT' }))
}
}
return {
default: mockAssistantsSlice.reducer,
updateTopicUpdatedAt: vi.fn(() => ({ type: 'UPDATE_TOPIC_UPDATED_AT' })),
assistantsSlice: mockAssistantsSlice
}
})
// Mock provider service
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: (model: Model) => ({
id: model.provider,
name: 'Poe',
type: 'openai'
}),
getAssistantSettings: (assistant: Assistant) => assistant.settings || {}
}))
describe('Poe Provider Reasoning Support', () => {
const createPoeModel = (id: string): Model => ({
id,
name: id,
provider: SystemProviderIds.poe,
group: 'poe'
})
const createAssistant = (reasoning_effort?: ReasoningEffortOption, maxTokens?: number): Assistant => ({
id: 'test-assistant',
name: 'Test Assistant',
emoji: '🤖',
prompt: '',
topics: [],
messages: [],
type: 'assistant',
regularPhrases: [],
settings: {
reasoning_effort,
maxTokens
}
})
describe('GPT-5 Series Models', () => {
it('should return reasoning_effort in extra_body for GPT-5 model with low effort', () => {
const model = createPoeModel('gpt-5')
const assistant = createAssistant('low')
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({
extra_body: {
reasoning_effort: 'low'
}
})
})
it('should return reasoning_effort in extra_body for GPT-5 model with medium effort', () => {
const model = createPoeModel('gpt-5')
const assistant = createAssistant('medium')
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({
extra_body: {
reasoning_effort: 'medium'
}
})
})
it('should return reasoning_effort in extra_body for GPT-5 model with high effort', () => {
const model = createPoeModel('gpt-5')
const assistant = createAssistant('high')
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({
extra_body: {
reasoning_effort: 'high'
}
})
})
it('should convert auto to medium for GPT-5 model in extra_body', () => {
const model = createPoeModel('gpt-5')
const assistant = createAssistant('auto')
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({
extra_body: {
reasoning_effort: 'medium'
}
})
})
it('should return reasoning_effort in extra_body for GPT-5.1 model', () => {
const model = createPoeModel('gpt-5.1')
const assistant = createAssistant('medium')
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({
extra_body: {
reasoning_effort: 'medium'
}
})
})
})
describe('Claude Models', () => {
  it('should return thinking_budget in extra_body for Claude 3.7 Sonnet', () => {
    const result = getReasoningEffort(createAssistant('medium', 4096), createPoeModel('claude-3.7-sonnet'))
    expect(result).toHaveProperty('extra_body')
    expect(result.extra_body).toHaveProperty('thinking_budget')
    expect(typeof result.extra_body?.thinking_budget).toBe('number')
    expect(result.extra_body?.thinking_budget).toBeGreaterThan(0)
  })
  it('should return thinking_budget in extra_body for Claude Sonnet 4', () => {
    const result = getReasoningEffort(createAssistant('high', 8192), createPoeModel('claude-sonnet-4'))
    expect(result).toHaveProperty('extra_body')
    expect(result.extra_body).toHaveProperty('thinking_budget')
    expect(typeof result.extra_body?.thinking_budget).toBe('number')
  })
  it('should calculate thinking_budget based on effort ratio and maxTokens', () => {
    const result = getReasoningEffort(createAssistant('low', 4096), createPoeModel('claude-3.7-sonnet'))
    expect(result.extra_body?.thinking_budget).toBeGreaterThanOrEqual(1024)
  })
})
describe('Gemini Models', () => {
  it('should return thinking_budget in extra_body for Gemini 2.5 Flash', () => {
    const model = createPoeModel('gemini-2.5-flash')
    const assistant = createAssistant('medium')
    const result = getReasoningEffort(assistant, model)
    expect(result).toHaveProperty('extra_body')
    expect(result.extra_body).toHaveProperty('thinking_budget')
    expect(typeof result.extra_body?.thinking_budget).toBe('number')
  })
  it('should return thinking_budget in extra_body for Gemini 2.5 Pro', () => {
    const model = createPoeModel('gemini-2.5-pro')
    const assistant = createAssistant('high')
    const result = getReasoningEffort(assistant, model)
    expect(result).toHaveProperty('extra_body')
    expect(result.extra_body).toHaveProperty('thinking_budget')
  })
  it('should use -1 for auto effort', () => {
    // -1 is the sentinel the implementation sends for 'auto' (provider-chosen budget).
    const model = createPoeModel('gemini-2.5-flash')
    const assistant = createAssistant('auto')
    const result = getReasoningEffort(assistant, model)
    expect(result.extra_body?.thinking_budget).toBe(-1)
  })
  it('should calculate thinking_budget for non-auto effort', () => {
    // Non-auto efforts derive a concrete numeric budget from the model's token limits.
    const model = createPoeModel('gemini-2.5-flash')
    const assistant = createAssistant('low')
    const result = getReasoningEffort(assistant, model)
    expect(typeof result.extra_body?.thinking_budget).toBe('number')
  })
})
describe('No Reasoning Effort', () => {
  it('should return empty object when reasoning_effort is not set', () => {
    // Undefined effort means "no reasoning parameters" for Poe models.
    expect(getReasoningEffort(createAssistant(undefined), createPoeModel('gpt-5'))).toEqual({})
  })
  it('should return empty object when reasoning_effort is "none"', () => {
    expect(getReasoningEffort(createAssistant('none'), createPoeModel('gpt-5'))).toEqual({})
  })
})
describe('Non-Reasoning Models', () => {
  it('should return empty object for non-reasoning models', () => {
    // gpt-4 is not a reasoning model, so effort settings are ignored entirely.
    expect(getReasoningEffort(createAssistant('medium'), createPoeModel('gpt-4'))).toEqual({})
  })
})
describe('Edge Cases: Models Without Token Limit Configuration', () => {
  it('should return empty object for Claude models without token limit configuration', () => {
    // 'claude-unknown-variant' is deliberately chosen not to match any token-limit pattern.
    const model = createPoeModel('claude-unknown-variant')
    const assistant = createAssistant('medium', 4096)
    const result = getReasoningEffort(assistant, model)
    // Should return empty object when token limit is not found
    expect(result).toEqual({})
    expect(result.extra_body?.thinking_budget).toBeUndefined()
  })
  it('should return empty object for unmatched Poe reasoning models', () => {
    // A hypothetical reasoning model that doesn't match GPT-5, Claude, or Gemini
    const model = createPoeModel('some-reasoning-model')
    // Make it appear as a reasoning model by giving it a name that won't match known categories
    const assistant = createAssistant('medium')
    const result = getReasoningEffort(assistant, model)
    // Should return empty object for unmatched models
    expect(result).toEqual({})
  })
  it('should fallback to -1 for Gemini models without token limit', () => {
    // Use a Gemini model variant that won't match any token limit pattern
    // The current regex patterns cover gemini-.*-flash.*$ and gemini-.*-pro.*$
    // so we need a model that matches isSupportedThinkingTokenGeminiModel but not THINKING_TOKEN_MAP
    const model = createPoeModel('gemini-2.5-flash')
    const assistant = createAssistant('auto')
    const result = getReasoningEffort(assistant, model)
    // For 'auto' effort, should use -1
    expect(result.extra_body?.thinking_budget).toBe(-1)
  })
  it('should enforce minimum 1024 token floor for Claude models', () => {
    const model = createPoeModel('claude-3.7-sonnet')
    // Use very small maxTokens to test the minimum floor
    const assistant = createAssistant('low', 100)
    const result = getReasoningEffort(assistant, model)
    // Budget is clamped: the implementation never sends less than 1024 thinking tokens.
    expect(result.extra_body?.thinking_budget).toBeGreaterThanOrEqual(1024)
  })
  it('should handle undefined maxTokens for Claude models', () => {
    // With maxTokens unset, the implementation falls back to a default cap
    // (DEFAULT_MAX_TOKENS) while still honouring the 1024-token floor.
    const model = createPoeModel('claude-3.7-sonnet')
    const assistant = createAssistant('medium', undefined)
    const result = getReasoningEffort(assistant, model)
    expect(result).toHaveProperty('extra_body')
    expect(result.extra_body).toHaveProperty('thinking_budget')
    expect(typeof result.extra_body?.thinking_budget).toBe('number')
    expect(result.extra_body?.thinking_budget).toBeGreaterThanOrEqual(1024)
  })
})
})

View File

@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
expect(result).toEqual({})
})
it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
const { isReasoningModel } = await import('@renderer/config/models')
vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -161,29 +161,6 @@ describe('reasoning utils', () => {
settings: {}
} as Assistant
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({})
})
it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
const { isReasoningModel } = await import('@renderer/config/models')
vi.mocked(isReasoningModel).mockReturnValue(true)
const model: Model = {
id: 'anthropic/claude-sonnet-4',
name: 'Claude Sonnet 4',
provider: SystemProviderIds.openrouter
} as Model
const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {
reasoning_effort: 'none'
}
} as Assistant
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
})
@@ -292,9 +269,7 @@ describe('reasoning utils', () => {
const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {
reasoning_effort: 'none'
}
settings: {}
} as Assistant
const result = getReasoningEffort(assistant, model)

View File

@@ -28,9 +28,7 @@ export function convertMcpToolsToAiSdkTools(mcpTools: MCPTool[]): ToolSet {
const tools: ToolSet = {}
for (const mcpTool of mcpTools) {
// Use mcpTool.id (which includes serverId suffix) to ensure uniqueness
// when multiple instances of the same MCP server type are configured
tools[mcpTool.id] = tool({
tools[mcpTool.name] = tool({
description: mcpTool.description || `Tool from ${mcpTool.serverName}`,
inputSchema: jsonSchema(mcpTool.inputSchema as JSONSchema7),
execute: async (params, { toolCallId }) => {

View File

@@ -14,7 +14,6 @@ import {
} from '@renderer/config/models'
import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getProviderById } from '@renderer/services/ProviderService'
import {
type Assistant,
type GroqServiceTier,
@@ -31,12 +30,11 @@ import {
type Provider,
type ServiceTier
} from '@renderer/types'
import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider'
import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { isSupportServiceTierProvider } from '@renderer/utils/provider'
import type { JSONValue } from 'ai'
import { t } from 'i18next'
import { addAnthropicHeaders } from '../prepareParams/header'
import { getAiSdkProviderId } from '../provider/factory'
import { buildGeminiGenerateImageParams } from './image'
import {
@@ -92,56 +90,15 @@ function getServiceTier<T extends Provider>(model: Model, provider: T): OpenAISe
}
}
function getVerbosity(model: Model): OpenAIVerbosity {
if (!isSupportVerbosityModel(model) || !isSupportVerbosityProvider(getProviderById(model.provider)!)) {
return undefined
}
function getVerbosity(): OpenAIVerbosity {
const openAI = getStoreSetting('openAI')
const userVerbosity = openAI.verbosity
if (userVerbosity) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
const verbosity = supportedVerbosity.includes(userVerbosity) ? userVerbosity : supportedVerbosity[0]
return verbosity
}
return undefined
}
/**
* Extract AI SDK standard parameters from custom parameters
* These parameters should be passed directly to streamText() instead of providerOptions
*/
export function extractAiSdkStandardParams(customParams: Record<string, any>): {
standardParams: Partial<Record<AiSdkParam, any>>
providerParams: Record<string, any>
} {
const standardParams: Partial<Record<AiSdkParam, any>> = {}
const providerParams: Record<string, any> = {}
for (const [key, value] of Object.entries(customParams)) {
if (isAiSdkParam(key)) {
standardParams[key] = value
} else {
providerParams[key] = value
}
}
return { standardParams, providerParams }
return openAI.verbosity
}
/**
* 构建 AI SDK 的 providerOptions
* 按 provider 类型分离,保持类型安全
* 返回格式:{
* providerOptions: { 'providerId': providerOptions },
* standardParams: { topK, frequencyPenalty, presencePenalty, stopSequences, seed }
* }
*
* Custom parameters are split into two categories:
* 1. AI SDK standard parameters (topK, frequencyPenalty, etc.) - returned separately to be passed to streamText()
* 2. Provider-specific parameters - merged into providerOptions
* 返回格式:{ 'providerId': providerOptions }
*/
export function buildProviderOptions(
assistant: Assistant,
@@ -152,16 +109,13 @@ export function buildProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): {
providerOptions: Record<string, Record<string, JSONValue>>
standardParams: Partial<Record<AiSdkParam, any>>
} {
): Record<string, Record<string, JSONValue>> {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider)
// 构建 provider 特定的选项
let providerSpecificOptions: Record<string, any> = {}
const serviceTier = getServiceTier(model, actualProvider)
const textVerbosity = getVerbosity(model)
const textVerbosity = getVerbosity()
// 根据 provider 类型分离构建逻辑
const { data: baseProviderId, success } = baseProviderIdSchema.safeParse(rawProviderId)
if (success) {
@@ -176,8 +130,7 @@ export function buildProviderOptions(
assistant,
model,
capabilities,
serviceTier,
textVerbosity
serviceTier
)
providerSpecificOptions = options
}
@@ -210,8 +163,7 @@ export function buildProviderOptions(
model,
capabilities,
actualProvider,
serviceTier,
textVerbosity
serviceTier
)
break
default:
@@ -249,14 +201,10 @@ export function buildProviderOptions(
}
}
// 获取自定义参数并分离标准参数和 provider 特定参数
const customParams = getCustomParameters(assistant)
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
// 合并 provider 特定的自定义参数到 providerSpecificOptions
// 合并自定义参数 provider 特定的选项中
providerSpecificOptions = {
...providerSpecificOptions,
...providerParams
...getCustomParameters(assistant)
}
let rawProviderKey =
@@ -264,21 +212,16 @@ export function buildProviderOptions(
'google-vertex': 'google',
'google-vertex-anthropic': 'anthropic',
'azure-anthropic': 'anthropic',
'ai-gateway': 'gateway',
azure: 'openai',
'azure-responses': 'openai'
'ai-gateway': 'gateway'
}[rawProviderId] || rawProviderId
if (rawProviderKey === 'cherryin') {
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type
rawProviderKey = { gemini: 'google' }[actualProvider.type] || actualProvider.type
}
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions }
return {
providerOptions: {
[rawProviderKey]: providerSpecificOptions
},
standardParams
[rawProviderKey]: providerSpecificOptions
}
}
@@ -293,8 +236,7 @@ function buildOpenAIProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
},
serviceTier: OpenAIServiceTier,
textVerbosity?: OpenAIVerbosity
serviceTier: OpenAIServiceTier
): OpenAIResponsesProviderOptions {
const { enableReasoning } = capabilities
let providerOptions: OpenAIResponsesProviderOptions = {}
@@ -306,13 +248,8 @@ function buildOpenAIProviderOptions(
...reasoningParams
}
}
const provider = getProviderById(model.provider)
if (!provider) {
throw new Error(`Provider ${model.provider} not found`)
}
if (isSupportVerbosityModel(model) && isSupportVerbosityProvider(provider)) {
if (isSupportVerbosityModel(model)) {
const openAI = getStoreSetting<'openAI'>('openAI')
const userVerbosity = openAI?.verbosity
@@ -330,8 +267,7 @@ function buildOpenAIProviderOptions(
providerOptions = {
...providerOptions,
serviceTier,
textVerbosity
serviceTier
}
return providerOptions
@@ -430,13 +366,11 @@ function buildCherryInProviderOptions(
enableGenerateImage: boolean
},
actualProvider: Provider,
serviceTier: OpenAIServiceTier,
textVerbosity: OpenAIVerbosity
serviceTier: OpenAIServiceTier
): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
switch (actualProvider.type) {
case 'openai':
case 'openai-response':
return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
case 'anthropic':
return buildAnthropicProviderOptions(assistant, model, capabilities)
@@ -470,11 +404,6 @@ function buildBedrockProviderOptions(
}
}
const betaHeaders = addAnthropicHeaders(assistant, model)
if (betaHeaders.length > 0) {
providerOptions.anthropicBeta = betaHeaders
}
return providerOptions
}

View File

@@ -12,12 +12,13 @@ import {
isDeepSeekHybridInferenceModel,
isDoubaoSeedAfter251015,
isDoubaoThinkingAutoModel,
isGemini3ThinkingTokenModel,
isGPT5SeriesModel,
isGemini3Model,
isGPT51SeriesModel,
isGrok4FastReasoningModel,
isGrokReasoningModel,
isOpenAIDeepResearchModel,
isOpenAIModel,
isOpenAIReasoningModel,
isQwenAlwaysThinkModel,
isQwenReasoningModel,
isReasoningModel,
@@ -35,7 +36,7 @@ import {
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
import type { Assistant, Model } from '@renderer/types'
import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
import type { OpenAISummaryText } from '@renderer/types/aiCoreTypes'
import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
@@ -62,22 +63,30 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
const reasoningEffort = assistant?.settings?.reasoning_effort
// reasoningEffort is not set, no extra reasoning setting
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
if (!reasoningEffort) {
return {}
}
// Handle 'none' reasoningEffort. It's explicitly off.
if (reasoningEffort === 'none') {
// Handle undefined and 'none' reasoningEffort.
// TODO: They should be separated.
if (!reasoningEffort || reasoningEffort === 'none') {
// openrouter: use reasoning
if (model.provider === SystemProviderIds.openrouter) {
// Don't disable reasoning for Gemini models that support thinking tokens
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
return {}
}
// 'none' is not an available value for effort for now.
// I think they should resolve this issue soon, so I'll just go ahead and use this value.
if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
return { reasoning: { effort: 'none' } }
}
// Don't disable reasoning for models that require it
if (
isGrokReasoningModel(model) ||
isOpenAIReasoningModel(model) ||
isQwenAlwaysThinkModel(model) ||
model.id.includes('seed-oss') ||
model.id.includes('minimax-m2')
) {
return {}
}
return { reasoning: { enabled: false, exclude: true } }
}
@@ -91,6 +100,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { enable_thinking: false }
}
// claude
if (isSupportedThinkingTokenClaudeModel(model)) {
return {}
}
// gemini
if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -103,10 +117,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
}
} else {
logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
return {}
}
return {}
}
// use thinking, doubao, zhipu, etc.
@@ -126,74 +138,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
return {}
}
// reasoningEffort有效的情况
// https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
// Poe provider - supports custom bot parameters via extra_body
if (provider.id === SystemProviderIds.poe) {
// GPT-5 series models use reasoning_effort parameter in extra_body
if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
return {
extra_body: {
reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
}
}
}
// Claude models use thinking_budget parameter in extra_body
if (isSupportedThinkingTokenClaudeModel(model)) {
const effortRatio = EFFORT_RATIO[reasoningEffort]
const tokenLimit = findTokenLimit(model.id)
const maxTokens = assistant.settings?.maxTokens
if (!tokenLimit) {
logger.warn(
`No token limit configuration found for Claude model "${model.id}" on Poe provider. ` +
`Reasoning effort setting "${reasoningEffort}" will not be applied.`
)
return {}
}
let budgetTokens = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
budgetTokens = Math.floor(Math.max(1024, Math.min(budgetTokens, (maxTokens || DEFAULT_MAX_TOKENS) * effortRatio)))
return {
extra_body: {
thinking_budget: budgetTokens
}
}
}
// Gemini models use thinking_budget parameter in extra_body
if (isSupportedThinkingTokenGeminiModel(model)) {
const effortRatio = EFFORT_RATIO[reasoningEffort]
const tokenLimit = findTokenLimit(model.id)
let budgetTokens: number | undefined
if (tokenLimit && reasoningEffort !== 'auto') {
budgetTokens = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
} else if (!tokenLimit && reasoningEffort !== 'auto') {
logger.warn(
`No token limit configuration found for Gemini model "${model.id}" on Poe provider. ` +
`Using auto (-1) instead of requested effort "${reasoningEffort}".`
)
}
return {
extra_body: {
thinking_budget: budgetTokens ?? -1
}
}
}
// Poe reasoning model not in known categories (GPT-5, Claude, Gemini)
logger.warn(
`Poe provider reasoning model "${model.id}" does not match known categories ` +
`(GPT-5, Claude, Gemini). Reasoning effort setting "${reasoningEffort}" will not be applied.`
)
return {}
}
// OpenRouter models
if (model.provider === SystemProviderIds.openrouter) {
@@ -281,7 +229,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
// OpenRouter models, use reasoning
// FIXME: duplicated openrouter handling. remove one
if (model.provider === SystemProviderIds.openrouter) {
if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
return {
@@ -334,7 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// gemini series, openai compatible api
if (isSupportedThinkingTokenGeminiModel(model)) {
// https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#openai_compatibility
if (isGemini3ThinkingTokenModel(model)) {
if (isGemini3Model(model)) {
return {
reasoning_effort: reasoningEffort
}
@@ -518,20 +465,20 @@ export function getAnthropicReasoningParams(
return {}
}
// type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
type GoogelThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
// function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
// switch (reasoningEffort) {
// case 'low':
// return 'low'
// case 'medium':
// return 'medium'
// case 'high':
// return 'high'
// default:
// return 'medium'
// }
// }
function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
switch (reasoningEffort) {
case 'low':
return 'low'
case 'medium':
return 'medium'
case 'high':
return 'high'
default:
return 'medium'
}
}
/**
* 获取 Gemini 推理参数
@@ -560,15 +507,14 @@ export function getGeminiReasoningParams(
}
}
// TODO: 很多中转还不支持
// https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#new_api_features_in_gemini_3
// if (isGemini3ThinkingTokenModel(model)) {
// return {
// thinkingConfig: {
// thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
// }
// }
// }
if (isGemini3Model(model)) {
return {
thinkingConfig: {
thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
}
}
}
const effortRatio = EFFORT_RATIO[reasoningEffort]
@@ -673,10 +619,6 @@ export function getCustomParameters(assistant: Assistant): Record<string, any> {
if (!param.name?.trim()) {
return acc
}
// Parse JSON type parameters
// Related: src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx:133-148
// The UI stores JSON type params as strings (e.g., '{"key":"value"}')
// This function parses them into objects before sending to the API
if (param.type === 'json') {
const value = param.value as string
if (value === 'undefined') {

View File

@@ -215,10 +215,6 @@
border-top: none !important;
}
.ant-collapse-header-text {
overflow-x: hidden;
}
.ant-slider .ant-slider-handle::after {
box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
}

View File

@@ -10,7 +10,6 @@ import {
} from '@ant-design/icons'
import { loggerService } from '@logger'
import { download } from '@renderer/utils/download'
import { convertImageToPng } from '@renderer/utils/image'
import type { ImageProps as AntImageProps } from 'antd'
import { Dropdown, Image as AntImage, Space } from 'antd'
import { Base64 } from 'js-base64'
@@ -34,38 +33,39 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
// 复制图片到剪贴板
const handleCopyImage = async (src: string) => {
try {
let blob: Blob
if (src.startsWith('data:')) {
// 处理 base64 格式的图片
const match = src.match(/^data:(image\/\w+);base64,(.+)$/)
if (!match) throw new Error('Invalid base64 image format')
const mimeType = match[1]
const byteArray = Base64.toUint8Array(match[2])
blob = new Blob([byteArray], { type: mimeType })
const blob = new Blob([byteArray], { type: mimeType })
await navigator.clipboard.write([new ClipboardItem({ [mimeType]: blob })])
} else if (src.startsWith('file://')) {
// 处理本地文件路径
const bytes = await window.api.fs.read(src)
const mimeType = mime.getType(src) || 'application/octet-stream'
blob = new Blob([bytes], { type: mimeType })
const blob = new Blob([bytes], { type: mimeType })
await navigator.clipboard.write([
new ClipboardItem({
[mimeType]: blob
})
])
} else {
// 处理 URL 格式的图片
const response = await fetch(src)
blob = await response.blob()
const blob = await response.blob()
await navigator.clipboard.write([
new ClipboardItem({
[blob.type]: blob
})
])
}
// 统一转换为 PNG 以确保兼容性(剪贴板 API 不支持 JPEG
const pngBlob = await convertImageToPng(blob)
const item = new ClipboardItem({
'image/png': pngBlob
})
await navigator.clipboard.write([item])
window.toast.success(t('message.copy.success'))
} catch (error) {
const err = error as Error
logger.error(`Failed to copy image: ${err.message}`, { stack: err.stack })
logger.error('Failed to copy image:', error as Error)
window.toast.error(t('message.copy.failed'))
}
}

View File

@@ -0,0 +1,157 @@
import { loggerService } from '@logger'
import type { UIActionResult } from '@mcp-ui/client'
import { UIResourceRenderer } from '@mcp-ui/client'
import type { EmbeddedResource } from '@modelcontextprotocol/sdk/types.js'
import { isUIResource } from '@renderer/types'
import type { FC } from 'react'
import { useCallback, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
const logger = loggerService.withContext('MCPUIRenderer')
interface Props {
  // The MCP embedded resource to render; expected to be a UI resource (ui:// scheme).
  resource: EmbeddedResource
  // NOTE(review): serverId/serverName are accepted but not read by the renderer yet —
  // confirm whether they are reserved for upcoming routing/telemetry work.
  serverId?: string
  serverName?: string
  // Optional bridge that executes a tool call requested from inside the embedded UI.
  onToolCall?: (toolName: string, params: any) => Promise<any>
}
/**
 * Renders an MCP UI embedded resource in an iframe (via UIResourceRenderer) and
 * routes actions originating inside that UI — tool calls, intents, notifications,
 * prompt requests, and link navigation — back to the host application.
 *
 * NOTE(review): `error` below has no setter, so the error branch is currently
 * unreachable; confirm whether render-failure reporting is planned.
 */
const MCPUIRenderer: FC<Props> = ({ resource, onToolCall }) => {
  const { t } = useTranslation()
  const [error] = useState<string | null>(null)
  // Single dispatcher for every action type the embedded UI can emit.
  // Returns a { status, ... } object that is posted back into the iframe.
  const handleUIAction = useCallback(
    async (result: UIActionResult): Promise<any> => {
      logger.debug('UI Action received:', result)
      try {
        switch (result.type) {
          case 'tool': {
            // Handle tool call from UI
            if (onToolCall) {
              const { toolName, params } = result.payload
              logger.info(`UI requesting tool call: ${toolName}`, { params })
              const response = await onToolCall(toolName, params)
              // Check if the response contains a UIResource
              // (tool results arrive as text content that may be a JSON-encoded UIResource)
              try {
                if (response && response.content && Array.isArray(response.content)) {
                  const firstContent = response.content[0]
                  if (firstContent && firstContent.type === 'text' && firstContent.text) {
                    const parsedText = JSON.parse(firstContent.text)
                    if (isUIResource(parsedText)) {
                      // Return the UIResource directly for rendering in the iframe
                      logger.info('Tool response contains UIResource:', { uri: parsedText.resource.uri })
                      return { status: 'success', data: parsedText }
                    }
                  }
                }
              } catch (parseError) {
                // Not a UIResource, return the original response
                logger.debug('Tool response is not a UIResource')
              }
              return { status: 'success', data: response }
            } else {
              logger.warn('Tool call requested but no handler provided')
              return { status: 'error', message: 'Tool call handler not available' }
            }
          }
          case 'intent': {
            // Handle user intent
            logger.info('UI intent:', result.payload)
            window.toast.info(t('message.mcp.ui.intent_received'))
            return { status: 'acknowledged' }
          }
          case 'notify': {
            // Handle notification from UI
            logger.info('UI notification:', result.payload)
            window.toast.info(result.payload.message || t('message.mcp.ui.notification'))
            return { status: 'acknowledged' }
          }
          case 'prompt': {
            // Handle prompt request from UI
            logger.info('UI prompt request:', result.payload)
            // TODO: Integrate with prompt system
            return { status: 'error', message: 'Prompt execution not yet implemented' }
          }
          case 'link': {
            // Handle navigation request
            // NOTE(review): opens the URL unconditionally — consider validating/allow-listing
            // untrusted URLs coming from the embedded UI.
            const { url } = result.payload
            logger.info('UI navigation request:', { url })
            window.open(url, '_blank')
            return { status: 'acknowledged' }
          }
          default:
            logger.warn('Unknown UI action type:', { result })
            return { status: 'error', message: 'Unknown action type' }
        }
      } catch (err) {
        logger.error('Error handling UI action:', err as Error)
        return {
          status: 'error',
          message: err instanceof Error ? err.message : 'Unknown error'
        }
      }
    },
    [onToolCall, t]
  )
  if (error) {
    return (
      <ErrorContainer>
        <ErrorTitle>{t('message.mcp.ui.error')}</ErrorTitle>
        <ErrorMessage>{error}</ErrorMessage>
      </ErrorContainer>
    )
  }
  return (
    <UIContainer>
      <UIResourceRenderer resource={resource} onUIAction={handleUIAction} />
    </UIContainer>
  )
}
// Container for the embedded UI; sizes the inner iframe produced by UIResourceRenderer.
const UIContainer = styled.div`
  width: 100%;
  min-height: 400px;
  border-radius: 8px;
  overflow: hidden;
  background: var(--color-background);
  border: 1px solid var(--color-border);
  iframe {
    width: 100%;
    border: none;
    min-height: 400px;
    height: 600px;
  }
`
// Error panel shown when rendering fails (currently unreachable — see component note).
const ErrorContainer = styled.div`
  padding: 16px;
  border-radius: 8px;
  background: var(--color-error-bg, #fee);
  border: 1px solid var(--color-error-border, #fcc);
  color: var(--color-error-text, #c33);
`
const ErrorTitle = styled.div`
  font-weight: 600;
  margin-bottom: 8px;
  font-size: 14px;
`
const ErrorMessage = styled.div`
  font-size: 13px;
  opacity: 0.9;
`
export default MCPUIRenderer

View File

@@ -0,0 +1 @@
export { default as MCPUIRenderer } from './MCPUIRenderer'

View File

@@ -57,7 +57,7 @@ const PopupContainer: React.FC<Props> = ({ model, apiFilter, modelFilter, showTa
const [_searchText, setSearchText] = useState('')
const searchText = useDeferredValue(_searchText)
const { models, isLoading } = useApiModels(apiFilter)
const adaptedModels = useMemo(() => models.map((model) => apiModelAdapter(model)), [models])
const adaptedModels = models.map((model) => apiModelAdapter(model))
// 当前选中的模型ID
const currentModelId = model ? model.id : ''

View File

@@ -6,7 +6,7 @@ import { useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled, { css } from 'styled-components'
interface SelectorOption<V = string | number> {
interface SelectorOption<V = string | number | undefined | null> {
label: string | ReactNode
value: V
type?: 'group'
@@ -14,7 +14,7 @@ interface SelectorOption<V = string | number> {
disabled?: boolean
}
interface BaseSelectorProps<V = string | number> {
interface BaseSelectorProps<V = string | number | undefined | null> {
options: SelectorOption<V>[]
placeholder?: string
placement?: 'topLeft' | 'topCenter' | 'topRight' | 'bottomLeft' | 'bottomCenter' | 'bottomRight' | 'top' | 'bottom'
@@ -39,7 +39,7 @@ interface MultipleSelectorProps<V> extends BaseSelectorProps<V> {
export type SelectorProps<V> = SingleSelectorProps<V> | MultipleSelectorProps<V>
const Selector = <V extends string | number>({
const Selector = <V extends string | number | undefined | null>({
options,
value,
onChange = () => {},

Some files were not shown because too many files have changed in this diff Show More