Compare commits
178 Commits
v1.6.0-bet
...
feature/ad
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
be94216f4d | ||
|
|
38ac42af8c | ||
|
|
d11a2cd95c | ||
|
|
23f61b0d62 | ||
|
|
961ee22327 | ||
|
|
c7d2588f1a | ||
|
|
06ab2822be | ||
|
|
bb0ec0a3ec | ||
|
|
483b4e090e | ||
|
|
4975c2d9e8 | ||
|
|
5365fddec9 | ||
|
|
e401685449 | ||
|
|
e195ad4a8f | ||
|
|
20f5271682 | ||
|
|
5524571c80 | ||
|
|
cd3031479c | ||
|
|
1df6e8c732 | ||
|
|
ed2e01491e | ||
|
|
228ed474ce | ||
|
|
6829a03437 | ||
|
|
dabfb8dc0e | ||
|
|
4aa9c9f225 | ||
|
|
52a980f751 | ||
|
|
3b7ab2aec8 | ||
|
|
d41e239b89 | ||
|
|
b85040f579 | ||
|
|
8bcd229849 | ||
|
|
d12515ccb9 | ||
|
|
499cb52e28 | ||
|
|
05a318225c | ||
|
|
caad0bc005 | ||
|
|
067ecb5e8e | ||
|
|
0f8cbeed11 | ||
|
|
2ed99c0cb8 | ||
|
|
0a149e3d9e | ||
|
|
a3a26c69c5 | ||
|
|
2bafc53b25 | ||
|
|
09e9b95e08 | ||
|
|
bf2ffb7465 | ||
|
|
287c96ea2e | ||
|
|
adacb8c638 | ||
|
|
7a3d08672a | ||
|
|
ec4d106a59 | ||
|
|
fe0c0fac1e | ||
|
|
4a4a1686d3 | ||
|
|
37218eef4f | ||
|
|
3b34efd33a | ||
|
|
cc650b58d3 | ||
|
|
183b46be9e | ||
|
|
a847b74c32 | ||
|
|
25c5d671dc | ||
|
|
87d9c7b410 | ||
|
|
67a6a6a445 | ||
|
|
4f8507036a | ||
|
|
acf2f4758f | ||
|
|
abf368e558 | ||
|
|
0697c79daa | ||
|
|
3d6a82fb00 | ||
|
|
97e9e42173 | ||
|
|
5d8e706c0b | ||
|
|
a8cd2e2eac | ||
|
|
1e615d69e1 | ||
|
|
63be1d8cf2 | ||
|
|
f039aa253d | ||
|
|
5a7521e335 | ||
|
|
5dac1f5867 | ||
|
|
1d0fc26025 | ||
|
|
ca597b9b9b | ||
|
|
c76df7fb16 | ||
|
|
89d5bd817b | ||
|
|
6c9fc598d4 | ||
|
|
273475881e | ||
|
|
6afaf6244c | ||
|
|
77535b002a | ||
|
|
dec68ee297 | ||
|
|
a8bf55abc2 | ||
|
|
1481149e51 | ||
|
|
4f91a321a0 | ||
|
|
7319fc5ef4 | ||
|
|
cc860e48b1 | ||
|
|
a9093b1dea | ||
|
|
ee95fad7e5 | ||
|
|
1a5138c5b1 | ||
|
|
5451e2f34a | ||
|
|
c641b116ba | ||
|
|
d44654f003 | ||
|
|
85b8724c73 | ||
|
|
4d1d3e316f | ||
|
|
e5ccf68476 | ||
|
|
e3d2bb2ec6 | ||
|
|
7f9f5514a4 | ||
|
|
d5487ba6ac | ||
|
|
95ed17aa69 | ||
|
|
4f64afd8f4 | ||
|
|
4f8b5c1250 | ||
|
|
6eccb92817 | ||
|
|
734ddd7489 | ||
|
|
ea4db1c864 | ||
|
|
f9171f3df8 | ||
|
|
9b1e9552d6 | ||
|
|
c9bb3ff6f6 | ||
|
|
993d497aad | ||
|
|
80afb3a86e | ||
|
|
f5d8974d04 | ||
|
|
276269e583 | ||
|
|
8f36c4e793 | ||
|
|
5f999d3c84 | ||
|
|
d68aafea15 | ||
|
|
e093ae72da | ||
|
|
d17362d8c4 | ||
|
|
5e19e7ac6c | ||
|
|
871565c687 | ||
|
|
6104b7803b | ||
|
|
95a332f38a | ||
|
|
4c4cc52c07 | ||
|
|
4714442d6e | ||
|
|
66115ca306 | ||
|
|
7fec4c0dac | ||
|
|
2e31a5bbcb | ||
|
|
d6a320490a | ||
|
|
125353c5a3 | ||
|
|
cf7584bb63 | ||
|
|
e10042a433 | ||
|
|
3eee8faad4 | ||
|
|
42eb61434d | ||
|
|
2962ef79dc | ||
|
|
87bdfbeeeb | ||
|
|
493b0d4a11 | ||
|
|
0c589a6f79 | ||
|
|
9b1aa3cd36 | ||
|
|
de860ac316 | ||
|
|
2495871c48 | ||
|
|
56c1851848 | ||
|
|
86f9e93e97 | ||
|
|
11502edad2 | ||
|
|
f6ffd574bf | ||
|
|
7df1060370 | ||
|
|
ce26828e41 | ||
|
|
2bae30bd11 | ||
|
|
e16db26d18 | ||
|
|
9f81a77943 | ||
|
|
f9c60423a8 | ||
|
|
0a554661ad | ||
|
|
8a67a87461 | ||
|
|
1cd89561ab | ||
|
|
79592e2c27 | ||
|
|
4a72d40394 | ||
|
|
95f6e4dd3b | ||
|
|
778b8718c9 | ||
|
|
b81c3b8f1b | ||
|
|
1d8760742e | ||
|
|
eb00ceb1c7 | ||
|
|
33f8ea5acb | ||
|
|
4b65dfa6ea | ||
|
|
0187f1780e | ||
|
|
4a5f374b7c | ||
|
|
c4e22a23ea | ||
|
|
8b7776b545 | ||
|
|
f3787beade | ||
|
|
3607341413 | ||
|
|
b33b14b4b7 | ||
|
|
d09743d254 | ||
|
|
2361c1b211 | ||
|
|
ff74e9035d | ||
|
|
cb4b26c8a4 | ||
|
|
1640ba2e9e | ||
|
|
ca003943a9 | ||
|
|
71783aea21 | ||
|
|
b5632b0097 | ||
|
|
c506ff6872 | ||
|
|
fd83834fca | ||
|
|
973cab57ab | ||
|
|
d1535e1789 | ||
|
|
60e1f15e42 | ||
|
|
6531d40386 | ||
|
|
c940e0bc92 | ||
|
|
86e3776fff | ||
|
|
469b29c941 |
4
.github/CODEOWNERS
vendored
Normal file
4
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
/src/renderer/src/store/ @0xfullex
|
||||
/src/main/services/ConfigManager.ts @0xfullex
|
||||
/packages/shared/IpcChannel.ts @0xfullex
|
||||
/src/main/ipc.ts @0xfullex
|
||||
94
.github/ISSUE_TEMPLATE/#0_bug_report.yml
vendored
94
.github/ISSUE_TEMPLATE/#0_bug_report.yml
vendored
@@ -1,94 +0,0 @@
|
||||
name: 🐛 错误报告 (中文)
|
||||
description: 创建一个报告以帮助我们改进
|
||||
title: '[错误]: '
|
||||
labels: ['BUG']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
感谢您花时间填写此错误报告!
|
||||
在提交此问题之前,请确保您已经了解了[常见问题](https://docs.cherry-ai.com/question-contact/questions)和[知识科普](https://docs.cherry-ai.com/question-contact/knowledge)
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: 提交前检查
|
||||
description: |
|
||||
在提交 Issue 前请确保您已经完成了以下所有步骤
|
||||
options:
|
||||
- label: 我理解 Issue 是用于反馈和解决问题的,而非吐槽评论区,将尽可能提供更多信息帮助问题解决。
|
||||
required: true
|
||||
- label: 我的问题不是 [常见问题](https://github.com/CherryHQ/cherry-studio/issues/3881) 中的内容。
|
||||
required: true
|
||||
- label: 我已经查看了 **置顶 Issue** 并搜索了现有的 [开放Issue](https://github.com/CherryHQ/cherry-studio/issues)和[已关闭Issue](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed%20),没有找到类似的问题。
|
||||
required: true
|
||||
- label: 我填写了简短且清晰明确的标题,以便开发者在翻阅 Issue 列表时能快速确定大致问题。而不是“一个建议”、“卡住了”等。
|
||||
required: true
|
||||
- label: 我确认我正在使用最新版本的 Cherry Studio。
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: platform
|
||||
attributes:
|
||||
label: 平台
|
||||
description: 您正在使用哪个平台?
|
||||
options:
|
||||
- Windows
|
||||
- macOS
|
||||
- Linux
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: 版本
|
||||
description: 您正在运行的 Cherry Studio 版本是什么?
|
||||
placeholder: 例如 v1.0.0
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: 错误描述
|
||||
description: 描述问题时请尽可能详细。请尽可能提供截图或屏幕录制,以帮助我们更好地理解问题。
|
||||
placeholder: 告诉我们发生了什么...(记得附上截图/录屏,如果适用)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
label: 重现步骤
|
||||
description: 提供详细的重现步骤,以便于我们的开发人员可以准确地重现问题。请尽可能为每个步骤提供截图或屏幕录制。
|
||||
placeholder: |
|
||||
1. 转到 '...'
|
||||
2. 点击 '....'
|
||||
3. 向下滚动到 '....'
|
||||
4. 看到错误
|
||||
|
||||
记得尽可能为每个步骤附上截图/录屏!
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: expected
|
||||
attributes:
|
||||
label: 预期行为
|
||||
description: 清晰简洁地描述您期望发生的事情
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: 相关日志输出
|
||||
description: 请复制并粘贴任何相关的日志输出
|
||||
render: shell
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: 附加信息
|
||||
description: 任何能让我们对你所遇到的问题有更多了解的东西
|
||||
76
.github/ISSUE_TEMPLATE/#1_feature_request.yml
vendored
76
.github/ISSUE_TEMPLATE/#1_feature_request.yml
vendored
@@ -1,76 +0,0 @@
|
||||
name: 💡 功能建议 (中文)
|
||||
description: 为项目提出新的想法
|
||||
title: '[功能]: '
|
||||
labels: ['feature']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
感谢您花时间提出新的功能建议!
|
||||
在提交此问题之前,请确保您已经了解了[项目规划](https://docs.cherry-ai.com/cherrystudio/planning)和[功能介绍](https://docs.cherry-ai.com/cherrystudio/preview)
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: 提交前检查
|
||||
description: |
|
||||
在提交 Issue 前请确保您已经完成了以下所有步骤
|
||||
options:
|
||||
- label: 我理解 Issue 是用于反馈和解决问题的,而非吐槽评论区,将尽可能提供更多信息帮助问题解决。
|
||||
required: true
|
||||
- label: 我已经查看了置顶 Issue 并搜索了现有的 [开放Issue](https://github.com/CherryHQ/cherry-studio/issues)和[已关闭Issue](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed%20),没有找到类似的建议。
|
||||
required: true
|
||||
- label: 我填写了简短且清晰明确的标题,以便开发者在翻阅 Issue 列表时能快速确定大致问题。而不是“一个建议”、“卡住了”等。
|
||||
required: true
|
||||
- label: 最新的 Cherry Studio 版本没有实现我所提出的功能。
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: platform
|
||||
attributes:
|
||||
label: 平台
|
||||
description: 您正在使用哪个平台?
|
||||
options:
|
||||
- Windows
|
||||
- macOS
|
||||
- Linux
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: 版本
|
||||
description: 您正在运行的 Cherry Studio 版本是什么?
|
||||
placeholder: 例如 v1.0.0
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: problem
|
||||
attributes:
|
||||
label: 您的功能建议是否与某个问题/issue相关?
|
||||
description: 请简明扼要地描述您遇到的问题
|
||||
placeholder: 我总是感到沮丧,因为...
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: 请描述您希望实现的解决方案
|
||||
description: 请简明扼要地描述您希望发生的情况
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: alternatives
|
||||
attributes:
|
||||
label: 请描述您考虑过的其他方案
|
||||
description: 请简明扼要地描述您考虑过的任何其他解决方案或功能
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: 其他补充信息
|
||||
description: 在此添加任何其他与功能建议相关的上下文或截图
|
||||
77
.github/ISSUE_TEMPLATE/#2_question.yml
vendored
77
.github/ISSUE_TEMPLATE/#2_question.yml
vendored
@@ -1,77 +0,0 @@
|
||||
name: ❓ 提问 & 讨论 (中文)
|
||||
description: 寻求帮助、讨论问题、提出疑问等...
|
||||
title: '[讨论]: '
|
||||
labels: ['discussion', 'help wanted']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
感谢您的提问!请尽可能详细地描述您的问题,这样我们才能更好地帮助您。
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: Issue 检查清单
|
||||
description: |
|
||||
在提交 Issue 前请确保您已经完成了以下所有步骤
|
||||
options:
|
||||
- label: 我理解 Issue 是用于反馈和解决问题的,而非吐槽评论区,将尽可能提供更多信息帮助问题解决。
|
||||
required: true
|
||||
- label: 我确认自己需要的是提出问题并且讨论问题,而不是 Bug 反馈或需求建议。
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: platform
|
||||
attributes:
|
||||
label: 平台
|
||||
description: 您正在使用哪个平台?
|
||||
options:
|
||||
- Windows
|
||||
- macOS
|
||||
- Linux
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: 版本
|
||||
description: 您正在运行的 Cherry Studio 版本是什么?
|
||||
placeholder: 例如 v1.0.0
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: question
|
||||
attributes:
|
||||
label: 您的问题
|
||||
description: 请详细描述您的问题
|
||||
placeholder: 请尽可能清楚地说明您的问题...
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: context
|
||||
attributes:
|
||||
label: 相关背景
|
||||
description: 请提供一些背景信息,帮助我们更好地理解您的问题
|
||||
placeholder: 例如:使用场景、已尝试的解决方案等
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: 补充信息
|
||||
description: 任何其他相关的信息、截图或代码示例
|
||||
render: shell
|
||||
|
||||
- type: dropdown
|
||||
id: priority
|
||||
attributes:
|
||||
label: 优先级
|
||||
description: 这个问题对您来说有多紧急?
|
||||
options:
|
||||
- 低 (有空再看)
|
||||
- 中 (希望尽快得到答复)
|
||||
- 高 (阻碍工作进行)
|
||||
validations:
|
||||
required: true
|
||||
76
.github/ISSUE_TEMPLATE/#3_others.yml
vendored
76
.github/ISSUE_TEMPLATE/#3_others.yml
vendored
@@ -1,76 +0,0 @@
|
||||
name: 🤔 其他问题 (中文)
|
||||
description: 提交不属于错误报告或功能需求的问题
|
||||
title: '[其他]: '
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
感谢您花时间提出问题!
|
||||
在提交此问题之前,请确保您已经了解了[常见问题](https://docs.cherry-ai.com/question-contact/questions)和[知识科普](https://docs.cherry-ai.com/question-contact/knowledge)
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: 提交前检查
|
||||
description: |
|
||||
在提交 Issue 前请确保您已经完成了以下所有步骤
|
||||
options:
|
||||
- label: 我理解 Issue 是用于反馈和解决问题的,而非吐槽评论区,将尽可能提供更多信息帮助问题解决。
|
||||
required: true
|
||||
- label: 我已经查看了置顶 Issue 并搜索了现有的 [开放Issue](https://github.com/CherryHQ/cherry-studio/issues)和[已关闭Issue](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed%20),没有找到类似的问题。
|
||||
required: true
|
||||
- label: 我填写了简短且清晰明确的标题,以便开发者在翻阅 Issue 列表时能快速确定大致问题。而不是"一个问题"、"求助"等。
|
||||
required: true
|
||||
- label: 我的问题不属于错误报告或功能需求类别。
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: platform
|
||||
attributes:
|
||||
label: 平台
|
||||
description: 您正在使用哪个平台?
|
||||
options:
|
||||
- Windows
|
||||
- macOS
|
||||
- Linux
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: 版本
|
||||
description: 您正在运行的 Cherry Studio 版本是什么?
|
||||
placeholder: 例如 v1.0.0
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: question
|
||||
attributes:
|
||||
label: 问题描述
|
||||
description: 请详细描述您的问题或疑问
|
||||
placeholder: 我想了解有关...的更多信息
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: context
|
||||
attributes:
|
||||
label: 相关背景
|
||||
description: 请提供与您的问题相关的任何背景信息或上下文
|
||||
placeholder: 我尝试实现...时遇到了疑问
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: attempts
|
||||
attributes:
|
||||
label: 您已尝试的方法
|
||||
description: 请描述您为解决问题已经尝试过的方法(如果有)
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: 附加信息
|
||||
description: 任何能让我们对您的问题有更多了解的信息,包括截图或相关链接
|
||||
10
.github/workflows/auto-i18n.yml
vendored
10
.github/workflows/auto-i18n.yml
vendored
@@ -1,9 +1,9 @@
|
||||
name: Auto I18N
|
||||
|
||||
env:
|
||||
API_KEY: ${{ secrets.TRANSLATE_API_KEY}}
|
||||
MODEL: ${{ vars.MODEL || 'deepseek/deepseek-v3.1'}}
|
||||
BASE_URL: ${{ vars.BASE_URL || 'https://api.ppinfra.com/openai'}}
|
||||
API_KEY: ${{ secrets.TRANSLATE_API_KEY }}
|
||||
MODEL: ${{ vars.AUTO_I18N_MODEL || 'deepseek/deepseek-v3.1'}}
|
||||
BASE_URL: ${{ vars.AUTO_I18N_BASE_URL || 'https://api.ppinfra.com/openai'}}
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@@ -35,7 +35,7 @@ jobs:
|
||||
# 在临时目录安装依赖
|
||||
mkdir -p /tmp/translation-deps
|
||||
cd /tmp/translation-deps
|
||||
echo '{"dependencies": {"openai": "^5.12.2", "cli-progress": "^3.12.0", "tsx": "^4.20.3", "prettier": "^3.5.3", "prettier-plugin-sort-json": "^4.1.1"}}' > package.json
|
||||
echo '{"dependencies": {"openai": "^5.12.2", "cli-progress": "^3.12.0", "tsx": "^4.20.3", "@biomejs/biome": "2.2.4"}}' > package.json
|
||||
npm install --no-package-lock
|
||||
|
||||
# 设置 NODE_PATH 让项目能找到这些依赖
|
||||
@@ -45,7 +45,7 @@ jobs:
|
||||
run: npx tsx scripts/auto-translate-i18n.ts
|
||||
|
||||
- name: 🔍 Format
|
||||
run: cd /tmp/translation-deps && npx prettier --write --config /home/runner/work/cherry-studio/cherry-studio/.prettierrc /home/runner/work/cherry-studio/cherry-studio/src/renderer/src/i18n/
|
||||
run: cd /tmp/translation-deps && npx biome format --config-path /home/runner/work/cherry-studio/cherry-studio/biome.jsonc --write /home/runner/work/cherry-studio/cherry-studio/src/renderer/src/i18n/
|
||||
|
||||
- name: 🔄 Commit changes
|
||||
run: |
|
||||
|
||||
16
.github/workflows/claude-code-review.yml
vendored
16
.github/workflows/claude-code-review.yml
vendored
@@ -2,7 +2,7 @@ name: Claude Code Review
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
types: [opened]
|
||||
# Optional: Only run on specific file changes
|
||||
# paths:
|
||||
# - "src/**/*.ts"
|
||||
@@ -12,12 +12,11 @@ on:
|
||||
|
||||
jobs:
|
||||
claude-review:
|
||||
# Optional: Filter by PR author
|
||||
# if: |
|
||||
# github.event.pull_request.user.login == 'external-contributor' ||
|
||||
# github.event.pull_request.user.login == 'new-developer' ||
|
||||
# github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
|
||||
|
||||
# Only trigger code review for PRs from the main repository due to upstream OIDC issues
|
||||
# https://github.com/anthropics/claude-code-action/issues/542
|
||||
if: |
|
||||
(github.event.pull_request.head.repo.full_name == github.repository) &&
|
||||
(github.event.pull_request.draft == false)
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -45,6 +44,9 @@ jobs:
|
||||
- Security concerns
|
||||
- Test coverage
|
||||
|
||||
PR number: ${{ github.event.number }}
|
||||
Repo: ${{ github.repository }}
|
||||
|
||||
Use the repository's CLAUDE.md for guidance on style and conventions. Be constructive and helpful in your feedback.
|
||||
|
||||
Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
|
||||
|
||||
107
.github/workflows/claude-translator.yml
vendored
Normal file
107
.github/workflows/claude-translator.yml
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
name: Claude Translator
|
||||
concurrency:
|
||||
group: translator-${{ github.event.comment.id || github.event.issue.number || github.event.review.id }}
|
||||
cancel-in-progress: false
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened]
|
||||
issue_comment:
|
||||
types: [created, edited]
|
||||
pull_request_review:
|
||||
types: [submitted, edited]
|
||||
pull_request_review_comment:
|
||||
types: [created, edited]
|
||||
|
||||
jobs:
|
||||
translate:
|
||||
if: |
|
||||
(github.event_name == 'issues') ||
|
||||
(github.event_name == 'issue_comment' && github.event.sender.type != 'Bot') ||
|
||||
(github.event_name == 'pull_request_review' && github.event.sender.type != 'Bot') ||
|
||||
(github.event_name == 'pull_request_review_comment' && github.event.sender.type != 'Bot')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write # 编辑issues/comments
|
||||
pull-requests: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run Claude for translation
|
||||
uses: anthropics/claude-code-action@main
|
||||
id: claude
|
||||
with:
|
||||
# Warning: Permissions should have been controlled by workflow permission.
|
||||
# Now `contents: read` is safe for files, but we could make a fine-grained token to control it.
|
||||
# See: https://github.com/anthropics/claude-code-action/blob/main/docs/security.md
|
||||
github_token: ${{ secrets.TOKEN_GITHUB_WRITE }}
|
||||
allowed_non_write_users: "*"
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
claude_args: "--allowed-tools Bash(gh issue:*),Bash(gh api:repos/*/issues:*),Bash(gh api:repos/*/pulls/*/reviews/*),Bash(gh api:repos/*/pulls/comments/*)"
|
||||
prompt: |
|
||||
你是一个多语言翻译助手。你需要响应 GitHub Webhooks 中的以下四种事件:
|
||||
|
||||
- issues
|
||||
- issue_comment
|
||||
- pull_request_review
|
||||
- pull_request_review_comment
|
||||
|
||||
请完成以下任务:
|
||||
|
||||
1. 获取当前事件的完整信息。
|
||||
|
||||
- 如果当前事件是 issues,就获取该 issues 的信息。
|
||||
- 如果当前事件是 issue_comment,就获取该 comment 的信息。
|
||||
- 如果当前事件是 pull_request_review,就获取该 review 的信息。
|
||||
- 如果当前事件是 pull_request_review_comment,就获取该 comment 的信息。
|
||||
|
||||
2. 智能检测内容。
|
||||
|
||||
- 如果获取到的信息是已经遵循格式要求翻译过的内容,则检查翻译内容和原始内容是否匹配。若不匹配,则重新翻译一次令其匹配,并遵循格式要求;
|
||||
- 如果获取到的信息是未翻译过的内容,检查其内容语言。若不是英文,则翻译成英文;
|
||||
- 如果获取到的信息是部分翻译为英文的内容,则将其翻译为英文;
|
||||
- 如果获取到的信息包含了对已翻译内容的引用,则将引用内容清理为仅含英文的内容。引用的内容不能够包含"This xxx was translated by Claude"和"Original Content`等内容。
|
||||
- 如果获取到的信息包含了其他类型的引用,即对非 Claude 翻译的内容的引用,则直接照原样引用,不进行翻译。
|
||||
- 如果获取到的信息是通过邮件回复的内容,则在翻译时应当将邮件内容的引用放到最后。在原始内容和翻译内容中只需要回复的内容本身,不要包含对邮件内容的引用。
|
||||
- 如果获取到的信息本身不需要任何处理,则跳过任务。
|
||||
|
||||
3. 格式要求:
|
||||
|
||||
- 标题:英文翻译(如果非英文)
|
||||
- 内容格式:
|
||||
> [!NOTE]
|
||||
> This issue/comment/review was translated by Claude.
|
||||
|
||||
[翻译内容]
|
||||
|
||||
---
|
||||
<details>
|
||||
<summary>Original Content</summary>
|
||||
[原始内容]
|
||||
</details>
|
||||
|
||||
4. 使用gh工具更新:
|
||||
|
||||
- 根据环境信息中的Event类型选择正确的命令:
|
||||
- 如果 Event 是 'issues': gh issue edit [ISSUE_NUMBER] --title "[英文标题]" --body "[翻译内容 + 原始内容]"
|
||||
- 如果 Event 是 'issue_comment': gh api -X PATCH /repos/${{ github.repository }}/issues/comments/${{ github.event.comment.id }} -f body="[翻译内容 + 原始内容]"
|
||||
- 如果 Event 是 'pull_request_review': gh api -X PUT /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews/${{ github.event.review.id }} -f body="[翻译内容]"
|
||||
- 如果 Event 是 'pull_request_review_comment': gh api -X PATCH /repos/${{ github.repository }}/pulls/comments/${{ github.event.comment.id }} -f body="[翻译内容 + 原始内容]"
|
||||
|
||||
环境信息:
|
||||
- Event: ${{ github.event_name }}
|
||||
- Issue Number: ${{ github.event.issue.number }}
|
||||
- Repository: ${{ github.repository }}
|
||||
- (Review) Comment ID: ${{ github.event.comment.id || 'N/A' }}
|
||||
- Pull Request Number: ${{ github.event.pull_request.number || 'N/A' }}
|
||||
- Review ID: ${{ github.event.review.id || 'N/A' }}
|
||||
|
||||
|
||||
使用以下命令获取完整信息:
|
||||
gh issue view ${{ github.event.issue.number }} --json title,body,comments
|
||||
22
.github/workflows/delete-branch.yml
vendored
Normal file
22
.github/workflows/delete-branch.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
name: Delete merged branch
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- closed
|
||||
|
||||
jobs:
|
||||
delete-branch:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
if: github.event.pull_request.merged == true && github.event.pull_request.head.repo.full_name == github.repository
|
||||
steps:
|
||||
- name: Delete merged branch
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
github.rest.git.deleteRef({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
ref: `heads/${context.payload.pull_request.head.ref}`,
|
||||
})
|
||||
30
.github/workflows/nightly-build.yml
vendored
30
.github/workflows/nightly-build.yml
vendored
@@ -98,10 +98,10 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Build Mac
|
||||
if: matrix.os == 'macos-latest'
|
||||
@@ -110,15 +110,15 @@ jobs:
|
||||
env:
|
||||
CSC_LINK: ${{ secrets.CSC_LINK }}
|
||||
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
|
||||
APPLE_ID: ${{ vars.APPLE_ID }}
|
||||
APPLE_APP_SPECIFIC_PASSWORD: ${{ vars.APPLE_APP_SPECIFIC_PASSWORD }}
|
||||
APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
|
||||
APPLE_ID: ${{ secrets.APPLE_ID }}
|
||||
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
|
||||
APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Build Windows
|
||||
if: matrix.os == 'windows-latest'
|
||||
@@ -127,10 +127,10 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Rename artifacts with nightly format
|
||||
shell: bash
|
||||
|
||||
6
.github/workflows/pr-ci.yml
vendored
6
.github/workflows/pr-ci.yml
vendored
@@ -9,12 +9,15 @@ on:
|
||||
branches:
|
||||
- main
|
||||
- develop
|
||||
- v2
|
||||
types: [ready_for_review, synchronize, opened]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
PRCI: true
|
||||
if: github.event.pull_request.draft == false
|
||||
|
||||
steps:
|
||||
- name: Check out Git repository
|
||||
@@ -48,6 +51,9 @@ jobs:
|
||||
- name: Lint Check
|
||||
run: yarn test:lint
|
||||
|
||||
- name: Format Check
|
||||
run: yarn format:check
|
||||
|
||||
- name: Type Check
|
||||
run: yarn typecheck
|
||||
|
||||
|
||||
30
.github/workflows/release.yml
vendored
30
.github/workflows/release.yml
vendored
@@ -85,10 +85,10 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Build Mac
|
||||
if: matrix.os == 'macos-latest'
|
||||
@@ -98,15 +98,15 @@ jobs:
|
||||
env:
|
||||
CSC_LINK: ${{ secrets.CSC_LINK }}
|
||||
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
|
||||
APPLE_ID: ${{ vars.APPLE_ID }}
|
||||
APPLE_APP_SPECIFIC_PASSWORD: ${{ vars.APPLE_APP_SPECIFIC_PASSWORD }}
|
||||
APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
|
||||
APPLE_ID: ${{ secrets.APPLE_ID }}
|
||||
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
|
||||
APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Build Windows
|
||||
if: matrix.os == 'windows-latest'
|
||||
@@ -115,10 +115,10 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NODE_OPTIONS: --max-old-space-size=8192
|
||||
MAIN_VITE_CHERRYIN_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYIN_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
|
||||
MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
|
||||
RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
|
||||
RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
|
||||
|
||||
- name: Release
|
||||
uses: ncipollo/release-action@v1
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -37,6 +37,7 @@ dist
|
||||
out
|
||||
mcp_server
|
||||
stats.html
|
||||
.eslintcache
|
||||
|
||||
# ENV
|
||||
.env
|
||||
@@ -53,6 +54,8 @@ local
|
||||
.qwen/*
|
||||
.trae/*
|
||||
.claude-code-router/*
|
||||
.codebuddy/*
|
||||
.zed/*
|
||||
CLAUDE.local.md
|
||||
|
||||
# vitest
|
||||
|
||||
215
.oxlintrc.json
Normal file
215
.oxlintrc.json
Normal file
@@ -0,0 +1,215 @@
|
||||
{
|
||||
"$schema": "./node_modules/oxlint/configuration_schema.json",
|
||||
"categories": {},
|
||||
"env": {
|
||||
"es2022": true
|
||||
},
|
||||
"globals": {},
|
||||
"ignorePatterns": [
|
||||
"node_modules/**",
|
||||
"build/**",
|
||||
"dist/**",
|
||||
"out/**",
|
||||
"local/**",
|
||||
".yarn/**",
|
||||
".gitignore",
|
||||
"scripts/cloudflare-worker.js",
|
||||
"src/main/integration/nutstore/sso/lib/**",
|
||||
"src/main/integration/cherryai/index.js",
|
||||
"src/main/integration/nutstore/sso/lib/**",
|
||||
"src/renderer/src/ui/**",
|
||||
"packages/**/dist",
|
||||
"eslint.config.mjs"
|
||||
],
|
||||
"overrides": [
|
||||
// set different env
|
||||
{
|
||||
"env": {
|
||||
"node": true
|
||||
},
|
||||
"files": ["src/main/**", "resources/scripts/**", "scripts/**", "playwright.config.ts", "electron.vite.config.ts"]
|
||||
},
|
||||
{
|
||||
"env": {
|
||||
"browser": true
|
||||
},
|
||||
"files": [
|
||||
"src/renderer/**/*.{ts,tsx}",
|
||||
"packages/aiCore/**",
|
||||
"packages/extension-table-plus/**",
|
||||
"resources/js/**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"env": {
|
||||
"node": true,
|
||||
"vitest": true
|
||||
},
|
||||
"files": ["**/__tests__/*.test.{ts,tsx}", "tests/**"]
|
||||
},
|
||||
{
|
||||
"env": {
|
||||
"browser": true,
|
||||
"node": true
|
||||
},
|
||||
"files": ["src/preload/**"]
|
||||
}
|
||||
],
|
||||
// We don't use the React plugin here because its behavior differs slightly from that of ESLint's React plugin.
|
||||
"plugins": ["unicorn", "typescript", "oxc", "import"],
|
||||
"rules": {
|
||||
"constructor-super": "error",
|
||||
"for-direction": "error",
|
||||
"getter-return": "error",
|
||||
"no-array-constructor": "off",
|
||||
// "import/no-cycle": "error", // tons of error, bro
|
||||
"no-async-promise-executor": "error",
|
||||
"no-caller": "warn",
|
||||
"no-case-declarations": "error",
|
||||
"no-class-assign": "error",
|
||||
"no-compare-neg-zero": "error",
|
||||
"no-cond-assign": "error",
|
||||
"no-const-assign": "error",
|
||||
"no-constant-binary-expression": "error",
|
||||
"no-constant-condition": "error",
|
||||
"no-control-regex": "error",
|
||||
"no-debugger": "error",
|
||||
"no-delete-var": "error",
|
||||
"no-dupe-args": "error",
|
||||
"no-dupe-class-members": "error",
|
||||
"no-dupe-else-if": "error",
|
||||
"no-dupe-keys": "error",
|
||||
"no-duplicate-case": "error",
|
||||
"no-empty": "error",
|
||||
"no-empty-character-class": "error",
|
||||
"no-empty-pattern": "error",
|
||||
"no-empty-static-block": "error",
|
||||
"no-eval": "warn",
|
||||
"no-ex-assign": "error",
|
||||
"no-extra-boolean-cast": "error",
|
||||
"no-fallthrough": "warn",
|
||||
"no-func-assign": "error",
|
||||
"no-global-assign": "error",
|
||||
"no-import-assign": "error",
|
||||
"no-invalid-regexp": "error",
|
||||
"no-irregular-whitespace": "error",
|
||||
"no-loss-of-precision": "error",
|
||||
"no-misleading-character-class": "error",
|
||||
"no-new-native-nonconstructor": "error",
|
||||
"no-nonoctal-decimal-escape": "error",
|
||||
"no-obj-calls": "error",
|
||||
"no-octal": "error",
|
||||
"no-prototype-builtins": "error",
|
||||
"no-redeclare": "error",
|
||||
"no-regex-spaces": "error",
|
||||
"no-self-assign": "error",
|
||||
"no-setter-return": "error",
|
||||
"no-shadow-restricted-names": "error",
|
||||
"no-sparse-arrays": "error",
|
||||
"no-this-before-super": "error",
|
||||
"no-unassigned-vars": "warn",
|
||||
"no-undef": "error",
|
||||
"no-unexpected-multiline": "error",
|
||||
"no-unreachable": "error",
|
||||
"no-unsafe-finally": "error",
|
||||
"no-unsafe-negation": "error",
|
||||
"no-unsafe-optional-chaining": "error",
|
||||
"no-unused-expressions": "off", // this rule disallow us to use expression to call function, like `condition && fn()`
|
||||
"no-unused-labels": "error",
|
||||
"no-unused-private-class-members": "error",
|
||||
"no-unused-vars": ["error", { "caughtErrors": "none" }],
|
||||
"no-useless-backreference": "error",
|
||||
"no-useless-catch": "error",
|
||||
"no-useless-escape": "error",
|
||||
"no-useless-rename": "warn",
|
||||
"no-with": "error",
|
||||
"oxc/bad-array-method-on-arguments": "warn",
|
||||
"oxc/bad-char-at-comparison": "warn",
|
||||
"oxc/bad-comparison-sequence": "warn",
|
||||
"oxc/bad-min-max-func": "warn",
|
||||
"oxc/bad-object-literal-comparison": "warn",
|
||||
"oxc/bad-replace-all-arg": "warn",
|
||||
"oxc/const-comparisons": "warn",
|
||||
"oxc/double-comparisons": "warn",
|
||||
"oxc/erasing-op": "warn",
|
||||
"oxc/missing-throw": "warn",
|
||||
"oxc/number-arg-out-of-range": "warn",
|
||||
"oxc/only-used-in-recursion": "off", // manually off bacause of existing warning. may turn it on in the future
|
||||
"oxc/uninvoked-array-callback": "warn",
|
||||
"require-yield": "error",
|
||||
"typescript/await-thenable": "warn",
|
||||
// "typescript/ban-ts-comment": "error",
|
||||
"typescript/no-array-constructor": "error",
|
||||
// "typescript/consistent-type-imports": "error",
|
||||
"typescript/no-array-delete": "warn",
|
||||
"typescript/no-base-to-string": "warn",
|
||||
"typescript/no-duplicate-enum-values": "error",
|
||||
"typescript/no-duplicate-type-constituents": "warn",
|
||||
"typescript/no-empty-object-type": "off",
|
||||
"typescript/no-explicit-any": "off", // not safe but too many errors
|
||||
"typescript/no-extra-non-null-assertion": "error",
|
||||
"typescript/no-floating-promises": "warn",
|
||||
"typescript/no-for-in-array": "warn",
|
||||
"typescript/no-implied-eval": "warn",
|
||||
"typescript/no-meaningless-void-operator": "warn",
|
||||
"typescript/no-misused-new": "error",
|
||||
"typescript/no-misused-spread": "warn",
|
||||
"typescript/no-namespace": "error",
|
||||
"typescript/no-non-null-asserted-optional-chain": "off", // it's off now. but may turn it on.
|
||||
"typescript/no-redundant-type-constituents": "warn",
|
||||
"typescript/no-require-imports": "off",
|
||||
"typescript/no-this-alias": "error",
|
||||
"typescript/no-unnecessary-parameter-property-assignment": "warn",
|
||||
"typescript/no-unnecessary-type-constraint": "error",
|
||||
"typescript/no-unsafe-declaration-merging": "error",
|
||||
"typescript/no-unsafe-function-type": "error",
|
||||
"typescript/no-unsafe-unary-minus": "warn",
|
||||
"typescript/no-useless-empty-export": "warn",
|
||||
"typescript/no-wrapper-object-types": "error",
|
||||
"typescript/prefer-as-const": "error",
|
||||
"typescript/prefer-namespace-keyword": "error",
|
||||
"typescript/require-array-sort-compare": "warn",
|
||||
"typescript/restrict-template-expressions": "warn",
|
||||
"typescript/triple-slash-reference": "error",
|
||||
"typescript/unbound-method": "warn",
|
||||
"unicorn/no-await-in-promise-methods": "warn",
|
||||
"unicorn/no-empty-file": "off", // manually off bacause of existing warning. may turn it on in the future
|
||||
"unicorn/no-invalid-fetch-options": "warn",
|
||||
"unicorn/no-invalid-remove-event-listener": "warn",
|
||||
"unicorn/no-new-array": "off", // manually off bacause of existing warning. may turn it on in the future
|
||||
"unicorn/no-single-promise-in-promise-methods": "warn",
|
||||
"unicorn/no-thenable": "off", // manually off bacause of existing warning. may turn it on in the future
|
||||
"unicorn/no-unnecessary-await": "warn",
|
||||
"unicorn/no-useless-fallback-in-spread": "warn",
|
||||
"unicorn/no-useless-length-check": "warn",
|
||||
"unicorn/no-useless-spread": "off", // manually off bacause of existing warning. may turn it on in the future
|
||||
"unicorn/prefer-set-size": "warn",
|
||||
"unicorn/prefer-string-starts-ends-with": "warn",
|
||||
"use-isnan": "error",
|
||||
"valid-typeof": "error"
|
||||
},
|
||||
"settings": {
|
||||
"jsdoc": {
|
||||
"augmentsExtendsReplacesDocs": false,
|
||||
"exemptDestructuredRootsFromChecks": false,
|
||||
"ignoreInternal": false,
|
||||
"ignorePrivate": false,
|
||||
"ignoreReplacesDocs": true,
|
||||
"implementsReplacesDocs": false,
|
||||
"overrideReplacesDocs": true,
|
||||
"tagNamePreference": {}
|
||||
},
|
||||
"jsx-a11y": {
|
||||
"attributes": {},
|
||||
"components": {},
|
||||
"polymorphicPropName": null
|
||||
},
|
||||
"next": {
|
||||
"rootDir": []
|
||||
},
|
||||
"react": {
|
||||
"formComponents": [],
|
||||
"linkComponents": []
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
out
|
||||
dist
|
||||
pnpm-lock.yaml
|
||||
LICENSE.md
|
||||
tsconfig.json
|
||||
tsconfig.*.json
|
||||
CHANGELOG*.md
|
||||
agents.json
|
||||
src/renderer/src/integration/nutstore/sso/lib
|
||||
AGENT.md
|
||||
src/main/integration/cherryin/index.js
|
||||
11
.prettierrc
11
.prettierrc
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"bracketSameLine": true,
|
||||
"endOfLine": "lf",
|
||||
"jsonRecursiveSort": true,
|
||||
"jsonSortOrder": "{\"*\": \"lexical\"}",
|
||||
"plugins": ["prettier-plugin-sort-json"],
|
||||
"printWidth": 120,
|
||||
"semi": false,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "none"
|
||||
}
|
||||
7
.vscode/extensions.json
vendored
7
.vscode/extensions.json
vendored
@@ -1,8 +1,11 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"dbaeumer.vscode-eslint",
|
||||
"esbenp.prettier-vscode",
|
||||
"editorconfig.editorconfig",
|
||||
"lokalise.i18n-ally"
|
||||
"lokalise.i18n-ally",
|
||||
"bradlc.vscode-tailwindcss",
|
||||
"vitest.explorer",
|
||||
"oxc.oxc-vscode",
|
||||
"biomejs.biome"
|
||||
]
|
||||
}
|
||||
|
||||
19
.vscode/settings.json
vendored
19
.vscode/settings.json
vendored
@@ -1,33 +1,38 @@
|
||||
{
|
||||
"[css]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[javascript]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[json]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[jsonc]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[markdown]": {
|
||||
"files.trimTrailingWhitespace": false
|
||||
},
|
||||
"[scss]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[typescript]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"[typescriptreact]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
"editor.defaultFormatter": "biomejs.biome"
|
||||
},
|
||||
"editor.codeActionsOnSave": {
|
||||
"source.fixAll.biome": "explicit",
|
||||
"source.fixAll.eslint": "explicit",
|
||||
"source.fixAll.oxc": "explicit",
|
||||
"source.organizeImports": "never"
|
||||
},
|
||||
"editor.formatOnSave": true,
|
||||
"files.associations": {
|
||||
"*.css": "tailwindcss"
|
||||
},
|
||||
"files.eol": "\n",
|
||||
"i18n-ally.displayLanguage": "zh-cn",
|
||||
"i18n-ally.enabledFrameworks": ["react-i18next", "i18next"],
|
||||
|
||||
13
.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch
vendored
Normal file
13
.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
diff --git a/dist/index.mjs b/dist/index.mjs
|
||||
index 110f37ec18c98b1d55ae2b73cc716194e6f9094d..17e109b7778cbebb904f1919e768d21a2833d965 100644
|
||||
--- a/dist/index.mjs
|
||||
+++ b/dist/index.mjs
|
||||
@@ -448,7 +448,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
|
||||
|
||||
// src/get-model-path.ts
|
||||
function getModelPath(modelId) {
|
||||
- return modelId.includes("/") ? modelId : `models/${modelId}`;
|
||||
+ return modelId?.includes("models/") ? modelId : `models/${modelId}`;
|
||||
}
|
||||
|
||||
// src/google-generative-ai-options.ts
|
||||
@@ -5,3 +5,5 @@ httpTimeout: 300000
|
||||
nodeLinker: node-modules
|
||||
|
||||
yarnPath: .yarn/releases/yarn-4.9.1.cjs
|
||||
npmRegistryServer: https://registry.npmjs.org
|
||||
npmPublishRegistry: https://registry.npmjs.org
|
||||
|
||||
@@ -9,6 +9,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
- **Prerequisites**: Node.js v22.x.x or higher, Yarn 4.9.1
|
||||
- **Setup Yarn**: `corepack enable && corepack prepare yarn@4.9.1 --activate`
|
||||
- **Install Dependencies**: `yarn install`
|
||||
- **Add New Dependencies**: `yarn add -D` for renderer-specific dependencies, `yarn add` for others.
|
||||
|
||||
### Development
|
||||
|
||||
@@ -21,7 +22,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
- **Run E2E Tests**: `yarn test:e2e` - Playwright end-to-end tests
|
||||
- **Type Check**: `yarn typecheck` - Checks TypeScript for both node and web
|
||||
- **Lint**: `yarn lint` - ESLint with auto-fix
|
||||
- **Format**: `yarn format` - Prettier formatting
|
||||
- **Format**: `yarn format` - Biome formatting
|
||||
|
||||
### Build & Release
|
||||
|
||||
@@ -92,6 +93,12 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
- **Multi-language Support**: i18n with dynamic loading
|
||||
- **Theme System**: Light/dark themes with custom CSS variables
|
||||
|
||||
### UI Design
|
||||
|
||||
The project is in the process of migrating from antd & styled-components to HeroUI. Please use HeroUI to build UI components. The use of antd and styled-components is prohibited.
|
||||
|
||||
HeroUI Docs: https://www.heroui.com/docs/guide/introduction
|
||||
|
||||
## Logging Standards
|
||||
|
||||
### Usage
|
||||
|
||||
45
LICENSE
45
LICENSE
@@ -1,48 +1,3 @@
|
||||
**许可协议 (Licensing)**
|
||||
|
||||
本项目采用**区分用户的双重许可 (User-Segmented Dual Licensing)** 模式。
|
||||
|
||||
**核心原则:**
|
||||
|
||||
* **个人用户 和 10人及以下企业/组织:** 默认适用 **GNU Affero 通用公共许可证 v3.0 (AGPLv3)**。
|
||||
* **超过10人的企业/组织:** **必须** 获取 **商业许可证 (Commercial License)**。
|
||||
|
||||
定义:“10人及以下”
|
||||
指在您的组织(包括公司、非营利组织、政府机构、教育机构等任何实体)中,能够访问、使用或以任何方式直接或间接受益于本软件(Cherry Studio)功能的个人总数不超过10人。这包括但不限于开发者、测试人员、运营人员、最终用户、通过集成系统间接使用者等。
|
||||
|
||||
---
|
||||
|
||||
**1. 开源许可证 (Open Source License): AGPLv3 - 适用于个人及10人及以下组织**
|
||||
|
||||
* 如果您是个人用户,或者您的组织满足上述“10人及以下”的定义,您可以在 **AGPLv3** 的条款下自由使用、修改和分发 Cherry Studio。AGPLv3 的完整文本可以访问 [https://www.gnu.org/licenses/agpl-3.0.html](https://www.gnu.org/licenses/agpl-3.0.html) 获取。
|
||||
* **核心义务:** AGPLv3 的一个关键要求是,如果您修改了 Cherry Studio 并通过网络提供服务,或者分发了修改后的版本,您必须以 AGPLv3 许可证向接收者提供相应的**完整源代码**。即使您符合“10人及以下”的标准,如果您希望避免此源代码公开义务,您也需要考虑获取商业许可证(见下文)。
|
||||
* 使用前请务必仔细阅读并理解 AGPLv3 的所有条款。
|
||||
|
||||
**2. 商业许可证 (Commercial License) - 适用于超过10人的组织,或希望规避 AGPLv3 义务的用户**
|
||||
|
||||
* **强制要求:** 如果您的组织**不**满足上述“10人及以下”的定义(即有11人或更多人可以访问、使用或受益于本软件),您**必须**联系我们获取并签署一份商业许可证才能使用 Cherry Studio。
|
||||
* **自愿选择:** 即使您的组织满足“10人及以下”的条件,但如果您的使用场景**无法满足 AGPLv3 的条款要求**(特别是关于**源代码公开**的义务),或者您需要 AGPLv3 **未提供**的特定商业条款(如保证、赔偿、无 Copyleft 限制等),您也**必须**联系我们获取并签署一份商业许可证。
|
||||
* **需要商业许可证的常见情况包括(但不限于):**
|
||||
* 您的组织规模超过10人。
|
||||
* (无论组织规模)您希望分发修改过的 Cherry Studio 版本,但**不希望**根据 AGPLv3 公开您修改部分的源代码。
|
||||
* (无论组织规模)您希望基于修改过的 Cherry Studio 提供网络服务(SaaS),但**不希望**根据 AGPLv3 向服务使用者提供修改后的源代码。
|
||||
* (无论组织规模)您的公司政策、客户合同或项目要求不允许使用 AGPLv3 许可的软件,或要求闭源分发及保密。
|
||||
* 商业许可证将为您提供豁免 AGPLv3 义务(如源代码公开)的权利,并可能包含额外的商业保障条款。
|
||||
* **获取商业许可:** 请通过邮箱 **bd@cherry-ai.com** 联系 Cherry Studio 开发团队洽谈商业授权事宜。
|
||||
|
||||
**3. 贡献 (Contributions)**
|
||||
|
||||
* 我们欢迎社区对 Cherry Studio 的贡献。所有向本项目提交的贡献都将被视为在 **AGPLv3** 许可证下提供。
|
||||
* 通过向本项目提交贡献(例如通过 Pull Request),即表示您同意您的代码以 AGPLv3 许可证授权给本项目及所有后续使用者(无论这些使用者最终遵循 AGPLv3 还是商业许可)。
|
||||
* 您也理解并同意,您的贡献可能会被包含在根据商业许可证分发的 Cherry Studio 版本中。
|
||||
|
||||
**4. 其他条款 (Other Terms)**
|
||||
|
||||
* 关于商业许可证的具体条款和条件,以双方签署的正式商业许可协议为准。
|
||||
* 项目维护者保留根据需要更新本许可政策(包括用户规模定义和阈值)的权利。相关更新将通过项目官方渠道(如代码仓库、官方网站)进行通知。
|
||||
|
||||
---
|
||||
|
||||
**Licensing**
|
||||
|
||||
This project employs a **User-Segmented Dual Licensing** model.
|
||||
|
||||
@@ -82,7 +82,7 @@ Cherry Studio is a desktop client that supports multiple LLM providers, availabl
|
||||
1. **Diverse LLM Provider Support**:
|
||||
|
||||
- ☁️ Major LLM Cloud Services: OpenAI, Gemini, Anthropic, and more
|
||||
- 🔗 AI Web Service Integration: Claude, Peplexity, Poe, and others
|
||||
- 🔗 AI Web Service Integration: Claude, Perplexity, Poe, and others
|
||||
- 💻 Local Model Support with Ollama, LM Studio
|
||||
|
||||
2. **AI Assistants & Conversations**:
|
||||
|
||||
97
biome.jsonc
Normal file
97
biome.jsonc
Normal file
@@ -0,0 +1,97 @@
|
||||
{
|
||||
"$schema": "https://biomejs.dev/schemas/2.2.4/schema.json",
|
||||
"assist": {
|
||||
// to sort json
|
||||
"actions": {
|
||||
"source": {
|
||||
"organizeImports": "on",
|
||||
"useSortedKeys": {
|
||||
"level": "on",
|
||||
"options": {
|
||||
"sortOrder": "lexicographic"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"enabled": true,
|
||||
"includes": ["**/*.json", "!*.json", "!**/package.json"]
|
||||
},
|
||||
"css": {
|
||||
"formatter": {
|
||||
"quoteStyle": "single"
|
||||
}
|
||||
},
|
||||
"files": { "ignoreUnknown": false },
|
||||
"formatter": {
|
||||
"attributePosition": "auto",
|
||||
"bracketSameLine": false,
|
||||
"bracketSpacing": true,
|
||||
"enabled": true,
|
||||
"expand": "auto",
|
||||
"formatWithErrors": true,
|
||||
"includes": [
|
||||
"**",
|
||||
"!out/**",
|
||||
"!**/dist/**",
|
||||
"!build/**",
|
||||
"!.yarn/**",
|
||||
"!.github/**",
|
||||
"!.husky/**",
|
||||
"!.vscode/**",
|
||||
"!*.yaml",
|
||||
"!*.yml",
|
||||
"!*.mjs",
|
||||
"!*.cjs",
|
||||
"!*.md",
|
||||
"!*.json",
|
||||
"!src/main/integration/**",
|
||||
"!**/tailwind.css",
|
||||
"!**/package.json"
|
||||
],
|
||||
"indentStyle": "space",
|
||||
"indentWidth": 2,
|
||||
"lineEnding": "lf",
|
||||
"lineWidth": 120,
|
||||
"useEditorconfig": true
|
||||
},
|
||||
"html": { "formatter": { "selfCloseVoidElements": "always" } },
|
||||
"javascript": {
|
||||
"formatter": {
|
||||
"arrowParentheses": "always",
|
||||
"attributePosition": "auto",
|
||||
// To minimize changes in this PR as much as possible, it's set to true. However, setting it to false would make it more convenient to add attributes at the end.
|
||||
"bracketSameLine": true,
|
||||
"bracketSpacing": true,
|
||||
"jsxQuoteStyle": "double",
|
||||
"quoteProperties": "asNeeded",
|
||||
"quoteStyle": "single",
|
||||
"semicolons": "asNeeded",
|
||||
"trailingCommas": "none"
|
||||
}
|
||||
},
|
||||
"json": {
|
||||
"parser": {
|
||||
"allowComments": true
|
||||
}
|
||||
},
|
||||
"linter": {
|
||||
"enabled": true,
|
||||
"includes": ["!**/tailwind.css", "src/renderer/**/*.{tsx,ts}"],
|
||||
// only enable sorted tailwind css rule. used as formatter instead of linter
|
||||
"rules": {
|
||||
"nursery": {
|
||||
// to sort tailwind css classes
|
||||
"useSortedClasses": {
|
||||
"fix": "safe",
|
||||
"level": "warn",
|
||||
"options": {
|
||||
"functions": ["cn"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"recommended": false,
|
||||
"suspicious": "off"
|
||||
}
|
||||
},
|
||||
"vcs": { "clientKind": "git", "enabled": false, "useIgnoreFile": false }
|
||||
}
|
||||
21
components.json
Normal file
21
components.json
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "https://ui.shadcn.com/schema.json",
|
||||
"aliases": {
|
||||
"components": "@renderer/ui/third-party",
|
||||
"hooks": "@renderer/hooks",
|
||||
"lib": "@renderer/lib",
|
||||
"ui": "@renderer/ui",
|
||||
"utils": "@renderer/utils"
|
||||
},
|
||||
"iconLibrary": "lucide",
|
||||
"rsc": false,
|
||||
"style": "new-york",
|
||||
"tailwind": {
|
||||
"baseColor": "zinc",
|
||||
"config": "",
|
||||
"css": "src/renderer/src/assets/styles/tailwind.css",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"tsx": true
|
||||
}
|
||||
@@ -13,7 +13,7 @@
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=fr">Français</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=de">Deutsch</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=es">Español</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=it">Itapano</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=it">Italiano</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=ru">Русский</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=pt">Português</a></p>
|
||||
<p><a href="https://openaitx.github.io/view.html?user=CherryHQ&project=cherry-studio&lang=nl">Nederlands</a></p>
|
||||
@@ -89,7 +89,7 @@ https://docs.cherry-ai.com
|
||||
1. **多样化 LLM 服务支持**:
|
||||
|
||||
- ☁️ 支持主流 LLM 云服务:OpenAI、Gemini、Anthropic、硅基流动等
|
||||
- 🔗 集成流行 AI Web 服务:Claude、Peplexity、Poe、腾讯元宝、知乎直答等
|
||||
- 🔗 集成流行 AI Web 服务:Claude、Perplexity、Poe、腾讯元宝、知乎直答等
|
||||
- 💻 支持 Ollama、LM Studio 本地模型部署
|
||||
|
||||
2. **智能助手与对话**:
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
## IDE Setup
|
||||
|
||||
[Cursor](https://www.cursor.com/) + [ESLint](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) + [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode)
|
||||
- Editor: [Cursor](https://www.cursor.com/), etc. Any VS Code compatible editor.
|
||||
- Linter: [ESLint](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint)
|
||||
- Formatter: [Biome](https://marketplace.visualstudio.com/items?itemName=biomejs.biome)
|
||||
|
||||
## Project Setup
|
||||
|
||||
|
||||
@@ -8,9 +8,9 @@
|
||||
|
||||
| 字段名 | 类型 | 是否主键 | 索引 | 说明 |
|
||||
| ---------- | ------ | -------- | ---- | ------------------------------------------------------------------------ |
|
||||
| `id` | string | ✅ 是 | ✅ | 唯一标识符,主键 |
|
||||
| `langCode` | string | ❌ 否 | ✅ | 语言代码(如:`zh-cn`, `en-us`, `ja-jp` 等,均为小写),支持普通索引查询 |
|
||||
| `value` | string | ❌ 否 | ❌ | 语言的名称,用户输入 |
|
||||
| `emoji` | string | ❌ 否 | ❌ | 语言的emoji,用户输入 |
|
||||
| `id` | string | ✅ 是 | ✅ | 唯一标识符,主键 |
|
||||
| `langCode` | string | ❌ 否 | ✅ | 语言代码(如:`zh-cn`, `en-us`, `ja-jp` 等,均为小写),支持普通索引查询 |
|
||||
| `value` | string | ❌ 否 | ❌ | 语言的名称,用户输入 |
|
||||
| `emoji` | string | ❌ 否 | ❌ | 语言的emoji,用户输入 |
|
||||
|
||||
> `langCode` 虽非主键,但在业务层应当避免重复插入相同语言代码。
|
||||
|
||||
@@ -17,52 +17,52 @@ protocols:
|
||||
schemes:
|
||||
- cherrystudio
|
||||
files:
|
||||
- '**/*'
|
||||
- '!**/{.vscode,.yarn,.yarn-lock,.github,.cursorrules,.prettierrc}'
|
||||
- '!electron.vite.config.{js,ts,mjs,cjs}}'
|
||||
- '!**/{.eslintignore,.eslintrc.js,.eslintrc.json,.eslintcache,root.eslint.config.js,eslint.config.js,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,eslint.config.mjs,dev-app-update.yml,CHANGELOG.md,README.md}'
|
||||
- '!**/{.env,.env.*,.npmrc,pnpm-lock.yaml}'
|
||||
- '!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}'
|
||||
- '!**/{.editorconfig,.jekyll-metadata}'
|
||||
- '!src'
|
||||
- '!scripts'
|
||||
- '!local'
|
||||
- '!docs'
|
||||
- '!packages'
|
||||
- '!.swc'
|
||||
- '!.bin'
|
||||
- '!._*'
|
||||
- '!*.log'
|
||||
- '!stats.html'
|
||||
- '!*.md'
|
||||
- '!**/*.{iml,o,hprof,orig,pyc,pyo,rbc,swp,csproj,sln,xproj}'
|
||||
- '!**/*.{map,ts,tsx,jsx,less,scss,sass,css.d.ts,d.cts,d.mts,md,markdown,yaml,yml}'
|
||||
- '!**/{test,tests,__tests__,powered-test,coverage}/**'
|
||||
- '!**/{example,examples}/**'
|
||||
- '!**/*.{spec,test}.{js,jsx,ts,tsx}'
|
||||
- '!**/*.min.*.map'
|
||||
- '!**/*.d.ts'
|
||||
- '!**/dist/es6/**'
|
||||
- '!**/dist/demo/**'
|
||||
- '!**/amd/**'
|
||||
- '!**/{.DS_Store,Thumbs.db,thumbs.db,__pycache__}'
|
||||
- '!**/{LICENSE,license,LICENSE.*,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
|
||||
- '!node_modules/rollup-plugin-visualizer'
|
||||
- '!node_modules/js-tiktoken'
|
||||
- '!node_modules/@tavily/core/node_modules/js-tiktoken'
|
||||
- '!node_modules/pdf-parse/lib/pdf.js/{v1.9.426,v1.10.88,v2.0.550}'
|
||||
- '!node_modules/mammoth/{mammoth.browser.js,mammoth.browser.min.js}'
|
||||
- '!node_modules/selection-hook/prebuilds/**/*' # we rebuild .node, don't use prebuilds
|
||||
- '!node_modules/selection-hook/node_modules' # we don't need what in the node_modules dir
|
||||
- '!node_modules/selection-hook/src' # we don't need source files
|
||||
- '!node_modules/tesseract.js-core/{tesseract-core.js,tesseract-core.wasm,tesseract-core.wasm.js}' # we don't need source files
|
||||
- '!node_modules/tesseract.js-core/{tesseract-core-lstm.js,tesseract-core-lstm.wasm,tesseract-core-lstm.wasm.js}' # we don't need source files
|
||||
- '!node_modules/tesseract.js-core/{tesseract-core-simd-lstm.js,tesseract-core-simd-lstm.wasm,tesseract-core-simd-lstm.wasm.js}' # we don't need source files
|
||||
- '!**/*.{h,iobj,ipdb,tlog,recipe,vcxproj,vcxproj.filters,Makefile,*.Makefile}' # filter .node build files
|
||||
- "**/*"
|
||||
- "!**/{.vscode,.yarn,.yarn-lock,.github,.cursorrules,.prettierrc}"
|
||||
- "!electron.vite.config.{js,ts,mjs,cjs}}"
|
||||
- "!**/{.eslintignore,.eslintrc.js,.eslintrc.json,.eslintcache,root.eslint.config.js,eslint.config.js,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,eslint.config.mjs,dev-app-update.yml,CHANGELOG.md,README.md,biome.jsonc}"
|
||||
- "!**/{.env,.env.*,.npmrc,pnpm-lock.yaml}"
|
||||
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
|
||||
- "!**/{.editorconfig,.jekyll-metadata}"
|
||||
- "!src"
|
||||
- "!scripts"
|
||||
- "!local"
|
||||
- "!docs"
|
||||
- "!packages"
|
||||
- "!.swc"
|
||||
- "!.bin"
|
||||
- "!._*"
|
||||
- "!*.log"
|
||||
- "!stats.html"
|
||||
- "!*.md"
|
||||
- "!**/*.{iml,o,hprof,orig,pyc,pyo,rbc,swp,csproj,sln,xproj}"
|
||||
- "!**/*.{map,ts,tsx,jsx,less,scss,sass,css.d.ts,d.cts,d.mts,md,markdown,yaml,yml}"
|
||||
- "!**/{test,tests,__tests__,powered-test,coverage}/**"
|
||||
- "!**/{example,examples}/**"
|
||||
- "!**/*.{spec,test}.{js,jsx,ts,tsx}"
|
||||
- "!**/*.min.*.map"
|
||||
- "!**/*.d.ts"
|
||||
- "!**/dist/es6/**"
|
||||
- "!**/dist/demo/**"
|
||||
- "!**/amd/**"
|
||||
- "!**/{.DS_Store,Thumbs.db,thumbs.db,__pycache__}"
|
||||
- "!**/{LICENSE,license,LICENSE.*,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}"
|
||||
- "!node_modules/rollup-plugin-visualizer"
|
||||
- "!node_modules/js-tiktoken"
|
||||
- "!node_modules/@tavily/core/node_modules/js-tiktoken"
|
||||
- "!node_modules/pdf-parse/lib/pdf.js/{v1.9.426,v1.10.88,v2.0.550}"
|
||||
- "!node_modules/mammoth/{mammoth.browser.js,mammoth.browser.min.js}"
|
||||
- "!node_modules/selection-hook/prebuilds/**/*" # we rebuild .node, don't use prebuilds
|
||||
- "!node_modules/selection-hook/node_modules" # we don't need what in the node_modules dir
|
||||
- "!node_modules/selection-hook/src" # we don't need source files
|
||||
- "!node_modules/tesseract.js-core/{tesseract-core.js,tesseract-core.wasm,tesseract-core.wasm.js}" # we don't need source files
|
||||
- "!node_modules/tesseract.js-core/{tesseract-core-lstm.js,tesseract-core-lstm.wasm,tesseract-core-lstm.wasm.js}" # we don't need source files
|
||||
- "!node_modules/tesseract.js-core/{tesseract-core-simd-lstm.js,tesseract-core-simd-lstm.wasm,tesseract-core-simd-lstm.wasm.js}" # we don't need source files
|
||||
- "!**/*.{h,iobj,ipdb,tlog,recipe,vcxproj,vcxproj.filters,Makefile,*.Makefile}" # filter .node build files
|
||||
asarUnpack:
|
||||
- resources/**
|
||||
- '**/*.{metal,exp,lib}'
|
||||
- 'node_modules/@img/sharp-libvips-*/**'
|
||||
- "**/*.{metal,exp,lib}"
|
||||
- "node_modules/@img/sharp-libvips-*/**"
|
||||
win:
|
||||
executableName: Cherry Studio
|
||||
artifactName: ${productName}-${version}-${arch}-setup.${ext}
|
||||
@@ -88,7 +88,7 @@ mac:
|
||||
entitlementsInherit: build/entitlements.mac.plist
|
||||
notarize: false
|
||||
artifactName: ${productName}-${version}-${arch}.${ext}
|
||||
minimumSystemVersion: '20.1.0' # 最低支持 macOS 11.0
|
||||
minimumSystemVersion: "20.1.0" # 最低支持 macOS 11.0
|
||||
extendInfo:
|
||||
- NSCameraUsageDescription: Application requests access to the device's camera.
|
||||
- NSMicrophoneUsageDescription: Application requests access to the device's microphone.
|
||||
@@ -110,6 +110,10 @@ linux:
|
||||
StartupWMClass: CherryStudio
|
||||
mimeTypes:
|
||||
- x-scheme-handler/cherrystudio
|
||||
rpm:
|
||||
# Workaround for electron build issue on rpm package:
|
||||
# https://github.com/electron/forge/issues/3594
|
||||
fpm: ["--rpm-rpmbuild-define=_build_id_links none"]
|
||||
publish:
|
||||
provider: generic
|
||||
url: https://releases.cherry-ai.com
|
||||
@@ -121,25 +125,7 @@ afterSign: scripts/notarize.js
|
||||
artifactBuildCompleted: scripts/artifact-build-completed.js
|
||||
releaseInfo:
|
||||
releaseNotes: |
|
||||
✨ 新功能:
|
||||
- 重构知识库模块,提升文档处理能力和搜索性能
|
||||
- 新增 PaddleOCR 支持,增强文档识别能力
|
||||
- 支持自定义窗口控制按钮样式
|
||||
- 新增 AI SDK 包,扩展 AI 能力集成
|
||||
- 支持标签页拖拽重排序功能
|
||||
- 增强笔记编辑器的同步和日志功能
|
||||
|
||||
🔧 性能优化:
|
||||
- 优化 MCP 服务的日志记录和错误处理
|
||||
- 改进 WebView 服务的 User-Agent 处理
|
||||
- 优化迷你应用的标题栏样式和状态栏适配
|
||||
- 重构依赖管理,清理和优化 package.json
|
||||
|
||||
🐛 问题修复:
|
||||
- 修复输入栏无限状态更新循环问题
|
||||
- 修复窗口控制提示框的鼠标悬停延迟
|
||||
- 修复翻译输入框粘贴多内容源的处理
|
||||
- 修复导航服务初始化时序问题
|
||||
- 修复 MCP 通过 JSON 添加时的参数转换
|
||||
- 修复模型作用域服务器同步时的 URL 格式
|
||||
- 标准化工具提示图标样式
|
||||
Optimized note-taking feature, now able to quickly rename by modifying the title
|
||||
Fixed issue where CherryAI free model could not be used
|
||||
Fixed issue where VertexAI proxy address could not be called normally
|
||||
Fixed issue where built-in tools from service providers could not be called normally
|
||||
|
||||
@@ -4,7 +4,9 @@ import { defineConfig, externalizeDepsPlugin } from 'electron-vite'
|
||||
import { resolve } from 'path'
|
||||
import { visualizer } from 'rollup-plugin-visualizer'
|
||||
|
||||
import pkg from './package.json' assert { type: 'json' }
|
||||
// assert not supported by biome
|
||||
// import pkg from './package.json' assert { type: 'json' }
|
||||
import pkg from './package.json'
|
||||
|
||||
const visualizerPlugin = (type: 'renderer' | 'main') => {
|
||||
return process.env[`VISUALIZER_${type.toUpperCase()}`] ? [visualizer({ open: true })] : []
|
||||
@@ -32,6 +34,10 @@ export default defineConfig({
|
||||
output: {
|
||||
manualChunks: undefined, // 彻底禁用代码分割 - 返回 null 强制单文件打包
|
||||
inlineDynamicImports: true // 内联所有动态导入,这是关键配置
|
||||
},
|
||||
onwarn(warning, warn) {
|
||||
if (warning.code === 'COMMONJS_VARIABLE_IN_ESM') return
|
||||
warn(warning)
|
||||
}
|
||||
},
|
||||
sourcemap: isDev
|
||||
@@ -60,6 +66,7 @@ export default defineConfig({
|
||||
},
|
||||
renderer: {
|
||||
plugins: [
|
||||
(async () => (await import('@tailwindcss/vite')).default())(),
|
||||
react({
|
||||
tsDecorators: true,
|
||||
plugins: [
|
||||
@@ -108,6 +115,10 @@ export default defineConfig({
|
||||
selectionToolbar: resolve(__dirname, 'src/renderer/selectionToolbar.html'),
|
||||
selectionAction: resolve(__dirname, 'src/renderer/selectionAction.html'),
|
||||
traceWindow: resolve(__dirname, 'src/renderer/traceWindow.html')
|
||||
},
|
||||
onwarn(warning, warn) {
|
||||
if (warning.code === 'COMMONJS_VARIABLE_IN_ESM') return
|
||||
warn(warning)
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import electronConfigPrettier from '@electron-toolkit/eslint-config-prettier'
|
||||
import tseslint from '@electron-toolkit/eslint-config-ts'
|
||||
import eslint from '@eslint/js'
|
||||
import eslintReact from '@eslint-react/eslint-plugin'
|
||||
import { defineConfig } from 'eslint/config'
|
||||
import oxlint from 'eslint-plugin-oxlint'
|
||||
import reactHooks from 'eslint-plugin-react-hooks'
|
||||
import simpleImportSort from 'eslint-plugin-simple-import-sort'
|
||||
import unusedImports from 'eslint-plugin-unused-imports'
|
||||
@@ -10,7 +10,6 @@ import unusedImports from 'eslint-plugin-unused-imports'
|
||||
export default defineConfig([
|
||||
eslint.configs.recommended,
|
||||
tseslint.configs.recommended,
|
||||
electronConfigPrettier,
|
||||
eslintReact.configs['recommended-typescript'],
|
||||
reactHooks.configs['recommended-latest'],
|
||||
{
|
||||
@@ -26,7 +25,6 @@ export default defineConfig([
|
||||
'simple-import-sort/exports': 'error',
|
||||
'unused-imports/no-unused-imports': 'error',
|
||||
'@eslint-react/no-prop-types': 'error',
|
||||
'prettier/prettier': ['error']
|
||||
}
|
||||
},
|
||||
// Configuration for ensuring compatibility with the original ESLint(8.x) rules
|
||||
@@ -50,10 +48,31 @@ export default defineConfig([
|
||||
'@eslint-react/no-children-to-array': 'off'
|
||||
}
|
||||
},
|
||||
{
|
||||
ignores: [
|
||||
'node_modules/**',
|
||||
'build/**',
|
||||
'dist/**',
|
||||
'out/**',
|
||||
'local/**',
|
||||
'.yarn/**',
|
||||
'.gitignore',
|
||||
'scripts/cloudflare-worker.js',
|
||||
'src/main/integration/nutstore/sso/lib/**',
|
||||
'src/main/integration/cherryai/index.js',
|
||||
'src/main/integration/nutstore/sso/lib/**',
|
||||
'src/renderer/src/ui/**',
|
||||
'packages/**/dist'
|
||||
]
|
||||
},
|
||||
// turn off oxlint supported rules.
|
||||
...oxlint.configs['flat/eslint'],
|
||||
...oxlint.configs['flat/typescript'],
|
||||
...oxlint.configs['flat/unicorn'],
|
||||
{
|
||||
// LoggerService Custom Rules - only apply to src directory
|
||||
files: ['src/**/*.{ts,tsx,js,jsx}'],
|
||||
ignores: ['src/**/__tests__/**', 'src/**/__mocks__/**', 'src/**/*.test.*'],
|
||||
ignores: ['src/**/__tests__/**', 'src/**/__mocks__/**', 'src/**/*.test.*', 'src/preload/**'],
|
||||
rules: {
|
||||
'no-restricted-syntax': [
|
||||
process.env.PRCI ? 'error' : 'warn',
|
||||
@@ -112,18 +131,4 @@ export default defineConfig([
|
||||
'i18n/no-template-in-t': 'warn'
|
||||
}
|
||||
},
|
||||
{
|
||||
ignores: [
|
||||
'node_modules/**',
|
||||
'build/**',
|
||||
'dist/**',
|
||||
'out/**',
|
||||
'local/**',
|
||||
'.yarn/**',
|
||||
'.gitignore',
|
||||
'scripts/cloudflare-worker.js',
|
||||
'src/main/integration/nutstore/sso/lib/**',
|
||||
'src/main/integration/cherryin/index.js'
|
||||
]
|
||||
}
|
||||
])
|
||||
|
||||
79
package.json
79
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "CherryStudio",
|
||||
"version": "1.6.0-beta.7",
|
||||
"version": "1.6.2",
|
||||
"private": true,
|
||||
"description": "A powerful AI assistant for producer.",
|
||||
"main": "./out/main/index.js",
|
||||
@@ -48,8 +48,8 @@
|
||||
"analyze:renderer": "VISUALIZER_RENDERER=true yarn build",
|
||||
"analyze:main": "VISUALIZER_MAIN=true yarn build",
|
||||
"typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
|
||||
"typecheck:node": "tsc --noEmit -p tsconfig.node.json --composite false",
|
||||
"typecheck:web": "tsc --noEmit -p tsconfig.web.json --composite false",
|
||||
"typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
|
||||
"typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
|
||||
"check:i18n": "tsx scripts/check-i18n.ts",
|
||||
"sync:i18n": "tsx scripts/sync-i18n.ts",
|
||||
"update:i18n": "dotenv -e .env -- tsx scripts/update-i18n.ts",
|
||||
@@ -63,25 +63,33 @@
|
||||
"test:ui": "vitest --ui",
|
||||
"test:watch": "vitest",
|
||||
"test:e2e": "yarn playwright test",
|
||||
"test:lint": "eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts",
|
||||
"test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
|
||||
"test:scripts": "vitest scripts",
|
||||
"format": "prettier --write .",
|
||||
"lint": "eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix && yarn typecheck && yarn check:i18n",
|
||||
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky"
|
||||
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn check:i18n",
|
||||
"format": "biome format --write && biome lint --write",
|
||||
"format:check": "biome format && biome lint",
|
||||
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
|
||||
"claude": "dotenv -e .env -- claude",
|
||||
"release:aicore:alpha": "yarn workspace @cherrystudio/ai-core version prerelease --immediate && yarn workspace @cherrystudio/ai-core npm publish --tag alpha --access public",
|
||||
"release:aicore:beta": "yarn workspace @cherrystudio/ai-core version prerelease --immediate && yarn workspace @cherrystudio/ai-core npm publish --tag beta --access public",
|
||||
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@libsql/client": "0.14.0",
|
||||
"@libsql/win32-x64-msvc": "^0.4.7",
|
||||
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
|
||||
"@strongtz/win32-arm64-msvc": "^0.4.7",
|
||||
"faiss-node": "^0.5.1",
|
||||
"express": "^5.1.0",
|
||||
"font-list": "^2.0.0",
|
||||
"graceful-fs": "^4.2.11",
|
||||
"jsdom": "26.1.0",
|
||||
"node-stream-zip": "^1.15.0",
|
||||
"officeparser": "^4.2.0",
|
||||
"os-proxy-config": "^1.1.2",
|
||||
"selection-hook": "^1.0.11",
|
||||
"selection-hook": "^1.0.12",
|
||||
"sharp": "^0.34.3",
|
||||
"swagger-jsdoc": "^6.2.8",
|
||||
"swagger-ui-express": "^5.0.1",
|
||||
"tesseract.js": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"turndown": "7.2.0"
|
||||
},
|
||||
@@ -89,16 +97,18 @@
|
||||
"@agentic/exa": "^7.3.3",
|
||||
"@agentic/searxng": "^7.3.3",
|
||||
"@agentic/tavily": "^7.3.3",
|
||||
"@ai-sdk/amazon-bedrock": "^3.0.0",
|
||||
"@ai-sdk/google-vertex": "^3.0.0",
|
||||
"@ai-sdk/mistral": "^2.0.0",
|
||||
"@ai-sdk/amazon-bedrock": "^3.0.29",
|
||||
"@ai-sdk/google-vertex": "^3.0.33",
|
||||
"@ai-sdk/mistral": "^2.0.17",
|
||||
"@ai-sdk/perplexity": "^2.0.11",
|
||||
"@ant-design/v5-patch-for-react-19": "^1.0.3",
|
||||
"@anthropic-ai/sdk": "^0.41.0",
|
||||
"@anthropic-ai/vertex-sdk": "patch:@anthropic-ai/vertex-sdk@npm%3A0.11.4#~/.yarn/patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
|
||||
"@aws-sdk/client-bedrock": "^3.840.0",
|
||||
"@aws-sdk/client-bedrock-runtime": "^3.840.0",
|
||||
"@aws-sdk/client-s3": "^3.840.0",
|
||||
"@cherrystudio/ai-core": "workspace:*",
|
||||
"@biomejs/biome": "2.2.4",
|
||||
"@cherrystudio/ai-core": "workspace:^1.0.0-alpha.18",
|
||||
"@cherrystudio/embedjs": "^0.1.31",
|
||||
"@cherrystudio/embedjs-libsql": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-csv": "^0.1.31",
|
||||
@@ -116,7 +126,6 @@
|
||||
"@dnd-kit/modifiers": "^9.0.0",
|
||||
"@dnd-kit/sortable": "^10.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@electron-toolkit/eslint-config-prettier": "^3.0.0",
|
||||
"@electron-toolkit/eslint-config-ts": "^3.0.0",
|
||||
"@electron-toolkit/preload": "^3.0.0",
|
||||
"@electron-toolkit/tsconfig": "^1.0.1",
|
||||
@@ -127,13 +136,11 @@
|
||||
"@eslint/js": "^9.22.0",
|
||||
"@google/genai": "patch:@google/genai@npm%3A1.0.1#~/.yarn/patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
|
||||
"@hello-pangea/dnd": "^18.0.1",
|
||||
"@heroui/react": "^2.8.3",
|
||||
"@kangfenmao/keyv-storage": "^0.1.0",
|
||||
"@langchain/community": "^0.3.50",
|
||||
"@langchain/core": "^0.3.68",
|
||||
"@langchain/ollama": "^0.2.1",
|
||||
"@langchain/openai": "^0.6.7",
|
||||
"@mistralai/mistralai": "^1.7.5",
|
||||
"@modelcontextprotocol/sdk": "^1.17.0",
|
||||
"@modelcontextprotocol/sdk": "^1.17.5",
|
||||
"@mozilla/readability": "^0.6.0",
|
||||
"@notionhq/client": "^2.2.15",
|
||||
"@openrouter/ai-sdk-provider": "^1.1.2",
|
||||
@@ -147,6 +154,7 @@
|
||||
"@reduxjs/toolkit": "^2.2.5",
|
||||
"@shikijs/markdown-it": "^3.12.0",
|
||||
"@swc/plugin-styled-components": "^8.0.4",
|
||||
"@tailwindcss/vite": "^4.1.13",
|
||||
"@tanstack/react-query": "^5.85.5",
|
||||
"@tanstack/react-virtual": "^3.13.12",
|
||||
"@testing-library/dom": "^10.4.0",
|
||||
@@ -172,6 +180,10 @@
|
||||
"@truto/turndown-plugin-gfm": "^1.0.2",
|
||||
"@tryfabric/martian": "^1.2.4",
|
||||
"@types/cli-progress": "^3",
|
||||
"@types/content-type": "^1.1.9",
|
||||
"@types/cors": "^2.8.19",
|
||||
"@types/diff": "^7",
|
||||
"@types/express": "^5",
|
||||
"@types/fs-extra": "^11",
|
||||
"@types/he": "^1",
|
||||
"@types/html-to-text": "^9",
|
||||
@@ -185,9 +197,13 @@
|
||||
"@types/react-dom": "^19.0.4",
|
||||
"@types/react-infinite-scroll-component": "^5.0.0",
|
||||
"@types/react-transition-group": "^4.4.12",
|
||||
"@types/react-window": "^1",
|
||||
"@types/swagger-jsdoc": "^6",
|
||||
"@types/swagger-ui-express": "^4.1.8",
|
||||
"@types/tinycolor2": "^1",
|
||||
"@types/turndown": "^5.0.5",
|
||||
"@types/word-extractor": "^1",
|
||||
"@typescript/native-preview": "latest",
|
||||
"@uiw/codemirror-extensions-langs": "^4.25.1",
|
||||
"@uiw/codemirror-themes-all": "^4.25.1",
|
||||
"@uiw/react-codemirror": "^4.25.1",
|
||||
@@ -199,16 +215,18 @@
|
||||
"@viz-js/lang-dot": "^1.0.5",
|
||||
"@viz-js/viz": "^3.14.0",
|
||||
"@xyflow/react": "^12.4.4",
|
||||
"ai": "^5.0.29",
|
||||
"ai": "^5.0.59",
|
||||
"antd": "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch",
|
||||
"archiver": "^7.0.1",
|
||||
"async-mutex": "^0.5.0",
|
||||
"axios": "^1.7.3",
|
||||
"browser-image-compression": "^2.0.2",
|
||||
"chardet": "^2.1.0",
|
||||
"check-disk-space": "3.4.0",
|
||||
"cheerio": "^1.1.2",
|
||||
"chokidar": "^4.0.3",
|
||||
"cli-progress": "^3.12.0",
|
||||
"clsx": "^2.1.1",
|
||||
"code-inspector-plugin": "^0.20.14",
|
||||
"color": "^5.0.0",
|
||||
"concurrently": "^9.2.1",
|
||||
@@ -231,12 +249,14 @@
|
||||
"emoji-picker-element": "^1.22.1",
|
||||
"epub": "patch:epub@npm%3A1.3.0#~/.yarn/patches/epub-npm-1.3.0-8325494ffe.patch",
|
||||
"eslint": "^9.22.0",
|
||||
"eslint-plugin-oxlint": "^1.15.0",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-simple-import-sort": "^12.1.1",
|
||||
"eslint-plugin-unused-imports": "^4.1.4",
|
||||
"fast-diff": "^1.3.0",
|
||||
"fast-xml-parser": "^5.2.0",
|
||||
"fetch-socks": "1.3.2",
|
||||
"framer-motion": "^12.23.12",
|
||||
"franc-min": "^6.2.0",
|
||||
"fs-extra": "^11.2.0",
|
||||
"google-auth-library": "^9.15.1",
|
||||
@@ -265,12 +285,12 @@
|
||||
"notion-helper": "^1.3.22",
|
||||
"npx-scope-finder": "^1.2.0",
|
||||
"openai": "patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch",
|
||||
"oxlint": "^1.15.0",
|
||||
"oxlint-tsgolint": "^0.2.0",
|
||||
"p-queue": "^8.1.0",
|
||||
"pdf-lib": "^1.17.1",
|
||||
"pdf-parse": "^1.1.1",
|
||||
"playwright": "^1.52.0",
|
||||
"prettier": "^3.5.3",
|
||||
"prettier-plugin-sort-json": "^4.1.1",
|
||||
"proxy-agent": "^6.5.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0",
|
||||
@@ -300,18 +320,19 @@
|
||||
"remark-math": "^6.0.0",
|
||||
"remove-markdown": "^0.6.2",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"sass": "^1.88.0",
|
||||
"shiki": "^3.12.0",
|
||||
"strict-url-sanitise": "^0.0.1",
|
||||
"string-width": "^7.2.0",
|
||||
"striptags": "^3.2.0",
|
||||
"styled-components": "^6.1.11",
|
||||
"tailwindcss": "^4.1.13",
|
||||
"tar": "^7.4.3",
|
||||
"tiny-pinyin": "^1.3.2",
|
||||
"tokenx": "^1.1.0",
|
||||
"tsx": "^4.20.3",
|
||||
"turndown-plugin-gfm": "^1.0.2",
|
||||
"typescript": "^5.6.2",
|
||||
"tw-animate-css": "^1.3.8",
|
||||
"typescript": "~5.8.2",
|
||||
"undici": "6.21.2",
|
||||
"unified": "^11.0.5",
|
||||
"uuid": "^10.0.0",
|
||||
@@ -322,10 +343,11 @@
|
||||
"winston-daily-rotate-file": "^5.0.0",
|
||||
"word-extractor": "^1.0.4",
|
||||
"y-protocols": "^1.0.6",
|
||||
"yaml": "^2.8.1",
|
||||
"yjs": "^13.6.27",
|
||||
"youtubei.js": "^15.0.1",
|
||||
"zipread": "^1.3.3",
|
||||
"zod": "^3.25.74"
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"resolutions": {
|
||||
"@codemirror/language": "6.11.3",
|
||||
@@ -346,16 +368,17 @@
|
||||
"pkce-challenge@npm:^4.1.0": "patch:pkce-challenge@npm%3A4.1.0#~/.yarn/patches/pkce-challenge-npm-4.1.0-fbc51695a3.patch",
|
||||
"undici": "6.21.2",
|
||||
"vite": "npm:rolldown-vite@latest",
|
||||
"tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch"
|
||||
"tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"@ai-sdk/google@npm:2.0.14": "patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch"
|
||||
},
|
||||
"packageManager": "yarn@4.9.1",
|
||||
"lint-staged": {
|
||||
"*.{js,jsx,ts,tsx,cjs,mjs,cts,mts}": [
|
||||
"prettier --write",
|
||||
"biome format --write --no-errors-on-unmatched",
|
||||
"eslint --fix"
|
||||
],
|
||||
"*.{json,yml,yaml,css,scss,html}": [
|
||||
"prettier --write"
|
||||
"*.{json,yml,yaml,css,html}": [
|
||||
"biome format --write --no-errors-on-unmatched"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
/**
|
||||
* Hub Provider 使用示例
|
||||
*
|
||||
* 演示如何使用简化后的Hub Provider功能来路由到多个底层provider
|
||||
*/
|
||||
|
||||
import { createHubProvider, initializeProvider, providerRegistry } from '../src/index'
|
||||
|
||||
async function demonstrateHubProvider() {
|
||||
try {
|
||||
// 1. 初始化底层providers
|
||||
console.log('📦 初始化底层providers...')
|
||||
|
||||
initializeProvider('openai', {
|
||||
apiKey: process.env.OPENAI_API_KEY || 'sk-test-key'
|
||||
})
|
||||
|
||||
initializeProvider('anthropic', {
|
||||
apiKey: process.env.ANTHROPIC_API_KEY || 'sk-ant-test-key'
|
||||
})
|
||||
|
||||
// 2. 创建Hub Provider(自动包含所有已初始化的providers)
|
||||
console.log('🌐 创建Hub Provider...')
|
||||
|
||||
const aihubmixProvider = createHubProvider({
|
||||
hubId: 'aihubmix',
|
||||
debug: true
|
||||
})
|
||||
|
||||
// 3. 注册Hub Provider
|
||||
providerRegistry.registerProvider('aihubmix', aihubmixProvider)
|
||||
|
||||
console.log('✅ Hub Provider "aihubmix" 注册成功')
|
||||
|
||||
// 4. 使用Hub Provider访问不同的模型
|
||||
console.log('\n🚀 使用Hub模型...')
|
||||
|
||||
// 通过Hub路由到OpenAI
|
||||
const openaiModel = providerRegistry.languageModel('aihubmix:openai:gpt-4')
|
||||
console.log('✓ OpenAI模型已获取:', openaiModel.modelId)
|
||||
|
||||
// 通过Hub路由到Anthropic
|
||||
const anthropicModel = providerRegistry.languageModel('aihubmix:anthropic:claude-3.5-sonnet')
|
||||
console.log('✓ Anthropic模型已获取:', anthropicModel.modelId)
|
||||
|
||||
// 5. 演示错误处理
|
||||
console.log('\n❌ 演示错误处理...')
|
||||
|
||||
try {
|
||||
// 尝试访问未初始化的provider
|
||||
providerRegistry.languageModel('aihubmix:google:gemini-pro')
|
||||
} catch (error) {
|
||||
console.log('预期错误:', error.message)
|
||||
}
|
||||
|
||||
try {
|
||||
// 尝试使用错误的模型ID格式
|
||||
providerRegistry.languageModel('aihubmix:invalid-format')
|
||||
} catch (error) {
|
||||
console.log('预期错误:', error.message)
|
||||
}
|
||||
|
||||
// 6. 多个Hub Provider示例
|
||||
console.log('\n🔄 创建多个Hub Provider...')
|
||||
|
||||
const localHubProvider = createHubProvider({
|
||||
hubId: 'local-ai'
|
||||
})
|
||||
|
||||
providerRegistry.registerProvider('local-ai', localHubProvider)
|
||||
console.log('✅ Hub Provider "local-ai" 注册成功')
|
||||
|
||||
console.log('\n🎉 Hub Provider演示完成!')
|
||||
} catch (error) {
|
||||
console.error('💥 演示过程中发生错误:', error)
|
||||
}
|
||||
}
|
||||
|
||||
// 演示简化的使用方式
|
||||
function simplifiedUsageExample() {
|
||||
console.log('\n📝 简化使用示例:')
|
||||
console.log(`
|
||||
// 1. 初始化providers
|
||||
initializeProvider('openai', { apiKey: 'sk-xxx' })
|
||||
initializeProvider('anthropic', { apiKey: 'sk-ant-xxx' })
|
||||
|
||||
// 2. 创建并注册Hub Provider
|
||||
const hubProvider = createHubProvider({ hubId: 'aihubmix' })
|
||||
providerRegistry.registerProvider('aihubmix', hubProvider)
|
||||
|
||||
// 3. 直接使用
|
||||
const model1 = providerRegistry.languageModel('aihubmix:openai:gpt-4')
|
||||
const model2 = providerRegistry.languageModel('aihubmix:anthropic:claude-3.5-sonnet')
|
||||
`)
|
||||
}
|
||||
|
||||
// 运行演示
|
||||
if (require.main === module) {
|
||||
demonstrateHubProvider()
|
||||
simplifiedUsageExample()
|
||||
}
|
||||
|
||||
export { demonstrateHubProvider, simplifiedUsageExample }
|
||||
@@ -1,167 +0,0 @@
|
||||
/**
|
||||
* Image Generation Example
|
||||
* 演示如何使用 aiCore 的文生图功能
|
||||
*/
|
||||
|
||||
import { createExecutor, generateImage } from '../src/index'
|
||||
|
||||
async function main() {
|
||||
// 方式1: 使用执行器实例
|
||||
console.log('📸 创建 OpenAI 图像生成执行器...')
|
||||
const executor = createExecutor('openai', {
|
||||
apiKey: process.env.OPENAI_API_KEY!
|
||||
})
|
||||
|
||||
try {
|
||||
console.log('🎨 使用执行器生成图像...')
|
||||
const result1 = await executor.generateImage('dall-e-3', {
|
||||
prompt: 'A futuristic cityscape at sunset with flying cars',
|
||||
size: '1024x1024',
|
||||
n: 1
|
||||
})
|
||||
|
||||
console.log('✅ 图像生成成功!')
|
||||
console.log('📊 结果:', {
|
||||
imagesCount: result1.images.length,
|
||||
mediaType: result1.image.mediaType,
|
||||
hasBase64: !!result1.image.base64,
|
||||
providerMetadata: result1.providerMetadata
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('❌ 执行器生成失败:', error)
|
||||
}
|
||||
|
||||
// 方式2: 使用直接调用 API
|
||||
try {
|
||||
console.log('🎨 使用直接 API 生成图像...')
|
||||
const result2 = await generateImage('openai', { apiKey: process.env.OPENAI_API_KEY! }, 'dall-e-3', {
|
||||
prompt: 'A magical forest with glowing mushrooms and fairy lights',
|
||||
aspectRatio: '16:9',
|
||||
providerOptions: {
|
||||
openai: {
|
||||
quality: 'hd',
|
||||
style: 'vivid'
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
console.log('✅ 直接 API 生成成功!')
|
||||
console.log('📊 结果:', {
|
||||
imagesCount: result2.images.length,
|
||||
mediaType: result2.image.mediaType,
|
||||
hasBase64: !!result2.image.base64
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('❌ 直接 API 生成失败:', error)
|
||||
}
|
||||
|
||||
// 方式3: 支持其他提供商 (Google Imagen)
|
||||
if (process.env.GOOGLE_API_KEY) {
|
||||
try {
|
||||
console.log('🎨 使用 Google Imagen 生成图像...')
|
||||
const googleExecutor = createExecutor('google', {
|
||||
apiKey: process.env.GOOGLE_API_KEY!
|
||||
})
|
||||
|
||||
const result3 = await googleExecutor.generateImage('imagen-3.0-generate-002', {
|
||||
prompt: 'A serene mountain lake at dawn with mist rising from the water',
|
||||
aspectRatio: '1:1'
|
||||
})
|
||||
|
||||
console.log('✅ Google Imagen 生成成功!')
|
||||
console.log('📊 结果:', {
|
||||
imagesCount: result3.images.length,
|
||||
mediaType: result3.image.mediaType,
|
||||
hasBase64: !!result3.image.base64
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('❌ Google Imagen 生成失败:', error)
|
||||
}
|
||||
}
|
||||
|
||||
// 方式4: 支持插件系统
|
||||
const pluginExample = async () => {
|
||||
console.log('🔌 演示插件系统...')
|
||||
|
||||
// 创建一个示例插件,用于修改提示词
|
||||
const promptEnhancerPlugin = {
|
||||
name: 'prompt-enhancer',
|
||||
transformParams: async (params: any) => {
|
||||
console.log('🔧 插件: 增强提示词...')
|
||||
return {
|
||||
...params,
|
||||
prompt: `${params.prompt}, highly detailed, cinematic lighting, 4K resolution`
|
||||
}
|
||||
},
|
||||
transformResult: async (result: any) => {
|
||||
console.log('🔧 插件: 处理结果...')
|
||||
return {
|
||||
...result,
|
||||
enhanced: true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const executorWithPlugin = createExecutor(
|
||||
'openai',
|
||||
{
|
||||
apiKey: process.env.OPENAI_API_KEY!
|
||||
},
|
||||
[promptEnhancerPlugin]
|
||||
)
|
||||
|
||||
try {
|
||||
const result4 = await executorWithPlugin.generateImage('dall-e-3', {
|
||||
prompt: 'A cute robot playing in a garden'
|
||||
})
|
||||
|
||||
console.log('✅ 插件系统生成成功!')
|
||||
console.log('📊 结果:', {
|
||||
imagesCount: result4.images.length,
|
||||
enhanced: (result4 as any).enhanced,
|
||||
mediaType: result4.image.mediaType
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('❌ 插件系统生成失败:', error)
|
||||
}
|
||||
}
|
||||
|
||||
await pluginExample()
|
||||
}
|
||||
|
||||
// 错误处理演示
|
||||
async function errorHandlingExample() {
|
||||
console.log('⚠️ 演示错误处理...')
|
||||
|
||||
try {
|
||||
const executor = createExecutor('openai', {
|
||||
apiKey: 'invalid-key'
|
||||
})
|
||||
|
||||
await executor.generateImage('dall-e-3', {
|
||||
prompt: 'Test image'
|
||||
})
|
||||
} catch (error: any) {
|
||||
console.log('✅ 成功捕获错误:', error.constructor.name)
|
||||
console.log('📋 错误信息:', error.message)
|
||||
console.log('🏷️ 提供商ID:', error.providerId)
|
||||
console.log('🏷️ 模型ID:', error.modelId)
|
||||
}
|
||||
}
|
||||
|
||||
// 运行示例
|
||||
if (require.main === module) {
|
||||
main()
|
||||
.then(() => {
|
||||
console.log('🎉 所有示例完成!')
|
||||
return errorHandlingExample()
|
||||
})
|
||||
.then(() => {
|
||||
console.log('🎯 示例程序结束')
|
||||
process.exit(0)
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error('💥 程序执行出错:', error)
|
||||
process.exit(1)
|
||||
})
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@cherrystudio/ai-core",
|
||||
"version": "1.0.0-alpha.11",
|
||||
"version": "1.0.1",
|
||||
"description": "Cherry Studio AI Core - Unified AI Provider Interface Based on Vercel AI SDK",
|
||||
"main": "dist/index.js",
|
||||
"module": "dist/index.mjs",
|
||||
@@ -36,16 +36,15 @@
|
||||
"ai": "^5.0.26"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/anthropic": "^2.0.5",
|
||||
"@ai-sdk/azure": "^2.0.16",
|
||||
"@ai-sdk/deepseek": "^1.0.9",
|
||||
"@ai-sdk/google": "^2.0.7",
|
||||
"@ai-sdk/openai": "^2.0.19",
|
||||
"@ai-sdk/openai-compatible": "^1.0.9",
|
||||
"@ai-sdk/anthropic": "^2.0.22",
|
||||
"@ai-sdk/azure": "^2.0.42",
|
||||
"@ai-sdk/deepseek": "^1.0.20",
|
||||
"@ai-sdk/openai": "^2.0.42",
|
||||
"@ai-sdk/openai-compatible": "^1.0.19",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.4",
|
||||
"@ai-sdk/xai": "^2.0.9",
|
||||
"zod": "^3.25.0"
|
||||
"@ai-sdk/provider-utils": "^3.0.10",
|
||||
"@ai-sdk/xai": "^2.0.23",
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tsdown": "^0.12.9",
|
||||
|
||||
@@ -84,7 +84,6 @@ export class ModelResolver {
|
||||
*/
|
||||
private resolveTraditionalModel(providerId: string, modelId: string): LanguageModelV2 {
|
||||
const fullModelId = `${providerId}${DEFAULT_SEPARATOR}${modelId}`
|
||||
console.log('fullModelId', fullModelId)
|
||||
return globalRegistryManagement.languageModel(fullModelId as any)
|
||||
}
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ export function createGoogleOptions(options: ExtractProviderOptions<'google'>) {
|
||||
/**
|
||||
* 创建OpenRouter供应商选项的便捷函数
|
||||
*/
|
||||
export function createOpenRouterOptions(options: ExtractProviderOptions<'openrouter'>) {
|
||||
export function createOpenRouterOptions(options: ExtractProviderOptions<'openrouter'> | Record<string, any>) {
|
||||
return createProviderOptions('openrouter', options)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
export type OpenRouterProviderOptions = {
|
||||
models?: string[]
|
||||
|
||||
/**
|
||||
* https://openrouter.ai/docs/use-cases/reasoning-tokens
|
||||
* One of `max_tokens` or `effort` is required.
|
||||
* If `exclude` is true, reasoning will be removed from the response. Default is false.
|
||||
*/
|
||||
reasoning?: {
|
||||
exclude?: boolean
|
||||
} & (
|
||||
| {
|
||||
max_tokens: number
|
||||
}
|
||||
| {
|
||||
effort: 'high' | 'medium' | 'low'
|
||||
}
|
||||
)
|
||||
|
||||
/**
|
||||
* A unique identifier representing your end-user, which can
|
||||
* help OpenRouter to monitor and detect abuse.
|
||||
*/
|
||||
user?: string
|
||||
|
||||
extraBody?: Record<string, unknown>
|
||||
|
||||
/**
|
||||
* Enable usage accounting to get detailed token usage information.
|
||||
* https://openrouter.ai/docs/use-cases/usage-accounting
|
||||
*/
|
||||
usage?: {
|
||||
/**
|
||||
* When true, includes token usage information in the response.
|
||||
*/
|
||||
include: boolean
|
||||
}
|
||||
}
|
||||
@@ -2,9 +2,8 @@ import { type AnthropicProviderOptions } from '@ai-sdk/anthropic'
|
||||
import { type GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
|
||||
import { type OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
|
||||
import { type SharedV2ProviderMetadata } from '@ai-sdk/provider'
|
||||
|
||||
import { type OpenRouterProviderOptions } from './openrouter'
|
||||
import { type XaiProviderOptions } from './xai'
|
||||
import { type XaiProviderOptions } from '@ai-sdk/xai'
|
||||
import { type OpenRouterProviderOptions } from '@openrouter/ai-sdk-provider'
|
||||
|
||||
export type ProviderOptions<T extends keyof SharedV2ProviderMetadata> = SharedV2ProviderMetadata[T]
|
||||
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
// copy from @ai-sdk/xai/xai-chat-options.ts
|
||||
// 如果@ai-sdk/xai暴露出了xaiProviderOptions就删除这个文件
|
||||
|
||||
import * as z from 'zod/v4'
|
||||
|
||||
const webSourceSchema = z.object({
|
||||
type: z.literal('web'),
|
||||
country: z.string().length(2).optional(),
|
||||
excludedWebsites: z.array(z.string()).max(5).optional(),
|
||||
allowedWebsites: z.array(z.string()).max(5).optional(),
|
||||
safeSearch: z.boolean().optional()
|
||||
})
|
||||
|
||||
const xSourceSchema = z.object({
|
||||
type: z.literal('x'),
|
||||
xHandles: z.array(z.string()).optional()
|
||||
})
|
||||
|
||||
const newsSourceSchema = z.object({
|
||||
type: z.literal('news'),
|
||||
country: z.string().length(2).optional(),
|
||||
excludedWebsites: z.array(z.string()).max(5).optional(),
|
||||
safeSearch: z.boolean().optional()
|
||||
})
|
||||
|
||||
const rssSourceSchema = z.object({
|
||||
type: z.literal('rss'),
|
||||
links: z.array(z.url()).max(1) // currently only supports one RSS link
|
||||
})
|
||||
|
||||
const searchSourceSchema = z.discriminatedUnion('type', [
|
||||
webSourceSchema,
|
||||
xSourceSchema,
|
||||
newsSourceSchema,
|
||||
rssSourceSchema
|
||||
])
|
||||
|
||||
export const xaiProviderOptions = z.object({
|
||||
/**
|
||||
* reasoning effort for reasoning models
|
||||
* only supported by grok-3-mini and grok-3-mini-fast models
|
||||
*/
|
||||
reasoningEffort: z.enum(['low', 'high']).optional(),
|
||||
|
||||
searchParameters: z
|
||||
.object({
|
||||
/**
|
||||
* search mode preference
|
||||
* - "off": disables search completely
|
||||
* - "auto": model decides whether to search (default)
|
||||
* - "on": always enables search
|
||||
*/
|
||||
mode: z.enum(['off', 'auto', 'on']),
|
||||
|
||||
/**
|
||||
* whether to return citations in the response
|
||||
* defaults to true
|
||||
*/
|
||||
returnCitations: z.boolean().optional(),
|
||||
|
||||
/**
|
||||
* start date for search data (ISO8601 format: YYYY-MM-DD)
|
||||
*/
|
||||
fromDate: z.string().optional(),
|
||||
|
||||
/**
|
||||
* end date for search data (ISO8601 format: YYYY-MM-DD)
|
||||
*/
|
||||
toDate: z.string().optional(),
|
||||
|
||||
/**
|
||||
* maximum number of search results to consider
|
||||
* defaults to 20
|
||||
*/
|
||||
maxSearchResults: z.number().min(1).max(50).optional(),
|
||||
|
||||
/**
|
||||
* data sources to search from
|
||||
* defaults to ["web", "x"] if not specified
|
||||
*/
|
||||
sources: z.array(searchSourceSchema).optional()
|
||||
})
|
||||
.optional()
|
||||
})
|
||||
|
||||
export type XaiProviderOptions = z.infer<typeof xaiProviderOptions>
|
||||
@@ -0,0 +1,38 @@
|
||||
import { google } from '@ai-sdk/google'
|
||||
|
||||
import { definePlugin } from '../../'
|
||||
import type { AiRequestContext } from '../../types'
|
||||
|
||||
const toolNameMap = {
|
||||
googleSearch: 'google_search',
|
||||
urlContext: 'url_context',
|
||||
codeExecution: 'code_execution'
|
||||
} as const
|
||||
|
||||
type ToolConfigKey = keyof typeof toolNameMap
|
||||
type ToolConfig = { googleSearch?: boolean; urlContext?: boolean; codeExecution?: boolean }
|
||||
|
||||
export const googleToolsPlugin = (config?: ToolConfig) =>
|
||||
definePlugin({
|
||||
name: 'googleToolsPlugin',
|
||||
transformParams: <T>(params: T, context: AiRequestContext): T => {
|
||||
const { providerId } = context
|
||||
if (providerId === 'google' && config) {
|
||||
if (typeof params === 'object' && params !== null) {
|
||||
const typedParams = params as T & { tools?: Record<string, unknown> }
|
||||
|
||||
if (!typedParams.tools) {
|
||||
typedParams.tools = {}
|
||||
}
|
||||
// 使用类型安全的方式遍历配置
|
||||
;(Object.keys(config) as ToolConfigKey[]).forEach((key) => {
|
||||
if (config[key] && key in toolNameMap && key in google.tools) {
|
||||
const toolName = toolNameMap[key]
|
||||
typedParams.tools![toolName] = google.tools[key]({})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
})
|
||||
@@ -4,7 +4,12 @@
|
||||
*/
|
||||
export const BUILT_IN_PLUGIN_PREFIX = 'built-in:'
|
||||
|
||||
export { googleToolsPlugin } from './googleToolsPlugin'
|
||||
export { createLoggingPlugin } from './logging'
|
||||
export { createPromptToolUsePlugin } from './toolUsePlugin/promptToolUsePlugin'
|
||||
export type { PromptToolUseConfig, ToolUseRequestContext, ToolUseResult } from './toolUsePlugin/type'
|
||||
export { webSearchPlugin } from './webSearchPlugin'
|
||||
export type {
|
||||
PromptToolUseConfig,
|
||||
ToolUseRequestContext,
|
||||
ToolUseResult
|
||||
} from './toolUsePlugin/type'
|
||||
export { webSearchPlugin, type WebSearchPluginConfig } from './webSearchPlugin'
|
||||
|
||||
@@ -27,10 +27,20 @@ export class StreamEventManager {
|
||||
/**
|
||||
* 发送步骤完成事件
|
||||
*/
|
||||
sendStepFinishEvent(controller: StreamController, chunk: any): void {
|
||||
sendStepFinishEvent(
|
||||
controller: StreamController,
|
||||
chunk: any,
|
||||
context: AiRequestContext,
|
||||
finishReason: string = 'stop'
|
||||
): void {
|
||||
// 累加当前步骤的 usage
|
||||
if (chunk.usage && context.accumulatedUsage) {
|
||||
this.accumulateUsage(context.accumulatedUsage, chunk.usage)
|
||||
}
|
||||
|
||||
controller.enqueue({
|
||||
type: 'finish-step',
|
||||
finishReason: 'stop',
|
||||
finishReason,
|
||||
response: chunk.response,
|
||||
usage: chunk.usage,
|
||||
providerMetadata: chunk.providerMetadata
|
||||
@@ -43,28 +53,32 @@ export class StreamEventManager {
|
||||
async handleRecursiveCall(
|
||||
controller: StreamController,
|
||||
recursiveParams: any,
|
||||
context: AiRequestContext,
|
||||
stepId: string
|
||||
context: AiRequestContext
|
||||
): Promise<void> {
|
||||
try {
|
||||
console.log('[MCP Prompt] Starting recursive call after tool execution...')
|
||||
// try {
|
||||
// 重置工具执行状态,准备处理新的步骤
|
||||
context.hasExecutedToolsInCurrentStep = false
|
||||
|
||||
const recursiveResult = await context.recursiveCall(recursiveParams)
|
||||
const recursiveResult = await context.recursiveCall(recursiveParams)
|
||||
|
||||
if (recursiveResult && recursiveResult.fullStream) {
|
||||
await this.pipeRecursiveStream(controller, recursiveResult.fullStream)
|
||||
} else {
|
||||
console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
|
||||
}
|
||||
} catch (error) {
|
||||
this.handleRecursiveCallError(controller, error, stepId)
|
||||
if (recursiveResult && recursiveResult.fullStream) {
|
||||
await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context)
|
||||
} else {
|
||||
console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
|
||||
}
|
||||
// } catch (error) {
|
||||
// this.handleRecursiveCallError(controller, error, stepId)
|
||||
// }
|
||||
}
|
||||
|
||||
/**
|
||||
* 将递归流的数据传递到当前流
|
||||
*/
|
||||
private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> {
|
||||
private async pipeRecursiveStream(
|
||||
controller: StreamController,
|
||||
recursiveStream: ReadableStream,
|
||||
context?: AiRequestContext
|
||||
): Promise<void> {
|
||||
const reader = recursiveStream.getReader()
|
||||
try {
|
||||
while (true) {
|
||||
@@ -73,9 +87,16 @@ export class StreamEventManager {
|
||||
break
|
||||
}
|
||||
if (value.type === 'finish') {
|
||||
// 迭代的流不发finish
|
||||
// 迭代的流不发finish,但需要累加其 usage
|
||||
if (value.usage && context?.accumulatedUsage) {
|
||||
this.accumulateUsage(context.accumulatedUsage, value.usage)
|
||||
}
|
||||
break
|
||||
}
|
||||
// 对于 finish-step 类型,累加其 usage
|
||||
if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) {
|
||||
this.accumulateUsage(context.accumulatedUsage, value.usage)
|
||||
}
|
||||
// 将递归流的数据传递到当前流
|
||||
controller.enqueue(value)
|
||||
}
|
||||
@@ -87,25 +108,25 @@ export class StreamEventManager {
|
||||
/**
|
||||
* 处理递归调用错误
|
||||
*/
|
||||
private handleRecursiveCallError(controller: StreamController, error: unknown, stepId: string): void {
|
||||
console.error('[MCP Prompt] Recursive call failed:', error)
|
||||
// private handleRecursiveCallError(controller: StreamController, error: unknown): void {
|
||||
// console.error('[MCP Prompt] Recursive call failed:', error)
|
||||
|
||||
// 使用 AI SDK 标准错误格式,但不中断流
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
error: {
|
||||
message: error instanceof Error ? error.message : String(error),
|
||||
name: error instanceof Error ? error.name : 'RecursiveCallError'
|
||||
}
|
||||
})
|
||||
// // 使用 AI SDK 标准错误格式,但不中断流
|
||||
// controller.enqueue({
|
||||
// type: 'error',
|
||||
// error: {
|
||||
// message: error instanceof Error ? error.message : String(error),
|
||||
// name: error instanceof Error ? error.name : 'RecursiveCallError'
|
||||
// }
|
||||
// })
|
||||
|
||||
// 继续发送文本增量,保持流的连续性
|
||||
controller.enqueue({
|
||||
type: 'text-delta',
|
||||
id: stepId,
|
||||
text: '\n\n[工具执行后递归调用失败,继续对话...]'
|
||||
})
|
||||
}
|
||||
// // // 继续发送文本增量,保持流的连续性
|
||||
// // controller.enqueue({
|
||||
// // type: 'text-delta',
|
||||
// // id: stepId,
|
||||
// // text: '\n\n[工具执行后递归调用失败,继续对话...]'
|
||||
// // })
|
||||
// }
|
||||
|
||||
/**
|
||||
* 构建递归调用的参数
|
||||
@@ -136,4 +157,18 @@ export class StreamEventManager {
|
||||
|
||||
return recursiveParams
|
||||
}
|
||||
|
||||
/**
|
||||
* 累加 usage 数据
|
||||
*/
|
||||
private accumulateUsage(target: any, source: any): void {
|
||||
if (!target || !source) return
|
||||
|
||||
// 累加各种 token 类型
|
||||
target.inputTokens = (target.inputTokens || 0) + (source.inputTokens || 0)
|
||||
target.outputTokens = (target.outputTokens || 0) + (source.outputTokens || 0)
|
||||
target.totalTokens = (target.totalTokens || 0) + (source.totalTokens || 0)
|
||||
target.reasoningTokens = (target.reasoningTokens || 0) + (source.reasoningTokens || 0)
|
||||
target.cachedInputTokens = (target.cachedInputTokens || 0) + (source.cachedInputTokens || 0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* 负责工具的执行、结果格式化和相关事件发送
|
||||
* 从 promptToolUsePlugin.ts 中提取出来以降低复杂度
|
||||
*/
|
||||
import type { ToolSet } from 'ai'
|
||||
import type { ToolSet, TypedToolError } from 'ai'
|
||||
|
||||
import type { ToolUseResult } from './type'
|
||||
|
||||
@@ -38,7 +38,6 @@ export class ToolExecutor {
|
||||
controller: StreamController
|
||||
): Promise<ExecutedResult[]> {
|
||||
const executedResults: ExecutedResult[] = []
|
||||
|
||||
for (const toolUse of toolUses) {
|
||||
try {
|
||||
const tool = tools[toolUse.toolName]
|
||||
@@ -46,17 +45,12 @@ export class ToolExecutor {
|
||||
throw new Error(`Tool "${toolUse.toolName}" has no execute method`)
|
||||
}
|
||||
|
||||
// 发送工具调用开始事件
|
||||
this.sendToolStartEvents(controller, toolUse)
|
||||
|
||||
console.log(`[MCP Prompt Stream] Executing tool: ${toolUse.toolName}`, toolUse.arguments)
|
||||
|
||||
// 发送 tool-call 事件
|
||||
controller.enqueue({
|
||||
type: 'tool-call',
|
||||
toolCallId: toolUse.id,
|
||||
toolName: toolUse.toolName,
|
||||
input: tool.inputSchema
|
||||
input: toolUse.arguments
|
||||
})
|
||||
|
||||
const result = await tool.execute(toolUse.arguments, {
|
||||
@@ -111,45 +105,46 @@ export class ToolExecutor {
|
||||
/**
|
||||
* 发送工具调用开始相关事件
|
||||
*/
|
||||
private sendToolStartEvents(controller: StreamController, toolUse: ToolUseResult): void {
|
||||
// 发送 tool-input-start 事件
|
||||
controller.enqueue({
|
||||
type: 'tool-input-start',
|
||||
id: toolUse.id,
|
||||
toolName: toolUse.toolName
|
||||
})
|
||||
}
|
||||
// private sendToolStartEvents(controller: StreamController, toolUse: ToolUseResult): void {
|
||||
// // 发送 tool-input-start 事件
|
||||
// controller.enqueue({
|
||||
// type: 'tool-input-start',
|
||||
// id: toolUse.id,
|
||||
// toolName: toolUse.toolName
|
||||
// })
|
||||
// }
|
||||
|
||||
/**
|
||||
* 处理工具执行错误
|
||||
*/
|
||||
private handleToolError(
|
||||
private handleToolError<T extends ToolSet>(
|
||||
toolUse: ToolUseResult,
|
||||
error: unknown,
|
||||
controller: StreamController
|
||||
// _tools: ToolSet
|
||||
): ExecutedResult {
|
||||
// 使用 AI SDK 标准错误格式
|
||||
// const toolError: TypedToolError<typeof _tools> = {
|
||||
// type: 'tool-error',
|
||||
// toolCallId: toolUse.id,
|
||||
// toolName: toolUse.toolName,
|
||||
// input: toolUse.arguments,
|
||||
// error: error instanceof Error ? error.message : String(error)
|
||||
// }
|
||||
const toolError: TypedToolError<T> = {
|
||||
type: 'tool-error',
|
||||
toolCallId: toolUse.id,
|
||||
toolName: toolUse.toolName,
|
||||
input: toolUse.arguments,
|
||||
error
|
||||
}
|
||||
|
||||
// controller.enqueue(toolError)
|
||||
controller.enqueue(toolError)
|
||||
|
||||
// 发送标准错误事件
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
error: error instanceof Error ? error.message : String(error)
|
||||
})
|
||||
// controller.enqueue({
|
||||
// type: 'tool-error',
|
||||
// toolCallId: toolUse.id,
|
||||
// error: error instanceof Error ? error.message : String(error),
|
||||
// input: toolUse.arguments
|
||||
// })
|
||||
|
||||
return {
|
||||
toolCallId: toolUse.id,
|
||||
toolName: toolUse.toolName,
|
||||
result: error instanceof Error ? error.message : String(error),
|
||||
result: error,
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,9 +8,19 @@ import type { TextStreamPart, ToolSet } from 'ai'
|
||||
import { definePlugin } from '../../index'
|
||||
import type { AiRequestContext } from '../../types'
|
||||
import { StreamEventManager } from './StreamEventManager'
|
||||
import { type TagConfig, TagExtractor } from './tagExtraction'
|
||||
import { ToolExecutor } from './ToolExecutor'
|
||||
import { PromptToolUseConfig, ToolUseResult } from './type'
|
||||
|
||||
/**
|
||||
* 工具使用标签配置
|
||||
*/
|
||||
const TOOL_USE_TAG_CONFIG: TagConfig = {
|
||||
openingTag: '<tool_use>',
|
||||
closingTag: '</tool_use>',
|
||||
separator: '\n'
|
||||
}
|
||||
|
||||
/**
|
||||
* 默认系统提示符模板(提取自 Cherry Studio)
|
||||
*/
|
||||
@@ -146,8 +156,10 @@ Assistant: The population of Shanghai is 26 million, while Guangzhou has a popul
|
||||
/**
|
||||
* 构建可用工具部分(提取自 Cherry Studio)
|
||||
*/
|
||||
function buildAvailableTools(tools: ToolSet): string {
|
||||
function buildAvailableTools(tools: ToolSet): string | null {
|
||||
const availableTools = Object.keys(tools)
|
||||
if (availableTools.length === 0) return null
|
||||
const result = availableTools
|
||||
.map((toolName: string) => {
|
||||
const tool = tools[toolName]
|
||||
return `
|
||||
@@ -162,7 +174,7 @@ function buildAvailableTools(tools: ToolSet): string {
|
||||
})
|
||||
.join('\n')
|
||||
return `<tools>
|
||||
${availableTools}
|
||||
${result}
|
||||
</tools>`
|
||||
}
|
||||
|
||||
@@ -171,6 +183,7 @@ ${availableTools}
|
||||
*/
|
||||
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string {
|
||||
const availableTools = buildAvailableTools(tools)
|
||||
if (availableTools === null) return userSystemPrompt
|
||||
|
||||
const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
|
||||
.replace('{{ AVAILABLE_TOOLS }}', availableTools)
|
||||
@@ -248,40 +261,76 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
|
||||
return params
|
||||
}
|
||||
|
||||
context.mcpTools = params.tools
|
||||
console.log('tools stored in context', params.tools)
|
||||
// 分离 provider-defined 和其他类型的工具
|
||||
const providerDefinedTools: ToolSet = {}
|
||||
const promptTools: ToolSet = {}
|
||||
|
||||
// 构建系统提示符
|
||||
for (const [toolName, tool] of Object.entries(params.tools as ToolSet)) {
|
||||
if (tool.type === 'provider-defined') {
|
||||
// provider-defined 类型的工具保留在 tools 参数中
|
||||
providerDefinedTools[toolName] = tool
|
||||
} else {
|
||||
// 其他工具转换为 prompt 模式
|
||||
promptTools[toolName] = tool
|
||||
}
|
||||
}
|
||||
|
||||
// 只有当有非 provider-defined 工具时才保存到 context
|
||||
if (Object.keys(promptTools).length > 0) {
|
||||
context.mcpTools = promptTools
|
||||
}
|
||||
|
||||
// 构建系统提示符(只包含非 provider-defined 工具)
|
||||
const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
|
||||
const systemPrompt = buildSystemPrompt(userSystemPrompt, params.tools)
|
||||
const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
|
||||
let systemMessage: string | null = systemPrompt
|
||||
console.log('config.context', context)
|
||||
if (config.createSystemMessage) {
|
||||
// 🎯 如果用户提供了自定义处理函数,使用它
|
||||
systemMessage = config.createSystemMessage(systemPrompt, params, context)
|
||||
}
|
||||
|
||||
// 移除 tools,改为 prompt 模式
|
||||
// 保留 provider-defined tools,移除其他 tools
|
||||
const transformedParams = {
|
||||
...params,
|
||||
...(systemMessage ? { system: systemMessage } : {}),
|
||||
tools: undefined
|
||||
tools: Object.keys(providerDefinedTools).length > 0 ? providerDefinedTools : undefined
|
||||
}
|
||||
context.originalParams = transformedParams
|
||||
console.log('transformedParams', transformedParams)
|
||||
return transformedParams
|
||||
},
|
||||
transformStream: (_: any, context: AiRequestContext) => () => {
|
||||
let textBuffer = ''
|
||||
let stepId = ''
|
||||
// let stepId = ''
|
||||
|
||||
// 如果没有需要 prompt 模式处理的工具,直接返回原始流
|
||||
if (!context.mcpTools) {
|
||||
throw new Error('No tools available')
|
||||
return new TransformStream()
|
||||
}
|
||||
|
||||
// 创建工具执行器和流事件管理器
|
||||
// 从 context 中获取或初始化 usage 累加器
|
||||
if (!context.accumulatedUsage) {
|
||||
context.accumulatedUsage = {
|
||||
inputTokens: 0,
|
||||
outputTokens: 0,
|
||||
totalTokens: 0,
|
||||
reasoningTokens: 0,
|
||||
cachedInputTokens: 0
|
||||
}
|
||||
}
|
||||
|
||||
// 创建工具执行器、流事件管理器和标签提取器
|
||||
const toolExecutor = new ToolExecutor()
|
||||
const streamEventManager = new StreamEventManager()
|
||||
const tagExtractor = new TagExtractor(TOOL_USE_TAG_CONFIG)
|
||||
|
||||
// 在context中初始化工具执行状态,避免递归调用时状态丢失
|
||||
if (!context.hasExecutedToolsInCurrentStep) {
|
||||
context.hasExecutedToolsInCurrentStep = false
|
||||
}
|
||||
|
||||
// 用于hold text-start事件,直到确认有非工具标签内容
|
||||
let pendingTextStart: TextStreamPart<TOOLS> | null = null
|
||||
let hasStartedText = false
|
||||
|
||||
type TOOLS = NonNullable<typeof context.mcpTools>
|
||||
return new TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>({
|
||||
@@ -289,83 +338,106 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
|
||||
chunk: TextStreamPart<TOOLS>,
|
||||
controller: TransformStreamDefaultController<TextStreamPart<TOOLS>>
|
||||
) {
|
||||
// 收集文本内容
|
||||
if (chunk.type === 'text-delta') {
|
||||
textBuffer += chunk.text || ''
|
||||
stepId = chunk.id || ''
|
||||
controller.enqueue(chunk)
|
||||
// Hold住text-start事件,直到确认有非工具标签内容
|
||||
if ((chunk as any).type === 'text-start') {
|
||||
pendingTextStart = chunk
|
||||
return
|
||||
}
|
||||
|
||||
if (chunk.type === 'text-end' || chunk.type === 'finish-step') {
|
||||
const tools = context.mcpTools
|
||||
if (!tools || Object.keys(tools).length === 0) {
|
||||
controller.enqueue(chunk)
|
||||
return
|
||||
}
|
||||
// text-delta阶段:收集文本内容并过滤工具标签
|
||||
if (chunk.type === 'text-delta') {
|
||||
textBuffer += chunk.text || ''
|
||||
// stepId = chunk.id || ''
|
||||
|
||||
// 解析工具调用
|
||||
const { results: parsedTools, content: parsedContent } = parseToolUse(textBuffer, tools)
|
||||
const validToolUses = parsedTools.filter((t) => t.status === 'pending')
|
||||
// 使用TagExtractor过滤工具标签,只传递非标签内容到UI层
|
||||
const extractionResults = tagExtractor.processText(chunk.text || '')
|
||||
|
||||
// 如果没有有效的工具调用,直接传递原始事件
|
||||
if (validToolUses.length === 0) {
|
||||
controller.enqueue(chunk)
|
||||
return
|
||||
}
|
||||
|
||||
if (chunk.type === 'text-end') {
|
||||
controller.enqueue({
|
||||
type: 'text-end',
|
||||
id: stepId,
|
||||
providerMetadata: {
|
||||
text: {
|
||||
value: parsedContent
|
||||
}
|
||||
for (const result of extractionResults) {
|
||||
// 只传递非标签内容到UI层
|
||||
if (!result.isTagContent && result.content) {
|
||||
// 如果还没有发送text-start且有pending的text-start,先发送它
|
||||
if (!hasStartedText && pendingTextStart) {
|
||||
controller.enqueue(pendingTextStart)
|
||||
hasStartedText = true
|
||||
pendingTextStart = null
|
||||
}
|
||||
})
|
||||
return
|
||||
|
||||
const filteredChunk = {
|
||||
...chunk,
|
||||
text: result.content
|
||||
}
|
||||
controller.enqueue(filteredChunk)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if (chunk.type === 'text-end') {
|
||||
// 只有当已经发送了text-start时才发送text-end
|
||||
if (hasStartedText) {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if (chunk.type === 'finish-step') {
|
||||
// 统一在finish-step阶段检查并执行工具调用
|
||||
const tools = context.mcpTools
|
||||
if (tools && Object.keys(tools).length > 0 && !context.hasExecutedToolsInCurrentStep) {
|
||||
// 解析完整的textBuffer来检测工具调用
|
||||
const { results: parsedTools } = parseToolUse(textBuffer, tools)
|
||||
const validToolUses = parsedTools.filter((t) => t.status === 'pending')
|
||||
|
||||
if (validToolUses.length > 0) {
|
||||
context.hasExecutedToolsInCurrentStep = true
|
||||
|
||||
// 执行工具调用(不需要手动发送 start-step,外部流已经处理)
|
||||
const executedResults = await toolExecutor.executeTools(validToolUses, tools, controller)
|
||||
|
||||
// 发送步骤完成事件,使用 tool-calls 作为 finishReason
|
||||
streamEventManager.sendStepFinishEvent(controller, chunk, context, 'tool-calls')
|
||||
|
||||
// 处理递归调用
|
||||
const toolResultsText = toolExecutor.formatToolResults(executedResults)
|
||||
const recursiveParams = streamEventManager.buildRecursiveParams(
|
||||
context,
|
||||
textBuffer,
|
||||
toolResultsText,
|
||||
tools
|
||||
)
|
||||
|
||||
await streamEventManager.handleRecursiveCall(controller, recursiveParams, context)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
controller.enqueue({
|
||||
...chunk,
|
||||
finishReason: 'tool-calls'
|
||||
})
|
||||
|
||||
// 发送步骤开始事件
|
||||
streamEventManager.sendStepStartEvent(controller)
|
||||
|
||||
// 执行工具调用
|
||||
const executedResults = await toolExecutor.executeTools(validToolUses, tools, controller)
|
||||
|
||||
// 发送步骤完成事件
|
||||
streamEventManager.sendStepFinishEvent(controller, chunk)
|
||||
|
||||
// 处理递归调用
|
||||
if (validToolUses.length > 0) {
|
||||
const toolResultsText = toolExecutor.formatToolResults(executedResults)
|
||||
const recursiveParams = streamEventManager.buildRecursiveParams(
|
||||
context,
|
||||
textBuffer,
|
||||
toolResultsText,
|
||||
tools
|
||||
)
|
||||
|
||||
await streamEventManager.handleRecursiveCall(controller, recursiveParams, context, stepId)
|
||||
}
|
||||
// 如果没有执行工具调用,直接传递原始finish-step事件
|
||||
controller.enqueue(chunk)
|
||||
|
||||
// 清理状态
|
||||
textBuffer = ''
|
||||
return
|
||||
}
|
||||
|
||||
// 对于其他类型的事件,直接传递
|
||||
controller.enqueue(chunk)
|
||||
// 处理 finish 类型,使用累加后的 totalUsage
|
||||
if (chunk.type === 'finish') {
|
||||
controller.enqueue({
|
||||
...chunk,
|
||||
totalUsage: context.accumulatedUsage
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// 对于其他类型的事件,直接传递(不包括text-start,已在上面处理)
|
||||
if ((chunk as any).type !== 'text-start') {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
},
|
||||
|
||||
flush() {
|
||||
// 流结束时的清理工作
|
||||
console.log('[MCP Prompt] Stream ended, cleaning up...')
|
||||
// 清理pending状态
|
||||
pendingTextStart = null
|
||||
hasStartedText = false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
import { anthropic } from '@ai-sdk/anthropic'
|
||||
import { google } from '@ai-sdk/google'
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
import { InferToolInput, InferToolOutput } from 'ai'
|
||||
|
||||
import { ProviderOptionsMap } from '../../../options/types'
|
||||
import { OpenRouterSearchConfig } from './openrouter'
|
||||
|
||||
/**
|
||||
* 从 AI SDK 的工具函数中提取参数类型,以确保类型安全。
|
||||
*/
|
||||
type OpenAISearchConfig = Parameters<typeof openai.tools.webSearchPreview>[0]
|
||||
type AnthropicSearchConfig = Parameters<typeof anthropic.tools.webSearch_20250305>[0]
|
||||
type GoogleSearchConfig = Parameters<typeof google.tools.googleSearch>[0]
|
||||
export type OpenAISearchConfig = NonNullable<Parameters<typeof openai.tools.webSearch>[0]>
|
||||
export type OpenAISearchPreviewConfig = NonNullable<Parameters<typeof openai.tools.webSearchPreview>[0]>
|
||||
export type AnthropicSearchConfig = NonNullable<Parameters<typeof anthropic.tools.webSearch_20250305>[0]>
|
||||
export type GoogleSearchConfig = NonNullable<Parameters<typeof google.tools.googleSearch>[0]>
|
||||
export type XAISearchConfig = NonNullable<ProviderOptionsMap['xai']['searchParameters']>
|
||||
|
||||
/**
|
||||
* 插件初始化时接收的完整配置对象
|
||||
@@ -18,10 +22,12 @@ type GoogleSearchConfig = Parameters<typeof google.tools.googleSearch>[0]
|
||||
*/
|
||||
export interface WebSearchPluginConfig {
|
||||
openai?: OpenAISearchConfig
|
||||
'openai-chat'?: OpenAISearchPreviewConfig
|
||||
anthropic?: AnthropicSearchConfig
|
||||
xai?: ProviderOptionsMap['xai']['searchParameters']
|
||||
google?: GoogleSearchConfig
|
||||
'google-vertex'?: GoogleSearchConfig
|
||||
openrouter?: OpenRouterSearchConfig
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -31,6 +37,7 @@ export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
|
||||
google: {},
|
||||
'google-vertex': {},
|
||||
openai: {},
|
||||
'openai-chat': {},
|
||||
xai: {
|
||||
mode: 'on',
|
||||
returnCitations: true,
|
||||
@@ -39,29 +46,44 @@ export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
|
||||
},
|
||||
anthropic: {
|
||||
maxUses: 5
|
||||
},
|
||||
openrouter: {
|
||||
plugins: [
|
||||
{
|
||||
id: 'web',
|
||||
max_results: 5
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
export type WebSearchToolOutputSchema = {
|
||||
// Anthropic 工具 - 手动定义
|
||||
anthropicWebSearch: Array<{
|
||||
url: string
|
||||
title: string
|
||||
pageAge: string | null
|
||||
encryptedContent: string
|
||||
type: string
|
||||
}>
|
||||
anthropic: InferToolOutput<ReturnType<typeof anthropic.tools.webSearch_20250305>>
|
||||
|
||||
// OpenAI 工具 - 基于实际输出
|
||||
openaiWebSearch: {
|
||||
// TODO: 上游定义不规范,是unknown
|
||||
// openai: InferToolOutput<ReturnType<typeof openai.tools.webSearch>>
|
||||
openai: {
|
||||
status: 'completed' | 'failed'
|
||||
}
|
||||
'openai-chat': {
|
||||
status: 'completed' | 'failed'
|
||||
}
|
||||
|
||||
// Google 工具
|
||||
googleSearch: {
|
||||
// TODO: 上游定义不规范,是unknown
|
||||
// google: InferToolOutput<ReturnType<typeof google.tools.googleSearch>>
|
||||
google: {
|
||||
webSearchQueries?: string[]
|
||||
groundingChunks?: Array<{
|
||||
web?: { uri: string; title: string }
|
||||
}>
|
||||
}
|
||||
}
|
||||
|
||||
export type WebSearchToolInputSchema = {
|
||||
anthropic: InferToolInput<ReturnType<typeof anthropic.tools.webSearch_20250305>>
|
||||
openai: InferToolInput<ReturnType<typeof openai.tools.webSearch>>
|
||||
google: InferToolInput<ReturnType<typeof google.tools.googleSearch>>
|
||||
'openai-chat': InferToolInput<ReturnType<typeof openai.tools.webSearchPreview>>
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import { anthropic } from '@ai-sdk/anthropic'
|
||||
import { google } from '@ai-sdk/google'
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
|
||||
import { createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import { definePlugin } from '../../'
|
||||
import type { AiRequestContext } from '../../types'
|
||||
import { DEFAULT_WEB_SEARCH_CONFIG, WebSearchPluginConfig } from './helper'
|
||||
@@ -27,7 +27,14 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
|
||||
case 'openai': {
|
||||
if (config.openai) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config.openai)
|
||||
params.tools.web_search = openai.tools.webSearch(config.openai)
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'openai-chat': {
|
||||
if (config['openai-chat']) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
|
||||
}
|
||||
break
|
||||
}
|
||||
@@ -56,6 +63,14 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'openrouter': {
|
||||
if (config.openrouter) {
|
||||
const searchOptions = createOpenRouterOptions(config.openrouter)
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return params
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
export type OpenRouterSearchConfig = {
|
||||
plugins?: Array<{
|
||||
id: 'web'
|
||||
/**
|
||||
* Maximum number of search results to include (default: 5)
|
||||
*/
|
||||
max_results?: number
|
||||
/**
|
||||
* Custom search prompt to guide the search query
|
||||
*/
|
||||
search_prompt?: string
|
||||
}>
|
||||
/**
|
||||
* Built-in web search options for models that support native web search
|
||||
*/
|
||||
web_search_options?: {
|
||||
/**
|
||||
* Maximum number of search results to include
|
||||
*/
|
||||
max_results?: number
|
||||
/**
|
||||
* Custom search prompt to guide the search query
|
||||
*/
|
||||
search_prompt?: string
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,8 @@
|
||||
// 核心类型和接口
|
||||
export type { AiPlugin, AiRequestContext, HookResult, PluginManagerConfig } from './types'
|
||||
import type { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import type { LanguageModel } from 'ai'
|
||||
|
||||
import type { ProviderId } from '../providers'
|
||||
import type { AiPlugin, AiRequestContext } from './types'
|
||||
|
||||
@@ -9,16 +12,16 @@ export { PluginManager } from './manager'
|
||||
// 工具函数
|
||||
export function createContext<T extends ProviderId>(
|
||||
providerId: T,
|
||||
modelId: string,
|
||||
model: LanguageModel | ImageModelV2,
|
||||
originalParams: any
|
||||
): AiRequestContext {
|
||||
return {
|
||||
providerId,
|
||||
modelId,
|
||||
model,
|
||||
originalParams,
|
||||
metadata: {},
|
||||
startTime: Date.now(),
|
||||
requestId: `${providerId}-${modelId}-${Date.now()}-${Math.random().toString(36).slice(2)}`,
|
||||
requestId: `${providerId}-${typeof model === 'string' ? model : model?.modelId}-${Date.now()}-${Math.random().toString(36).slice(2)}`,
|
||||
// 占位
|
||||
recursiveCall: () => Promise.resolve(null)
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ export type RecursiveCallFn = (newParams: any) => Promise<any>
|
||||
*/
|
||||
export interface AiRequestContext {
|
||||
providerId: ProviderId
|
||||
modelId: string
|
||||
model: LanguageModel | ImageModelV2
|
||||
originalParams: any
|
||||
metadata: Record<string, any>
|
||||
startTime: number
|
||||
|
||||
@@ -9,9 +9,11 @@ import { createDeepSeek } from '@ai-sdk/deepseek'
|
||||
import { createGoogleGenerativeAI } from '@ai-sdk/google'
|
||||
import { createOpenAI, type OpenAIProviderSettings } from '@ai-sdk/openai'
|
||||
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'
|
||||
import { LanguageModelV2 } from '@ai-sdk/provider'
|
||||
import { createXai } from '@ai-sdk/xai'
|
||||
import { customProvider, type Provider } from 'ai'
|
||||
import * as z from 'zod'
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider'
|
||||
import { customProvider, Provider } from 'ai'
|
||||
import { z } from 'zod'
|
||||
|
||||
/**
|
||||
* 基础 Provider IDs
|
||||
@@ -25,7 +27,8 @@ export const baseProviderIds = [
|
||||
'xai',
|
||||
'azure',
|
||||
'azure-responses',
|
||||
'deepseek'
|
||||
'deepseek',
|
||||
'openrouter'
|
||||
] as const
|
||||
|
||||
/**
|
||||
@@ -38,14 +41,16 @@ export const baseProviderIdSchema = z.enum(baseProviderIds)
|
||||
*/
|
||||
export type BaseProviderId = z.infer<typeof baseProviderIdSchema>
|
||||
|
||||
export const baseProviderSchema = z.object({
|
||||
id: baseProviderIdSchema,
|
||||
name: z.string(),
|
||||
creator: z.function().args(z.any()).returns(z.any()) as z.ZodType<(options: any) => Provider>,
|
||||
supportsImageGeneration: z.boolean()
|
||||
})
|
||||
export const isBaseProvider = (id: ProviderId): id is BaseProviderId => {
|
||||
return baseProviderIdSchema.safeParse(id).success
|
||||
}
|
||||
|
||||
export type BaseProvider = z.infer<typeof baseProviderSchema>
|
||||
type BaseProvider = {
|
||||
id: BaseProviderId
|
||||
name: string
|
||||
creator: (options: any) => Provider | LanguageModelV2
|
||||
supportsImageGeneration: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* 基础 Providers 定义
|
||||
@@ -121,6 +126,12 @@ export const baseProviders = [
|
||||
name: 'DeepSeek',
|
||||
creator: createDeepSeek,
|
||||
supportsImageGeneration: false
|
||||
},
|
||||
{
|
||||
id: 'openrouter',
|
||||
name: 'OpenRouter',
|
||||
creator: createOpenRouter,
|
||||
supportsImageGeneration: true
|
||||
}
|
||||
] as const satisfies BaseProvider[]
|
||||
|
||||
@@ -148,7 +159,12 @@ export const providerConfigSchema = z
|
||||
.object({
|
||||
id: customProviderIdSchema, // 只允许自定义ID
|
||||
name: z.string().min(1),
|
||||
creator: z.function().optional(),
|
||||
creator: z
|
||||
.function({
|
||||
input: z.any(),
|
||||
output: z.any()
|
||||
})
|
||||
.optional(),
|
||||
import: z.function().optional(),
|
||||
creatorFunctionName: z.string().optional(),
|
||||
supportsImageGeneration: z.boolean().default(false),
|
||||
|
||||
@@ -4,12 +4,12 @@
|
||||
*/
|
||||
import { ImageModelV2, LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import {
|
||||
experimental_generateImage as generateImage,
|
||||
generateObject,
|
||||
generateText,
|
||||
experimental_generateImage as _generateImage,
|
||||
generateObject as _generateObject,
|
||||
generateText as _generateText,
|
||||
LanguageModel,
|
||||
streamObject,
|
||||
streamText
|
||||
streamObject as _streamObject,
|
||||
streamText as _streamText
|
||||
} from 'ai'
|
||||
|
||||
import { globalModelResolver } from '../models'
|
||||
@@ -18,7 +18,14 @@ import { type AiPlugin, type AiRequestContext, definePlugin } from '../plugins'
|
||||
import { type ProviderId } from '../providers'
|
||||
import { ImageGenerationError, ImageModelResolutionError } from './errors'
|
||||
import { PluginEngine } from './pluginEngine'
|
||||
import { type RuntimeConfig } from './types'
|
||||
import type {
|
||||
generateImageParams,
|
||||
generateObjectParams,
|
||||
generateTextParams,
|
||||
RuntimeConfig,
|
||||
streamObjectParams,
|
||||
streamTextParams
|
||||
} from './types'
|
||||
|
||||
export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
public pluginEngine: PluginEngine<T>
|
||||
@@ -75,12 +82,12 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
* 流式文本生成
|
||||
*/
|
||||
async streamText(
|
||||
params: Parameters<typeof streamText>[0],
|
||||
params: streamTextParams,
|
||||
options?: {
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
}
|
||||
): Promise<ReturnType<typeof streamText>> {
|
||||
const { model, ...restParams } = params
|
||||
): Promise<ReturnType<typeof _streamText>> {
|
||||
const { model } = params
|
||||
|
||||
// 根据 model 类型决定插件配置
|
||||
if (typeof model === 'string') {
|
||||
@@ -94,19 +101,16 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
|
||||
return this.pluginEngine.executeStreamWithPlugins(
|
||||
'streamText',
|
||||
model,
|
||||
restParams,
|
||||
async (resolvedModel, transformedParams, streamTransforms) => {
|
||||
params,
|
||||
(resolvedModel, transformedParams, streamTransforms) => {
|
||||
const experimental_transform =
|
||||
params?.experimental_transform ?? (streamTransforms.length > 0 ? streamTransforms : undefined)
|
||||
|
||||
const finalParams = {
|
||||
model: resolvedModel,
|
||||
return _streamText({
|
||||
...transformedParams,
|
||||
model: resolvedModel,
|
||||
experimental_transform
|
||||
} as Parameters<typeof streamText>[0]
|
||||
|
||||
return await streamText(finalParams)
|
||||
})
|
||||
}
|
||||
)
|
||||
}
|
||||
@@ -117,12 +121,12 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
* 生成文本
|
||||
*/
|
||||
async generateText(
|
||||
params: Parameters<typeof generateText>[0],
|
||||
params: generateTextParams,
|
||||
options?: {
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
}
|
||||
): Promise<ReturnType<typeof generateText>> {
|
||||
const { model, ...restParams } = params
|
||||
): Promise<ReturnType<typeof _generateText>> {
|
||||
const { model } = params
|
||||
|
||||
// 根据 model 类型决定插件配置
|
||||
if (typeof model === 'string') {
|
||||
@@ -134,12 +138,10 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
this.pluginEngine.usePlugins([this.createConfigureContextPlugin()])
|
||||
}
|
||||
|
||||
return this.pluginEngine.executeWithPlugins(
|
||||
return this.pluginEngine.executeWithPlugins<Parameters<typeof _generateText>[0], ReturnType<typeof _generateText>>(
|
||||
'generateText',
|
||||
model,
|
||||
restParams,
|
||||
async (resolvedModel, transformedParams) =>
|
||||
generateText({ model: resolvedModel, ...transformedParams } as Parameters<typeof generateText>[0])
|
||||
params,
|
||||
(resolvedModel, transformedParams) => _generateText({ ...transformedParams, model: resolvedModel })
|
||||
)
|
||||
}
|
||||
|
||||
@@ -147,12 +149,12 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
* 生成结构化对象
|
||||
*/
|
||||
async generateObject(
|
||||
params: Parameters<typeof generateObject>[0],
|
||||
params: generateObjectParams,
|
||||
options?: {
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
}
|
||||
): Promise<ReturnType<typeof generateObject>> {
|
||||
const { model, ...restParams } = params
|
||||
): Promise<ReturnType<typeof _generateObject>> {
|
||||
const { model } = params
|
||||
|
||||
// 根据 model 类型决定插件配置
|
||||
if (typeof model === 'string') {
|
||||
@@ -164,25 +166,23 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
this.pluginEngine.usePlugins([this.createConfigureContextPlugin()])
|
||||
}
|
||||
|
||||
return this.pluginEngine.executeWithPlugins(
|
||||
return this.pluginEngine.executeWithPlugins<generateObjectParams, ReturnType<typeof _generateObject>>(
|
||||
'generateObject',
|
||||
model,
|
||||
restParams,
|
||||
async (resolvedModel, transformedParams) =>
|
||||
generateObject({ model: resolvedModel, ...transformedParams } as Parameters<typeof generateObject>[0])
|
||||
params,
|
||||
async (resolvedModel, transformedParams) => _generateObject({ ...transformedParams, model: resolvedModel })
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* 流式生成结构化对象
|
||||
*/
|
||||
async streamObject(
|
||||
params: Parameters<typeof streamObject>[0],
|
||||
streamObject(
|
||||
params: streamObjectParams,
|
||||
options?: {
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
}
|
||||
): Promise<ReturnType<typeof streamObject>> {
|
||||
const { model, ...restParams } = params
|
||||
): Promise<ReturnType<typeof _streamObject>> {
|
||||
const { model } = params
|
||||
|
||||
// 根据 model 类型决定插件配置
|
||||
if (typeof model === 'string') {
|
||||
@@ -194,23 +194,17 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
this.pluginEngine.usePlugins([this.createConfigureContextPlugin()])
|
||||
}
|
||||
|
||||
return this.pluginEngine.executeWithPlugins(
|
||||
'streamObject',
|
||||
model,
|
||||
restParams,
|
||||
async (resolvedModel, transformedParams) =>
|
||||
streamObject({ model: resolvedModel, ...transformedParams } as Parameters<typeof streamObject>[0])
|
||||
return this.pluginEngine.executeStreamWithPlugins('streamObject', params, (resolvedModel, transformedParams) =>
|
||||
_streamObject({ ...transformedParams, model: resolvedModel })
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* 生成图像
|
||||
*/
|
||||
async generateImage(
|
||||
params: Omit<Parameters<typeof generateImage>[0], 'model'> & { model: string | ImageModelV2 }
|
||||
): Promise<ReturnType<typeof generateImage>> {
|
||||
generateImage(params: generateImageParams): Promise<ReturnType<typeof _generateImage>> {
|
||||
try {
|
||||
const { model, ...restParams } = params
|
||||
const { model } = params
|
||||
|
||||
// 根据 model 类型决定插件配置
|
||||
if (typeof model === 'string') {
|
||||
@@ -219,13 +213,8 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
|
||||
this.pluginEngine.usePlugins([this.createConfigureContextPlugin()])
|
||||
}
|
||||
|
||||
return await this.pluginEngine.executeImageWithPlugins(
|
||||
'generateImage',
|
||||
model,
|
||||
restParams,
|
||||
async (resolvedModel, transformedParams) => {
|
||||
return await generateImage({ model: resolvedModel, ...transformedParams })
|
||||
}
|
||||
return this.pluginEngine.executeImageWithPlugins('generateImage', params, (resolvedModel, transformedParams) =>
|
||||
_generateImage({ ...transformedParams, model: resolvedModel })
|
||||
)
|
||||
} catch (error) {
|
||||
if (error instanceof Error) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/* eslint-disable @eslint-react/naming-convention/context-name */
|
||||
import { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import { LanguageModel } from 'ai'
|
||||
import { experimental_generateImage, generateObject, generateText, LanguageModel, streamObject, streamText } from 'ai'
|
||||
|
||||
import { type AiPlugin, createContext, PluginManager } from '../plugins'
|
||||
import { type ProviderId } from '../providers/types'
|
||||
@@ -62,17 +62,19 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
* 执行带插件的操作(非流式)
|
||||
* 提供给AiExecutor使用
|
||||
*/
|
||||
async executeWithPlugins<TParams, TResult>(
|
||||
async executeWithPlugins<
|
||||
TParams extends Parameters<typeof generateText | typeof generateObject>[0],
|
||||
TResult extends ReturnType<typeof generateText | typeof generateObject>
|
||||
>(
|
||||
methodName: string,
|
||||
model: LanguageModel,
|
||||
params: TParams,
|
||||
executor: (model: LanguageModel, transformedParams: TParams) => Promise<TResult>,
|
||||
executor: (model: LanguageModel, transformedParams: TParams) => TResult,
|
||||
_context?: ReturnType<typeof createContext>
|
||||
): Promise<TResult> {
|
||||
// 统一处理模型解析
|
||||
let resolvedModel: LanguageModel | undefined
|
||||
let modelId: string
|
||||
|
||||
const { model } = params
|
||||
if (typeof model === 'string') {
|
||||
// 字符串:需要通过插件解析
|
||||
modelId = model
|
||||
@@ -83,13 +85,13 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
}
|
||||
|
||||
// 使用正确的createContext创建请求上下文
|
||||
const context = _context ? _context : createContext(this.providerId, modelId, params)
|
||||
const context = _context ? _context : createContext(this.providerId, model, params)
|
||||
|
||||
// 🔥 为上下文添加递归调用能力
|
||||
context.recursiveCall = async (newParams: any): Promise<TResult> => {
|
||||
// 递归调用自身,重新走完整的插件流程
|
||||
context.isRecursiveCall = true
|
||||
const result = await this.executeWithPlugins(methodName, model, newParams, executor, context)
|
||||
const result = await this.executeWithPlugins(methodName, newParams, executor, context)
|
||||
context.isRecursiveCall = false
|
||||
return result
|
||||
}
|
||||
@@ -138,17 +140,19 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
* 执行带插件的图像生成操作
|
||||
* 提供给AiExecutor使用
|
||||
*/
|
||||
async executeImageWithPlugins<TParams, TResult>(
|
||||
async executeImageWithPlugins<
|
||||
TParams extends Omit<Parameters<typeof experimental_generateImage>[0], 'model'> & { model: string | ImageModelV2 },
|
||||
TResult extends ReturnType<typeof experimental_generateImage>
|
||||
>(
|
||||
methodName: string,
|
||||
model: ImageModelV2 | string,
|
||||
params: TParams,
|
||||
executor: (model: ImageModelV2, transformedParams: TParams) => Promise<TResult>,
|
||||
executor: (model: ImageModelV2, transformedParams: TParams) => TResult,
|
||||
_context?: ReturnType<typeof createContext>
|
||||
): Promise<TResult> {
|
||||
// 统一处理模型解析
|
||||
let resolvedModel: ImageModelV2 | undefined
|
||||
let modelId: string
|
||||
|
||||
const { model } = params
|
||||
if (typeof model === 'string') {
|
||||
// 字符串:需要通过插件解析
|
||||
modelId = model
|
||||
@@ -159,13 +163,13 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
}
|
||||
|
||||
// 使用正确的createContext创建请求上下文
|
||||
const context = _context ? _context : createContext(this.providerId, modelId, params)
|
||||
const context = _context ? _context : createContext(this.providerId, model, params)
|
||||
|
||||
// 🔥 为上下文添加递归调用能力
|
||||
context.recursiveCall = async (newParams: any): Promise<TResult> => {
|
||||
// 递归调用自身,重新走完整的插件流程
|
||||
context.isRecursiveCall = true
|
||||
const result = await this.executeImageWithPlugins(methodName, model, newParams, executor, context)
|
||||
const result = await this.executeImageWithPlugins(methodName, newParams, executor, context)
|
||||
context.isRecursiveCall = false
|
||||
return result
|
||||
}
|
||||
@@ -214,17 +218,19 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
* 执行流式调用的通用逻辑(支持流转换器)
|
||||
* 提供给AiExecutor使用
|
||||
*/
|
||||
async executeStreamWithPlugins<TParams, TResult>(
|
||||
async executeStreamWithPlugins<
|
||||
TParams extends Parameters<typeof streamText | typeof streamObject>[0],
|
||||
TResult extends ReturnType<typeof streamText | typeof streamObject>
|
||||
>(
|
||||
methodName: string,
|
||||
model: LanguageModel,
|
||||
params: TParams,
|
||||
executor: (model: LanguageModel, transformedParams: TParams, streamTransforms: any[]) => Promise<TResult>,
|
||||
executor: (model: LanguageModel, transformedParams: TParams, streamTransforms: any[]) => TResult,
|
||||
_context?: ReturnType<typeof createContext>
|
||||
): Promise<TResult> {
|
||||
// 统一处理模型解析
|
||||
let resolvedModel: LanguageModel | undefined
|
||||
let modelId: string
|
||||
|
||||
const { model } = params
|
||||
if (typeof model === 'string') {
|
||||
// 字符串:需要通过插件解析
|
||||
modelId = model
|
||||
@@ -235,13 +241,13 @@ export class PluginEngine<T extends ProviderId = ProviderId> {
|
||||
}
|
||||
|
||||
// 创建请求上下文
|
||||
const context = _context ? _context : createContext(this.providerId, modelId, params)
|
||||
const context = _context ? _context : createContext(this.providerId, model, params)
|
||||
|
||||
// 🔥 为上下文添加递归调用能力
|
||||
context.recursiveCall = async (newParams: any): Promise<TResult> => {
|
||||
// 递归调用自身,重新走完整的插件流程
|
||||
context.isRecursiveCall = true
|
||||
const result = await this.executeStreamWithPlugins(methodName, model, newParams, executor, context)
|
||||
const result = await this.executeStreamWithPlugins(methodName, newParams, executor, context)
|
||||
context.isRecursiveCall = false
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
/**
|
||||
* Runtime 层类型定义
|
||||
*/
|
||||
import { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import { experimental_generateImage, generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
|
||||
import { type ModelConfig } from '../models/types'
|
||||
import { type AiPlugin } from '../plugins'
|
||||
import { type ProviderId } from '../providers/types'
|
||||
@@ -13,3 +16,11 @@ export interface RuntimeConfig<T extends ProviderId = ProviderId> {
|
||||
providerSettings: ModelConfig<T>['providerSettings'] & { mode?: 'chat' | 'responses' }
|
||||
plugins?: AiPlugin[]
|
||||
}
|
||||
|
||||
export type generateImageParams = Omit<Parameters<typeof experimental_generateImage>[0], 'model'> & {
|
||||
model: string | ImageModelV2
|
||||
}
|
||||
export type generateObjectParams = Parameters<typeof generateObject>[0]
|
||||
export type generateTextParams = Parameters<typeof generateText>[0]
|
||||
export type streamObjectParams = Parameters<typeof streamObject>[0]
|
||||
export type streamTextParams = Parameters<typeof streamText>[0]
|
||||
|
||||
@@ -1,26 +1,21 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"allowSyntheticDefaultImports": true,
|
||||
"declaration": true,
|
||||
"emitDecoratorMetadata": true,
|
||||
"esModuleInterop": true,
|
||||
"experimentalDecorators": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"module": "ESNext",
|
||||
"moduleResolution": "bundler",
|
||||
"declaration": true,
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"resolveJsonModule": true,
|
||||
"allowSyntheticDefaultImports": true,
|
||||
"noEmitOnError": false,
|
||||
"experimentalDecorators": true,
|
||||
"emitDecoratorMetadata": true
|
||||
"outDir": "./dist",
|
||||
"resolveJsonModule": true,
|
||||
"rootDir": "./src",
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"target": "ES2020"
|
||||
},
|
||||
"include": [
|
||||
"src/**/*"
|
||||
],
|
||||
"exclude": [
|
||||
"node_modules",
|
||||
"dist"
|
||||
]
|
||||
}
|
||||
"exclude": ["node_modules", "dist"],
|
||||
"include": ["src/**/*"]
|
||||
}
|
||||
|
||||
@@ -67,13 +67,13 @@
|
||||
"dist"
|
||||
],
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
"@tiptap/core": "^3.2.0",
|
||||
"@tiptap/pm": "^3.2.0",
|
||||
"eslint": "^9.22.0",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-simple-import-sort": "^12.1.1",
|
||||
"eslint-plugin-unused-imports": "^4.1.4",
|
||||
"prettier": "^3.5.3",
|
||||
"tsdown": "^0.13.3"
|
||||
},
|
||||
"peerDependencies": {
|
||||
@@ -87,7 +87,7 @@
|
||||
},
|
||||
"scripts": {
|
||||
"build": "tsdown",
|
||||
"lint": "prettier ./src/ --write && eslint --fix ./src/"
|
||||
"lint": "biome format ./src/ --write && eslint --fix ./src/"
|
||||
},
|
||||
"packageManager": "yarn@4.9.1"
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ export enum IpcChannel {
|
||||
App_ShowUpdateDialog = 'app:show-update-dialog',
|
||||
App_CheckForUpdate = 'app:check-for-update',
|
||||
App_Reload = 'app:reload',
|
||||
App_Quit = 'app:quit',
|
||||
App_Info = 'app:info',
|
||||
App_Proxy = 'app:proxy',
|
||||
App_SetLaunchToTray = 'app:set-launch-to-tray',
|
||||
@@ -33,10 +34,13 @@ export enum IpcChannel {
|
||||
App_GetBinaryPath = 'app:get-binary-path',
|
||||
App_InstallUvBinary = 'app:install-uv-binary',
|
||||
App_InstallBunBinary = 'app:install-bun-binary',
|
||||
App_InstallOvmsBinary = 'app:install-ovms-binary',
|
||||
App_LogToMain = 'app:log-to-main',
|
||||
App_SaveData = 'app:save-data',
|
||||
App_GetDiskInfo = 'app:get-disk-info',
|
||||
App_SetFullScreen = 'app:set-full-screen',
|
||||
App_IsFullScreen = 'app:is-full-screen',
|
||||
App_GetSystemFonts = 'app:get-system-fonts',
|
||||
|
||||
App_MacIsProcessTrusted = 'app:mac-is-process-trusted',
|
||||
App_MacRequestProcessTrust = 'app:mac-request-process-trust',
|
||||
@@ -83,7 +87,7 @@ export enum IpcChannel {
|
||||
Mcp_UploadDxt = 'mcp:upload-dxt',
|
||||
Mcp_AbortTool = 'mcp:abort-tool',
|
||||
Mcp_GetServerVersion = 'mcp:get-server-version',
|
||||
|
||||
Mcp_Progress = 'mcp:progress',
|
||||
// Python
|
||||
Python_Execute = 'python:execute',
|
||||
|
||||
@@ -217,6 +221,7 @@ export enum IpcChannel {
|
||||
// system
|
||||
System_GetDeviceType = 'system:getDeviceType',
|
||||
System_GetHostname = 'system:getHostname',
|
||||
System_GetCpuName = 'system:getCpuName',
|
||||
|
||||
// DevTools
|
||||
System_ToggleDevTools = 'system:toggleDevTools',
|
||||
@@ -302,12 +307,40 @@ export enum IpcChannel {
|
||||
TRACE_CLEAN_LOCAL_DATA = 'trace:cleanLocalData',
|
||||
TRACE_ADD_STREAM_MESSAGE = 'trace:addStreamMessage',
|
||||
|
||||
// API Server
|
||||
ApiServer_Start = 'api-server:start',
|
||||
ApiServer_Stop = 'api-server:stop',
|
||||
ApiServer_Restart = 'api-server:restart',
|
||||
ApiServer_GetStatus = 'api-server:get-status',
|
||||
ApiServer_GetConfig = 'api-server:get-config',
|
||||
|
||||
// Anthropic OAuth
|
||||
Anthropic_StartOAuthFlow = 'anthropic:start-oauth-flow',
|
||||
Anthropic_CompleteOAuthWithCode = 'anthropic:complete-oauth-with-code',
|
||||
Anthropic_CancelOAuthFlow = 'anthropic:cancel-oauth-flow',
|
||||
Anthropic_GetAccessToken = 'anthropic:get-access-token',
|
||||
Anthropic_HasCredentials = 'anthropic:has-credentials',
|
||||
Anthropic_ClearCredentials = 'anthropic:clear-credentials',
|
||||
|
||||
// CodeTools
|
||||
CodeTools_Run = 'code-tools:run',
|
||||
CodeTools_GetAvailableTerminals = 'code-tools:get-available-terminals',
|
||||
CodeTools_SetCustomTerminalPath = 'code-tools:set-custom-terminal-path',
|
||||
CodeTools_GetCustomTerminalPath = 'code-tools:get-custom-terminal-path',
|
||||
CodeTools_RemoveCustomTerminalPath = 'code-tools:remove-custom-terminal-path',
|
||||
|
||||
// OCR
|
||||
OCR_ocr = 'ocr:ocr',
|
||||
|
||||
// Cherryin
|
||||
Cherryin_GetSignature = 'cherryin:get-signature'
|
||||
// OVMS
|
||||
Ovms_AddModel = 'ovms:add-model',
|
||||
Ovms_StopAddModel = 'ovms:stop-addmodel',
|
||||
Ovms_GetModels = 'ovms:get-models',
|
||||
Ovms_IsRunning = 'ovms:is-running',
|
||||
Ovms_GetStatus = 'ovms:get-status',
|
||||
Ovms_RunOVMS = 'ovms:run-ovms',
|
||||
Ovms_StopOVMS = 'ovms:stop-ovms',
|
||||
|
||||
// CherryAI
|
||||
Cherryai_GetSignature = 'cherryai:get-signature'
|
||||
}
|
||||
|
||||
@@ -216,5 +216,257 @@ export enum codeTools {
|
||||
qwenCode = 'qwen-code',
|
||||
claudeCode = 'claude-code',
|
||||
geminiCli = 'gemini-cli',
|
||||
openaiCodex = 'openai-codex'
|
||||
openaiCodex = 'openai-codex',
|
||||
iFlowCli = 'iflow-cli',
|
||||
githubCopilotCli = 'github-copilot-cli'
|
||||
}
|
||||
|
||||
export enum terminalApps {
|
||||
systemDefault = 'Terminal',
|
||||
iterm2 = 'iTerm2',
|
||||
kitty = 'kitty',
|
||||
alacritty = 'Alacritty',
|
||||
wezterm = 'WezTerm',
|
||||
ghostty = 'Ghostty',
|
||||
tabby = 'Tabby',
|
||||
// Windows terminals
|
||||
windowsTerminal = 'WindowsTerminal',
|
||||
powershell = 'PowerShell',
|
||||
cmd = 'CMD',
|
||||
wsl = 'WSL'
|
||||
}
|
||||
|
||||
export interface TerminalConfig {
|
||||
id: string
|
||||
name: string
|
||||
bundleId?: string
|
||||
customPath?: string // For user-configured terminal paths on Windows
|
||||
}
|
||||
|
||||
export interface TerminalConfigWithCommand extends TerminalConfig {
|
||||
command: (directory: string, fullCommand: string) => { command: string; args: string[] }
|
||||
}
|
||||
|
||||
export const MACOS_TERMINALS: TerminalConfig[] = [
|
||||
{
|
||||
id: terminalApps.systemDefault,
|
||||
name: 'Terminal',
|
||||
bundleId: 'com.apple.Terminal'
|
||||
},
|
||||
{
|
||||
id: terminalApps.iterm2,
|
||||
name: 'iTerm2',
|
||||
bundleId: 'com.googlecode.iterm2'
|
||||
},
|
||||
{
|
||||
id: terminalApps.kitty,
|
||||
name: 'kitty',
|
||||
bundleId: 'net.kovidgoyal.kitty'
|
||||
},
|
||||
{
|
||||
id: terminalApps.alacritty,
|
||||
name: 'Alacritty',
|
||||
bundleId: 'org.alacritty'
|
||||
},
|
||||
{
|
||||
id: terminalApps.wezterm,
|
||||
name: 'WezTerm',
|
||||
bundleId: 'com.github.wez.wezterm'
|
||||
},
|
||||
{
|
||||
id: terminalApps.ghostty,
|
||||
name: 'Ghostty',
|
||||
bundleId: 'com.mitchellh.ghostty'
|
||||
},
|
||||
{
|
||||
id: terminalApps.tabby,
|
||||
name: 'Tabby',
|
||||
bundleId: 'org.tabby'
|
||||
}
|
||||
]
|
||||
|
||||
export const WINDOWS_TERMINALS: TerminalConfig[] = [
|
||||
{
|
||||
id: terminalApps.cmd,
|
||||
name: 'Command Prompt'
|
||||
},
|
||||
{
|
||||
id: terminalApps.powershell,
|
||||
name: 'PowerShell'
|
||||
},
|
||||
{
|
||||
id: terminalApps.windowsTerminal,
|
||||
name: 'Windows Terminal'
|
||||
},
|
||||
{
|
||||
id: terminalApps.wsl,
|
||||
name: 'WSL (Ubuntu/Debian)'
|
||||
},
|
||||
{
|
||||
id: terminalApps.alacritty,
|
||||
name: 'Alacritty'
|
||||
},
|
||||
{
|
||||
id: terminalApps.wezterm,
|
||||
name: 'WezTerm'
|
||||
}
|
||||
]
|
||||
|
||||
export const WINDOWS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
|
||||
{
|
||||
id: terminalApps.cmd,
|
||||
name: 'Command Prompt',
|
||||
command: (_: string, fullCommand: string) => ({
|
||||
command: 'cmd',
|
||||
args: ['/c', 'start', 'cmd', '/k', fullCommand]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.powershell,
|
||||
name: 'PowerShell',
|
||||
command: (_: string, fullCommand: string) => ({
|
||||
command: 'cmd',
|
||||
args: ['/c', 'start', 'powershell', '-NoExit', '-Command', `& '${fullCommand}'`]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.windowsTerminal,
|
||||
name: 'Windows Terminal',
|
||||
command: (_: string, fullCommand: string) => ({
|
||||
command: 'wt',
|
||||
args: ['cmd', '/k', fullCommand]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.wsl,
|
||||
name: 'WSL (Ubuntu/Debian)',
|
||||
command: (_: string, fullCommand: string) => {
|
||||
// Start WSL in a new window and execute the batch file from within WSL using cmd.exe
|
||||
// The batch file will run in Windows context but output will be in WSL terminal
|
||||
return {
|
||||
command: 'cmd',
|
||||
args: ['/c', 'start', 'wsl', '-e', 'bash', '-c', `cmd.exe /c '${fullCommand}' ; exec bash`]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
id: terminalApps.alacritty,
|
||||
name: 'Alacritty',
|
||||
customPath: '', // Will be set by user in settings
|
||||
command: (_: string, fullCommand: string) => ({
|
||||
command: 'alacritty', // Will be replaced with customPath if set
|
||||
args: ['-e', 'cmd', '/k', fullCommand]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.wezterm,
|
||||
name: 'WezTerm',
|
||||
customPath: '', // Will be set by user in settings
|
||||
command: (_: string, fullCommand: string) => ({
|
||||
command: 'wezterm', // Will be replaced with customPath if set
|
||||
args: ['start', 'cmd', '/k', fullCommand]
|
||||
})
|
||||
}
|
||||
]
|
||||
|
||||
// Helper function to escape strings for AppleScript
|
||||
const escapeForAppleScript = (str: string): string => {
|
||||
// In AppleScript strings, backslashes and double quotes need to be escaped
|
||||
// When passed through osascript -e with single quotes, we need:
|
||||
// 1. Backslash: \ -> \\
|
||||
// 2. Double quote: " -> \"
|
||||
return str
|
||||
.replace(/\\/g, '\\\\') // Escape backslashes first
|
||||
.replace(/"/g, '\\"') // Then escape double quotes
|
||||
}
|
||||
|
||||
export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
|
||||
{
|
||||
id: terminalApps.systemDefault,
|
||||
name: 'Terminal',
|
||||
bundleId: 'com.apple.Terminal',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`open -na Terminal && sleep 0.5 && osascript -e 'tell application "Terminal" to activate' -e 'tell application "Terminal" to do script "${escapeForAppleScript(fullCommand)}" in front window'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.iterm2,
|
||||
name: 'iTerm2',
|
||||
bundleId: 'com.googlecode.iterm2',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`open -na iTerm && sleep 0.8 && osascript -e 'on waitUntilRunning()\n repeat 50 times\n tell application "System Events"\n if (exists process "iTerm2") then exit repeat\n end tell\n delay 0.1\n end repeat\nend waitUntilRunning\n\nwaitUntilRunning()\n\ntell application "iTerm2"\n if (count of windows) = 0 then\n create window with default profile\n delay 0.3\n else\n tell current window\n create tab with default profile\n end tell\n delay 0.3\n end if\n tell current session of current window to write text "${escapeForAppleScript(fullCommand)}"\n activate\nend tell'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.kitty,
|
||||
name: 'kitty',
|
||||
bundleId: 'net.kovidgoyal.kitty',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`cd "${_directory}" && open -na kitty --args --directory="${_directory}" sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "kitty" to activate'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.alacritty,
|
||||
name: 'Alacritty',
|
||||
bundleId: 'org.alacritty',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`open -na Alacritty --args --working-directory "${_directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Alacritty" to activate'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.wezterm,
|
||||
name: 'WezTerm',
|
||||
bundleId: 'com.github.wez.wezterm',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`open -na WezTerm --args start --new-tab --cwd "${_directory}" -- sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "WezTerm" to activate'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.ghostty,
|
||||
name: 'Ghostty',
|
||||
bundleId: 'com.mitchellh.ghostty',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`cd "${_directory}" && open -na Ghostty --args --working-directory="${_directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Ghostty" to activate'`
|
||||
]
|
||||
})
|
||||
},
|
||||
{
|
||||
id: terminalApps.tabby,
|
||||
name: 'Tabby',
|
||||
bundleId: 'org.tabby',
|
||||
command: (_directory: string, fullCommand: string) => ({
|
||||
command: 'sh',
|
||||
args: [
|
||||
'-c',
|
||||
`if pgrep -x "Tabby" > /dev/null; then
|
||||
open -na Tabby --args open && sleep 0.3
|
||||
else
|
||||
open -na Tabby --args open && sleep 2
|
||||
fi && osascript -e 'tell application "Tabby" to activate' -e 'set the clipboard to "${escapeForAppleScript(fullCommand)}"' -e 'tell application "System Events" to tell process "Tabby" to keystroke "v" using {command down}' -e 'tell application "System Events" to key code 36'`
|
||||
]
|
||||
})
|
||||
}
|
||||
]
|
||||
|
||||
@@ -17,3 +17,8 @@ export type FileChangeEvent = {
|
||||
filePath: string
|
||||
watchPath: string
|
||||
}
|
||||
|
||||
export type MCPProgressEvent = {
|
||||
callId: string
|
||||
progress: number // 0-1 range
|
||||
}
|
||||
|
||||
6
packages/shared/utils.ts
Normal file
6
packages/shared/utils.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
export const defaultAppHeaders = () => {
|
||||
return {
|
||||
'HTTP-Referer': 'https://cherry-ai.com',
|
||||
'X-Title': 'Cherry Studio'
|
||||
}
|
||||
}
|
||||
@@ -1,199 +1,274 @@
|
||||
<!DOCTYPE html>
|
||||
<!doctype html>
|
||||
<html lang="zh-CN">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>许可协议 | License Agreement</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
</head>
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>许可协议 | License Agreement</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
</head>
|
||||
<body class="bg-gray-50">
|
||||
<div class="mx-auto max-w-4xl px-4 py-8">
|
||||
<!-- 中文版本 -->
|
||||
<div class="mb-12">
|
||||
<h1 class="mb-8 text-3xl font-bold text-gray-900">许可协议</h1>
|
||||
|
||||
<body class="bg-gray-50">
|
||||
<div class="max-w-4xl mx-auto px-4 py-8">
|
||||
<!-- 中文版本 -->
|
||||
<div class="mb-12">
|
||||
<h1 class="text-3xl font-bold mb-8 text-gray-900">许可协议</h1>
|
||||
|
||||
<p class="mb-6 text-gray-700">本项目采用<strong>区分用户的双重许可 (User-Segmented Dual Licensing)</strong> 模式。</p>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">核心原则</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li><strong>个人用户 和 10人及以下企业/组织:</strong> 默认适用 <strong>GNU Affero 通用公共许可证 v3.0 (AGPLv3)</strong>。</li>
|
||||
<li><strong>超过10人的企业/组织:</strong> <strong>必须</strong> 获取 <strong>商业许可证 (Commercial License)</strong>。</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">定义:"10人及以下"</h2>
|
||||
<p class="text-gray-700">
|
||||
指在您的组织(包括公司、非营利组织、政府机构、教育机构等任何实体)中,能够访问、使用或以任何方式直接或间接受益于本软件(Cherry
|
||||
Studio)功能的个人总数不超过10人。这包括但不限于开发者、测试人员、运营人员、最终用户、通过集成系统间接使用者等。
|
||||
<p class="mb-6 text-gray-700">
|
||||
本项目采用<strong>区分用户的双重许可 (User-Segmented Dual Licensing)</strong> 模式。
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">1. 开源许可证 (Open Source License): AGPLv3 - 适用于个人及10人及以下组织
|
||||
</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>如果您是个人用户,或者您的组织满足上述"10人及以下"的定义,您可以在 <strong>AGPLv3</strong> 的条款下自由使用、修改和分发 Cherry Studio。AGPLv3 的完整文本可以访问
|
||||
<a href="https://www.gnu.org/licenses/agpl-3.0.html"
|
||||
class="text-blue-600 hover:underline">https://www.gnu.org/licenses/agpl-3.0.html</a> 获取。
|
||||
</li>
|
||||
<li><strong>核心义务:</strong> AGPLv3 的一个关键要求是,如果您修改了 Cherry Studio 并通过网络提供服务,或者分发了修改后的版本,您必须以 AGPLv3
|
||||
许可证向接收者提供相应的<strong>完整源代码</strong>。即使您符合"10人及以下"的标准,如果您希望避免此源代码公开义务,您也需要考虑获取商业许可证(见下文)。</li>
|
||||
<li>使用前请务必仔细阅读并理解 AGPLv3 的所有条款。</li>
|
||||
</ul>
|
||||
</section>
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">核心原则</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
<strong>个人用户 和 10人及以下企业/组织:</strong> 默认适用
|
||||
<strong>GNU Affero 通用公共许可证 v3.0 (AGPLv3)</strong>。
|
||||
</li>
|
||||
<li>
|
||||
<strong>超过10人的企业/组织:</strong> <strong>必须</strong> 获取
|
||||
<strong>商业许可证 (Commercial License)</strong>。
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">2. 商业许可证 (Commercial License) - 适用于超过10人的组织,或希望规避 AGPLv3
|
||||
义务的用户</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li><strong>强制要求:</strong>
|
||||
如果您的组织<strong>不</strong>满足上述"10人及以下"的定义(即有11人或更多人可以访问、使用或受益于本软件),您<strong>必须</strong>联系我们获取并签署一份商业许可证才能使用
|
||||
Cherry Studio。</li>
|
||||
<li><strong>自愿选择:</strong> 即使您的组织满足"10人及以下"的条件,但如果您的使用场景<strong>无法满足 AGPLv3
|
||||
的条款要求</strong>(特别是关于<strong>源代码公开</strong>的义务),或者您需要 AGPLv3 <strong>未提供</strong>的特定商业条款(如保证、赔偿、无 Copyleft
|
||||
限制等),您也<strong>必须</strong>联系我们获取并签署一份商业许可证。</li>
|
||||
<li><strong>需要商业许可证的常见情况包括(但不限于):</strong>
|
||||
<ul class="list-disc pl-6 mt-2 space-y-1">
|
||||
<li>您的组织规模超过10人。</li>
|
||||
<li>(无论组织规模)您希望分发修改过的 Cherry Studio 版本,但<strong>不希望</strong>根据 AGPLv3 公开您修改部分的源代码。</li>
|
||||
<li>(无论组织规模)您希望基于修改过的 Cherry Studio 提供网络服务(SaaS),但<strong>不希望</strong>根据 AGPLv3 向服务使用者提供修改后的源代码。</li>
|
||||
<li>(无论组织规模)您的公司政策、客户合同或项目要求不允许使用 AGPLv3 许可的软件,或要求闭源分发及保密。</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li><strong>获取商业许可:</strong> 请通过邮箱 <a href="mailto:bd@cherry-ai.com"
|
||||
class="text-blue-600 hover:underline">bd@cherry-ai.com</a> 联系 Cherry Studio 开发团队洽谈商业授权事宜。</li>
|
||||
</ul>
|
||||
</section>
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">定义:"10人及以下"</h2>
|
||||
<p class="text-gray-700">
|
||||
指在您的组织(包括公司、非营利组织、政府机构、教育机构等任何实体)中,能够访问、使用或以任何方式直接或间接受益于本软件(Cherry
|
||||
Studio)功能的个人总数不超过10人。这包括但不限于开发者、测试人员、运营人员、最终用户、通过集成系统间接使用者等。
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">3. 贡献 (Contributions)</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>我们欢迎社区对 Cherry Studio 的贡献。所有向本项目提交的贡献都将被视为在 <strong>AGPLv3</strong> 许可证下提供。</li>
|
||||
<li>通过向本项目提交贡献(例如通过 Pull Request),即表示您同意您的代码以 AGPLv3 许可证授权给本项目及所有后续使用者(无论这些使用者最终遵循 AGPLv3 还是商业许可)。</li>
|
||||
<li>您也理解并同意,您的贡献可能会被包含在根据商业许可证分发的 Cherry Studio 版本中。</li>
|
||||
</ul>
|
||||
</section>
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">
|
||||
1. 开源许可证 (Open Source License): AGPLv3 - 适用于个人及10人及以下组织
|
||||
</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
如果您是个人用户,或者您的组织满足上述"10人及以下"的定义,您可以在
|
||||
<strong>AGPLv3</strong> 的条款下自由使用、修改和分发 Cherry Studio。AGPLv3 的完整文本可以访问
|
||||
<a href="https://www.gnu.org/licenses/agpl-3.0.html" class="text-blue-600 hover:underline"
|
||||
>https://www.gnu.org/licenses/agpl-3.0.html</a
|
||||
>
|
||||
获取。
|
||||
</li>
|
||||
<li>
|
||||
<strong>核心义务:</strong> AGPLv3 的一个关键要求是,如果您修改了 Cherry Studio
|
||||
并通过网络提供服务,或者分发了修改后的版本,您必须以 AGPLv3
|
||||
许可证向接收者提供相应的<strong>完整源代码</strong>。即使您符合"10人及以下"的标准,如果您希望避免此源代码公开义务,您也需要考虑获取商业许可证(见下文)。
|
||||
</li>
|
||||
<li>使用前请务必仔细阅读并理解 AGPLv3 的所有条款。</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">4. 其他条款 (Other Terms)</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>关于商业许可证的具体条款和条件,以双方签署的正式商业许可协议为准。</li>
|
||||
<li>项目维护者保留根据需要更新本许可政策(包括用户规模定义和阈值)的权利。相关更新将通过项目官方渠道(如代码仓库、官方网站)进行通知。</li>
|
||||
</ul>
|
||||
</section>
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">
|
||||
2. 商业许可证 (Commercial License) - 适用于超过10人的组织,或希望规避 AGPLv3 义务的用户
|
||||
</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
<strong>强制要求:</strong>
|
||||
如果您的组织<strong>不</strong>满足上述"10人及以下"的定义(即有11人或更多人可以访问、使用或受益于本软件),您<strong>必须</strong>联系我们获取并签署一份商业许可证才能使用
|
||||
Cherry Studio。
|
||||
</li>
|
||||
<li>
|
||||
<strong>自愿选择:</strong> 即使您的组织满足"10人及以下"的条件,但如果您的使用场景<strong
|
||||
>无法满足 AGPLv3 的条款要求</strong
|
||||
>(特别是关于<strong>源代码公开</strong>的义务),或者您需要 AGPLv3
|
||||
<strong>未提供</strong>的特定商业条款(如保证、赔偿、无 Copyleft
|
||||
限制等),您也<strong>必须</strong>联系我们获取并签署一份商业许可证。
|
||||
</li>
|
||||
<li>
|
||||
<strong>需要商业许可证的常见情况包括(但不限于):</strong>
|
||||
<ul class="mt-2 list-disc space-y-1 pl-6">
|
||||
<li>您的组织规模超过10人。</li>
|
||||
<li>
|
||||
(无论组织规模)您希望分发修改过的 Cherry Studio 版本,但<strong>不希望</strong>根据 AGPLv3
|
||||
公开您修改部分的源代码。
|
||||
</li>
|
||||
<li>
|
||||
(无论组织规模)您希望基于修改过的 Cherry Studio 提供网络服务(SaaS),但<strong>不希望</strong>根据
|
||||
AGPLv3 向服务使用者提供修改后的源代码。
|
||||
</li>
|
||||
<li>
|
||||
(无论组织规模)您的公司政策、客户合同或项目要求不允许使用 AGPLv3 许可的软件,或要求闭源分发及保密。
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<strong>获取商业许可:</strong> 请通过邮箱
|
||||
<a href="mailto:bd@cherry-ai.com" class="text-blue-600 hover:underline">bd@cherry-ai.com</a> 联系 Cherry
|
||||
Studio 开发团队洽谈商业授权事宜。
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">3. 贡献 (Contributions)</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
我们欢迎社区对 Cherry Studio 的贡献。所有向本项目提交的贡献都将被视为在
|
||||
<strong>AGPLv3</strong> 许可证下提供。
|
||||
</li>
|
||||
<li>
|
||||
通过向本项目提交贡献(例如通过 Pull Request),即表示您同意您的代码以 AGPLv3
|
||||
许可证授权给本项目及所有后续使用者(无论这些使用者最终遵循 AGPLv3 还是商业许可)。
|
||||
</li>
|
||||
<li>您也理解并同意,您的贡献可能会被包含在根据商业许可证分发的 Cherry Studio 版本中。</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">4. 其他条款 (Other Terms)</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>关于商业许可证的具体条款和条件,以双方签署的正式商业许可协议为准。</li>
|
||||
<li>
|
||||
项目维护者保留根据需要更新本许可政策(包括用户规模定义和阈值)的权利。相关更新将通过项目官方渠道(如代码仓库、官方网站)进行通知。
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
</div>
|
||||
|
||||
<hr class="my-12 border-gray-300" />
|
||||
|
||||
<!-- English Version -->
|
||||
<div>
|
||||
<h1 class="mb-8 text-3xl font-bold text-gray-900">Licensing</h1>
|
||||
|
||||
<p class="mb-6 text-gray-700">This project employs a <strong>User-Segmented Dual Licensing</strong> model.</p>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">Core Principle</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
<strong>Individual Users and Organizations with 10 or Fewer Individuals:</strong> Governed by default
|
||||
under the <strong>GNU Affero General Public License v3.0 (AGPLv3)</strong>.
|
||||
</li>
|
||||
<li>
|
||||
<strong>Organizations with More Than 10 Individuals:</strong> <strong>Must</strong> obtain a
|
||||
<strong>Commercial License</strong>.
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">Definition: "10 or Fewer Individuals"</h2>
|
||||
<p class="text-gray-700">
|
||||
Refers to any organization (including companies, non-profits, government agencies, educational institutions,
|
||||
etc.) where the total number of individuals who can access, use, or in any way directly or indirectly
|
||||
benefit from the functionality of this software (Cherry Studio) does not exceed 10. This includes, but is
|
||||
not limited to, developers, testers, operations staff, end-users, and indirect users via integrated systems.
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">
|
||||
1. Open Source License: AGPLv3 - For Individuals and Organizations of 10 or Fewer
|
||||
</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
If you are an individual user, or if your organization meets the "10 or Fewer Individuals" definition
|
||||
above, you are free to use, modify, and distribute Cherry Studio under the terms of the
|
||||
<strong>AGPLv3</strong>. The full text of the AGPLv3 can be found at
|
||||
<a href="https://www.gnu.org/licenses/agpl-3.0.html" class="text-blue-600 hover:underline"
|
||||
>https://www.gnu.org/licenses/agpl-3.0.html</a
|
||||
>.
|
||||
</li>
|
||||
<li>
|
||||
<strong>Core Obligation:</strong> A key requirement of the AGPLv3 is that if you modify Cherry Studio and
|
||||
make it available over a network, or distribute the modified version, you must provide the
|
||||
<strong>complete corresponding source code</strong> under the AGPLv3 license to the recipients. Even if
|
||||
you qualify under the "10 or Fewer Individuals" rule, if you wish to avoid this source code disclosure
|
||||
obligation, you will need to obtain a Commercial License (see below).
|
||||
</li>
|
||||
<li>Please read and understand the full terms of the AGPLv3 carefully before use.</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">
|
||||
2. Commercial License - For Organizations with More Than 10 Individuals, or Users Needing to Avoid AGPLv3
|
||||
Obligations
|
||||
</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
<strong>Mandatory Requirement:</strong> If your organization does <strong>not</strong> meet the "10 or
|
||||
Fewer Individuals" definition above (i.e., 11 or more individuals can access, use, or benefit from the
|
||||
software), you <strong>must</strong> contact us to obtain and execute a Commercial License to use Cherry
|
||||
Studio.
|
||||
</li>
|
||||
<li>
|
||||
<strong>Voluntary Option:</strong> Even if your organization meets the "10 or Fewer Individuals"
|
||||
condition, if your intended use case
|
||||
<strong>cannot comply with the terms of the AGPLv3</strong> (particularly the obligations regarding
|
||||
<strong>source code disclosure</strong>), or if you require specific commercial terms
|
||||
<strong>not offered</strong> by the AGPLv3 (such as warranties, indemnities, or freedom from copyleft
|
||||
restrictions), you also <strong>must</strong> contact us to obtain and execute a Commercial License.
|
||||
</li>
|
||||
<li>
|
||||
<strong>Common scenarios requiring a Commercial License include (but are not limited to):</strong>
|
||||
<ul class="mt-2 list-disc space-y-1 pl-6">
|
||||
<li>
|
||||
Your organization has more than 10 individuals who can access, use, or benefit from the software.
|
||||
</li>
|
||||
<li>
|
||||
(Regardless of organization size) You wish to distribute a modified version of Cherry Studio but
|
||||
<strong>do not want</strong> to disclose the source code of your modifications under AGPLv3.
|
||||
</li>
|
||||
<li>
|
||||
(Regardless of organization size) You wish to provide a network service (SaaS) based on a modified
|
||||
version of Cherry Studio but <strong>do not want</strong> to provide the modified source code to users
|
||||
of the service under AGPLv3.
|
||||
</li>
|
||||
<li>
|
||||
(Regardless of organization size) Your corporate policies, client contracts, or project requirements
|
||||
prohibit the use of AGPLv3-licensed software or mandate closed-source distribution and
|
||||
confidentiality.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<strong>Obtaining a Commercial License:</strong> Please contact the Cherry Studio development team via
|
||||
email at <a href="mailto:bd@cherry-ai.com" class="text-blue-600 hover:underline">bd@cherry-ai.com</a> to
|
||||
discuss commercial licensing options.
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">3. Contributions</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
We welcome community contributions to Cherry Studio. All contributions submitted to this project are
|
||||
considered to be offered under the <strong>AGPLv3</strong> license.
|
||||
</li>
|
||||
<li>
|
||||
By submitting a contribution to this project (e.g., via a Pull Request), you agree to license your code
|
||||
under the AGPLv3 to the project and all its downstream users (regardless of whether those users ultimately
|
||||
operate under AGPLv3 or a Commercial License).
|
||||
</li>
|
||||
<li>
|
||||
You also understand and agree that your contribution may be included in distributions of Cherry Studio
|
||||
offered under our commercial license.
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="mb-4 text-xl font-semibold text-gray-900">4. Other Terms</h2>
|
||||
<ul class="list-disc space-y-2 pl-6 text-gray-700">
|
||||
<li>
|
||||
The specific terms and conditions of the Commercial License are governed by the formal commercial license
|
||||
agreement signed by both parties.
|
||||
</li>
|
||||
<li>
|
||||
The project maintainers reserve the right to update this licensing policy (including the definition and
|
||||
threshold for user count) as needed. Updates will be communicated through official project channels (e.g.,
|
||||
code repository, official website).
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<hr class="my-12 border-gray-300">
|
||||
|
||||
<!-- English Version -->
|
||||
<div>
|
||||
<h1 class="text-3xl font-bold mb-8 text-gray-900">Licensing</h1>
|
||||
|
||||
<p class="mb-6 text-gray-700">This project employs a <strong>User-Segmented Dual Licensing</strong> model.</p>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">Core Principle</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li><strong>Individual Users and Organizations with 10 or Fewer Individuals:</strong> Governed by default
|
||||
under the <strong>GNU Affero General Public License v3.0 (AGPLv3)</strong>.</li>
|
||||
<li><strong>Organizations with More Than 10 Individuals:</strong> <strong>Must</strong> obtain a
|
||||
<strong>Commercial License</strong>.
|
||||
</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">Definition: "10 or Fewer Individuals"</h2>
|
||||
<p class="text-gray-700">
|
||||
Refers to any organization (including companies, non-profits, government agencies, educational institutions,
|
||||
etc.) where the total number of individuals who can access, use, or in any way directly or indirectly benefit
|
||||
from the functionality of this software (Cherry Studio) does not exceed 10. This includes, but is not limited
|
||||
to, developers, testers, operations staff, end-users, and indirect users via integrated systems.
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">1. Open Source License: AGPLv3 - For Individuals and
|
||||
Organizations of 10 or Fewer</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>If you are an individual user, or if your organization meets the "10 or Fewer Individuals" definition
|
||||
above, you are free to use, modify, and distribute Cherry Studio under the terms of the
|
||||
<strong>AGPLv3</strong>. The full text of the AGPLv3 can be found at <a
|
||||
href="https://www.gnu.org/licenses/agpl-3.0.html"
|
||||
class="text-blue-600 hover:underline">https://www.gnu.org/licenses/agpl-3.0.html</a>.
|
||||
</li>
|
||||
<li><strong>Core Obligation:</strong> A key requirement of the AGPLv3 is that if you modify Cherry Studio and
|
||||
make it available over a network, or distribute the modified version, you must provide the <strong>complete
|
||||
corresponding source code</strong> under the AGPLv3 license to the recipients. Even if you qualify under
|
||||
the "10 or Fewer Individuals" rule, if you wish to avoid this source code disclosure obligation, you will
|
||||
need to obtain a Commercial License (see below).</li>
|
||||
<li>Please read and understand the full terms of the AGPLv3 carefully before use.</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">2. Commercial License - For Organizations with More Than 10
|
||||
Individuals, or Users Needing to Avoid AGPLv3 Obligations</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li><strong>Mandatory Requirement:</strong> If your organization does <strong>not</strong> meet the "10 or
|
||||
Fewer Individuals" definition above (i.e., 11 or more individuals can access, use, or benefit from the
|
||||
software), you <strong>must</strong> contact us to obtain and execute a Commercial License to use Cherry
|
||||
Studio.</li>
|
||||
<li><strong>Voluntary Option:</strong> Even if your organization meets the "10 or Fewer Individuals"
|
||||
condition, if your intended use case <strong>cannot comply with the terms of the AGPLv3</strong>
|
||||
(particularly the obligations regarding <strong>source code disclosure</strong>), or if you require specific
|
||||
commercial terms <strong>not offered</strong> by the AGPLv3 (such as warranties, indemnities, or freedom
|
||||
from copyleft restrictions), you also <strong>must</strong> contact us to obtain and execute a Commercial
|
||||
License.</li>
|
||||
<li><strong>Common scenarios requiring a Commercial License include (but are not limited to):</strong>
|
||||
<ul class="list-disc pl-6 mt-2 space-y-1">
|
||||
<li>Your organization has more than 10 individuals who can access, use, or benefit from the software.</li>
|
||||
<li>(Regardless of organization size) You wish to distribute a modified version of Cherry Studio but
|
||||
<strong>do not want</strong> to disclose the source code of your modifications under AGPLv3.
|
||||
</li>
|
||||
<li>(Regardless of organization size) You wish to provide a network service (SaaS) based on a modified
|
||||
version of Cherry Studio but <strong>do not want</strong> to provide the modified source code to users
|
||||
of the service under AGPLv3.</li>
|
||||
<li>(Regardless of organization size) Your corporate policies, client contracts, or project requirements
|
||||
prohibit the use of AGPLv3-licensed software or mandate closed-source distribution and confidentiality.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li><strong>Obtaining a Commercial License:</strong> Please contact the Cherry Studio development team via
|
||||
email at <a href="mailto:bd@cherry-ai.com" class="text-blue-600 hover:underline">bd@cherry-ai.com</a> to
|
||||
discuss commercial licensing options.</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">3. Contributions</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>We welcome community contributions to Cherry Studio. All contributions submitted to this project are
|
||||
considered to be offered under the <strong>AGPLv3</strong> license.</li>
|
||||
<li>By submitting a contribution to this project (e.g., via a Pull Request), you agree to license your code
|
||||
under the AGPLv3 to the project and all its downstream users (regardless of whether those users ultimately
|
||||
operate under AGPLv3 or a Commercial License).</li>
|
||||
<li>You also understand and agree that your contribution may be included in distributions of Cherry Studio
|
||||
offered under our commercial license.</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
<section class="mb-8">
|
||||
<h2 class="text-xl font-semibold mb-4 text-gray-900">4. Other Terms</h2>
|
||||
<ul class="list-disc pl-6 space-y-2 text-gray-700">
|
||||
<li>The specific terms and conditions of the Commercial License are governed by the formal commercial license
|
||||
agreement signed by both parties.</li>
|
||||
<li>The project maintainers reserve the right to update this licensing policy (including the definition and
|
||||
threshold for user count) as needed. Updates will be communicated through official project channels (e.g.,
|
||||
code repository, official website).</li>
|
||||
</ul>
|
||||
</section>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
252
resources/cherry-studio/privacy-en.html
Normal file
252
resources/cherry-studio/privacy-en.html
Normal file
@@ -0,0 +1,252 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Privacy Policy</title>
|
||||
<style>
|
||||
* {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Helvetica Neue', Arial, sans-serif;
|
||||
line-height: 1.6;
|
||||
color: #333;
|
||||
background: transparent;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
body.dark {
|
||||
background: transparent;
|
||||
color: rgba(255, 255, 255, 0.85);
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 24px;
|
||||
font-weight: 600;
|
||||
margin-bottom: 20px;
|
||||
color: #1a1a1a;
|
||||
}
|
||||
|
||||
body.dark h1 {
|
||||
color: rgba(255, 255, 255, 0.95);
|
||||
}
|
||||
|
||||
h2 {
|
||||
font-size: 18px;
|
||||
font-weight: 600;
|
||||
margin-top: 24px;
|
||||
margin-bottom: 12px;
|
||||
color: #2c2c2c;
|
||||
}
|
||||
|
||||
body.dark h2 {
|
||||
color: rgba(255, 255, 255, 0.9);
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 12px 0;
|
||||
line-height: 1.8;
|
||||
}
|
||||
|
||||
body.dark p {
|
||||
color: rgba(255, 255, 255, 0.8);
|
||||
}
|
||||
|
||||
ul {
|
||||
margin: 12px 0;
|
||||
padding-left: 24px;
|
||||
}
|
||||
|
||||
li {
|
||||
margin: 6px 0;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
body.dark li {
|
||||
color: rgba(255, 255, 255, 0.75);
|
||||
}
|
||||
|
||||
a {
|
||||
color: #0066cc;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
body.dark a {
|
||||
color: #4da6ff;
|
||||
}
|
||||
|
||||
.footer {
|
||||
margin-top: 40px;
|
||||
padding-top: 20px;
|
||||
border-top: 1px solid #e0e0e0;
|
||||
font-size: 13px;
|
||||
color: #666;
|
||||
}
|
||||
|
||||
body.dark .footer {
|
||||
border-top-color: rgba(255, 255, 255, 0.1);
|
||||
color: rgba(255, 255, 255, 0.5);
|
||||
}
|
||||
|
||||
.content-wrapper {
|
||||
max-height: calc(100vh - 40px);
|
||||
overflow-y: auto;
|
||||
padding-right: 10px;
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
/* Scrollbar styles - Light mode */
|
||||
::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: rgba(0, 0, 0, 0.05);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: rgba(0, 0, 0, 0.2);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
/* Scrollbar styles - Dark mode */
|
||||
body.dark ::-webkit-scrollbar-track {
|
||||
background: rgba(255, 255, 255, 0.05);
|
||||
}
|
||||
|
||||
body.dark ::-webkit-scrollbar-thumb {
|
||||
background: rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
body.dark ::-webkit-scrollbar-thumb:hover {
|
||||
background: rgba(255, 255, 255, 0.3);
|
||||
}
|
||||
</style>
|
||||
<script>
|
||||
// Detect theme
|
||||
document.addEventListener('DOMContentLoaded', function () {
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
const theme = urlParams.get('theme');
|
||||
if (theme === 'dark') {
|
||||
document.documentElement.classList.add('dark');
|
||||
document.body.classList.add('dark');
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="content-wrapper">
|
||||
<h1>Privacy Policy</h1>
|
||||
|
||||
<p>
|
||||
Welcome to Cherry Studio (hereinafter referred to as "the Software" or "we"). We highly value your privacy
|
||||
protection. This Privacy Policy explains how we process and protect your personal information and data.
|
||||
Please read and understand this policy carefully before using the Software:
|
||||
</p>
|
||||
|
||||
<h2>1. Information We Collect</h2>
|
||||
<p>To optimize user experience and improve software quality, we may only collect the following anonymous,
|
||||
non-personal information:</p>
|
||||
<ul>
|
||||
<li>Software version information</li>
|
||||
<li>Activity and usage frequency of software features</li>
|
||||
<li>Anonymous crash and error log information</li>
|
||||
</ul>
|
||||
<p>The above information is completely anonymous, does not involve any personal identity data, and cannot be
|
||||
linked to your personal information.</p>
|
||||
|
||||
<h2>2. Information We Do Not Collect</h2>
|
||||
<p>To maximize the protection of your privacy and security, we explicitly commit that we:</p>
|
||||
<ul>
|
||||
<li>Will not collect, save, transmit, or process model service API Key information you enter into the
|
||||
Software</li>
|
||||
<li>Will not collect, save, transmit, or process any conversation data generated during your use of the
|
||||
Software, including but not limited to chat content, instruction information, knowledge base
|
||||
information, vector data, and other custom content</li>
|
||||
<li>Will not collect, save, transmit, or process any sensitive information that can identify personal
|
||||
identity</li>
|
||||
</ul>
|
||||
|
||||
<h2>3. Data Interaction Description</h2>
|
||||
<p>
|
||||
The Software uses API Keys from third-party model service providers that you apply for and configure
|
||||
yourself to complete model calls and conversation functions. The model services you use (such as large
|
||||
models, API interfaces, etc.) are directly provided by third-party providers of your choice. We do not
|
||||
intervene, monitor, or interfere with the data transmission process.
|
||||
</p>
|
||||
<p>
|
||||
Data interactions between you and third-party model services are governed by the privacy policies and user
|
||||
agreements of third-party service providers. We recommend that you fully understand the privacy terms of
|
||||
relevant service providers before use.
|
||||
</p>
|
||||
|
||||
<h2>4. Local Data Security Protection</h2>
|
||||
<p>The Software is a localized application, and all data is stored on your local device by default. We have
|
||||
taken the following measures to ensure data security:</p>
|
||||
<ul>
|
||||
<li>Conversation records, configuration information, and other data are only saved on your local device</li>
|
||||
<li>Data import/export functions are provided to facilitate your independent management and backup of data
|
||||
</li>
|
||||
<li>Your local data will not be uploaded to any server or cloud storage</li>
|
||||
</ul>
|
||||
|
||||
<h2>5. Third-Party Services</h2>
|
||||
<p>
|
||||
When using the Software, you may access third-party services (such as AI model APIs, translation services,
|
||||
etc.). The use of these third-party services is governed by their respective terms of service and privacy
|
||||
policies. We strongly recommend that you carefully read and understand the relevant terms before use.
|
||||
</p>
|
||||
|
||||
<h2>6. User Rights</h2>
|
||||
<p>You have complete control over your data:</p>
|
||||
<ul>
|
||||
<li>You can view, modify, and delete all locally stored data at any time</li>
|
||||
<li>You can choose whether to enable specific features or services</li>
|
||||
<li>You can stop using the Software and delete all related data at any time</li>
|
||||
</ul>
|
||||
|
||||
<h2>7. Children's Privacy Protection</h2>
|
||||
<p>The Software is not intended for minors under 18 years of age. If you are a minor, please use the Software
|
||||
under the guidance of a guardian.</p>
|
||||
|
||||
<h2>8. Privacy Policy Updates</h2>
|
||||
<p>
|
||||
We may update this Privacy Policy based on legal requirements or changes in product features. The updated
|
||||
policy will be published in the Software and you will be notified before it takes effect. If you do not
|
||||
agree with the updated terms, you can choose to stop using the Software.
|
||||
</p>
|
||||
|
||||
<h2>9. Contact Us</h2>
|
||||
<p>If you have any questions, suggestions, or complaints about this Privacy Policy, please contact us through
|
||||
the following methods:</p>
|
||||
<ul>
|
||||
<li>
|
||||
GitHub: <a href="https://github.com/CherryHQ/cherry-studio" target="_blank"
|
||||
rel="noopener noreferrer">https://github.com/CherryHQ/cherry-studio</a>
|
||||
</li>
|
||||
<li>Email: support@cherry-ai.com</li>
|
||||
</ul>
|
||||
|
||||
<div class="footer">
|
||||
Last Updated: December 2024
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
230
resources/cherry-studio/privacy-zh.html
Normal file
230
resources/cherry-studio/privacy-zh.html
Normal file
@@ -0,0 +1,230 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="zh">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>隐私协议</title>
|
||||
<style>
|
||||
* {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Helvetica Neue', Arial, sans-serif;
|
||||
line-height: 1.6;
|
||||
color: #333;
|
||||
background: transparent;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
body.dark {
|
||||
background: transparent;
|
||||
color: rgba(255, 255, 255, 0.85);
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 24px;
|
||||
font-weight: 600;
|
||||
margin-bottom: 20px;
|
||||
color: #1a1a1a;
|
||||
}
|
||||
|
||||
body.dark h1 {
|
||||
color: rgba(255, 255, 255, 0.95);
|
||||
}
|
||||
|
||||
h2 {
|
||||
font-size: 18px;
|
||||
font-weight: 600;
|
||||
margin-top: 24px;
|
||||
margin-bottom: 12px;
|
||||
color: #2c2c2c;
|
||||
}
|
||||
|
||||
body.dark h2 {
|
||||
color: rgba(255, 255, 255, 0.9);
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 12px 0;
|
||||
line-height: 1.8;
|
||||
}
|
||||
|
||||
body.dark p {
|
||||
color: rgba(255, 255, 255, 0.8);
|
||||
}
|
||||
|
||||
ul {
|
||||
margin: 12px 0;
|
||||
padding-left: 24px;
|
||||
}
|
||||
|
||||
li {
|
||||
margin: 6px 0;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
body.dark li {
|
||||
color: rgba(255, 255, 255, 0.75);
|
||||
}
|
||||
|
||||
a {
|
||||
color: #0066cc;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
body.dark a {
|
||||
color: #4da6ff;
|
||||
}
|
||||
|
||||
.footer {
|
||||
margin-top: 40px;
|
||||
padding-top: 20px;
|
||||
border-top: 1px solid #e0e0e0;
|
||||
font-size: 13px;
|
||||
color: #666;
|
||||
}
|
||||
|
||||
body.dark .footer {
|
||||
border-top-color: rgba(255, 255, 255, 0.1);
|
||||
color: rgba(255, 255, 255, 0.5);
|
||||
}
|
||||
|
||||
.content-wrapper {
|
||||
overflow-y: auto;
|
||||
padding-right: 10px;
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
/* 滚动条样式 - 亮色模式 */
|
||||
::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: rgba(0, 0, 0, 0.05);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: rgba(0, 0, 0, 0.2);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
/* 滚动条样式 - 暗色模式 */
|
||||
body.dark ::-webkit-scrollbar-track {
|
||||
background: rgba(255, 255, 255, 0.05);
|
||||
}
|
||||
|
||||
body.dark ::-webkit-scrollbar-thumb {
|
||||
background: rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
body.dark ::-webkit-scrollbar-thumb:hover {
|
||||
background: rgba(255, 255, 255, 0.3);
|
||||
}
|
||||
</style>
|
||||
<script>
|
||||
// 检测主题
|
||||
document.addEventListener('DOMContentLoaded', function () {
|
||||
const urlParams = new URLSearchParams(window.location.search);
|
||||
const theme = urlParams.get('theme');
|
||||
if (theme === 'dark') {
|
||||
document.documentElement.classList.add('dark');
|
||||
document.body.classList.add('dark');
|
||||
}
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="content-wrapper">
|
||||
<h1>隐私协议</h1>
|
||||
|
||||
<p>
|
||||
欢迎使用 Cherry Studio(以下简称"本软件"或"我们")。我们高度重视您的隐私保护,本隐私协议将说明我们如何处理与保护您的个人信息和数据。请在使用本软件前仔细阅读并理解本协议:
|
||||
</p>
|
||||
|
||||
<h2>一、我们收集的信息范围</h2>
|
||||
<p>为了优化用户体验和提升软件质量,我们仅可能会匿名收集以下非个人化信息:</p>
|
||||
<ul>
|
||||
<li>软件版本信息;</li>
|
||||
<li>软件功能的活跃度、使用频次;</li>
|
||||
<li>匿名的崩溃、错误日志信息;</li>
|
||||
</ul>
|
||||
<p>上述信息完全匿名,不会涉及任何个人身份数据,也无法关联到您的个人信息。</p>
|
||||
|
||||
<h2>二、我们不会收集的任何信息</h2>
|
||||
<p>为了最大限度保护您的隐私安全,我们明确承诺:</p>
|
||||
<ul>
|
||||
<li>不会收集、保存、传输或处理您输入到本软件中的模型服务 API Key 信息;</li>
|
||||
<li>不会收集、保存、传输或处理您在使用本软件过程中产生的任何对话数据,包括但不限于聊天内容、指令信息、知识库信息、向量数据及其他自定义内容;</li>
|
||||
<li>不会收集、保存、传输或处理任何可识别个人身份的敏感信息。</li>
|
||||
</ul>
|
||||
|
||||
<h2>三、数据交互说明</h2>
|
||||
<p>
|
||||
本软件采用您自行申请并配置的第三方模型服务提供商的 API Key,以完成相关模型的调用与对话功能。您使用的模型服务(例如大模型、API 接口等)由您选择的第三方提供商直接提供,我们不会介入、监控或干扰数据传输过程。
|
||||
</p>
|
||||
<p>
|
||||
您与第三方模型服务之间的数据交互受第三方服务提供商的隐私政策和用户协议约束,我们建议您在使用前充分了解相关服务商的隐私条款。
|
||||
</p>
|
||||
|
||||
<h2>四、本地数据的安全保护</h2>
|
||||
<p>本软件为本地化应用程序,所有数据默认存储在您的本地设备上。我们采取了以下措施保障数据安全:</p>
|
||||
<ul>
|
||||
<li>对话记录、配置信息等数据仅保存在您的本地设备中;</li>
|
||||
<li>提供数据导入/导出功能,方便您自主管理和备份数据;</li>
|
||||
<li>不会将您的本地数据上传至任何服务器或云端存储。</li>
|
||||
</ul>
|
||||
|
||||
<h2>五、第三方服务</h2>
|
||||
<p>
|
||||
在使用本软件过程中,您可能会接入第三方服务(如 AI 模型 API、翻译服务等)。这些第三方服务的使用受其各自的服务条款和隐私政策约束。我们强烈建议您在使用前仔细阅读并理解相关条款。
|
||||
</p>
|
||||
|
||||
<h2>六、用户权利</h2>
|
||||
<p>您对自己的数据拥有完全的控制权:</p>
|
||||
<ul>
|
||||
<li>您可以随时查看、修改、删除本地存储的所有数据;</li>
|
||||
<li>您可以选择是否启用特定功能或服务;</li>
|
||||
<li>您可以随时停止使用本软件并删除所有相关数据。</li>
|
||||
</ul>
|
||||
|
||||
<h2>七、儿童隐私保护</h2>
|
||||
<p>本软件不面向 18 岁以下的未成年人提供服务。如果您是未成年人,请在监护人的指导下使用本软件。</p>
|
||||
|
||||
<h2>八、隐私政策的更新</h2>
|
||||
<p>
|
||||
我们可能会根据法律法规要求或产品功能的变化更新本隐私协议。更新后的协议将在软件中发布,并在生效前通知您。如果您不同意更新后的条款,您可以选择停止使用本软件。
|
||||
</p>
|
||||
|
||||
<h2>九、联系我们</h2>
|
||||
<p>如果您对本隐私协议有任何疑问、建议或投诉,请通过以下方式联系我们:</p>
|
||||
<ul>
|
||||
<li>
|
||||
GitHub: <a href="https://github.com/CherryHQ/cherry-studio" target="_blank"
|
||||
rel="noopener noreferrer">https://github.com/CherryHQ/cherry-studio</a>
|
||||
</li>
|
||||
<li>Email: support@cherry-ai.com</li>
|
||||
</ul>
|
||||
|
||||
<div class="footer">
|
||||
最后更新日期:2024年12月
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -12,18 +12,18 @@
|
||||
|
||||
<body id="app">
|
||||
<div :class="isDark ? 'dark-bg' : 'bg'" class="min-h-screen">
|
||||
<div class="max-w-3xl mx-auto py-12 px-4">
|
||||
<h1 class="text-3xl font-bold mb-8" :class="isDark ? 'text-white' : 'text-gray-900'">Release Timeline</h1>
|
||||
<div class="mx-auto max-w-3xl px-4 py-12">
|
||||
<h1 class="mb-8 text-3xl font-bold" :class="isDark ? 'text-white' : 'text-gray-900'">Release Timeline</h1>
|
||||
|
||||
<!-- Loading状态 -->
|
||||
<div v-if="loading" class="text-center py-8">
|
||||
<div v-if="loading" class="py-8 text-center">
|
||||
<div
|
||||
class="inline-block animate-spin rounded-full h-8 w-8 border-4"
|
||||
class="inline-block h-8 w-8 animate-spin rounded-full border-4"
|
||||
:class="isDark ? 'border-gray-700 border-t-blue-500' : 'border-gray-300 border-t-blue-500'"></div>
|
||||
</div>
|
||||
|
||||
<!-- Error 状态 -->
|
||||
<div v-else-if="error" class="text-red-500 text-center py-8">{{ error }}</div>
|
||||
<div v-else-if="error" class="py-8 text-center text-red-500">{{ error }}</div>
|
||||
|
||||
<!-- Release 列表 -->
|
||||
<div v-else class="space-y-8">
|
||||
@@ -32,21 +32,21 @@
|
||||
:key="release.id"
|
||||
class="relative pl-8"
|
||||
:class="isDark ? 'border-l-2 border-gray-700' : 'border-l-2 border-gray-200'">
|
||||
<div class="absolute -left-2 top-0 w-4 h-4 rounded-full bg-green-500"></div>
|
||||
<div class="absolute top-0 -left-2 h-4 w-4 rounded-full bg-green-500"></div>
|
||||
<div
|
||||
class="rounded-lg shadow-sm p-6 transition-shadow"
|
||||
class="rounded-lg p-6 shadow-sm transition-shadow"
|
||||
:class="isDark ? 'bg-black hover:shadow-md hover:shadow-black' : 'bg-white hover:shadow-md'">
|
||||
<div class="flex items-start justify-between mb-4">
|
||||
<div class="mb-4 flex items-start justify-between">
|
||||
<div>
|
||||
<h2 class="text-xl font-semibold" :class="isDark ? 'text-white' : 'text-gray-900'">
|
||||
{{ release.name || release.tag_name }}
|
||||
</h2>
|
||||
<p class="text-sm mt-1" :class="isDark ? 'text-gray-400' : 'text-gray-500'">
|
||||
<p class="mt-1 text-sm" :class="isDark ? 'text-gray-400' : 'text-gray-500'">
|
||||
{{ formatDate(release.published_at) }}
|
||||
</p>
|
||||
</div>
|
||||
<span
|
||||
class="inline-flex items-center px-3 py-1 rounded-full text-sm font-medium"
|
||||
class="inline-flex items-center rounded-full px-3 py-1 text-sm font-medium"
|
||||
:class="isDark ? 'bg-green-900 text-green-200' : 'bg-green-100 text-green-800'">
|
||||
{{ release.tag_name }}
|
||||
</span>
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,5 +1,7 @@
|
||||
const https = require('https')
|
||||
const fs = require('fs')
|
||||
const path = require('path')
|
||||
const { execSync } = require('child_process')
|
||||
|
||||
/**
|
||||
* Downloads a file from a URL with redirect handling
|
||||
@@ -32,4 +34,39 @@ async function downloadWithRedirects(url, destinationPath) {
|
||||
})
|
||||
}
|
||||
|
||||
module.exports = { downloadWithRedirects }
|
||||
/**
|
||||
* Downloads a file using PowerShell Invoke-WebRequest command
|
||||
* @param {string} url The URL to download from
|
||||
* @param {string} destinationPath The path to save the file to
|
||||
* @returns {Promise<boolean>} Promise that resolves to true if download succeeds
|
||||
*/
|
||||
async function downloadWithPowerShell(url, destinationPath) {
|
||||
return new Promise((resolve, reject) => {
|
||||
try {
|
||||
// Only support windows platform for PowerShell download
|
||||
if (process.platform !== 'win32') {
|
||||
return reject(new Error('PowerShell download is only supported on Windows'))
|
||||
}
|
||||
|
||||
const outputDir = path.dirname(destinationPath)
|
||||
fs.mkdirSync(outputDir, { recursive: true })
|
||||
|
||||
// PowerShell command to download the file with progress disabled for faster download
|
||||
const psCommand = `powershell -Command "$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest '${url}' -OutFile '${destinationPath}'"`
|
||||
|
||||
console.log(`Downloading with PowerShell: ${url}`)
|
||||
execSync(psCommand, { stdio: 'inherit' })
|
||||
|
||||
if (fs.existsSync(destinationPath)) {
|
||||
console.log(`Download completed: ${destinationPath}`)
|
||||
resolve(true)
|
||||
} else {
|
||||
reject(new Error('Download failed: File not found after download'))
|
||||
}
|
||||
} catch (error) {
|
||||
reject(new Error(`PowerShell download failed: ${error.message}`))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
module.exports = { downloadWithRedirects, downloadWithPowerShell }
|
||||
|
||||
177
resources/scripts/install-ovms.js
Normal file
177
resources/scripts/install-ovms.js
Normal file
@@ -0,0 +1,177 @@
|
||||
const fs = require('fs')
|
||||
const path = require('path')
|
||||
const os = require('os')
|
||||
const { execSync } = require('child_process')
|
||||
const { downloadWithPowerShell } = require('./download')
|
||||
|
||||
// Base URL for downloading OVMS binaries
|
||||
const OVMS_PKG_NAME = 'ovms250911.zip'
|
||||
const OVMS_RELEASE_BASE_URL = [`https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/download/${OVMS_PKG_NAME}`]
|
||||
|
||||
/**
 * Downloads the OVMS binary archive and extracts it into ~/.cherrystudio.
 *
 * Tries each mirror in OVMS_RELEASE_BASE_URL in order until one succeeds.
 * Any previously installed ~/.cherrystudio/ovms directory is removed first
 * so extraction always starts from a clean state.
 *
 * @returns {Promise<number>} 0 on success; 103 if every download URL failed;
 *   104 if extraction failed; 105 if extraction failed and cleanup of the
 *   partial install directory also failed.
 */
async function downloadOvmsBinary() {
  // OVMS goes into its own subdirectory under the Cherry Studio home dir.
  const csDir = path.join(os.homedir(), '.cherrystudio')
  fs.mkdirSync(csDir, { recursive: true })

  // Delete any existing OVMS directory so we never extract over stale files.
  const csOvmsDir = path.join(csDir, 'ovms')
  if (fs.existsSync(csOvmsDir)) {
    fs.rmSync(csOvmsDir, { recursive: true })
  }

  const tempFilename = path.join(os.tmpdir(), 'ovms.zip')

  // Try each mirror URL until one download succeeds.
  let downloadSuccess = false
  let lastError = null

  for (let i = 0; i < OVMS_RELEASE_BASE_URL.length; i++) {
    const downloadUrl = OVMS_RELEASE_BASE_URL[i]
    console.log(`Attempting download from URL ${i + 1}/${OVMS_RELEASE_BASE_URL.length}: ${downloadUrl}`)

    try {
      console.log(`Downloading OVMS from ${downloadUrl} to ${tempFilename}...`)
      await downloadWithPowerShell(downloadUrl, tempFilename)
      downloadSuccess = true
      console.log(`Successfully downloaded from: ${downloadUrl}`)
      break
    } catch (error) {
      console.warn(`Download failed from ${downloadUrl}: ${error.message}`)
      lastError = error

      // Remove the partial download before trying the next mirror.
      if (fs.existsSync(tempFilename)) {
        try {
          fs.unlinkSync(tempFilename)
        } catch (cleanupError) {
          console.warn(`Failed to clean up temporary file: ${cleanupError.message}`)
        }
      }

      if (i < OVMS_RELEASE_BASE_URL.length - 1) {
        console.log(`Trying next URL...`)
      }
    }
  }

  if (!downloadSuccess) {
    console.error(`All download URLs failed. Last error: ${lastError?.message || 'Unknown error'}`)
    return 103
  }

  try {
    console.log(`Extracting to ${csDir}...`)

    // Windows 10+ ships tar.exe (bsdtar), which can extract ZIP archives.
    // Both paths are quoted so a home directory containing spaces does not
    // split the command line (the previous unquoted invocation broke there).
    console.log(`Extracting OVMS to ${csDir}...`)
    execSync(`tar -xf "${tempFilename}" -C "${csDir}"`, { stdio: 'inherit' })
    console.log(`OVMS extracted to ${csDir}`)

    // Clean up the temporary archive on success.
    fs.unlinkSync(tempFilename)
    console.log(`Installation directory: ${csDir}`)
  } catch (error) {
    console.error(`Error installing OVMS: ${error.message}`)
    if (fs.existsSync(tempFilename)) {
      fs.unlinkSync(tempFilename)
    }

    // If extraction left behind an empty ovms directory, remove it so a
    // retry starts cleanly.
    try {
      const files = fs.readdirSync(csOvmsDir)
      if (files.length === 0) {
        fs.rmSync(csOvmsDir, { recursive: true })
        console.log(`Removed empty directory: ${csOvmsDir}`)
      }
    } catch (cleanupError) {
      console.warn(`Warning: Failed to clean up directory: ${cleanupError.message}`)
      return 105
    }

    return 104
  }

  return 0
}
|
||||
|
||||
/**
 * Queries Windows for the CPU name and device ID via PowerShell/CIM.
 *
 * @returns {{name: string, id: string}} CPU info; both fields are empty
 *   strings if the query fails (e.g. on non-Windows platforms).
 */
function getCpuInfo() {
  const cpuInfo = {
    name: '',
    id: ''
  }

  // Use PowerShell to get CPU information.
  try {
    const psCommand = `powershell -Command "Get-CimInstance -ClassName Win32_Processor | Select-Object Name, DeviceID | ConvertTo-Json"`
    const psOutput = execSync(psCommand).toString()
    const cpuData = JSON.parse(psOutput)

    // ConvertTo-Json emits a bare object for one processor and an array for
    // multi-socket systems; normalize to the first entry. Optional chaining
    // guards against an empty array (the old code crashed on cpuData[0]).
    const first = Array.isArray(cpuData) ? cpuData[0] : cpuData
    cpuInfo.name = first?.Name || ''
    cpuInfo.id = first?.DeviceID || ''
  } catch (error) {
    console.error(`Failed to get CPU info: ${error.message}`)
  }

  return cpuInfo
}
|
||||
|
||||
/**
 * Main function to install OVMS.
 *
 * Validates requirements, then downloads and extracts the binary:
 *  1. Windows only (PowerShell + tar.exe are required) -> returns 102.
 *  2. Intel(R) Core(TM) Ultra CPU required -> returns 101.
 *
 * @returns {Promise<number>} 0 on success, otherwise a non-zero error code
 *   (101/102 for validation failures, 103-105 for download/extract failures).
 */
async function installOvms() {
  const platform = os.platform()
  console.log(`Detected platform: ${platform}`)

  // Check the platform FIRST: getCpuInfo() shells out to PowerShell, which
  // only exists on Windows. The old order ran the CPU probe everywhere and
  // reported a misleading "requires Intel Ultra CPU" error on macOS/Linux.
  if (platform !== 'win32') {
    console.error('OVMS installation is only supported on Windows.')
    return 102
  }

  const cpuName = getCpuInfo().name
  console.log(`CPU Name: ${cpuName}`)

  // The bundled OVMS build targets Intel Core Ultra processors only.
  if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) {
    console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.')
    return 101
  }

  return await downloadOvmsBinary()
}
|
||||
|
||||
// Entry point: run the installation and map the outcome onto the process
// exit code (0 = success, 100 = unexpected rejection, other codes as
// returned by installOvms).
installOvms()
  .then((retcode) => {
    if (retcode === 0) {
      console.log('OVMS installation successful')
    } else {
      console.error('OVMS installation failed')
    }
    process.exit(retcode)
  })
  .catch((error) => {
    console.error('OVMS installation failed:', error)
    process.exit(100)
  })
|
||||
@@ -1,6 +1,6 @@
|
||||
import { exec } from 'child_process'
|
||||
import * as fs from 'fs/promises'
|
||||
import linguistLanguages from 'linguist-languages'
|
||||
import * as linguistLanguages from 'linguist-languages'
|
||||
import * as path from 'path'
|
||||
import { promisify } from 'util'
|
||||
|
||||
@@ -75,17 +75,17 @@ export const languages: Record<string, LanguageData> = ${languagesObjectString};
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats a file using Prettier.
|
||||
* Formats a file using Biome.
|
||||
* @param filePath The path to the file to format.
|
||||
*/
|
||||
async function formatWithPrettier(filePath: string): Promise<void> {
|
||||
console.log('🎨 Formatting file with Prettier...')
|
||||
async function format(filePath: string): Promise<void> {
|
||||
console.log('🎨 Formatting file with Biome...')
|
||||
try {
|
||||
await execAsync(`yarn prettier --write ${filePath}`)
|
||||
console.log('✅ Prettier formatting complete.')
|
||||
await execAsync(`yarn biome format --write ${filePath}`)
|
||||
console.log('✅ Biome formatting complete.')
|
||||
} catch (e: any) {
|
||||
console.error('❌ Prettier formatting failed:', e.stdout || e.stderr)
|
||||
throw new Error('Prettier formatting failed.')
|
||||
console.error('❌ Biome formatting failed:', e.stdout || e.stderr)
|
||||
throw new Error('Biome formatting failed.')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ async function updateLanguagesFile(): Promise<void> {
|
||||
await fs.writeFile(LANGUAGES_FILE_PATH, fileContent, 'utf-8')
|
||||
console.log(`✅ Successfully wrote to ${LANGUAGES_FILE_PATH}`)
|
||||
|
||||
await formatWithPrettier(LANGUAGES_FILE_PATH)
|
||||
await format(LANGUAGES_FILE_PATH)
|
||||
await checkTypeScript(LANGUAGES_FILE_PATH)
|
||||
|
||||
console.log('🎉 Successfully updated languages.ts file!')
|
||||
|
||||
128
src/main/apiServer/app.ts
Normal file
128
src/main/apiServer/app.ts
Normal file
@@ -0,0 +1,128 @@
|
||||
import { loggerService } from '@main/services/LoggerService'
|
||||
import cors from 'cors'
|
||||
import express from 'express'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
|
||||
import { authMiddleware } from './middleware/auth'
|
||||
import { errorHandler } from './middleware/error'
|
||||
import { setupOpenAPIDocumentation } from './middleware/openapi'
|
||||
import { chatRoutes } from './routes/chat'
|
||||
import { mcpRoutes } from './routes/mcp'
|
||||
import { modelsRoutes } from './routes/models'
|
||||
|
||||
const logger = loggerService.withContext('ApiServer')
|
||||
|
||||
const app = express()
|
||||
|
||||
// Global middleware
|
||||
app.use((req, res, next) => {
|
||||
const start = Date.now()
|
||||
res.on('finish', () => {
|
||||
const duration = Date.now() - start
|
||||
logger.info(`${req.method} ${req.path} - ${res.statusCode} - ${duration}ms`)
|
||||
})
|
||||
next()
|
||||
})
|
||||
|
||||
app.use((_req, res, next) => {
|
||||
res.setHeader('X-Request-ID', uuidv4())
|
||||
next()
|
||||
})
|
||||
|
||||
app.use(
|
||||
cors({
|
||||
origin: '*',
|
||||
allowedHeaders: ['Content-Type', 'Authorization'],
|
||||
methods: ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']
|
||||
})
|
||||
)
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /health:
|
||||
* get:
|
||||
* summary: Health check endpoint
|
||||
* description: Check server status (no authentication required)
|
||||
* tags: [Health]
|
||||
* security: []
|
||||
* responses:
|
||||
* 200:
|
||||
* description: Server is healthy
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* status:
|
||||
* type: string
|
||||
* example: ok
|
||||
* timestamp:
|
||||
* type: string
|
||||
* format: date-time
|
||||
* version:
|
||||
* type: string
|
||||
* example: 1.0.0
|
||||
*/
|
||||
app.get('/health', (_req, res) => {
|
||||
res.json({
|
||||
status: 'ok',
|
||||
timestamp: new Date().toISOString(),
|
||||
version: process.env.npm_package_version || '1.0.0'
|
||||
})
|
||||
})
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /:
|
||||
* get:
|
||||
* summary: API information
|
||||
* description: Get basic API information and available endpoints
|
||||
* tags: [General]
|
||||
* security: []
|
||||
* responses:
|
||||
* 200:
|
||||
* description: API information
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* name:
|
||||
* type: string
|
||||
* example: Cherry Studio API
|
||||
* version:
|
||||
* type: string
|
||||
* example: 1.0.0
|
||||
* endpoints:
|
||||
* type: object
|
||||
*/
|
||||
app.get('/', (_req, res) => {
|
||||
res.json({
|
||||
name: 'Cherry Studio API',
|
||||
version: '1.0.0',
|
||||
endpoints: {
|
||||
health: 'GET /health',
|
||||
models: 'GET /v1/models',
|
||||
chat: 'POST /v1/chat/completions',
|
||||
mcp: 'GET /v1/mcps'
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// API v1 routes with auth
|
||||
const apiRouter = express.Router()
|
||||
apiRouter.use(authMiddleware)
|
||||
apiRouter.use(express.json())
|
||||
// Mount routes
|
||||
apiRouter.use('/chat', chatRoutes)
|
||||
apiRouter.use('/mcps', mcpRoutes)
|
||||
apiRouter.use('/models', modelsRoutes)
|
||||
app.use('/v1', apiRouter)
|
||||
|
||||
// Setup OpenAPI documentation
|
||||
setupOpenAPIDocumentation(app)
|
||||
|
||||
// Error handling (must be last)
|
||||
app.use(errorHandler)
|
||||
|
||||
export { app }
|
||||
65
src/main/apiServer/config.ts
Normal file
65
src/main/apiServer/config.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { ApiServerConfig } from '@types'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
|
||||
import { loggerService } from '../services/LoggerService'
|
||||
import { reduxService } from '../services/ReduxService'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerConfig')
|
||||
|
||||
const defaultHost = 'localhost'
|
||||
const defaultPort = 23333
|
||||
|
||||
class ConfigManager {
|
||||
private _config: ApiServerConfig | null = null
|
||||
|
||||
private generateApiKey(): string {
|
||||
return `cs-sk-${uuidv4()}`
|
||||
}
|
||||
|
||||
async load(): Promise<ApiServerConfig> {
|
||||
try {
|
||||
const settings = await reduxService.select('state.settings')
|
||||
const serverSettings = settings?.apiServer
|
||||
let apiKey = serverSettings?.apiKey
|
||||
if (!apiKey || apiKey.trim() === '') {
|
||||
apiKey = this.generateApiKey()
|
||||
await reduxService.dispatch({
|
||||
type: 'settings/setApiServerApiKey',
|
||||
payload: apiKey
|
||||
})
|
||||
}
|
||||
this._config = {
|
||||
enabled: serverSettings?.enabled ?? false,
|
||||
port: serverSettings?.port ?? defaultPort,
|
||||
host: defaultHost,
|
||||
apiKey: apiKey
|
||||
}
|
||||
return this._config
|
||||
} catch (error: any) {
|
||||
logger.warn('Failed to load config from Redux, using defaults:', error)
|
||||
this._config = {
|
||||
enabled: false,
|
||||
port: defaultPort,
|
||||
host: defaultHost,
|
||||
apiKey: this.generateApiKey()
|
||||
}
|
||||
return this._config
|
||||
}
|
||||
}
|
||||
|
||||
async get(): Promise<ApiServerConfig> {
|
||||
if (!this._config) {
|
||||
await this.load()
|
||||
}
|
||||
if (!this._config) {
|
||||
throw new Error('Failed to load API server configuration')
|
||||
}
|
||||
return this._config
|
||||
}
|
||||
|
||||
async reload(): Promise<ApiServerConfig> {
|
||||
return await this.load()
|
||||
}
|
||||
}
|
||||
|
||||
export const config = new ConfigManager()
|
||||
2
src/main/apiServer/index.ts
Normal file
2
src/main/apiServer/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { config } from './config'
|
||||
export { apiServer } from './server'
|
||||
62
src/main/apiServer/middleware/auth.ts
Normal file
62
src/main/apiServer/middleware/auth.ts
Normal file
@@ -0,0 +1,62 @@
|
||||
import crypto from 'crypto'
|
||||
import { NextFunction, Request, Response } from 'express'
|
||||
|
||||
import { config } from '../config'
|
||||
|
||||
export const authMiddleware = async (req: Request, res: Response, next: NextFunction) => {
|
||||
const auth = req.header('Authorization') || ''
|
||||
const xApiKey = req.header('x-api-key') || ''
|
||||
|
||||
// Fast rejection if neither credential header provided
|
||||
if (!auth && !xApiKey) {
|
||||
return res.status(401).json({ error: 'Unauthorized: missing credentials' })
|
||||
}
|
||||
|
||||
let token: string | undefined
|
||||
|
||||
// Prefer Bearer if well‑formed
|
||||
if (auth) {
|
||||
const trimmed = auth.trim()
|
||||
const bearerPrefix = /^Bearer\s+/i
|
||||
if (bearerPrefix.test(trimmed)) {
|
||||
const candidate = trimmed.replace(bearerPrefix, '').trim()
|
||||
if (!candidate) {
|
||||
return res.status(401).json({ error: 'Unauthorized: empty bearer token' })
|
||||
}
|
||||
token = candidate
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to x-api-key if token still not resolved
|
||||
if (!token && xApiKey) {
|
||||
if (!xApiKey.trim()) {
|
||||
return res.status(401).json({ error: 'Unauthorized: empty x-api-key' })
|
||||
}
|
||||
token = xApiKey.trim()
|
||||
}
|
||||
|
||||
if (!token) {
|
||||
// At this point we had at least one header, but none yielded a usable token
|
||||
return res.status(401).json({ error: 'Unauthorized: invalid credentials format' })
|
||||
}
|
||||
|
||||
const { apiKey } = await config.get()
|
||||
|
||||
if (!apiKey) {
|
||||
// If server not configured, treat as forbidden (or could be 500). Choose 403 to avoid leaking config state.
|
||||
return res.status(403).json({ error: 'Forbidden' })
|
||||
}
|
||||
|
||||
// Timing-safe compare when lengths match, else immediate forbidden
|
||||
if (token.length !== apiKey.length) {
|
||||
return res.status(403).json({ error: 'Forbidden' })
|
||||
}
|
||||
|
||||
const tokenBuf = Buffer.from(token)
|
||||
const keyBuf = Buffer.from(apiKey)
|
||||
if (!crypto.timingSafeEqual(tokenBuf, keyBuf)) {
|
||||
return res.status(403).json({ error: 'Forbidden' })
|
||||
}
|
||||
|
||||
return next()
|
||||
}
|
||||
21
src/main/apiServer/middleware/error.ts
Normal file
21
src/main/apiServer/middleware/error.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
import { NextFunction, Request, Response } from 'express'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerErrorHandler')
|
||||
|
||||
// oxlint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
export const errorHandler = (err: Error, _req: Request, res: Response, _next: NextFunction) => {
|
||||
logger.error('API Server Error:', err)
|
||||
|
||||
// Don't expose internal errors in production
|
||||
const isDev = process.env.NODE_ENV === 'development'
|
||||
|
||||
res.status(500).json({
|
||||
error: {
|
||||
message: isDev ? err.message : 'Internal server error',
|
||||
type: 'server_error',
|
||||
...(isDev && { stack: err.stack })
|
||||
}
|
||||
})
|
||||
}
|
||||
206
src/main/apiServer/middleware/openapi.ts
Normal file
206
src/main/apiServer/middleware/openapi.ts
Normal file
@@ -0,0 +1,206 @@
|
||||
import { Express } from 'express'
|
||||
import swaggerJSDoc from 'swagger-jsdoc'
|
||||
import swaggerUi from 'swagger-ui-express'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
|
||||
const logger = loggerService.withContext('OpenAPIMiddleware')
|
||||
|
||||
const swaggerOptions: swaggerJSDoc.Options = {
|
||||
definition: {
|
||||
openapi: '3.0.0',
|
||||
info: {
|
||||
title: 'Cherry Studio API',
|
||||
version: '1.0.0',
|
||||
description: 'OpenAI-compatible API for Cherry Studio with additional Cherry-specific endpoints',
|
||||
contact: {
|
||||
name: 'Cherry Studio',
|
||||
url: 'https://github.com/CherryHQ/cherry-studio'
|
||||
}
|
||||
},
|
||||
servers: [
|
||||
{
|
||||
url: 'http://localhost:23333',
|
||||
description: 'Local development server'
|
||||
}
|
||||
],
|
||||
components: {
|
||||
securitySchemes: {
|
||||
BearerAuth: {
|
||||
type: 'http',
|
||||
scheme: 'bearer',
|
||||
bearerFormat: 'JWT',
|
||||
description: 'Use the API key from Cherry Studio settings'
|
||||
}
|
||||
},
|
||||
schemas: {
|
||||
Error: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
error: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
message: { type: 'string' },
|
||||
type: { type: 'string' },
|
||||
code: { type: 'string' }
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
ChatMessage: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
role: {
|
||||
type: 'string',
|
||||
enum: ['system', 'user', 'assistant', 'tool']
|
||||
},
|
||||
content: {
|
||||
oneOf: [
|
||||
{ type: 'string' },
|
||||
{
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
type: { type: 'string' },
|
||||
text: { type: 'string' },
|
||||
image_url: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: { type: 'string' }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
name: { type: 'string' },
|
||||
tool_calls: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string' },
|
||||
type: { type: 'string' },
|
||||
function: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string' },
|
||||
arguments: { type: 'string' }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
ChatCompletionRequest: {
|
||||
type: 'object',
|
||||
required: ['model', 'messages'],
|
||||
properties: {
|
||||
model: {
|
||||
type: 'string',
|
||||
description: 'The model to use for completion, in format provider:model-id'
|
||||
},
|
||||
messages: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/ChatMessage' }
|
||||
},
|
||||
temperature: {
|
||||
type: 'number',
|
||||
minimum: 0,
|
||||
maximum: 2,
|
||||
default: 1
|
||||
},
|
||||
max_tokens: {
|
||||
type: 'integer',
|
||||
minimum: 1
|
||||
},
|
||||
stream: {
|
||||
type: 'boolean',
|
||||
default: false
|
||||
},
|
||||
tools: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
type: { type: 'string' },
|
||||
function: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string' },
|
||||
description: { type: 'string' },
|
||||
parameters: { type: 'object' }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
Model: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string' },
|
||||
object: { type: 'string', enum: ['model'] },
|
||||
created: { type: 'integer' },
|
||||
owned_by: { type: 'string' }
|
||||
}
|
||||
},
|
||||
MCPServer: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string' },
|
||||
name: { type: 'string' },
|
||||
command: { type: 'string' },
|
||||
args: {
|
||||
type: 'array',
|
||||
items: { type: 'string' }
|
||||
},
|
||||
env: { type: 'object' },
|
||||
disabled: { type: 'boolean' }
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
security: [
|
||||
{
|
||||
BearerAuth: []
|
||||
}
|
||||
]
|
||||
},
|
||||
apis: ['./src/main/apiServer/routes/*.ts', './src/main/apiServer/app.ts']
|
||||
}
|
||||
|
||||
export function setupOpenAPIDocumentation(app: Express) {
|
||||
try {
|
||||
const specs = swaggerJSDoc(swaggerOptions)
|
||||
|
||||
// Serve OpenAPI JSON
|
||||
app.get('/api-docs.json', (_req, res) => {
|
||||
res.setHeader('Content-Type', 'application/json')
|
||||
res.send(specs)
|
||||
})
|
||||
|
||||
// Serve Swagger UI
|
||||
app.use(
|
||||
'/api-docs',
|
||||
swaggerUi.serve,
|
||||
swaggerUi.setup(specs, {
|
||||
customCss: `
|
||||
.swagger-ui .topbar { display: none; }
|
||||
.swagger-ui .info .title { color: #1890ff; }
|
||||
`,
|
||||
customSiteTitle: 'Cherry Studio API Documentation'
|
||||
})
|
||||
)
|
||||
|
||||
logger.info('OpenAPI documentation setup complete')
|
||||
logger.info('Documentation available at /api-docs')
|
||||
logger.info('OpenAPI spec available at /api-docs.json')
|
||||
} catch (error) {
|
||||
logger.error('Failed to setup OpenAPI documentation:', error as Error)
|
||||
}
|
||||
}
|
||||
225
src/main/apiServer/routes/chat.ts
Normal file
225
src/main/apiServer/routes/chat.ts
Normal file
@@ -0,0 +1,225 @@
|
||||
import express, { Request, Response } from 'express'
|
||||
import OpenAI from 'openai'
|
||||
import { ChatCompletionCreateParams } from 'openai/resources'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import { chatCompletionService } from '../services/chat-completion'
|
||||
import { validateModelId } from '../utils'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerChatRoutes')
|
||||
|
||||
const router = express.Router()
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/chat/completions:
|
||||
* post:
|
||||
* summary: Create chat completion
|
||||
* description: Create a chat completion response, compatible with OpenAI API
|
||||
* tags: [Chat]
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ChatCompletionRequest'
|
||||
* responses:
|
||||
* 200:
|
||||
* description: Chat completion response
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* id:
|
||||
* type: string
|
||||
* object:
|
||||
* type: string
|
||||
* example: chat.completion
|
||||
* created:
|
||||
* type: integer
|
||||
* model:
|
||||
* type: string
|
||||
* choices:
|
||||
* type: array
|
||||
* items:
|
||||
* type: object
|
||||
* properties:
|
||||
* index:
|
||||
* type: integer
|
||||
* message:
|
||||
* $ref: '#/components/schemas/ChatMessage'
|
||||
* finish_reason:
|
||||
* type: string
|
||||
* usage:
|
||||
* type: object
|
||||
* properties:
|
||||
* prompt_tokens:
|
||||
* type: integer
|
||||
* completion_tokens:
|
||||
* type: integer
|
||||
* total_tokens:
|
||||
* type: integer
|
||||
* text/plain:
|
||||
* schema:
|
||||
* type: string
|
||||
* description: Server-sent events stream (when stream=true)
|
||||
* 400:
|
||||
* description: Bad request
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
* 401:
|
||||
* description: Unauthorized
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
* 429:
|
||||
* description: Rate limit exceeded
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
* 500:
|
||||
* description: Internal server error
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
*/
|
||||
router.post('/completions', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const request: ChatCompletionCreateParams = req.body
|
||||
|
||||
if (!request) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
message: 'Request body is required',
|
||||
type: 'invalid_request_error',
|
||||
code: 'missing_body'
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
logger.info('Chat completion request:', {
|
||||
model: request.model,
|
||||
messageCount: request.messages?.length || 0,
|
||||
stream: request.stream,
|
||||
temperature: request.temperature
|
||||
})
|
||||
|
||||
// Validate request
|
||||
const validation = chatCompletionService.validateRequest(request)
|
||||
if (!validation.isValid) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
message: validation.errors.join('; '),
|
||||
type: 'invalid_request_error',
|
||||
code: 'validation_failed'
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Validate model ID and get provider
|
||||
const modelValidation = await validateModelId(request.model)
|
||||
if (!modelValidation.valid) {
|
||||
const error = modelValidation.error!
|
||||
logger.warn(`Model validation failed for '${request.model}':`, error)
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
message: error.message,
|
||||
type: 'invalid_request_error',
|
||||
code: error.code
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const provider = modelValidation.provider!
|
||||
const modelId = modelValidation.modelId!
|
||||
|
||||
logger.info('Model validation successful:', {
|
||||
provider: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId: modelId,
|
||||
fullModelId: request.model
|
||||
})
|
||||
|
||||
// Create OpenAI client
|
||||
const client = new OpenAI({
|
||||
baseURL: provider.apiHost,
|
||||
apiKey: provider.apiKey
|
||||
})
|
||||
request.model = modelId
|
||||
|
||||
// Handle streaming
|
||||
if (request.stream) {
|
||||
const streamResponse = await client.chat.completions.create(request)
|
||||
|
||||
res.setHeader('Content-Type', 'text/plain; charset=utf-8')
|
||||
res.setHeader('Cache-Control', 'no-cache')
|
||||
res.setHeader('Connection', 'keep-alive')
|
||||
|
||||
try {
|
||||
for await (const chunk of streamResponse as any) {
|
||||
res.write(`data: ${JSON.stringify(chunk)}\n\n`)
|
||||
}
|
||||
res.write('data: [DONE]\n\n')
|
||||
res.end()
|
||||
} catch (streamError: any) {
|
||||
logger.error('Stream error:', streamError)
|
||||
res.write(
|
||||
`data: ${JSON.stringify({
|
||||
error: {
|
||||
message: 'Stream processing error',
|
||||
type: 'server_error',
|
||||
code: 'stream_error'
|
||||
}
|
||||
})}\n\n`
|
||||
)
|
||||
res.end()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Handle non-streaming
|
||||
const response = await client.chat.completions.create(request)
|
||||
return res.json(response)
|
||||
} catch (error: any) {
|
||||
logger.error('Chat completion error:', error)
|
||||
|
||||
let statusCode = 500
|
||||
let errorType = 'server_error'
|
||||
let errorCode = 'internal_error'
|
||||
let errorMessage = 'Internal server error'
|
||||
|
||||
if (error instanceof Error) {
|
||||
errorMessage = error.message
|
||||
|
||||
if (error.message.includes('API key') || error.message.includes('authentication')) {
|
||||
statusCode = 401
|
||||
errorType = 'authentication_error'
|
||||
errorCode = 'invalid_api_key'
|
||||
} else if (error.message.includes('rate limit') || error.message.includes('quota')) {
|
||||
statusCode = 429
|
||||
errorType = 'rate_limit_error'
|
||||
errorCode = 'rate_limit_exceeded'
|
||||
} else if (error.message.includes('timeout') || error.message.includes('connection')) {
|
||||
statusCode = 502
|
||||
errorType = 'server_error'
|
||||
errorCode = 'upstream_error'
|
||||
}
|
||||
}
|
||||
|
||||
return res.status(statusCode).json({
|
||||
error: {
|
||||
message: errorMessage,
|
||||
type: errorType,
|
||||
code: errorCode
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
export { router as chatRoutes }
|
||||
153
src/main/apiServer/routes/mcp.ts
Normal file
153
src/main/apiServer/routes/mcp.ts
Normal file
@@ -0,0 +1,153 @@
|
||||
import express, { Request, Response } from 'express'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import { mcpApiService } from '../services/mcp'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerMCPRoutes')
|
||||
|
||||
const router = express.Router()
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/mcps:
|
||||
* get:
|
||||
* summary: List MCP servers
|
||||
* description: Get a list of all configured Model Context Protocol servers
|
||||
* tags: [MCP]
|
||||
* responses:
|
||||
* 200:
|
||||
* description: List of MCP servers
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* success:
|
||||
* type: boolean
|
||||
* data:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/MCPServer'
|
||||
* 503:
|
||||
* description: Service unavailable
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* success:
|
||||
* type: boolean
|
||||
* example: false
|
||||
* error:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
*/
|
||||
router.get('/', async (req: Request, res: Response) => {
|
||||
try {
|
||||
logger.info('Get all MCP servers request received')
|
||||
const servers = await mcpApiService.getAllServers(req)
|
||||
return res.json({
|
||||
success: true,
|
||||
data: servers
|
||||
})
|
||||
} catch (error: any) {
|
||||
logger.error('Error fetching MCP servers:', error)
|
||||
return res.status(503).json({
|
||||
success: false,
|
||||
error: {
|
||||
message: `Failed to retrieve MCP servers: ${error.message}`,
|
||||
type: 'service_unavailable',
|
||||
code: 'servers_unavailable'
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/mcps/{server_id}:
|
||||
* get:
|
||||
* summary: Get MCP server info
|
||||
* description: Get detailed information about a specific MCP server
|
||||
* tags: [MCP]
|
||||
* parameters:
|
||||
* - in: path
|
||||
* name: server_id
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* description: MCP server ID
|
||||
* responses:
|
||||
* 200:
|
||||
* description: MCP server information
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* success:
|
||||
* type: boolean
|
||||
* data:
|
||||
* $ref: '#/components/schemas/MCPServer'
|
||||
* 404:
|
||||
* description: MCP server not found
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* success:
|
||||
* type: boolean
|
||||
* example: false
|
||||
* error:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
*/
|
||||
router.get('/:server_id', async (req: Request, res: Response) => {
|
||||
try {
|
||||
logger.info('Get MCP server info request received')
|
||||
const server = await mcpApiService.getServerInfo(req.params.server_id)
|
||||
if (!server) {
|
||||
logger.warn('MCP server not found')
|
||||
return res.status(404).json({
|
||||
success: false,
|
||||
error: {
|
||||
message: 'MCP server not found',
|
||||
type: 'not_found',
|
||||
code: 'server_not_found'
|
||||
}
|
||||
})
|
||||
}
|
||||
return res.json({
|
||||
success: true,
|
||||
data: server
|
||||
})
|
||||
} catch (error: any) {
|
||||
logger.error('Error fetching MCP server info:', error)
|
||||
return res.status(503).json({
|
||||
success: false,
|
||||
error: {
|
||||
message: `Failed to retrieve MCP server info: ${error.message}`,
|
||||
type: 'service_unavailable',
|
||||
code: 'server_info_unavailable'
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to MCP server
|
||||
router.all('/:server_id/mcp', async (req: Request, res: Response) => {
|
||||
const server = await mcpApiService.getServerById(req.params.server_id)
|
||||
if (!server) {
|
||||
logger.warn('MCP server not found')
|
||||
return res.status(404).json({
|
||||
success: false,
|
||||
error: {
|
||||
message: 'MCP server not found',
|
||||
type: 'not_found',
|
||||
code: 'server_not_found'
|
||||
}
|
||||
})
|
||||
}
|
||||
return await mcpApiService.handleRequest(req, res, server)
|
||||
})
|
||||
|
||||
export { router as mcpRoutes }
|
||||
73
src/main/apiServer/routes/models.ts
Normal file
73
src/main/apiServer/routes/models.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
import express, { Request, Response } from 'express'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import { chatCompletionService } from '../services/chat-completion'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerModelsRoutes')
|
||||
|
||||
const router = express.Router()
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/models:
|
||||
* get:
|
||||
* summary: List available models
|
||||
* description: Returns a list of available AI models from all configured providers
|
||||
* tags: [Models]
|
||||
* responses:
|
||||
* 200:
|
||||
* description: List of available models
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* object:
|
||||
* type: string
|
||||
* example: list
|
||||
* data:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/Model'
|
||||
* 503:
|
||||
* description: Service unavailable
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Error'
|
||||
*/
|
||||
router.get('/', async (_req: Request, res: Response) => {
|
||||
try {
|
||||
logger.info('Models list request received')
|
||||
|
||||
const models = await chatCompletionService.getModels()
|
||||
|
||||
if (models.length === 0) {
|
||||
logger.warn(
|
||||
'No models available from providers. This may be because no OpenAI providers are configured or enabled.'
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(`Returning ${models.length} models (OpenAI providers only)`)
|
||||
logger.debug(
|
||||
'Model IDs:',
|
||||
models.map((m) => m.id)
|
||||
)
|
||||
|
||||
return res.json({
|
||||
object: 'list',
|
||||
data: models
|
||||
})
|
||||
} catch (error: any) {
|
||||
logger.error('Error fetching models:', error)
|
||||
return res.status(503).json({
|
||||
error: {
|
||||
message: 'Failed to retrieve models from available providers',
|
||||
type: 'service_unavailable',
|
||||
code: 'models_unavailable'
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
export { router as modelsRoutes }
|
||||
65
src/main/apiServer/server.ts
Normal file
65
src/main/apiServer/server.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { createServer } from 'node:http'
|
||||
|
||||
import { loggerService } from '../services/LoggerService'
|
||||
import { app } from './app'
|
||||
import { config } from './config'
|
||||
|
||||
const logger = loggerService.withContext('ApiServer')
|
||||
|
||||
export class ApiServer {
|
||||
private server: ReturnType<typeof createServer> | null = null
|
||||
|
||||
async start(): Promise<void> {
|
||||
if (this.server) {
|
||||
logger.warn('Server already running')
|
||||
return
|
||||
}
|
||||
|
||||
// Load config
|
||||
const { port, host, apiKey } = await config.load()
|
||||
|
||||
// Create server with Express app
|
||||
this.server = createServer(app)
|
||||
|
||||
// Start server
|
||||
return new Promise((resolve, reject) => {
|
||||
this.server!.listen(port, host, () => {
|
||||
logger.info(`API Server started at http://${host}:${port}`)
|
||||
logger.info(`API Key: ${apiKey}`)
|
||||
resolve()
|
||||
})
|
||||
|
||||
this.server!.on('error', reject)
|
||||
})
|
||||
}
|
||||
|
||||
async stop(): Promise<void> {
|
||||
if (!this.server) return
|
||||
|
||||
return new Promise((resolve) => {
|
||||
this.server!.close(() => {
|
||||
logger.info('API Server stopped')
|
||||
this.server = null
|
||||
resolve()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
async restart(): Promise<void> {
|
||||
await this.stop()
|
||||
await config.reload()
|
||||
await this.start()
|
||||
}
|
||||
|
||||
isRunning(): boolean {
|
||||
const hasServer = this.server !== null
|
||||
const isListening = this.server?.listening || false
|
||||
const result = hasServer && isListening
|
||||
|
||||
logger.debug('isRunning check:', { hasServer, isListening, result })
|
||||
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
export const apiServer = new ApiServer()
|
||||
239
src/main/apiServer/services/chat-completion.ts
Normal file
239
src/main/apiServer/services/chat-completion.ts
Normal file
@@ -0,0 +1,239 @@
|
||||
import OpenAI from 'openai'
|
||||
import { ChatCompletionCreateParams } from 'openai/resources'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import {
|
||||
getProviderByModel,
|
||||
getRealProviderModel,
|
||||
listAllAvailableModels,
|
||||
OpenAICompatibleModel,
|
||||
transformModelToOpenAI,
|
||||
validateProvider
|
||||
} from '../utils'
|
||||
|
||||
const logger = loggerService.withContext('ChatCompletionService')
|
||||
|
||||
export interface ModelData extends OpenAICompatibleModel {
|
||||
provider_id: string
|
||||
model_id: string
|
||||
name: string
|
||||
}
|
||||
|
||||
export interface ValidationResult {
|
||||
isValid: boolean
|
||||
errors: string[]
|
||||
}
|
||||
|
||||
export class ChatCompletionService {
|
||||
async getModels(): Promise<ModelData[]> {
|
||||
try {
|
||||
logger.info('Getting available models from providers')
|
||||
|
||||
const models = await listAllAvailableModels()
|
||||
|
||||
// Use Map to deduplicate models by their full ID (provider:model_id)
|
||||
const uniqueModels = new Map<string, ModelData>()
|
||||
|
||||
for (const model of models) {
|
||||
const openAIModel = transformModelToOpenAI(model)
|
||||
const fullModelId = openAIModel.id // This is already in format "provider:model_id"
|
||||
|
||||
// Only add if not already present (first occurrence wins)
|
||||
if (!uniqueModels.has(fullModelId)) {
|
||||
uniqueModels.set(fullModelId, {
|
||||
...openAIModel,
|
||||
provider_id: model.provider,
|
||||
model_id: model.id,
|
||||
name: model.name
|
||||
})
|
||||
} else {
|
||||
logger.debug(`Skipping duplicate model: ${fullModelId}`)
|
||||
}
|
||||
}
|
||||
|
||||
const modelData = Array.from(uniqueModels.values())
|
||||
|
||||
logger.info(`Successfully retrieved ${modelData.length} unique models from ${models.length} total models`)
|
||||
|
||||
if (models.length > modelData.length) {
|
||||
logger.debug(`Filtered out ${models.length - modelData.length} duplicate models`)
|
||||
}
|
||||
|
||||
return modelData
|
||||
} catch (error: any) {
|
||||
logger.error('Error getting models:', error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
validateRequest(request: ChatCompletionCreateParams): ValidationResult {
|
||||
const errors: string[] = []
|
||||
|
||||
// Validate model
|
||||
if (!request.model) {
|
||||
errors.push('Model is required')
|
||||
} else if (typeof request.model !== 'string') {
|
||||
errors.push('Model must be a string')
|
||||
} else if (!request.model.includes(':')) {
|
||||
errors.push('Model must be in format "provider:model_id"')
|
||||
}
|
||||
|
||||
// Validate messages
|
||||
if (!request.messages) {
|
||||
errors.push('Messages array is required')
|
||||
} else if (!Array.isArray(request.messages)) {
|
||||
errors.push('Messages must be an array')
|
||||
} else if (request.messages.length === 0) {
|
||||
errors.push('Messages array cannot be empty')
|
||||
} else {
|
||||
// Validate each message
|
||||
request.messages.forEach((message, index) => {
|
||||
if (!message.role) {
|
||||
errors.push(`Message ${index}: role is required`)
|
||||
}
|
||||
if (!message.content) {
|
||||
errors.push(`Message ${index}: content is required`)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Validate optional parameters
|
||||
if (request.temperature !== undefined) {
|
||||
if (typeof request.temperature !== 'number' || request.temperature < 0 || request.temperature > 2) {
|
||||
errors.push('Temperature must be a number between 0 and 2')
|
||||
}
|
||||
}
|
||||
|
||||
if (request.max_tokens !== undefined) {
|
||||
if (typeof request.max_tokens !== 'number' || request.max_tokens < 1) {
|
||||
errors.push('max_tokens must be a positive number')
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
isValid: errors.length === 0,
|
||||
errors
|
||||
}
|
||||
}
|
||||
|
||||
async processCompletion(request: ChatCompletionCreateParams): Promise<OpenAI.Chat.Completions.ChatCompletion> {
|
||||
try {
|
||||
logger.info('Processing chat completion request:', {
|
||||
model: request.model,
|
||||
messageCount: request.messages.length,
|
||||
stream: request.stream
|
||||
})
|
||||
|
||||
// Validate request
|
||||
const validation = this.validateRequest(request)
|
||||
if (!validation.isValid) {
|
||||
throw new Error(`Request validation failed: ${validation.errors.join(', ')}`)
|
||||
}
|
||||
|
||||
// Get provider for the model
|
||||
const provider = await getProviderByModel(request.model!)
|
||||
if (!provider) {
|
||||
throw new Error(`Provider not found for model: ${request.model}`)
|
||||
}
|
||||
|
||||
// Validate provider
|
||||
if (!validateProvider(provider)) {
|
||||
throw new Error(`Provider validation failed for: ${provider.id}`)
|
||||
}
|
||||
|
||||
// Extract model ID from the full model string
|
||||
const modelId = getRealProviderModel(request.model)
|
||||
|
||||
// Create OpenAI client for the provider
|
||||
const client = new OpenAI({
|
||||
baseURL: provider.apiHost,
|
||||
apiKey: provider.apiKey
|
||||
})
|
||||
|
||||
// Prepare request with the actual model ID
|
||||
const providerRequest = {
|
||||
...request,
|
||||
model: modelId,
|
||||
stream: false
|
||||
}
|
||||
|
||||
logger.debug('Sending request to provider:', {
|
||||
provider: provider.id,
|
||||
model: modelId,
|
||||
apiHost: provider.apiHost
|
||||
})
|
||||
|
||||
const response = (await client.chat.completions.create(providerRequest)) as OpenAI.Chat.Completions.ChatCompletion
|
||||
|
||||
logger.info('Successfully processed chat completion')
|
||||
return response
|
||||
} catch (error: any) {
|
||||
logger.error('Error processing chat completion:', error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async *processStreamingCompletion(
|
||||
request: ChatCompletionCreateParams
|
||||
): AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk> {
|
||||
try {
|
||||
logger.info('Processing streaming chat completion request:', {
|
||||
model: request.model,
|
||||
messageCount: request.messages.length
|
||||
})
|
||||
|
||||
// Validate request
|
||||
const validation = this.validateRequest(request)
|
||||
if (!validation.isValid) {
|
||||
throw new Error(`Request validation failed: ${validation.errors.join(', ')}`)
|
||||
}
|
||||
|
||||
// Get provider for the model
|
||||
const provider = await getProviderByModel(request.model!)
|
||||
if (!provider) {
|
||||
throw new Error(`Provider not found for model: ${request.model}`)
|
||||
}
|
||||
|
||||
// Validate provider
|
||||
if (!validateProvider(provider)) {
|
||||
throw new Error(`Provider validation failed for: ${provider.id}`)
|
||||
}
|
||||
|
||||
// Extract model ID from the full model string
|
||||
const modelId = getRealProviderModel(request.model)
|
||||
|
||||
// Create OpenAI client for the provider
|
||||
const client = new OpenAI({
|
||||
baseURL: provider.apiHost,
|
||||
apiKey: provider.apiKey
|
||||
})
|
||||
|
||||
// Prepare streaming request
|
||||
const streamingRequest = {
|
||||
...request,
|
||||
model: modelId,
|
||||
stream: true as const
|
||||
}
|
||||
|
||||
logger.debug('Sending streaming request to provider:', {
|
||||
provider: provider.id,
|
||||
model: modelId,
|
||||
apiHost: provider.apiHost
|
||||
})
|
||||
|
||||
const stream = await client.chat.completions.create(streamingRequest)
|
||||
|
||||
for await (const chunk of stream) {
|
||||
yield chunk
|
||||
}
|
||||
|
||||
logger.info('Successfully completed streaming chat completion')
|
||||
} catch (error: any) {
|
||||
logger.error('Error processing streaming chat completion:', error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
// The service holds no per-request state, so one shared instance serves all callers.
export const chatCompletionService = new ChatCompletionService()
|
||||
251
src/main/apiServer/services/mcp.ts
Normal file
251
src/main/apiServer/services/mcp.ts
Normal file
@@ -0,0 +1,251 @@
|
||||
import mcpService from '@main/services/MCPService'
|
||||
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp'
|
||||
import {
|
||||
isJSONRPCRequest,
|
||||
JSONRPCMessage,
|
||||
JSONRPCMessageSchema,
|
||||
MessageExtraInfo
|
||||
} from '@modelcontextprotocol/sdk/types'
|
||||
import { MCPServer } from '@types'
|
||||
import { randomUUID } from 'crypto'
|
||||
import { EventEmitter } from 'events'
|
||||
import { Request, Response } from 'express'
|
||||
import { IncomingMessage, ServerResponse } from 'http'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import { reduxService } from '../../services/ReduxService'
|
||||
import { getMcpServerById } from '../utils/mcp'
|
||||
|
||||
// Scoped logger: every line from this module is tagged 'MCPApiService'.
const logger = loggerService.withContext('MCPApiService')
// Live streamable-HTTP transports keyed by MCP session id; entries are added
// when a session is initialized (handleRequest) and removed on transport close.
const transports: Record<string, StreamableHTTPServerTransport> = {}
|
||||
|
||||
// Wire-format description of one MCP server as exposed by the REST API.
interface McpServerDTO {
  id: MCPServer['id']
  name: MCPServer['name']
  // getAllServers always reports 'streamableHttp' here, regardless of the
  // server's underlying transport type.
  type: MCPServer['type']
  description: MCPServer['description']
  // Endpoint on this API server for the server: /v1/mcps/:id/mcp
  url: string
}
|
||||
|
||||
// Response shape of the server-listing endpoint: DTOs keyed by server id.
interface McpServersResp {
  servers: Record<string, McpServerDTO>
}
|
||||
|
||||
/**
|
||||
* MCPApiService - API layer for MCP server management
|
||||
*
|
||||
* This service provides a REST API interface for MCP servers while integrating
|
||||
* with the existing application architecture:
|
||||
*
|
||||
* 1. Uses ReduxService to access the renderer's Redux store directly
|
||||
* 2. Syncs changes back to the renderer via Redux actions
|
||||
* 3. Leverages existing MCPService for actual server connections
|
||||
* 4. Provides session management for API clients
|
||||
*/
|
||||
class MCPApiService extends EventEmitter {
|
||||
private transport: StreamableHTTPServerTransport = new StreamableHTTPServerTransport({
|
||||
sessionIdGenerator: () => randomUUID()
|
||||
})
|
||||
|
||||
constructor() {
|
||||
super()
|
||||
this.initMcpServer()
|
||||
logger.silly('MCPApiService initialized')
|
||||
}
|
||||
|
||||
private initMcpServer() {
|
||||
this.transport.onmessage = this.onMessage
|
||||
}
|
||||
|
||||
/**
|
||||
* Get servers directly from Redux store
|
||||
*/
|
||||
private async getServersFromRedux(): Promise<MCPServer[]> {
|
||||
try {
|
||||
logger.silly('Getting servers from Redux store')
|
||||
|
||||
// Try to get from cache first (faster)
|
||||
const cachedServers = reduxService.selectSync<MCPServer[]>('state.mcp.servers')
|
||||
if (cachedServers && Array.isArray(cachedServers)) {
|
||||
logger.silly(`Found ${cachedServers.length} servers in Redux cache`)
|
||||
return cachedServers
|
||||
}
|
||||
|
||||
// If cache is not available, get fresh data
|
||||
const servers = await reduxService.select<MCPServer[]>('state.mcp.servers')
|
||||
logger.silly(`Fetched ${servers?.length || 0} servers from Redux store`)
|
||||
return servers || []
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get servers from Redux:', error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
// get all activated servers
|
||||
async getAllServers(req: Request): Promise<McpServersResp> {
|
||||
try {
|
||||
const servers = await this.getServersFromRedux()
|
||||
logger.silly(`Returning ${servers.length} servers`)
|
||||
const resp: McpServersResp = {
|
||||
servers: {}
|
||||
}
|
||||
for (const server of servers) {
|
||||
if (server.isActive) {
|
||||
resp.servers[server.id] = {
|
||||
id: server.id,
|
||||
name: server.name,
|
||||
type: 'streamableHttp',
|
||||
description: server.description,
|
||||
url: `${req.protocol}://${req.host}/v1/mcps/${server.id}/mcp`
|
||||
}
|
||||
}
|
||||
}
|
||||
return resp
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get all servers:', error)
|
||||
throw new Error('Failed to retrieve servers')
|
||||
}
|
||||
}
|
||||
|
||||
// get server by id
|
||||
async getServerById(id: string): Promise<MCPServer | null> {
|
||||
try {
|
||||
logger.silly(`getServerById called with id: ${id}`)
|
||||
const servers = await this.getServersFromRedux()
|
||||
const server = servers.find((s) => s.id === id)
|
||||
if (!server) {
|
||||
logger.warn(`Server with id ${id} not found`)
|
||||
return null
|
||||
}
|
||||
logger.silly(`Returning server with id ${id}`)
|
||||
return server
|
||||
} catch (error: any) {
|
||||
logger.error(`Failed to get server with id ${id}:`, error)
|
||||
throw new Error('Failed to retrieve server')
|
||||
}
|
||||
}
|
||||
|
||||
async getServerInfo(id: string): Promise<any> {
|
||||
try {
|
||||
logger.silly(`getServerInfo called with id: ${id}`)
|
||||
const server = await this.getServerById(id)
|
||||
if (!server) {
|
||||
logger.warn(`Server with id ${id} not found`)
|
||||
return null
|
||||
}
|
||||
logger.silly(`Returning server info for id ${id}`)
|
||||
|
||||
const client = await mcpService.initClient(server)
|
||||
const tools = await client.listTools()
|
||||
|
||||
logger.info(`Server with id ${id} info:`, { tools: JSON.stringify(tools) })
|
||||
|
||||
// const [version, tools, prompts, resources] = await Promise.all([
|
||||
// () => {
|
||||
// try {
|
||||
// return client.getServerVersion()
|
||||
// } catch (error) {
|
||||
// logger.error(`Failed to get server version for id ${id}:`, { error: error })
|
||||
// return '1.0.0'
|
||||
// }
|
||||
// },
|
||||
// (() => {
|
||||
// try {
|
||||
// return client.listTools()
|
||||
// } catch (error) {
|
||||
// logger.error(`Failed to list tools for id ${id}:`, { error: error })
|
||||
// return []
|
||||
// }
|
||||
// })(),
|
||||
// (() => {
|
||||
// try {
|
||||
// return client.listPrompts()
|
||||
// } catch (error) {
|
||||
// logger.error(`Failed to list prompts for id ${id}:`, { error: error })
|
||||
// return []
|
||||
// }
|
||||
// })(),
|
||||
// (() => {
|
||||
// try {
|
||||
// return client.listResources()
|
||||
// } catch (error) {
|
||||
// logger.error(`Failed to list resources for id ${id}:`, { error: error })
|
||||
// return []
|
||||
// }
|
||||
// })()
|
||||
// ])
|
||||
|
||||
return {
|
||||
id: server.id,
|
||||
name: server.name,
|
||||
type: server.type,
|
||||
description: server.description,
|
||||
tools
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`Failed to get server info with id ${id}:`, error)
|
||||
throw new Error('Failed to retrieve server info')
|
||||
}
|
||||
}
|
||||
|
||||
async handleRequest(req: Request, res: Response, server: MCPServer) {
|
||||
const sessionId = req.headers['mcp-session-id'] as string | undefined
|
||||
logger.silly(`Handling request for server with sessionId ${sessionId}`)
|
||||
let transport: StreamableHTTPServerTransport
|
||||
if (sessionId && transports[sessionId]) {
|
||||
transport = transports[sessionId]
|
||||
} else {
|
||||
transport = new StreamableHTTPServerTransport({
|
||||
sessionIdGenerator: () => randomUUID(),
|
||||
onsessioninitialized: (sessionId) => {
|
||||
transports[sessionId] = transport
|
||||
}
|
||||
})
|
||||
|
||||
transport.onclose = () => {
|
||||
logger.info(`Transport for sessionId ${sessionId} closed`)
|
||||
if (transport.sessionId) {
|
||||
delete transports[transport.sessionId]
|
||||
}
|
||||
}
|
||||
const mcpServer = await getMcpServerById(server.id)
|
||||
if (mcpServer) {
|
||||
await mcpServer.connect(transport)
|
||||
}
|
||||
}
|
||||
const jsonpayload = req.body
|
||||
const messages: JSONRPCMessage[] = []
|
||||
|
||||
if (Array.isArray(jsonpayload)) {
|
||||
for (const payload of jsonpayload) {
|
||||
const message = JSONRPCMessageSchema.parse(payload)
|
||||
messages.push(message)
|
||||
}
|
||||
} else {
|
||||
const message = JSONRPCMessageSchema.parse(jsonpayload)
|
||||
messages.push(message)
|
||||
}
|
||||
|
||||
for (const message of messages) {
|
||||
if (isJSONRPCRequest(message)) {
|
||||
if (!message.params) {
|
||||
message.params = {}
|
||||
}
|
||||
if (!message.params._meta) {
|
||||
message.params._meta = {}
|
||||
}
|
||||
message.params._meta.serverId = server.id
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`Request body`, { rawBody: req.body, messages: JSON.stringify(messages) })
|
||||
await transport.handleRequest(req as IncomingMessage, res as ServerResponse, messages)
|
||||
}
|
||||
|
||||
private onMessage(message: JSONRPCMessage, extra?: MessageExtraInfo) {
|
||||
logger.info(`Received message: ${JSON.stringify(message)}`, extra)
|
||||
// Handle message here
|
||||
}
|
||||
}
|
||||
|
||||
// Shared singleton used by the API server's MCP routes.
export const mcpApiService = new MCPApiService()
|
||||
231
src/main/apiServer/utils/index.ts
Normal file
231
src/main/apiServer/utils/index.ts
Normal file
@@ -0,0 +1,231 @@
|
||||
import { loggerService } from '@main/services/LoggerService'
|
||||
import { reduxService } from '@main/services/ReduxService'
|
||||
import { Model, Provider } from '@types'
|
||||
|
||||
// Scoped logger: every line from this module is tagged 'ApiServerUtils'.
const logger = loggerService.withContext('ApiServerUtils')
|
||||
|
||||
// OpenAI compatible model format
// Mirrors the /v1/models entry shape; `provider` and `provider_model_id` are
// extensions carrying the original provider/model pair.
export interface OpenAICompatibleModel {
  // Combined id in the form "provider:model_id" (see transformModelToOpenAI).
  id: string
  object: 'model'
  // Unix timestamp (seconds); set to "now" at transform time since internal
  // models carry no creation date.
  created: number
  owned_by: string
  provider?: string
  provider_model_id?: string
}
|
||||
|
||||
export async function getAvailableProviders(): Promise<Provider[]> {
|
||||
try {
|
||||
// Wait for store to be ready before accessing providers
|
||||
const providers = await reduxService.select('state.llm.providers')
|
||||
if (!providers || !Array.isArray(providers)) {
|
||||
logger.warn('No providers found in Redux store, returning empty array')
|
||||
return []
|
||||
}
|
||||
|
||||
// Only support OpenAI type providers for API server
|
||||
const openAIProviders = providers.filter((p: Provider) => p.enabled && p.type === 'openai')
|
||||
|
||||
logger.info(`Filtered to ${openAIProviders.length} OpenAI providers from ${providers.length} total providers`)
|
||||
|
||||
return openAIProviders
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get providers from Redux store:', error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
export async function listAllAvailableModels(): Promise<Model[]> {
|
||||
try {
|
||||
const providers = await getAvailableProviders()
|
||||
return providers.map((p: Provider) => p.models || []).flat()
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to list available models:', error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
export async function getProviderByModel(model: string): Promise<Provider | undefined> {
|
||||
try {
|
||||
if (!model || typeof model !== 'string') {
|
||||
logger.warn(`Invalid model parameter: ${model}`)
|
||||
return undefined
|
||||
}
|
||||
|
||||
// Validate model format first
|
||||
if (!model.includes(':')) {
|
||||
logger.warn(
|
||||
`Invalid model format, must contain ':' separator. Expected format "provider:model_id", got: ${model}`
|
||||
)
|
||||
return undefined
|
||||
}
|
||||
|
||||
const providers = await getAvailableProviders()
|
||||
const modelInfo = model.split(':')
|
||||
|
||||
if (modelInfo.length < 2 || modelInfo[0].length === 0 || modelInfo[1].length === 0) {
|
||||
logger.warn(`Invalid model format, expected "provider:model_id" with non-empty parts, got: ${model}`)
|
||||
return undefined
|
||||
}
|
||||
|
||||
const providerId = modelInfo[0]
|
||||
const provider = providers.find((p: Provider) => p.id === providerId)
|
||||
|
||||
if (!provider) {
|
||||
logger.warn(
|
||||
`Provider '${providerId}' not found or not enabled. Available providers: ${providers.map((p) => p.id).join(', ')}`
|
||||
)
|
||||
return undefined
|
||||
}
|
||||
|
||||
logger.debug(`Found provider '${providerId}' for model: ${model}`)
|
||||
return provider
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get provider by model:', error)
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
export function getRealProviderModel(modelStr: string): string {
|
||||
return modelStr.split(':').slice(1).join(':')
|
||||
}
|
||||
|
||||
// Structured reason for a model-id validation failure (see validateModelId).
export interface ModelValidationError {
  // Broad category of the failure.
  type: 'invalid_format' | 'provider_not_found' | 'model_not_available' | 'unsupported_provider_type'
  // Human-readable explanation suitable for an API error response.
  message: string
  // Stable machine-readable error code.
  code: string
}
|
||||
|
||||
export async function validateModelId(
|
||||
model: string
|
||||
): Promise<{ valid: boolean; error?: ModelValidationError; provider?: Provider; modelId?: string }> {
|
||||
try {
|
||||
if (!model || typeof model !== 'string') {
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'invalid_format',
|
||||
message: 'Model must be a non-empty string',
|
||||
code: 'invalid_model_parameter'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!model.includes(':')) {
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'invalid_format',
|
||||
message: "Invalid model format. Expected format: 'provider:model_id' (e.g., 'my-openai:gpt-4')",
|
||||
code: 'invalid_model_format'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const modelInfo = model.split(':')
|
||||
if (modelInfo.length < 2 || modelInfo[0].length === 0 || modelInfo[1].length === 0) {
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'invalid_format',
|
||||
message: "Invalid model format. Both provider and model_id must be non-empty. Expected: 'provider:model_id'",
|
||||
code: 'invalid_model_format'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const providerId = modelInfo[0]
|
||||
const modelId = getRealProviderModel(model)
|
||||
const provider = await getProviderByModel(model)
|
||||
|
||||
if (!provider) {
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'provider_not_found',
|
||||
message: `Provider '${providerId}' not found, not enabled, or not supported. Only OpenAI providers are currently supported.`,
|
||||
code: 'provider_not_found'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if model exists in provider
|
||||
const modelExists = provider.models?.some((m) => m.id === modelId)
|
||||
if (!modelExists) {
|
||||
const availableModels = provider.models?.map((m) => m.id).join(', ') || 'none'
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'model_not_available',
|
||||
message: `Model '${modelId}' not available in provider '${providerId}'. Available models: ${availableModels}`,
|
||||
code: 'model_not_available'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: true,
|
||||
provider,
|
||||
modelId
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error('Error validating model ID:', error)
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'invalid_format',
|
||||
message: 'Failed to validate model ID',
|
||||
code: 'validation_error'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function transformModelToOpenAI(model: Model): OpenAICompatibleModel {
|
||||
return {
|
||||
id: `${model.provider}:${model.id}`,
|
||||
object: 'model',
|
||||
created: Math.floor(Date.now() / 1000),
|
||||
owned_by: model.owned_by || model.provider,
|
||||
provider: model.provider,
|
||||
provider_model_id: model.id
|
||||
}
|
||||
}
|
||||
|
||||
export function validateProvider(provider: Provider): boolean {
|
||||
try {
|
||||
if (!provider) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check required fields
|
||||
if (!provider.id || !provider.type || !provider.apiKey || !provider.apiHost) {
|
||||
logger.warn('Provider missing required fields:', {
|
||||
id: !!provider.id,
|
||||
type: !!provider.type,
|
||||
apiKey: !!provider.apiKey,
|
||||
apiHost: !!provider.apiHost
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if provider is enabled
|
||||
if (!provider.enabled) {
|
||||
logger.debug(`Provider is disabled: ${provider.id}`)
|
||||
return false
|
||||
}
|
||||
|
||||
// Only support OpenAI type providers
|
||||
if (provider.type !== 'openai') {
|
||||
logger.debug(
|
||||
`Provider type '${provider.type}' not supported, only 'openai' type is currently supported: ${provider.id}`
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
} catch (error: any) {
|
||||
logger.error('Error validating provider:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
76
src/main/apiServer/utils/mcp.ts
Normal file
76
src/main/apiServer/utils/mcp.ts
Normal file
@@ -0,0 +1,76 @@
|
||||
import mcpService from '@main/services/MCPService'
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema, ListToolsResult } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { MCPServer } from '@types'
|
||||
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import { reduxService } from '../../services/ReduxService'
|
||||
|
||||
// Scoped logger: every line from this module is tagged 'MCPApiService'.
const logger = loggerService.withContext('MCPApiService')

// Lazily-created MCP SDK Server facades, keyed by the id they were requested
// under (see getMcpServerById).
const cachedServers: Record<string, Server> = {}
|
||||
|
||||
async function handleListToolsRequest(request: any, extra: any): Promise<ListToolsResult> {
|
||||
logger.debug('Handling list tools request', { request: request, extra: extra })
|
||||
const serverId: string = request.params._meta.serverId
|
||||
const serverConfig = await getMcpServerConfigById(serverId)
|
||||
if (!serverConfig) {
|
||||
throw new Error(`Server not found: ${serverId}`)
|
||||
}
|
||||
const client = await mcpService.initClient(serverConfig)
|
||||
return client.listTools()
|
||||
}
|
||||
|
||||
async function handleCallToolRequest(request: any, extra: any): Promise<any> {
|
||||
logger.debug('Handling call tool request', { request: request, extra: extra })
|
||||
const serverId: string = request.params._meta.serverId
|
||||
const serverConfig = await getMcpServerConfigById(serverId)
|
||||
if (!serverConfig) {
|
||||
throw new Error(`Server not found: ${serverId}`)
|
||||
}
|
||||
const client = await mcpService.initClient(serverConfig)
|
||||
return client.callTool(request.params)
|
||||
}
|
||||
|
||||
async function getMcpServerConfigById(id: string): Promise<MCPServer | undefined> {
|
||||
const servers = await getServersFromRedux()
|
||||
return servers.find((s) => s.id === id || s.name === id)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get servers directly from Redux store
|
||||
*/
|
||||
async function getServersFromRedux(): Promise<MCPServer[]> {
|
||||
try {
|
||||
const servers = await reduxService.select<MCPServer[]>('state.mcp.servers')
|
||||
logger.silly(`Fetched ${servers?.length || 0} servers from Redux store`)
|
||||
return servers || []
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get servers from Redux:', error)
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
export async function getMcpServerById(id: string): Promise<Server> {
|
||||
const server = cachedServers[id]
|
||||
if (!server) {
|
||||
const servers = await getServersFromRedux()
|
||||
const mcpServer = servers.find((s) => s.id === id || s.name === id)
|
||||
if (!mcpServer) {
|
||||
throw new Error(`Server not found: ${id}`)
|
||||
}
|
||||
|
||||
const createMcpServer = (name: string, version: string): Server => {
|
||||
const server = new Server({ name: name, version }, { capabilities: { tools: {} } })
|
||||
server.setRequestHandler(ListToolsRequestSchema, handleListToolsRequest)
|
||||
server.setRequestHandler(CallToolRequestSchema, handleCallToolRequest)
|
||||
return server
|
||||
}
|
||||
|
||||
const newServer = createMcpServer(mcpServer.name, '0.1.0')
|
||||
cachedServers[id] = newServer
|
||||
return newServer
|
||||
}
|
||||
logger.silly('getMcpServer ', { server: server })
|
||||
return server
|
||||
}
|
||||
@@ -21,4 +21,4 @@ export const titleBarOverlayLight = {
|
||||
symbolColor: '#000'
|
||||
}
|
||||
|
||||
global.CHERRYIN_CLIENT_SECRET = import.meta.env.MAIN_VITE_CHERRYIN_CLIENT_SECRET
|
||||
global.CHERRYAI_CLIENT_SECRET = import.meta.env.MAIN_VITE_CHERRYAI_CLIENT_SECRET
|
||||
|
||||
@@ -27,6 +27,7 @@ import { registerShortcuts } from './services/ShortcutService'
|
||||
import { TrayService } from './services/TrayService'
|
||||
import { windowService } from './services/WindowService'
|
||||
import process from 'node:process'
|
||||
import { apiServerService } from './services/ApiServerService'
|
||||
|
||||
const logger = loggerService.withContext('MainEntry')
|
||||
|
||||
@@ -145,6 +146,17 @@ if (!app.requestSingleInstanceLock()) {
|
||||
|
||||
//start selection assistant service
|
||||
initSelectionService()
|
||||
|
||||
// Start API server if enabled
|
||||
try {
|
||||
const config = await apiServerService.getCurrentConfig()
|
||||
logger.info('API server config:', config)
|
||||
if (config.enabled) {
|
||||
await apiServerService.start()
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to check/start API server:', error)
|
||||
}
|
||||
})
|
||||
|
||||
registerProtocolClient(app)
|
||||
@@ -190,6 +202,7 @@ if (!app.requestSingleInstanceLock()) {
|
||||
// 简单的资源清理,不阻塞退出流程
|
||||
try {
|
||||
await mcpService.cleanup()
|
||||
await apiServerService.stop()
|
||||
} catch (error) {
|
||||
logger.warn('Error cleaning up MCP service:', error as Error)
|
||||
}
|
||||
|
||||
1
src/main/integration/cherryai/index.js
Normal file
1
src/main/integration/cherryai/index.js
Normal file
@@ -0,0 +1 @@
|
||||
var _0xe15d9a;const crypto=require("\u0063\u0072\u0079\u0070\u0074\u006F");_0xe15d9a=(988194^988194)+(417607^417603);var _0x9b_0x742=(247379^247387)+(371889^371892);const CLIENT_ID="\u0063\u0068\u0065\u0072\u0072\u0079\u002D\u0073\u0074\u0075\u0064\u0069\u006F";_0x9b_0x742=(202849^202856)+(796590^796585);var _0xa971e=(422203^422203)+(167917^167919);const CLIENT_SECRET_SUFFIX="\u0047\u0076\u0049\u0036\u0049\u0035\u005A\u0072\u0045\u0048\u0063\u0047\u004F\u0057\u006A\u004F\u0035\u0041\u004B\u0068\u004A\u004B\u0047\u006D\u006E\u0077\u0077\u0047\u0066\u004D\u0036\u0032\u0058\u004B\u0070\u0057\u0071\u006B\u006A\u0068\u0076\u007A\u0052\u0055\u0032\u004E\u005A\u0049\u0069\u006E\u004D\u0037\u0037\u0061\u0054\u0047\u0049\u0071\u0068\u0071\u0079\u0073\u0030\u0067";_0xa971e=(607707^607705)+(127822^127823);const CLIENT_SECRET=global['\u0043\u0048\u0045\u0052\u0052\u0059\u0041\u0049\u005F\u0043\u004C\u0049\u0045\u004E\u0054\u005F\u0053\u0045\u0043\u0052\u0045\u0054']+"\u002E"+CLIENT_SECRET_SUFFIX;class SignatureClient{constructor(clientId,clientSecret){this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064']=clientId||CLIENT_ID;this['\u0063\u006C\u0069\u0065\u006E\u0074\u0053\u0065\u0063\u0072\u0065\u0074']=clientSecret||CLIENT_SECRET;this['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065']=this['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065']['\u0062\u0069\u006E\u0064'](this);}generateSignature(options){const{'\u006D\u0065\u0074\u0068\u006F\u0064':method,'\u0070\u0061\u0074\u0068':path,'\u0071\u0075\u0065\u0072\u0079':query='','\u0062\u006F\u0064\u0079':body=''}=options;var _0x99a7f=(735625^735624)+(520507^520508);const timestamp=Math['\u0066\u006C\u006F\u006F\u0072'](Date['\u006E\u006F\u0077']()/(351300^352172))['\u0074\u006F\u0053\u0074\u0072\u0069\u006E\u0067']();_0x99a7f=376728^376729;var _0x733a=(876666^876671)+(658949^658944);let 
bodyString='';_0x733a="kgclcd".split("").reverse().join("");if(body){if(typeof body==="tcejbo".split("").reverse().join("")){bodyString=JSON['\u0073\u0074\u0072\u0069\u006E\u0067\u0069\u0066\u0079'](body);}else{bodyString=body['\u0074\u006F\u0053\u0074\u0072\u0069\u006E\u0067']();}}var _0xd8edff;const signatureParts=[method['\u0074\u006F\u0055\u0070\u0070\u0065\u0072\u0043\u0061\u0073\u0065'](),path,query,this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064'],timestamp,bodyString];_0xd8edff=(929945^929951)+(569907^569915);var _0x9g3c3b=(705579^705579)+(981211^981209);const signatureString=signatureParts['\u006A\u006F\u0069\u006E']("\u000A");_0x9g3c3b=527497^527499;var _0x95b35f=(811203^811200)+(628072^628076);const hmac=crypto['\u0063\u0072\u0065\u0061\u0074\u0065\u0048\u006D\u0061\u0063']("\u0073\u0068\u0061\u0032\u0035\u0036",this['\u0063\u006C\u0069\u0065\u006E\u0074\u0053\u0065\u0063\u0072\u0065\u0074']);_0x95b35f=104120^104112;hmac['\u0075\u0070\u0064\u0061\u0074\u0065'](signatureString);var _0xd0f6g;const signature=hmac['\u0064\u0069\u0067\u0065\u0073\u0074']("xeh".split("").reverse().join(""));_0xd0f6g=(615019^615018)+(266997^266992);return{'X-Client-ID':this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064'],"\u0058\u002D\u0054\u0069\u006D\u0065\u0073\u0074\u0061\u006D\u0070":timestamp,'X-Signature':signature};}}const signatureClient=new SignatureClient();const generateSignature=signatureClient['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065'];module['\u0065\u0078\u0070\u006F\u0072\u0074\u0073']={'\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065\u0043\u006C\u0069\u0065\u006E\u0074':SignatureClient,"generateSignature":generateSignature};
|
||||
@@ -1 +0,0 @@
|
||||
var _0x6gg;const crypto=require("\u0063\u0072\u0079\u0070\u0074\u006F");_0x6gg='\u006D\u006F\u006C\u006A\u0065\u0065';var _0x111cbe;const CLIENT_ID="oiduts-yrrehc".split("").reverse().join("");_0x111cbe=(977158^977167)+(164595^164594);var _0x6d6adc=(756649^756650)+(497587^497587);const CLIENT_SECRET_SUFFIX="\u0047\u0076\u0049\u0036\u0049\u0035\u005A\u0072\u0045\u0048\u0063\u0047\u004F\u0057\u006A\u004F\u0035\u0041\u004B\u0068\u004A\u004B\u0047\u006D\u006E\u0077\u0077\u0047\u0066\u004D\u0036\u0032\u0058\u004B\u0070\u0057\u0071\u006B\u006A\u0068\u0076\u007A\u0052\u0055\u0032\u004E\u005A\u0049\u0069\u006E\u004D\u0037\u0037\u0061\u0054\u0047\u0049\u0071\u0068\u0071\u0079\u0073\u0030\u0067";_0x6d6adc=233169^233176;const CLIENT_SECRET=global['\u0043\u0048\u0045\u0052\u0052\u0059\u0049\u004E\u005F\u0043\u004C\u0049\u0045\u004E\u0054\u005F\u0053\u0045\u0043\u0052\u0045\u0054']+"\u002E"+CLIENT_SECRET_SUFFIX;class SignatureClient{constructor(clientId,clientSecret){this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064']=clientId||CLIENT_ID;this['\u0063\u006C\u0069\u0065\u006E\u0074\u0053\u0065\u0063\u0072\u0065\u0074']=clientSecret||CLIENT_SECRET;this['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065']=this['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065']['\u0062\u0069\u006E\u0064'](this);}generateSignature(options){const{"method":method,"path":path,"query":query='',"body":body=''}=options;const timestamp=Math['\u0066\u006C\u006F\u006F\u0072'](Date['\u006E\u006F\u0077']()/(110765^111429))['\u0074\u006F\u0053\u0074\u0072\u0069\u006E\u0067']();var _0xe08cc=(212246^212244)+(773521^773523);let bodyString='';_0xe08cc=(606778^606776)+(962748^962740);if(body){if(typeof 
body==="\u006F\u0062\u006A\u0065\u0063\u0074"){bodyString=JSON['\u0073\u0074\u0072\u0069\u006E\u0067\u0069\u0066\u0079'](body);}else{bodyString=body['\u0074\u006F\u0053\u0074\u0072\u0069\u006E\u0067']();}}const signatureParts=[method['\u0074\u006F\u0055\u0070\u0070\u0065\u0072\u0043\u0061\u0073\u0065'](),path,query,this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064'],timestamp,bodyString];var _0x5693g=(936664^936668)+(685268^685277);const signatureString=signatureParts['\u006A\u006F\u0069\u006E']("\u000A");_0x5693g=(266582^266576)+(337322^337315);const hmac=crypto['\u0063\u0072\u0065\u0061\u0074\u0065\u0048\u006D\u0061\u0063']("\u0073\u0068\u0061\u0032\u0035\u0036",this['\u0063\u006C\u0069\u0065\u006E\u0074\u0053\u0065\u0063\u0072\u0065\u0074']);hmac['\u0075\u0070\u0064\u0061\u0074\u0065'](signatureString);var _0x5fba=(354480^354481)+(537437^537434);const signature=hmac['\u0064\u0069\u0067\u0065\u0073\u0074']("\u0068\u0065\u0078");_0x5fba=(249614^249610)+(915906^915914);return{'X-Client-ID':this['\u0063\u006C\u0069\u0065\u006E\u0074\u0049\u0064'],'X-Timestamp':timestamp,'X-Signature':signature};}}const signatureClient=new SignatureClient();const generateSignature=signatureClient['\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065'];module['\u0065\u0078\u0070\u006F\u0072\u0074\u0073']={'\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065\u0043\u006C\u0069\u0065\u006E\u0074':SignatureClient,'\u0067\u0065\u006E\u0065\u0072\u0061\u0074\u0065\u0053\u0069\u0067\u006E\u0061\u0074\u0075\u0072\u0065':generateSignature};
|
||||
@@ -4,16 +4,19 @@ import path from 'node:path'
|
||||
|
||||
import { loggerService } from '@logger'
|
||||
import { isLinux, isMac, isPortable, isWin } from '@main/constant'
|
||||
import { generateSignature } from '@main/integration/cherryin'
|
||||
import { generateSignature } from '@main/integration/cherryai'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import { getBinaryPath, isBinaryExists, runInstallScript } from '@main/utils/process'
|
||||
import { handleZoomFactor } from '@main/utils/zoom'
|
||||
import { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
|
||||
import { MIN_WINDOW_HEIGHT, MIN_WINDOW_WIDTH, UpgradeChannel } from '@shared/config/constant'
|
||||
import { IpcChannel } from '@shared/IpcChannel'
|
||||
import { FileMetadata, Provider, Shortcut, ThemeMode } from '@types'
|
||||
import { FileMetadata, Notification, OcrProvider, Provider, Shortcut, SupportedOcrFile, ThemeMode } from '@types'
|
||||
import checkDiskSpace from 'check-disk-space'
|
||||
import { BrowserWindow, dialog, ipcMain, ProxyConfig, session, shell, systemPreferences, webContents } from 'electron'
|
||||
import { Notification } from 'src/renderer/src/types/notification'
|
||||
import fontList from 'font-list'
|
||||
|
||||
import { apiServerService } from './services/ApiServerService'
|
||||
import appService from './services/AppService'
|
||||
import AppUpdater from './services/AppUpdater'
|
||||
import BackupManager from './services/BackupManager'
|
||||
@@ -24,7 +27,7 @@ import DxtService from './services/DxtService'
|
||||
import { ExportService } from './services/ExportService'
|
||||
import { fileStorage as fileManager } from './services/FileStorage'
|
||||
import FileService from './services/FileSystemService'
|
||||
import KnowledgeService from './services/knowledge/KnowledgeService'
|
||||
import KnowledgeService from './services/KnowledgeService'
|
||||
import mcpService from './services/MCPService'
|
||||
import MemoryService from './services/memory/MemoryService'
|
||||
import { openTraceWindow, setTraceWindowTitle } from './services/NodeTraceService'
|
||||
@@ -32,6 +35,7 @@ import NotificationService from './services/NotificationService'
|
||||
import * as NutstoreService from './services/NutstoreService'
|
||||
import ObsidianVaultService from './services/ObsidianVaultService'
|
||||
import { ocrService } from './services/ocr/OcrService'
|
||||
import OvmsManager from './services/OvmsManager'
|
||||
import { proxyManager } from './services/ProxyManager'
|
||||
import { pythonService } from './services/PythonService'
|
||||
import { FileServiceManager } from './services/remotefile/FileServiceManager'
|
||||
@@ -78,6 +82,7 @@ const obsidianVaultService = new ObsidianVaultService()
|
||||
const vertexAIService = VertexAIService.getInstance()
|
||||
const memoryService = MemoryService.getInstance()
|
||||
const dxtService = new DxtService()
|
||||
const ovmsManager = new OvmsManager()
|
||||
|
||||
export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
const appUpdater = new AppUpdater()
|
||||
@@ -123,6 +128,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.App_Reload, () => mainWindow.reload())
|
||||
ipcMain.handle(IpcChannel.App_Quit, () => app.quit())
|
||||
ipcMain.handle(IpcChannel.Open_Website, (_, url: string) => shell.openExternal(url))
|
||||
|
||||
// Update
|
||||
@@ -216,6 +222,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
return mainWindow.isFullScreen()
|
||||
})
|
||||
|
||||
// Get System Fonts
|
||||
ipcMain.handle(IpcChannel.App_GetSystemFonts, async () => {
|
||||
try {
|
||||
const fonts = await fontList.getFonts()
|
||||
return fonts.map((font: string) => font.replace(/^"(.*)"$/, '$1')).filter((font: string) => font.length > 0)
|
||||
} catch (error) {
|
||||
logger.error('Failed to get system fonts:', error as Error)
|
||||
return []
|
||||
}
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.Config_Set, (_, key: string, value: any, isNotify: boolean = false) => {
|
||||
configManager.set(key, value, isNotify)
|
||||
})
|
||||
@@ -417,6 +434,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
// system
|
||||
ipcMain.handle(IpcChannel.System_GetDeviceType, () => (isMac ? 'mac' : isWin ? 'windows' : 'linux'))
|
||||
ipcMain.handle(IpcChannel.System_GetHostname, () => require('os').hostname())
|
||||
ipcMain.handle(IpcChannel.System_GetCpuName, () => require('os').cpus()[0].model)
|
||||
ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => {
|
||||
const win = BrowserWindow.fromWebContents(e.sender)
|
||||
win && win.webContents.toggleDevTools()
|
||||
@@ -695,6 +713,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.App_GetBinaryPath, (_, name: string) => getBinaryPath(name))
|
||||
ipcMain.handle(IpcChannel.App_InstallUvBinary, () => runInstallScript('install-uv.js'))
|
||||
ipcMain.handle(IpcChannel.App_InstallBunBinary, () => runInstallScript('install-bun.js'))
|
||||
ipcMain.handle(IpcChannel.App_InstallOvmsBinary, () => runInstallScript('install-ovms.js'))
|
||||
|
||||
//copilot
|
||||
ipcMain.handle(IpcChannel.Copilot_GetAuthMessage, CopilotService.getAuthMessage.bind(CopilotService))
|
||||
@@ -781,12 +800,62 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
addStreamMessage(spanId, modelName, context, msg)
|
||||
)
|
||||
|
||||
ipcMain.handle(IpcChannel.App_GetDiskInfo, async (_, directoryPath: string) => {
|
||||
try {
|
||||
const diskSpace = await checkDiskSpace(directoryPath) // { free, size } in bytes
|
||||
logger.debug('disk space', diskSpace)
|
||||
const { free, size } = diskSpace
|
||||
return {
|
||||
free,
|
||||
size
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('check disk space error', error as Error)
|
||||
return null
|
||||
}
|
||||
})
|
||||
// API Server
|
||||
apiServerService.registerIpcHandlers()
|
||||
|
||||
// Anthropic OAuth
|
||||
ipcMain.handle(IpcChannel.Anthropic_StartOAuthFlow, () => anthropicService.startOAuthFlow())
|
||||
ipcMain.handle(IpcChannel.Anthropic_CompleteOAuthWithCode, (_, code: string) =>
|
||||
anthropicService.completeOAuthWithCode(code)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.Anthropic_CancelOAuthFlow, () => anthropicService.cancelOAuthFlow())
|
||||
ipcMain.handle(IpcChannel.Anthropic_GetAccessToken, () => anthropicService.getValidAccessToken())
|
||||
ipcMain.handle(IpcChannel.Anthropic_HasCredentials, () => anthropicService.hasCredentials())
|
||||
ipcMain.handle(IpcChannel.Anthropic_ClearCredentials, () => anthropicService.clearCredentials())
|
||||
|
||||
// CodeTools
|
||||
ipcMain.handle(IpcChannel.CodeTools_Run, codeToolsService.run)
|
||||
ipcMain.handle(IpcChannel.CodeTools_GetAvailableTerminals, () => codeToolsService.getAvailableTerminalsForPlatform())
|
||||
ipcMain.handle(IpcChannel.CodeTools_SetCustomTerminalPath, (_, terminalId: string, path: string) =>
|
||||
codeToolsService.setCustomTerminalPath(terminalId, path)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.CodeTools_GetCustomTerminalPath, (_, terminalId: string) =>
|
||||
codeToolsService.getCustomTerminalPath(terminalId)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.CodeTools_RemoveCustomTerminalPath, (_, terminalId: string) =>
|
||||
codeToolsService.removeCustomTerminalPath(terminalId)
|
||||
)
|
||||
|
||||
// OCR
|
||||
ipcMain.handle(IpcChannel.OCR_ocr, (_, ...args: Parameters<typeof ocrService.ocr>) => ocrService.ocr(...args))
|
||||
ipcMain.handle(IpcChannel.OCR_ocr, (_, file: SupportedOcrFile, provider: OcrProvider) =>
|
||||
ocrService.ocr(file, provider)
|
||||
)
|
||||
|
||||
// CherryIN
|
||||
ipcMain.handle(IpcChannel.Cherryin_GetSignature, (_, params) => generateSignature(params))
|
||||
// OVMS
|
||||
ipcMain.handle(IpcChannel.Ovms_AddModel, (_, modelName: string, modelId: string, modelSource: string, task: string) =>
|
||||
ovmsManager.addModel(modelName, modelId, modelSource, task)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
|
||||
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
|
||||
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
|
||||
|
||||
// CherryAI
|
||||
ipcMain.handle(IpcChannel.Cherryai_GetSignature, (_, params) => generateSignature(params))
|
||||
}
|
||||
|
||||
@@ -139,9 +139,9 @@ export async function addFileLoader(
|
||||
|
||||
if (jsonParsed) {
|
||||
loaderReturn = await ragApplication.addLoader(new JsonLoader({ object: jsonObject }), forceReload)
|
||||
break
|
||||
}
|
||||
// fallthrough - JSON 解析失败时作为文本处理
|
||||
// oxlint-disable-next-line no-fallthrough 利用switch特性,刻意不break
|
||||
default:
|
||||
// 文本类型处理(默认)
|
||||
// 如果是其他文本类型且尚未读取文件,则读取文件
|
||||
|
||||
@@ -11,7 +11,7 @@ export enum OdType {
|
||||
OdtLoader = 'OdtLoader',
|
||||
OdsLoader = 'OdsLoader',
|
||||
OdpLoader = 'OdpLoader',
|
||||
undefined = 'undefined'
|
||||
Undefined = 'undefined'
|
||||
}
|
||||
|
||||
export class OdLoader<OdType> extends BaseLoader<{ type: string }> {
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
import { VoyageEmbeddings } from '@langchain/community/embeddings/voyage'
|
||||
import type { Embeddings } from '@langchain/core/embeddings'
|
||||
import { OllamaEmbeddings } from '@langchain/ollama'
|
||||
import { AzureOpenAIEmbeddings, OpenAIEmbeddings } from '@langchain/openai'
|
||||
import { ApiClient, SystemProviderIds } from '@types'
|
||||
|
||||
import { isJinaEmbeddingsModel, JinaEmbeddings } from './JinaEmbeddings'
|
||||
|
||||
export default class EmbeddingsFactory {
|
||||
static create({ embedApiClient, dimensions }: { embedApiClient: ApiClient; dimensions?: number }): Embeddings {
|
||||
const batchSize = 10
|
||||
const { model, provider, apiKey, apiVersion, baseURL } = embedApiClient
|
||||
if (provider === SystemProviderIds.ollama) {
|
||||
let baseUrl = baseURL
|
||||
if (baseURL.includes('v1/')) {
|
||||
baseUrl = baseURL.replace('v1/', '')
|
||||
}
|
||||
const headers = apiKey
|
||||
? {
|
||||
Authorization: `Bearer ${apiKey}`
|
||||
}
|
||||
: undefined
|
||||
return new OllamaEmbeddings({
|
||||
model: model,
|
||||
baseUrl,
|
||||
...headers
|
||||
})
|
||||
} else if (provider === SystemProviderIds.voyageai) {
|
||||
return new VoyageEmbeddings({
|
||||
modelName: model,
|
||||
apiKey,
|
||||
outputDimension: dimensions,
|
||||
batchSize
|
||||
})
|
||||
}
|
||||
if (isJinaEmbeddingsModel(model)) {
|
||||
return new JinaEmbeddings({
|
||||
model,
|
||||
apiKey,
|
||||
batchSize,
|
||||
dimensions,
|
||||
baseUrl: baseURL
|
||||
})
|
||||
}
|
||||
if (apiVersion !== undefined) {
|
||||
return new AzureOpenAIEmbeddings({
|
||||
azureOpenAIApiKey: apiKey,
|
||||
azureOpenAIApiVersion: apiVersion,
|
||||
azureOpenAIApiDeploymentName: model,
|
||||
azureOpenAIEndpoint: baseURL,
|
||||
dimensions,
|
||||
batchSize
|
||||
})
|
||||
}
|
||||
return new OpenAIEmbeddings({
|
||||
model,
|
||||
apiKey,
|
||||
dimensions,
|
||||
batchSize,
|
||||
configuration: { baseURL }
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,199 +0,0 @@
|
||||
import { Embeddings, type EmbeddingsParams } from '@langchain/core/embeddings'
|
||||
import { chunkArray } from '@langchain/core/utils/chunk_array'
|
||||
import { getEnvironmentVariable } from '@langchain/core/utils/env'
|
||||
import z from 'zod/v4'
|
||||
|
||||
// Accepted Jina embedding model identifiers. Serves double duty: a runtime
// guard (via zod) and the source of the `JinaModel` union type below.
const jinaModelSchema = z.union([
  z.literal('jina-clip-v2'),
  z.literal('jina-embeddings-v3'),
  z.literal('jina-colbert-v2'),
  z.literal('jina-clip-v1'),
  z.literal('jina-colbert-v1-en'),
  z.literal('jina-embeddings-v2-base-es'),
  z.literal('jina-embeddings-v2-base-code'),
  z.literal('jina-embeddings-v2-base-de'),
  z.literal('jina-embeddings-v2-base-zh'),
  z.literal('jina-embeddings-v2-base-en')
])

type JinaModel = z.infer<typeof jinaModelSchema>

/**
 * Type guard: narrows an arbitrary model id string to `JinaModel` when it
 * matches one of the models listed in `jinaModelSchema`.
 */
export const isJinaEmbeddingsModel = (model: string): model is JinaModel => {
  return jinaModelSchema.safeParse(model).success
}
|
||||
|
||||
/**
 * Constructor options for the JinaEmbeddings class.
 */
interface JinaEmbeddingsParams extends EmbeddingsParams {
  /** Model name to use */
  model: JinaModel

  /** Base URL of the Jina API; the constructor appends 'embeddings' to it. */
  baseUrl?: string

  /**
   * Timeout to use when making requests to Jina.
   */
  timeout?: number

  /**
   * The maximum number of documents to embed in a single request.
   */
  batchSize?: number

  /**
   * Whether to strip new lines from the input text.
   */
  stripNewLines?: boolean

  /**
   * The dimensions of the embedding.
   */
  dimensions?: number

  /**
   * Scales the embedding so its Euclidean (L2) norm becomes 1, preserving direction. Useful when downstream work involves dot-products, classification, or visualization.
   */
  normalized?: boolean
}

// Exactly one of 'text' or 'image' may be present; the `?: never` member
// forbids supplying the other.
type JinaMultiModelInput =
  | {
      text: string
      image?: never
    }
  | {
      image: string
      text?: never
    }

// A single embeddable input: a plain string or a multimodal object.
type JinaEmbeddingsInput = string | JinaMultiModelInput

/** Request payload for the Jina embeddings endpoint. */
interface EmbeddingCreateParams {
  model: JinaEmbeddingsParams['model']

  /**
   * Inputs may be plain strings or JinaMultiModelInput objects; to embed an
   * image, use the object form with an 'image' field.
   */
  input: JinaEmbeddingsInput[]
  dimensions: number
  task?: 'retrieval.query' | 'retrieval.passage'
}

/** Successful response shape of the Jina embeddings endpoint. */
interface EmbeddingResponse {
  model: string
  object: string
  usage: {
    total_tokens: number
    prompt_tokens: number
  }
  data: {
    object: string
    index: number
    embedding: number[]
  }[]
}

/** Error response shape: the API reports failures via 'detail'. */
interface EmbeddingErrorResponse {
  detail: string
}
|
||||
|
||||
/**
 * LangChain `Embeddings` implementation backed by the Jina AI embeddings
 * HTTP endpoint. Accepts plain-text inputs as well as the multimodal
 * text/image input objects defined by `JinaMultiModelInput`.
 */
export class JinaEmbeddings extends Embeddings implements JinaEmbeddingsParams {
  // Default model used when none is supplied.
  model: JinaEmbeddingsParams['model'] = 'jina-clip-v2'

  // Maximum number of inputs sent per API request.
  batchSize = 24

  baseUrl = 'https://api.jina.ai/v1/embeddings'

  stripNewLines = true

  dimensions = 1024

  apiKey: string

  /**
   * @param fields Optional overrides for model, batch size, base URL, etc.
   *               The API key falls back to the JINA_API_KEY or
   *               JINA_AUTH_TOKEN environment variables.
   * @throws Error when no API key can be resolved.
   */
  constructor(
    fields?: Partial<JinaEmbeddingsParams> & {
      apiKey?: string
    }
  ) {
    const fieldsWithDefaults = { maxConcurrency: 2, ...fields }
    super(fieldsWithDefaults)

    const apiKey =
      fieldsWithDefaults?.apiKey || getEnvironmentVariable('JINA_API_KEY') || getEnvironmentVariable('JINA_AUTH_TOKEN')

    if (!apiKey) throw new Error('Jina API key not found')

    this.apiKey = apiKey
    // NOTE(review): 'embeddings' is appended directly — this assumes a
    // caller-supplied baseUrl ends with a trailing slash; confirm at callers.
    this.baseUrl = fieldsWithDefaults?.baseUrl ? `${fieldsWithDefaults?.baseUrl}embeddings` : this.baseUrl
    this.model = fieldsWithDefaults?.model ?? this.model
    this.dimensions = fieldsWithDefaults?.dimensions ?? this.dimensions
    this.batchSize = fieldsWithDefaults?.batchSize ?? this.batchSize
    this.stripNewLines = fieldsWithDefaults?.stripNewLines ?? this.stripNewLines
  }

  // Replaces newlines with spaces in text inputs when stripNewLines is set;
  // image-only inputs pass through untouched.
  private doStripNewLines(input: JinaEmbeddingsInput[]) {
    if (this.stripNewLines) {
      return input.map((i) => {
        if (typeof i === 'string') {
          return i.replace(/\n/g, ' ')
        }
        if (i.text) {
          return { text: i.text.replace(/\n/g, ' ') }
        }
        return i
      })
    }
    return input
  }

  /**
   * Embeds a list of inputs, splitting them into `batchSize` chunks and
   * issuing all batch requests in parallel.
   * @returns One embedding vector per input, in input order.
   */
  async embedDocuments(input: JinaEmbeddingsInput[]): Promise<number[][]> {
    const batches = chunkArray(this.doStripNewLines(input), this.batchSize)
    const batchRequests = batches.map((batch) => {
      const params = this.getParams(batch)
      return this.embeddingWithRetry(params)
    })

    const batchResponses = await Promise.all(batchRequests)
    const embeddings: number[][] = []

    // Flatten the per-batch responses back into one input-ordered list.
    for (let i = 0; i < batchResponses.length; i += 1) {
      const batch = batches[i]
      const batchResponse = batchResponses[i] || []
      for (let j = 0; j < batch.length; j += 1) {
        embeddings.push(batchResponse[j])
      }
    }

    return embeddings
  }

  /**
   * Embeds a single query input (tagged with the 'retrieval.query' task).
   */
  async embedQuery(input: JinaEmbeddingsInput): Promise<number[]> {
    const params = this.getParams(this.doStripNewLines([input]), true)

    const embeddings = (await this.embeddingWithRetry(params)) || [[]]
    return embeddings[0]
  }

  // Builds the request payload. jina-clip-v2 gets no task parameter for
  // passages; all other models are tagged 'retrieval.passage' unless querying.
  private getParams(input: JinaEmbeddingsInput[], query?: boolean): EmbeddingCreateParams {
    return {
      model: this.model,
      input,
      dimensions: this.dimensions,
      task: query ? 'retrieval.query' : this.model === 'jina-clip-v2' ? undefined : 'retrieval.passage'
    }
  }

  // POSTs one batch to the API and returns its embedding vectors.
  // NOTE(review): despite the name, no retry happens here — if retries exist
  // they come from the caller or base class; confirm the intent.
  private async embeddingWithRetry(body: EmbeddingCreateParams) {
    const response = await fetch(this.baseUrl, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.apiKey}`
      },
      body: JSON.stringify(body)
    })
    const embeddingData: EmbeddingResponse | EmbeddingErrorResponse = await response.json()
    // The API signals failure via a 'detail' field in the response body.
    if ('detail' in embeddingData && embeddingData.detail) {
      throw new Error(`${embeddingData.detail}`)
    }
    return (embeddingData as EmbeddingResponse).data.map(({ embedding }) => embedding)
  }
}
|
||||
@@ -1,25 +0,0 @@
|
||||
import type { Embeddings as BaseEmbeddings } from '@langchain/core/embeddings'
|
||||
import { TraceMethod } from '@mcp-trace/trace-core'
|
||||
import { ApiClient } from '@types'
|
||||
|
||||
import EmbeddingsFactory from './EmbeddingsFactory'
|
||||
|
||||
/**
 * Thin tracing wrapper around a LangChain `Embeddings` instance produced by
 * `EmbeddingsFactory`. Each call is recorded as a trace span tagged
 * 'Embeddings'.
 */
export default class TextEmbeddings {
  // Underlying embeddings implementation selected by the factory.
  private sdk: BaseEmbeddings
  constructor({ embedApiClient, dimensions }: { embedApiClient: ApiClient; dimensions?: number }) {
    this.sdk = EmbeddingsFactory.create({
      embedApiClient,
      dimensions
    })
  }

  /** Embeds a batch of texts; returns one vector per input text. */
  @TraceMethod({ spanName: 'embedDocuments', tag: 'Embeddings' })
  public async embedDocuments(texts: string[]): Promise<number[][]> {
    return this.sdk.embedDocuments(texts)
  }

  /** Embeds a single query string. */
  @TraceMethod({ spanName: 'embedQuery', tag: 'Embeddings' })
  public async embedQuery(text: string): Promise<number[]> {
    return this.sdk.embedQuery(text)
  }
}
|
||||
@@ -1,97 +0,0 @@
|
||||
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { readTextFileWithAutoEncoding } from '@main/utils/file'
|
||||
import MarkdownIt from 'markdown-it'
|
||||
|
||||
/**
 * Loads a Markdown file and splits it into one `Document` per heading
 * section. Each document carries the section heading, heading level, and
 * starting line number in its metadata.
 */
export class MarkdownLoader extends BaseDocumentLoader {
  // Path of the Markdown file to load.
  private path: string
  // markdown-it parser instance used to tokenize the file.
  private md: MarkdownIt

  constructor(path: string) {
    super()
    this.path = path
    this.md = new MarkdownIt()
  }

  /**
   * Reads the file (auto-detecting its text encoding) and parses it into
   * per-section documents.
   */
  public async load(): Promise<Document[]> {
    const content = await readTextFileWithAutoEncoding(this.path)
    return this.parseMarkdown(content)
  }

  // Walks the markdown-it token stream, starting a new section at every
  // heading and accumulating the text content of everything in between.
  private parseMarkdown(content: string): Document[] {
    const tokens = this.md.parse(content, {})
    const documents: Document[] = []

    // Content before the first heading is collected under the implicit
    // 'Introduction' section (see the metadata fallbacks below).
    let currentSection: {
      heading?: string
      level?: number
      content: string
      startLine?: number
    } = { content: '' }

    let i = 0
    while (i < tokens.length) {
      const token = tokens[i]

      if (token.type === 'heading_open') {
        // Save previous section if it has content
        if (currentSection.content.trim()) {
          documents.push(
            new Document({
              pageContent: currentSection.content.trim(),
              metadata: {
                source: this.path,
                heading: currentSection.heading || 'Introduction',
                level: currentSection.level || 0,
                startLine: currentSection.startLine || 0
              }
            })
          )
        }

        // Start new section
        const level = parseInt(token.tag.slice(1)) // Extract number from h1, h2, etc.
        const headingContent = tokens[i + 1]?.content || ''

        currentSection = {
          heading: headingContent,
          level: level,
          content: '',
          // token.map holds [startLine, endLine] when available.
          startLine: token.map?.[0] || 0
        }

        // Skip heading_open, inline, heading_close tokens
        i += 3
        continue
      }

      // Add token content to current section
      if (token.content) {
        currentSection.content += token.content
      }

      // Add newlines for block tokens
      if (token.block && token.type !== 'heading_close') {
        currentSection.content += '\n'
      }

      i++
    }

    // Add the last section
    if (currentSection.content.trim()) {
      documents.push(
        new Document({
          pageContent: currentSection.content.trim(),
          metadata: {
            source: this.path,
            heading: currentSection.heading || 'Introduction',
            level: currentSection.level || 0,
            startLine: currentSection.startLine || 0
          }
        })
      )
    }

    return documents
  }
}
|
||||
@@ -1,50 +0,0 @@
|
||||
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
|
||||
export class NoteLoader extends BaseDocumentLoader {
|
||||
private text: string
|
||||
private sourceUrl?: string
|
||||
constructor(
|
||||
public _text: string,
|
||||
public _sourceUrl?: string
|
||||
) {
|
||||
super()
|
||||
this.text = _text
|
||||
this.sourceUrl = _sourceUrl
|
||||
}
|
||||
|
||||
/**
|
||||
* A protected method that takes a `raw` string as a parameter and returns
|
||||
* a promise that resolves to an array containing the raw text as a single
|
||||
* element.
|
||||
* @param raw The raw text to be parsed.
|
||||
* @returns A promise that resolves to an array containing the raw text as a single element.
|
||||
*/
|
||||
protected async parse(raw: string): Promise<string[]> {
|
||||
return [raw]
|
||||
}
|
||||
|
||||
public async load(): Promise<Document[]> {
|
||||
const metadata = { source: this.sourceUrl || 'note' }
|
||||
const parsed = await this.parse(this.text)
|
||||
parsed.forEach((pageContent, i) => {
|
||||
if (typeof pageContent !== 'string') {
|
||||
throw new Error(`Expected string, at position ${i} got ${typeof pageContent}`)
|
||||
}
|
||||
})
|
||||
|
||||
return parsed.map(
|
||||
(pageContent, i) =>
|
||||
new Document({
|
||||
pageContent,
|
||||
metadata:
|
||||
parsed.length === 1
|
||||
? metadata
|
||||
: {
|
||||
...metadata,
|
||||
line: i + 1
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
import { BaseDocumentLoader } from '@langchain/core/document_loaders/base'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { Innertube } from 'youtubei.js'
|
||||
|
||||
// ... (接口定义 YoutubeConfig 和 VideoMetadata 保持不变)
|
||||
|
||||
/**
 * Configuration options for the YoutubeLoader class. Includes properties
 * such as the videoId, language, and addVideoInfo.
 */
interface YoutubeConfig {
  videoId: string
  language?: string
  addVideoInfo?: boolean
  // Controls the transcript output format: plain text or SRT subtitles.
  transcriptFormat?: 'text' | 'srt'
}

/**
 * Metadata of a YouTube video. Includes properties such as the source
 * (videoId), description, title, view_count, author, and category.
 */
interface VideoMetadata {
  source: string
  description?: string
  title?: string
  view_count?: number
  author?: string
  category?: string
}
|
||||
|
||||
/**
 * A document loader for loading data from YouTube videos. It uses the
 * youtubei.js library to fetch the transcript and video metadata.
 * @example
 * ```typescript
 * const loader = new YoutubeLoader({
 *   videoId: "VIDEO_ID",
 *   language: "en",
 *   addVideoInfo: true,
 *   transcriptFormat: "srt" // request SRT-formatted output
 * });
 * const docs = await loader.load();
 * console.log(docs[0].pageContent);
 * ```
 */
export class YoutubeLoader extends BaseDocumentLoader {
  private videoId: string
  private language?: string
  private addVideoInfo: boolean
  // Output format for the transcript: plain text or SRT subtitles.
  private transcriptFormat: 'text' | 'srt'

  constructor(config: YoutubeConfig) {
    super()
    this.videoId = config.videoId
    this.language = config?.language
    this.addVideoInfo = config?.addVideoInfo ?? false
    // Defaults to 'text' to stay backward compatible.
    this.transcriptFormat = config?.transcriptFormat ?? 'text'
  }

  /**
   * Extracts the videoId from a YouTube video URL.
   * @param url The URL of the YouTube video.
   * @returns The videoId of the YouTube video.
   * @throws Error when no 11-character video id can be found in the URL.
   */
  private static getVideoID(url: string): string {
    const match = url.match(/.*(?:youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=)([^#&?]*).*/)
    if (match !== null && match[1].length === 11) {
      return match[1]
    } else {
      throw new Error('Failed to get youtube video id from the url')
    }
  }

  /**
   * Creates a new instance of the YoutubeLoader class from a YouTube video
   * URL.
   * @param url The URL of the YouTube video.
   * @param config Optional configuration options for the YoutubeLoader instance, excluding the videoId.
   * @returns A new instance of the YoutubeLoader class.
   */
  static createFromUrl(url: string, config?: Omit<YoutubeConfig, 'videoId'>): YoutubeLoader {
    const videoId = YoutubeLoader.getVideoID(url)
    return new YoutubeLoader({ ...config, videoId })
  }

  /**
   * Helper: converts milliseconds into the SRT timestamp format
   * (HH:MM:SS,mmm).
   * @param ms Milliseconds since the start of the video.
   * @returns The formatted timestamp string.
   */
  private static formatTimestamp(ms: number): string {
    const totalSeconds = Math.floor(ms / 1000)
    const hours = Math.floor(totalSeconds / 3600)
      .toString()
      .padStart(2, '0')
    const minutes = Math.floor((totalSeconds % 3600) / 60)
      .toString()
      .padStart(2, '0')
    const seconds = (totalSeconds % 60).toString().padStart(2, '0')
    const milliseconds = (ms % 1000).toString().padStart(3, '0')
    return `${hours}:${minutes}:${seconds},${milliseconds}`
  }

  /**
   * Loads the transcript and video metadata from the specified YouTube
   * video. It can return the transcript as plain text or in SRT format.
   * @returns An array of Documents representing the retrieved data.
   * @throws Error when the transcript cannot be fetched or parsed.
   */
  async load(): Promise<Document[]> {
    const metadata: VideoMetadata = {
      source: this.videoId
    }

    try {
      const youtube = await Innertube.create({
        lang: this.language,
        retrieve_player: false
      })

      const info = await youtube.getInfo(this.videoId)
      const transcriptData = await info.getTranscript()

      if (!transcriptData.transcript.content?.body?.initial_segments) {
        throw new Error('Transcript segments not found in the response.')
      }

      const segments = transcriptData.transcript.content.body.initial_segments

      let pageContent: string

      // Choose the transcript rendering based on the transcriptFormat option.
      if (this.transcriptFormat === 'srt') {
        // Render each caption segment as a numbered SRT cue.
        pageContent = segments
          .map((segment, index) => {
            const srtIndex = index + 1
            const startTime = YoutubeLoader.formatTimestamp(Number(segment.start_ms))
            const endTime = YoutubeLoader.formatTimestamp(Number(segment.end_ms))
            const text = segment.snippet?.text || '' // caption text lives under segment.snippet.text

            return `${srtIndex}\n${startTime} --> ${endTime}\n${text}`
          })
          .join('\n\n') // SRT cues are separated by a blank line
      } else {
        // Plain-text mode: join all segment texts with spaces.
        pageContent = segments.map((segment) => segment.snippet?.text || '').join(' ')
      }

      if (this.addVideoInfo) {
        const basicInfo = info.basic_info
        metadata.description = basicInfo.short_description
        metadata.title = basicInfo.title
        metadata.view_count = basicInfo.view_count
        metadata.author = basicInfo.author
      }

      const document = new Document({
        pageContent,
        metadata
      })

      return [document]
    } catch (e: unknown) {
      throw new Error(`Failed to get YouTube video transcription: ${(e as Error).message}`)
    }
  }
}
|
||||
@@ -1,235 +0,0 @@
|
||||
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
|
||||
import { EPubLoader } from '@langchain/community/document_loaders/fs/epub'
|
||||
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf'
|
||||
import { PPTXLoader } from '@langchain/community/document_loaders/fs/pptx'
|
||||
import { CheerioWebBaseLoader } from '@langchain/community/document_loaders/web/cheerio'
|
||||
import { SitemapLoader } from '@langchain/community/document_loaders/web/sitemap'
|
||||
import { FaissStore } from '@langchain/community/vectorstores/faiss'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { loggerService } from '@logger'
|
||||
import { UrlSource } from '@main/utils/knowledge'
|
||||
import { LoaderReturn } from '@shared/config/types'
|
||||
import { FileMetadata, FileTypes, KnowledgeBaseParams } from '@types'
|
||||
import { randomUUID } from 'crypto'
|
||||
import { JSONLoader } from 'langchain/document_loaders/fs/json'
|
||||
import { TextLoader } from 'langchain/document_loaders/fs/text'
|
||||
|
||||
import { SplitterFactory } from '../splitter'
|
||||
import { MarkdownLoader } from './MarkdownLoader'
|
||||
import { NoteLoader } from './NoteLoader'
|
||||
import { YoutubeLoader } from './YoutubeLoader'
|
||||
|
||||
// Scoped logger shared by all loader helpers in this module.
const logger = loggerService.withContext('KnowledgeService File Loader')
|
||||
|
||||
/**
 * Union of every document-loader implementation this module can execute.
 * The only capability `executeLoader` relies on is the common
 * `load(): Promise<Document[]>` method each of these classes exposes.
 */
type LoaderInstance =
  | TextLoader
  | PDFLoader
  | PPTXLoader
  | DocxLoader
  | JSONLoader
  | EPubLoader
  | CheerioWebBaseLoader
  | YoutubeLoader
  | SitemapLoader
  | NoteLoader
  | MarkdownLoader
|
||||
|
||||
/**
|
||||
* 为文档数组中的每个文档的 metadata 添加类型信息。
|
||||
*/
|
||||
function formatDocument(docs: Document[], type: string): Document[] {
|
||||
return docs.map((doc) => ({
|
||||
...doc,
|
||||
metadata: {
|
||||
...doc.metadata,
|
||||
type: type
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* 通用文档处理管道
|
||||
*/
|
||||
async function processDocuments(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
docs: Document[],
|
||||
loaderType: string,
|
||||
splitterType?: string
|
||||
): Promise<LoaderReturn> {
|
||||
const formattedDocs = formatDocument(docs, loaderType)
|
||||
const splitter = SplitterFactory.create({
|
||||
chunkSize: base.chunkSize,
|
||||
chunkOverlap: base.chunkOverlap,
|
||||
...(splitterType && { type: splitterType })
|
||||
})
|
||||
|
||||
const splitterResults = await splitter.splitDocuments(formattedDocs)
|
||||
const ids = splitterResults.map(() => randomUUID())
|
||||
|
||||
await vectorStore.addDocuments(splitterResults, { ids })
|
||||
|
||||
return {
|
||||
entriesAdded: splitterResults.length,
|
||||
uniqueId: ids[0] || '',
|
||||
uniqueIds: ids,
|
||||
loaderType
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 通用加载器执行函数
|
||||
*/
|
||||
async function executeLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
loaderInstance: LoaderInstance,
|
||||
loaderType: string,
|
||||
identifier: string,
|
||||
splitterType?: string
|
||||
): Promise<LoaderReturn> {
|
||||
const emptyResult: LoaderReturn = {
|
||||
entriesAdded: 0,
|
||||
uniqueId: '',
|
||||
uniqueIds: [],
|
||||
loaderType
|
||||
}
|
||||
|
||||
try {
|
||||
const docs = await loaderInstance.load()
|
||||
return await processDocuments(base, vectorStore, docs, loaderType, splitterType)
|
||||
} catch (error) {
|
||||
logger.error(`Error loading or processing ${identifier} with loader ${loaderType}: ${error}`)
|
||||
return emptyResult
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Maps a lower-cased file extension to the loader class that handles it
 * and the loader-type label recorded in document metadata.
 */
const FILE_LOADER_MAP: Record<string, { loader: new (path: string) => LoaderInstance; type: string }> = {
  '.pdf': { loader: PDFLoader, type: 'pdf' },
  '.txt': { loader: TextLoader, type: 'text' },
  '.pptx': { loader: PPTXLoader, type: 'pptx' },
  '.docx': { loader: DocxLoader, type: 'docx' },
  '.doc': { loader: DocxLoader, type: 'doc' },
  '.json': { loader: JSONLoader, type: 'json' },
  '.epub': { loader: EPubLoader, type: 'epub' },
  '.md': { loader: MarkdownLoader, type: 'markdown' }
}
|
||||
|
||||
export async function addFileLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
file: FileMetadata
|
||||
): Promise<LoaderReturn> {
|
||||
const fileExt = file.ext.toLowerCase()
|
||||
const loaderConfig = FILE_LOADER_MAP[fileExt]
|
||||
|
||||
if (!loaderConfig) {
|
||||
// 默认使用文本加载器
|
||||
const loaderInstance = new TextLoader(file.path)
|
||||
const type = fileExt.replace('.', '') || 'unknown'
|
||||
return executeLoader(base, vectorStore, loaderInstance, type, file.path)
|
||||
}
|
||||
|
||||
const loaderInstance = new loaderConfig.loader(file.path)
|
||||
return executeLoader(base, vectorStore, loaderInstance, loaderConfig.type, file.path)
|
||||
}
|
||||
|
||||
export async function addWebLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
url: string,
|
||||
source: UrlSource
|
||||
): Promise<LoaderReturn> {
|
||||
let loaderInstance: CheerioWebBaseLoader | YoutubeLoader | undefined
|
||||
let splitterType: string | undefined
|
||||
|
||||
switch (source) {
|
||||
case 'normal':
|
||||
loaderInstance = new CheerioWebBaseLoader(url)
|
||||
break
|
||||
case 'youtube':
|
||||
loaderInstance = YoutubeLoader.createFromUrl(url, {
|
||||
addVideoInfo: true,
|
||||
transcriptFormat: 'srt'
|
||||
})
|
||||
splitterType = 'srt'
|
||||
break
|
||||
}
|
||||
|
||||
if (!loaderInstance) {
|
||||
return {
|
||||
entriesAdded: 0,
|
||||
uniqueId: '',
|
||||
uniqueIds: [],
|
||||
loaderType: source
|
||||
}
|
||||
}
|
||||
|
||||
return executeLoader(base, vectorStore, loaderInstance, source, url, splitterType)
|
||||
}
|
||||
|
||||
export async function addSitemapLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
url: string
|
||||
): Promise<LoaderReturn> {
|
||||
const loaderInstance = new SitemapLoader(url)
|
||||
return executeLoader(base, vectorStore, loaderInstance, 'sitemap', url)
|
||||
}
|
||||
|
||||
export async function addNoteLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
content: string,
|
||||
sourceUrl: string
|
||||
): Promise<LoaderReturn> {
|
||||
const loaderInstance = new NoteLoader(content, sourceUrl)
|
||||
return executeLoader(base, vectorStore, loaderInstance, 'note', sourceUrl)
|
||||
}
|
||||
|
||||
export async function addVideoLoader(
|
||||
base: KnowledgeBaseParams,
|
||||
vectorStore: FaissStore,
|
||||
files: FileMetadata[]
|
||||
): Promise<LoaderReturn> {
|
||||
const srtFile = files.find((f) => f.type === FileTypes.TEXT)
|
||||
const videoFile = files.find((f) => f.type === FileTypes.VIDEO)
|
||||
|
||||
const emptyResult: LoaderReturn = {
|
||||
entriesAdded: 0,
|
||||
uniqueId: '',
|
||||
uniqueIds: [],
|
||||
loaderType: 'video'
|
||||
}
|
||||
|
||||
if (!srtFile || !videoFile) {
|
||||
return emptyResult
|
||||
}
|
||||
|
||||
try {
|
||||
const loaderInstance = new TextLoader(srtFile.path)
|
||||
const originalDocs = await loaderInstance.load()
|
||||
|
||||
const docsWithVideoMeta = originalDocs.map(
|
||||
(doc) =>
|
||||
new Document({
|
||||
...doc,
|
||||
metadata: {
|
||||
...doc.metadata,
|
||||
video: {
|
||||
path: videoFile.path,
|
||||
name: videoFile.origin_name
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
return await processDocuments(base, vectorStore, docsWithVideoMeta, 'video', 'srt')
|
||||
} catch (error) {
|
||||
logger.error(`Error loading or processing file ${srtFile.path} with loader video: ${error}`)
|
||||
return emptyResult
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
import { BM25Retriever } from '@langchain/community/retrievers/bm25'
|
||||
import { FaissStore } from '@langchain/community/vectorstores/faiss'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { loggerService } from '@main/services/LoggerService'
|
||||
import { type KnowledgeBaseParams } from '@types'
|
||||
import { type Document } from 'langchain/document'
|
||||
import { EnsembleRetriever } from 'langchain/retrievers/ensemble'
|
||||
|
||||
const logger = loggerService.withContext('RetrieverFactory')
|
||||
export class RetrieverFactory {
|
||||
/**
|
||||
* 根据提供的参数创建一个 LangChain 检索器 (Retriever)。
|
||||
* @param base 知识库配置参数。
|
||||
* @param vectorStore 一个已初始化的向量存储实例。
|
||||
* @param documents 文档列表,用于初始化 BM25Retriever。
|
||||
* @returns 返回一个 BaseRetriever 实例。
|
||||
*/
|
||||
public createRetriever(base: KnowledgeBaseParams, vectorStore: FaissStore, documents: Document[]): BaseRetriever {
|
||||
const retrieverType = base.retriever?.mode ?? 'hybrid'
|
||||
const retrieverWeight = base.retriever?.weight ?? 0.5
|
||||
const searchK = base.documentCount ?? 5
|
||||
|
||||
logger.info(`Creating retriever of type: ${retrieverType} with k=${searchK}`)
|
||||
|
||||
switch (retrieverType) {
|
||||
case 'bm25':
|
||||
if (documents.length === 0) {
|
||||
throw new Error('BM25Retriever requires documents, but none were provided or found.')
|
||||
}
|
||||
logger.info('Create BM25 Retriever')
|
||||
return BM25Retriever.fromDocuments(documents, { k: searchK })
|
||||
|
||||
case 'hybrid': {
|
||||
if (documents.length === 0) {
|
||||
logger.warn('No documents provided for BM25 part of hybrid search. Falling back to vector search only.')
|
||||
return vectorStore.asRetriever(searchK)
|
||||
}
|
||||
|
||||
const vectorstoreRetriever = vectorStore.asRetriever(searchK)
|
||||
const bm25Retriever = BM25Retriever.fromDocuments(documents, { k: searchK })
|
||||
|
||||
logger.info('Create Hybrid Retriever')
|
||||
return new EnsembleRetriever({
|
||||
retrievers: [bm25Retriever, vectorstoreRetriever],
|
||||
weights: [retrieverWeight, 1 - retrieverWeight]
|
||||
})
|
||||
}
|
||||
|
||||
case 'vector':
|
||||
default:
|
||||
logger.info('Create Vector Retriever')
|
||||
return vectorStore.asRetriever(searchK)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,133 +0,0 @@
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { TextSplitter, TextSplitterParams } from 'langchain/text_splitter'
|
||||
|
||||
// A single parsed subtitle cue from an SRT file.
interface SrtSegment {
  text: string
  startTime: number // in seconds
  endTime: number // in seconds
}
|
||||
|
||||
// 辅助函数:将 SRT 时间戳字符串 (HH:MM:SS,ms) 转换为秒
|
||||
function srtTimeToSeconds(time: string): number {
|
||||
const parts = time.split(':')
|
||||
const secondsAndMs = parts[2].split(',')
|
||||
const hours = parseInt(parts[0], 10)
|
||||
const minutes = parseInt(parts[1], 10)
|
||||
const seconds = parseInt(secondsAndMs[0], 10)
|
||||
const milliseconds = parseInt(secondsAndMs[1], 10)
|
||||
|
||||
return hours * 3600 + minutes * 60 + seconds + milliseconds / 1000
|
||||
}
|
||||
|
||||
export class SrtSplitter extends TextSplitter {
|
||||
constructor(fields?: Partial<TextSplitterParams>) {
|
||||
// 传入 chunkSize 和 chunkOverlap
|
||||
super(fields)
|
||||
}
|
||||
splitText(): Promise<string[]> {
|
||||
throw new Error('Method not implemented.')
|
||||
}
|
||||
|
||||
// 核心方法:重写 splitDocuments 来实现自定义逻辑
|
||||
async splitDocuments(documents: Document[]): Promise<Document[]> {
|
||||
const allChunks: Document[] = []
|
||||
|
||||
for (const doc of documents) {
|
||||
// 1. 解析 SRT 内容
|
||||
const segments = this.parseSrt(doc.pageContent)
|
||||
if (segments.length === 0) continue
|
||||
|
||||
// 2. 将字幕片段组合成块
|
||||
const chunks = this.mergeSegmentsIntoChunks(segments, doc.metadata)
|
||||
allChunks.push(...chunks)
|
||||
}
|
||||
|
||||
return allChunks
|
||||
}
|
||||
|
||||
// 辅助方法:解析整个 SRT 字符串
|
||||
private parseSrt(srt: string): SrtSegment[] {
|
||||
const segments: SrtSegment[] = []
|
||||
const blocks = srt.trim().split(/\n\n/)
|
||||
|
||||
for (const block of blocks) {
|
||||
const lines = block.split('\n')
|
||||
if (lines.length < 3) continue
|
||||
|
||||
const timeMatch = lines[1].match(/(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})/)
|
||||
if (!timeMatch) continue
|
||||
|
||||
const startTime = srtTimeToSeconds(timeMatch[1])
|
||||
const endTime = srtTimeToSeconds(timeMatch[2])
|
||||
const text = lines.slice(2).join(' ').trim()
|
||||
|
||||
segments.push({ text, startTime, endTime })
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
|
||||
// 辅助方法:将解析后的片段合并成每 5 段一个块
|
||||
private mergeSegmentsIntoChunks(segments: SrtSegment[], baseMetadata: Record<string, any>): Document[] {
|
||||
const chunks: Document[] = []
|
||||
let currentChunkText = ''
|
||||
let currentChunkStartTime = 0
|
||||
let currentChunkEndTime = 0
|
||||
let segmentCount = 0
|
||||
|
||||
for (const segment of segments) {
|
||||
if (segmentCount === 0) {
|
||||
currentChunkStartTime = segment.startTime
|
||||
}
|
||||
|
||||
currentChunkText += (currentChunkText ? ' ' : '') + segment.text
|
||||
currentChunkEndTime = segment.endTime
|
||||
segmentCount++
|
||||
|
||||
// 当累积到 5 段时,创建一个新的 Document
|
||||
if (segmentCount === 5) {
|
||||
const metadata: Record<string, any> = {
|
||||
...baseMetadata,
|
||||
startTime: currentChunkStartTime,
|
||||
endTime: currentChunkEndTime
|
||||
}
|
||||
if (baseMetadata.source_url) {
|
||||
metadata.source_url_with_timestamp = `${baseMetadata.source_url}?t=${Math.floor(currentChunkStartTime)}s`
|
||||
}
|
||||
chunks.push(
|
||||
new Document({
|
||||
pageContent: currentChunkText,
|
||||
metadata
|
||||
})
|
||||
)
|
||||
|
||||
// 重置计数器和临时变量
|
||||
currentChunkText = ''
|
||||
currentChunkStartTime = 0
|
||||
currentChunkEndTime = 0
|
||||
segmentCount = 0
|
||||
}
|
||||
}
|
||||
|
||||
// 如果还有剩余的片段,创建最后一个 Document
|
||||
if (segmentCount > 0) {
|
||||
const metadata: Record<string, any> = {
|
||||
...baseMetadata,
|
||||
startTime: currentChunkStartTime,
|
||||
endTime: currentChunkEndTime
|
||||
}
|
||||
if (baseMetadata.source_url) {
|
||||
metadata.source_url_with_timestamp = `${baseMetadata.source_url}?t=${Math.floor(currentChunkStartTime)}s`
|
||||
}
|
||||
chunks.push(
|
||||
new Document({
|
||||
pageContent: currentChunkText,
|
||||
metadata
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
import { RecursiveCharacterTextSplitter, TextSplitter } from '@langchain/textsplitters'
|
||||
|
||||
import { SrtSplitter } from './SrtSplitter'
|
||||
|
||||
/**
 * Options for SplitterFactory.create: chunk sizing plus the splitter
 * implementation to use ('srt' for subtitle content; anything else,
 * including omission, selects the recursive character splitter).
 */
export type SplitterConfig = {
  chunkSize?: number
  chunkOverlap?: number
  type?: 'recursive' | 'srt' | string
}
|
||||
export class SplitterFactory {
|
||||
/**
|
||||
* Creates a TextSplitter instance based on the provided configuration.
|
||||
* @param config - The configuration object specifying the splitter type and its parameters.
|
||||
* @returns An instance of a TextSplitter, or null if no splitting is required.
|
||||
*/
|
||||
public static create(config: SplitterConfig): TextSplitter {
|
||||
switch (config.type) {
|
||||
case 'srt':
|
||||
return new SrtSplitter({
|
||||
chunkSize: config.chunkSize,
|
||||
chunkOverlap: config.chunkOverlap
|
||||
})
|
||||
case 'recursive':
|
||||
default:
|
||||
return new RecursiveCharacterTextSplitter({
|
||||
chunkSize: config.chunkSize,
|
||||
chunkOverlap: config.chunkOverlap
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
475
src/main/mcpServers/didi-mcp.ts
Normal file
475
src/main/mcpServers/didi-mcp.ts
Normal file
@@ -0,0 +1,475 @@
|
||||
/**
|
||||
* DiDi MCP Server Implementation
|
||||
*
|
||||
* Based on official DiDi MCP API capabilities.
|
||||
* API Documentation: https://mcp.didichuxing.com/api?tap=api
|
||||
*
|
||||
* Provides ride-hailing services including map search, price estimation,
|
||||
* order management, and driver tracking.
|
||||
*
|
||||
* Note: Only available in Mainland China.
|
||||
*/
|
||||
|
||||
import { loggerService } from '@logger'
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import {
|
||||
CallToolRequestSchema,
|
||||
ListToolsRequestSchema
|
||||
} from '@modelcontextprotocol/sdk/types.js'
|
||||
|
||||
// Scoped logger for the DiDi MCP server.
const logger = loggerService.withContext('DiDiMCPServer')
|
||||
|
||||
export class DiDiMcpServer {
|
||||
private _server: Server
|
||||
private readonly baseUrl = 'http://mcp.didichuxing.com/mcp-servers'
|
||||
private apiKey: string
|
||||
|
||||
constructor(apiKey?: string) {
|
||||
this._server = new Server(
|
||||
{
|
||||
name: 'didi-mcp-server',
|
||||
version: '0.1.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
// Get API key from parameter or environment variables
|
||||
this.apiKey = apiKey || process.env.DIDI_API_KEY || ''
|
||||
if (!this.apiKey) {
|
||||
logger.warn('DIDI_API_KEY environment variable is not set')
|
||||
}
|
||||
|
||||
this.setupRequestHandlers()
|
||||
}
|
||||
|
||||
get server(): Server {
|
||||
return this._server
|
||||
}
|
||||
|
||||
private setupRequestHandlers() {
|
||||
// List available tools
|
||||
this._server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: [
|
||||
{
|
||||
name: 'maps_textsearch',
|
||||
description: 'Search for POI locations based on keywords and city',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
city: {
|
||||
type: 'string',
|
||||
description: 'Query city'
|
||||
},
|
||||
keywords: {
|
||||
type: 'string',
|
||||
description: 'Search keywords'
|
||||
},
|
||||
location: {
|
||||
type: 'string',
|
||||
description: 'Location coordinates, format: longitude,latitude'
|
||||
}
|
||||
},
|
||||
required: ['keywords', 'city']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_cancel_order',
|
||||
description: 'Cancel a taxi order',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
order_id: {
|
||||
type: 'string',
|
||||
description: 'Order ID from order creation or query results'
|
||||
},
|
||||
reason: {
|
||||
type: 'string',
|
||||
description: 'Cancellation reason (optional). Examples: no longer needed, waiting too long, urgent matter'
|
||||
}
|
||||
},
|
||||
required: ['order_id']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_create_order',
|
||||
description: 'Create taxi order directly via API without opening any app interface',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
caller_car_phone: {
|
||||
type: 'string',
|
||||
description: 'Caller phone number (optional)'
|
||||
},
|
||||
estimate_trace_id: {
|
||||
type: 'string',
|
||||
description: 'Estimation trace ID from estimation results'
|
||||
},
|
||||
product_category: {
|
||||
type: 'string',
|
||||
description: 'Vehicle category ID from estimation results, comma-separated for multiple types'
|
||||
}
|
||||
},
|
||||
required: ['product_category', 'estimate_trace_id']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_estimate',
|
||||
description: 'Get available ride-hailing vehicle types and fare estimates',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
from_lat: {
|
||||
type: 'string',
|
||||
description: 'Departure latitude, must be from map tools'
|
||||
},
|
||||
from_lng: {
|
||||
type: 'string',
|
||||
description: 'Departure longitude, must be from map tools'
|
||||
},
|
||||
from_name: {
|
||||
type: 'string',
|
||||
description: 'Departure location name'
|
||||
},
|
||||
to_lat: {
|
||||
type: 'string',
|
||||
description: 'Destination latitude, must be from map tools'
|
||||
},
|
||||
to_lng: {
|
||||
type: 'string',
|
||||
description: 'Destination longitude, must be from map tools'
|
||||
},
|
||||
to_name: {
|
||||
type: 'string',
|
||||
description: 'Destination name'
|
||||
}
|
||||
},
|
||||
required: ['from_lng', 'from_lat', 'from_name', 'to_lng', 'to_lat', 'to_name']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_generate_ride_app_link',
|
||||
description: 'Generate deep links to open ride-hailing apps based on origin, destination and vehicle type',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
from_lat: {
|
||||
type: 'string',
|
||||
description: 'Departure latitude, must be from map tools'
|
||||
},
|
||||
from_lng: {
|
||||
type: 'string',
|
||||
description: 'Departure longitude, must be from map tools'
|
||||
},
|
||||
product_category: {
|
||||
type: 'string',
|
||||
description: 'Vehicle category IDs from estimation results, comma-separated for multiple types'
|
||||
},
|
||||
to_lat: {
|
||||
type: 'string',
|
||||
description: 'Destination latitude, must be from map tools'
|
||||
},
|
||||
to_lng: {
|
||||
type: 'string',
|
||||
description: 'Destination longitude, must be from map tools'
|
||||
}
|
||||
},
|
||||
required: ['from_lng', 'from_lat', 'to_lng', 'to_lat']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_get_driver_location',
|
||||
description: 'Get real-time driver location for a taxi order',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
order_id: {
|
||||
type: 'string',
|
||||
description: 'Taxi order ID'
|
||||
}
|
||||
},
|
||||
required: ['order_id']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'taxi_query_order',
|
||||
description: 'Query taxi order status and information such as driver contact, license plate, ETA',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
order_id: {
|
||||
type: 'string',
|
||||
description: 'Order ID from order creation results, if available; otherwise queries incomplete orders'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
// Handle tool calls
|
||||
this._server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
const { name, arguments: args } = request.params
|
||||
|
||||
try {
|
||||
switch (name) {
|
||||
case 'maps_textsearch':
|
||||
return await this.handleMapsTextSearch(args)
|
||||
case 'taxi_cancel_order':
|
||||
return await this.handleTaxiCancelOrder(args)
|
||||
case 'taxi_create_order':
|
||||
return await this.handleTaxiCreateOrder(args)
|
||||
case 'taxi_estimate':
|
||||
return await this.handleTaxiEstimate(args)
|
||||
case 'taxi_generate_ride_app_link':
|
||||
return await this.handleTaxiGenerateRideAppLink(args)
|
||||
case 'taxi_get_driver_location':
|
||||
return await this.handleTaxiGetDriverLocation(args)
|
||||
case 'taxi_query_order':
|
||||
return await this.handleTaxiQueryOrder(args)
|
||||
default:
|
||||
throw new Error(`Unknown tool: ${name}`)
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`Error calling tool ${name}:`, error as Error)
|
||||
throw error
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
private async handleMapsTextSearch(args: any) {
|
||||
const { city, keywords, location } = args
|
||||
|
||||
const params = {
|
||||
name: 'maps_textsearch',
|
||||
arguments: {
|
||||
keywords,
|
||||
city,
|
||||
...(location && { location })
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Maps text search error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiCancelOrder(args: any) {
|
||||
const { order_id, reason } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_cancel_order',
|
||||
arguments: {
|
||||
order_id,
|
||||
...(reason && { reason })
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi cancel order error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiCreateOrder(args: any) {
|
||||
const { caller_car_phone, estimate_trace_id, product_category } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_create_order',
|
||||
arguments: {
|
||||
product_category,
|
||||
estimate_trace_id,
|
||||
...(caller_car_phone && { caller_car_phone })
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi create order error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiEstimate(args: any) {
|
||||
const { from_lng, from_lat, from_name, to_lng, to_lat, to_name } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_estimate',
|
||||
arguments: {
|
||||
from_lng,
|
||||
from_lat,
|
||||
from_name,
|
||||
to_lng,
|
||||
to_lat,
|
||||
to_name
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi estimate error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiGenerateRideAppLink(args: any) {
|
||||
const { from_lng, from_lat, to_lng, to_lat, product_category } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_generate_ride_app_link',
|
||||
arguments: {
|
||||
from_lng,
|
||||
from_lat,
|
||||
to_lng,
|
||||
to_lat,
|
||||
...(product_category && { product_category })
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi generate ride app link error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiGetDriverLocation(args: any) {
|
||||
const { order_id } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_get_driver_location',
|
||||
arguments: {
|
||||
order_id
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi get driver location error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async handleTaxiQueryOrder(args: any) {
|
||||
const { order_id } = args
|
||||
|
||||
const params = {
|
||||
name: 'taxi_query_order',
|
||||
arguments: {
|
||||
...(order_id && { order_id })
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this.makeRequest('tools/call', params)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(response, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Taxi query order error:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async makeRequest(method: string, params: any): Promise<any> {
|
||||
const requestData = {
|
||||
jsonrpc: '2.0',
|
||||
method: method,
|
||||
id: Date.now(),
|
||||
...(Object.keys(params).length > 0 && { params })
|
||||
}
|
||||
|
||||
// API key is passed as URL parameter
|
||||
const url = `${this.baseUrl}?key=${this.apiKey}`
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(requestData)
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
throw new Error(`HTTP ${response.status}: ${errorText}`)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
if (data.error) {
|
||||
throw new Error(`API Error: ${JSON.stringify(data.error)}`)
|
||||
}
|
||||
|
||||
return data.result
|
||||
}
|
||||
}
|
||||
|
||||
// Also provided as the default export alongside the named class export above.
export default DiDiMcpServer
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user