Compare commits: shortcut-c ... fix/openmi (91 commits)
| SHA1 |
|---|
| 61dd97bd0e |
| d6e7ce330e |
| 4f7d8731ea |
| 2b5ac5ab51 |
| 060fcd2ce6 |
| a6182eaf85 |
| 649f9420a4 |
| 2552d97ea7 |
| 803f4b5a64 |
| 31f8fff6e2 |
| 2663cb19ce |
| ce5d46bfc7 |
| c1fa24522d |
| 2f66f5b511 |
| 2d8555c326 |
| e2c8edab61 |
| 5e0a66fa1f |
| bc8b0a8d53 |
| e43562423e |
| 120ac122eb |
| 9013fcba14 |
| c32f4badbd |
| 66f66fe08e |
| d5826c2dc7 |
| 85a628f8dd |
| ed453750fe |
| 57d9a31c0f |
| 58afbe8a79 |
| 9a10516b52 |
| e268e69597 |
| 10e78ac60e |
| 44b2b859da |
| bfef0c5580 |
| 1e8055031a |
| 8e33ff8d90 |
| a619000340 |
| 78278ce96d |
| 76483d828e |
| 816a92c609 |
| 83e4d4363f |
| 1103449a4f |
| 56c7a7f066 |
| caa59c4c50 |
| 2546dfbe5d |
| 5fea202a7d |
| 7dce1d776b |
| 346af4d338 |
| abd5d3b96f |
| 49bd298d37 |
| 714a28ac29 |
| 0cf81c04c8 |
| 4186e9c990 |
| d8f68a6056 |
| 11bf50e722 |
| 32a84311aa |
| 6eaa2b2461 |
| 9f00f00546 |
| bd94d23343 |
| 5f1c14e2c0 |
| cdc12d5092 |
| e5967fd874 |
| e2f1d80697 |
| 28bc89ac7c |
| dc06c103e0 |
| 1f0381aebe |
| fb02a61a48 |
| 562fbb3ff7 |
| 1018ad87b8 |
| 82ca35fc29 |
| fe53b0914a |
| 67a379641f |
| 9dbc6fbf67 |
| 8da43ab794 |
| 2a06c606e1 |
| b6dcf2f5fa |
| 68e0d8b0f1 |
| 7f1c234ac1 |
| c1fd23742f |
| d792bf7fe0 |
| f8a599322f |
| aa810a7ead |
| b586e1796e |
| fa2ec69fa9 |
| dd8690b592 |
| 09e6b9741e |
| 0767952a6f |
| 72299f833a |
| 7badaf02b9 |
| dfbfc2869c |
| 1575e97168 |
| e0a2ed0481 |

.github/ISSUE_TEMPLATE/0_bug_report.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
name: 🐛 Bug Report (English)
name: 🐛 Bug Report
description: Create a report to help us improve
title: '[Bug]: '
labels: ['BUG']

.github/ISSUE_TEMPLATE/1_feature_request.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
name: 💡 Feature Request (English)
name: 💡 Feature Request
description: Suggest an idea for this project
title: '[Feature]: '
labels: ['feature']

.github/ISSUE_TEMPLATE/3_others.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
name: 🤔 Other Questions (English)
name: 🤔 Other Questions
description: Submit questions that don't fit into bug reports or feature requests
title: '[Other]: '
body:

.github/workflows/auto-i18n.yml (vendored, 89 changes)
@@ -1,4 +1,4 @@
name: Auto I18N
name: Auto I18N Weekly

env:
TRANSLATION_API_KEY: ${{ secrets.TRANSLATE_API_KEY }}
@@ -7,14 +7,15 @@ env:
TRANSLATION_BASE_LOCALE: ${{ vars.AUTO_I18N_BASE_LOCALE || 'en-us'}}

on:
pull_request:
types: [opened, synchronize, reopened]
schedule:
# Runs at 00:00 UTC every Sunday.
# This corresponds to 08:00 AM UTC+8 (Beijing time) every Sunday.
- cron: "0 0 * * 0"
workflow_dispatch:

jobs:
auto-i18n:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' || github.event.pull_request.head.repo.full_name == 'CherryHQ/cherry-studio'
name: Auto I18N
permissions:
contents: write
@@ -24,45 +25,69 @@ jobs:
- name: 🐈⬛ Checkout
uses: actions/checkout@v5
with:
ref: ${{ github.event.pull_request.head.ref }}
fetch-depth: 0

- name: 📦 Setting Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
package-manager-cache: false
node-version: 22

- name: 📦 Install dependencies in isolated directory
- name: 📦 Install corepack
run: corepack enable && corepack prepare yarn@4.9.1 --activate

- name: 📂 Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT

- name: 💾 Cache yarn dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-

- name: 📦 Install dependencies
run: |
# 在临时目录安装依赖
mkdir -p /tmp/translation-deps
cd /tmp/translation-deps
echo '{"dependencies": {"@cherrystudio/openai": "^6.5.0", "cli-progress": "^3.12.0", "tsx": "^4.20.3", "@biomejs/biome": "2.2.4"}}' > package.json
npm install --no-package-lock

# 设置 NODE_PATH 让项目能找到这些依赖
echo "NODE_PATH=/tmp/translation-deps/node_modules" >> $GITHUB_ENV
yarn install

- name: 🏃♀️ Translate
run: npx tsx scripts/sync-i18n.ts && npx tsx scripts/auto-translate-i18n.ts
run: yarn sync:i18n && yarn auto:i18n

- name: 🔍 Format
run: cd /tmp/translation-deps && npx biome format --config-path /home/runner/work/cherry-studio/cherry-studio/biome.jsonc --write /home/runner/work/cherry-studio/cherry-studio/src/renderer/src/i18n/
run: yarn format

- name: 🔄 Commit changes
- name: 🔍 Check for changes
id: git_status
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add .
# Check if there are any uncommitted changes
git reset -- package.json yarn.lock # 不提交 package.json 和 yarn.lock 的更改
if git diff --cached --quiet; then
echo "No changes to commit"
else
git commit -m "fix(i18n): Auto update translations for PR #${{ github.event.pull_request.number }}"
fi
git diff --exit-code --quiet || echo "::set-output name=has_changes::true"
git status --porcelain

- name: 🚀 Push changes
uses: ad-m/github-push-action@master
- name: 📅 Set current date for PR title
id: set_date
run: echo "CURRENT_DATE=$(date +'%b %d, %Y')" >> $GITHUB_ENV # e.g., "Jun 06, 2024"

- name: 🚀 Create Pull Request if changes exist
if: steps.git_status.outputs.has_changes == 'true'
uses: peter-evans/create-pull-request@v6
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.event.pull_request.head.ref }}
token: ${{ secrets.GITHUB_TOKEN }} # Use the built-in GITHUB_TOKEN for bot actions
commit-message: "feat(bot): Weekly automated script run"
title: "🤖 Weekly Automated Update: ${{ env.CURRENT_DATE }}"
body: |
This PR includes changes generated by the weekly auto i18n.
Review the changes before merging.

---
_Generated by the automated weekly workflow_
branch: "auto-i18n-weekly-${{ github.run_id }}" # Unique branch name
base: "main" # Or 'develop', set your base branch
delete-branch: true # Delete the branch after merging or closing the PR

- name: 📢 Notify if no changes
if: steps.git_status.outputs.has_changes != 'true'
run: echo "Bot script ran, but no changes were detected. No PR created."

.github/workflows/github-issue-tracker.yml (vendored, 10 changes)
@@ -5,7 +5,7 @@ on:
types: [opened]
schedule:
# Run every day at 8:30 Beijing Time (00:30 UTC)
- cron: '30 0 * * *'
- cron: "30 0 * * *"
workflow_dispatch:

jobs:
@@ -54,9 +54,9 @@

- name: Setup Node.js
if: steps.check_time.outputs.should_delay == 'false'
uses: actions/setup-node@v4
uses: actions/setup-node@v6
with:
node-version: '20'
node-version: 22

- name: Process issue with Claude
if: steps.check_time.outputs.should_delay == 'false'
@@ -121,9 +121,9 @@
uses: actions/checkout@v4

- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v6
with:
node-version: '20'
node-version: 22

- name: Process pending issues with Claude
uses: anthropics/claude-code-action@main

.github/workflows/issue-management.yml (vendored, 4 changes)
@@ -21,7 +21,7 @@ jobs:
contents: none
steps:
- name: Close needs-more-info issues
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
only-labels: 'needs-more-info'
@@ -42,7 +42,7 @@
days-before-pr-close: -1

- name: Close inactive issues
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: ${{ env.daysBeforeStale }}

.github/workflows/nightly-build.yml (vendored, 10 changes)
@@ -3,7 +3,7 @@ name: Nightly Build
on:
workflow_dispatch:
schedule:
- cron: '0 17 * * *' # 1:00 BJ Time
- cron: "0 17 * * *" # 1:00 BJ Time

permissions:
contents: write
@@ -56,9 +56,9 @@
ref: main

- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
node-version: 22

- name: macos-latest dependencies fix
if: matrix.os == 'macos-latest'
@@ -66,7 +66,7 @@
brew install python-setuptools

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.6.0 --activate
run: corepack enable && corepack prepare yarn@4.9.1 --activate

- name: Get yarn cache directory path
id: yarn-cache-dir-path
@@ -208,7 +208,7 @@
echo "总计: $(find renamed-artifacts -type f | wc -l) 个文件"

- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: cherry-studio-nightly-${{ steps.date.outputs.date }}-${{ matrix.os }}
path: renamed-artifacts/*

.github/workflows/pr-ci.yml (vendored, 6 changes)
@@ -24,12 +24,12 @@ jobs:
uses: actions/checkout@v5

- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
node-version: 22

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.6.0 --activate
run: corepack enable && corepack prepare yarn@4.9.1 --activate

- name: Get yarn cache directory path
id: yarn-cache-dir-path

.github/workflows/release.yml (vendored, 12 changes)
@@ -4,9 +4,9 @@ on:
workflow_dispatch:
inputs:
tag:
description: 'Release tag (e.g. v1.0.0)'
description: "Release tag (e.g. v1.0.0)"
required: true
default: 'v1.0.0'
default: "v1.0.0"
push:
tags:
- v*.*.*
@@ -47,9 +47,9 @@
npm version "$VERSION" --no-git-tag-version --allow-same-version

- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
node-version: 22

- name: macos-latest dependencies fix
if: matrix.os == 'macos-latest'
@@ -57,7 +57,7 @@
brew install python-setuptools

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.6.0 --activate
run: corepack enable && corepack prepare yarn@4.9.1 --activate

- name: Get yarn cache directory path
id: yarn-cache-dir-path
@@ -127,5 +127,5 @@
allowUpdates: true
makeLatest: false
tag: ${{ steps.get-tag.outputs.tag }}
artifacts: 'dist/*.exe,dist/*.zip,dist/*.dmg,dist/*.AppImage,dist/*.snap,dist/*.deb,dist/*.rpm,dist/*.tar.gz,dist/latest*.yml,dist/rc*.yml,dist/beta*.yml,dist/*.blockmap'
artifacts: "dist/*.exe,dist/*.zip,dist/*.dmg,dist/*.AppImage,dist/*.snap,dist/*.deb,dist/*.rpm,dist/*.tar.gz,dist/latest*.yml,dist/rc*.yml,dist/beta*.yml,dist/*.blockmap"
token: ${{ secrets.GITHUB_TOKEN }}

@@ -22,7 +22,6 @@
"eslint.config.mjs"
],
"overrides": [
// set different env
{
"env": {
"node": true
@@ -36,8 +35,7 @@
"files": [
"src/renderer/**/*.{ts,tsx}",
"packages/aiCore/**",
"packages/extension-table-plus/**",
"resources/js/**"
"packages/extension-table-plus/**"
]
},
{
@@ -53,76 +51,24 @@
"node": true
},
"files": ["src/preload/**"]
},
{
"files": ["packages/ai-sdk-provider/**"],
"globals": {
"fetch": "readonly"
}
}
],
// We don't use the React plugin here because its behavior differs slightly from that of ESLint's React plugin.
"plugins": ["unicorn", "typescript", "oxc", "import"],
"rules": {
"constructor-super": "error",
"for-direction": "error",
"getter-return": "error",
"no-array-constructor": "off",
// "import/no-cycle": "error", // tons of error, bro
"no-async-promise-executor": "error",
"no-caller": "warn",
"no-case-declarations": "error",
"no-class-assign": "error",
"no-compare-neg-zero": "error",
"no-cond-assign": "error",
"no-const-assign": "error",
"no-constant-binary-expression": "error",
"no-constant-condition": "error",
"no-control-regex": "error",
"no-debugger": "error",
"no-delete-var": "error",
"no-dupe-args": "error",
"no-dupe-class-members": "error",
"no-dupe-else-if": "error",
"no-dupe-keys": "error",
"no-duplicate-case": "error",
"no-empty": "error",
"no-empty-character-class": "error",
"no-empty-pattern": "error",
"no-empty-static-block": "error",
"no-eval": "warn",
"no-ex-assign": "error",
"no-extra-boolean-cast": "error",
"no-fallthrough": "warn",
"no-func-assign": "error",
"no-global-assign": "error",
"no-import-assign": "error",
"no-invalid-regexp": "error",
"no-irregular-whitespace": "error",
"no-loss-of-precision": "error",
"no-misleading-character-class": "error",
"no-new-native-nonconstructor": "error",
"no-nonoctal-decimal-escape": "error",
"no-obj-calls": "error",
"no-octal": "error",
"no-prototype-builtins": "error",
"no-redeclare": "error",
"no-regex-spaces": "error",
"no-self-assign": "error",
"no-setter-return": "error",
"no-shadow-restricted-names": "error",
"no-sparse-arrays": "error",
"no-this-before-super": "error",
"no-unassigned-vars": "warn",
"no-undef": "error",
"no-unexpected-multiline": "error",
"no-unreachable": "error",
"no-unsafe-finally": "error",
"no-unsafe-negation": "error",
"no-unsafe-optional-chaining": "error",
"no-unused-expressions": "off", // this rule disallow us to use expression to call function, like `condition && fn()`
"no-unused-labels": "error",
"no-unused-private-class-members": "error",
"no-unused-expressions": "off",
"no-unused-vars": ["warn", { "caughtErrors": "none" }],
"no-useless-backreference": "error",
"no-useless-catch": "error",
"no-useless-escape": "error",
"no-useless-rename": "warn",
"no-with": "error",
"oxc/bad-array-method-on-arguments": "warn",
"oxc/bad-char-at-comparison": "warn",
"oxc/bad-comparison-sequence": "warn",
@@ -134,19 +80,17 @@
"oxc/erasing-op": "warn",
"oxc/missing-throw": "warn",
"oxc/number-arg-out-of-range": "warn",
"oxc/only-used-in-recursion": "off", // manually off bacause of existing warning. may turn it on in the future
"oxc/only-used-in-recursion": "off",
"oxc/uninvoked-array-callback": "warn",
"require-yield": "error",
"typescript/await-thenable": "warn",
// "typescript/ban-ts-comment": "error",
"typescript/consistent-type-imports": "error",
"typescript/no-array-constructor": "error",
// "typescript/consistent-type-imports": "error",
"typescript/no-array-delete": "warn",
"typescript/no-base-to-string": "warn",
"typescript/no-duplicate-enum-values": "error",
"typescript/no-duplicate-type-constituents": "warn",
"typescript/no-empty-object-type": "off",
"typescript/no-explicit-any": "off", // not safe but too many errors
"typescript/no-explicit-any": "off",
"typescript/no-extra-non-null-assertion": "error",
"typescript/no-floating-promises": "warn",
"typescript/no-for-in-array": "warn",
@@ -155,7 +99,7 @@
"typescript/no-misused-new": "error",
"typescript/no-misused-spread": "warn",
"typescript/no-namespace": "error",
"typescript/no-non-null-asserted-optional-chain": "off", // it's off now. but may turn it on.
"typescript/no-non-null-asserted-optional-chain": "off",
"typescript/no-redundant-type-constituents": "warn",
"typescript/no-require-imports": "off",
"typescript/no-this-alias": "error",
@@ -173,20 +117,18 @@
"typescript/triple-slash-reference": "error",
"typescript/unbound-method": "warn",
"unicorn/no-await-in-promise-methods": "warn",
"unicorn/no-empty-file": "off", // manually off bacause of existing warning. may turn it on in the future
"unicorn/no-empty-file": "off",
"unicorn/no-invalid-fetch-options": "warn",
"unicorn/no-invalid-remove-event-listener": "warn",
"unicorn/no-new-array": "off", // manually off bacause of existing warning. may turn it on in the future
"unicorn/no-new-array": "off",
"unicorn/no-single-promise-in-promise-methods": "warn",
"unicorn/no-thenable": "off", // manually off bacause of existing warning. may turn it on in the future
"unicorn/no-thenable": "off",
"unicorn/no-unnecessary-await": "warn",
"unicorn/no-useless-fallback-in-spread": "warn",
"unicorn/no-useless-length-check": "warn",
"unicorn/no-useless-spread": "off", // manually off bacause of existing warning. may turn it on in the future
"unicorn/no-useless-spread": "off",
"unicorn/prefer-set-size": "warn",
"unicorn/prefer-string-starts-ends-with": "warn",
"use-isnan": "error",
"valid-typeof": "error"
"unicorn/prefer-string-starts-ends-with": "warn"
},
"settings": {
"jsdoc": {

@@ -1,13 +0,0 @@
diff --git a/dist/index.mjs b/dist/index.mjs
index 69ab1599c76801dc1167551b6fa283dded123466..f0af43bba7ad1196fe05338817e65b4ebda40955 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -477,7 +477,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {

// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId?.includes("models/") ? modelId : `models/${modelId}`;
}

// src/google-generative-ai-options.ts

.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch (vendored, new file, +26 lines)
@@ -0,0 +1,26 @@
diff --git a/dist/index.js b/dist/index.js
index ff305b112779b718f21a636a27b1196125a332d9..cf32ff5086d4d9e56f8fe90c98724559083bafc3 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -471,7 +471,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {

// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}

// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index 57659290f1cec74878a385626ad75b2a4d5cd3fc..d04e5927ec3725b6ffdb80868bfa1b5a48849537 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -477,7 +477,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {

// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}

// src/google-generative-ai-options.ts

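For reference, a minimal standalone sketch of what this patched `getModelPath` change does to model-path resolution. This re-implements the two one-line variants shown in the patch above outside the SDK; the model ids used below are illustrative only.

```ts
// Before the patch: any id containing "/" was passed through untouched.
// After the patch: only ids that already contain "models/" are passed through,
// so other slash-containing ids still receive the "models/" prefix.

function getModelPathBefore(modelId: string): string {
  return modelId.includes('/') ? modelId : `models/${modelId}`
}

function getModelPathAfter(modelId: string): string {
  return modelId.includes('models/') ? modelId : `models/${modelId}`
}

// Illustrative ids (not taken from the patch):
console.log(getModelPathBefore('google/gemini-2.0-flash')) // "google/gemini-2.0-flash"
console.log(getModelPathAfter('google/gemini-2.0-flash'))  // "models/google/gemini-2.0-flash"
console.log(getModelPathAfter('models/gemini-2.0-flash'))  // "models/gemini-2.0-flash" (unchanged)
console.log(getModelPathAfter('gemini-2.0-flash'))         // "models/gemini-2.0-flash"
```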
.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch (vendored, new file, +74 lines)
@@ -0,0 +1,74 @@
diff --git a/dist/index.js b/dist/index.js
index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa96b52ac0d 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
message: import_v42.z.object({
role: import_v42.z.literal("assistant").nullish(),
content: import_v42.z.string().nullish(),
+ reasoning_content: import_v42.z.string().nullish(),
tool_calls: import_v42.z.array(
import_v42.z.object({
id: import_v42.z.string().nullish(),
@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
delta: import_v42.z.object({
role: import_v42.z.enum(["assistant"]).nullish(),
content: import_v42.z.string().nullish(),
+ reasoning_content: import_v42.z.string().nullish(),
tool_calls: import_v42.z.array(
import_v42.z.object({
index: import_v42.z.number(),
@@ -785,6 +787,13 @@ var OpenAIChatLanguageModel = class {
if (text != null && text.length > 0) {
content.push({ type: "text", text });
}
+ const reasoning = choice.message.reasoning_content;
+ if (reasoning != null && reasoning.length > 0) {
+ content.push({
+ type: 'reasoning',
+ text: reasoning
+ });
+ }
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
content.push({
type: "tool-call",
@@ -866,6 +875,7 @@ var OpenAIChatLanguageModel = class {
};
let metadataExtracted = false;
let isActiveText = false;
+ let isActiveReasoning = false;
const providerMetadata = { openai: {} };
return {
stream: response.pipeThrough(
@@ -923,6 +933,21 @@ var OpenAIChatLanguageModel = class {
return;
}
const delta = choice.delta;
+ const reasoningContent = delta.reasoning_content;
+ if (reasoningContent) {
+ if (!isActiveReasoning) {
+ controller.enqueue({
+ type: 'reasoning-start',
+ id: 'reasoning-0',
+ });
+ isActiveReasoning = true;
+ }
+ controller.enqueue({
+ type: 'reasoning-delta',
+ id: 'reasoning-0',
+ delta: reasoningContent,
+ });
+ }
if (delta.content != null) {
if (!isActiveText) {
controller.enqueue({ type: "text-start", id: "0" });
@@ -1035,6 +1060,9 @@ var OpenAIChatLanguageModel = class {
}
},
flush(controller) {
+ if (isActiveReasoning) {
+ controller.enqueue({ type: 'reasoning-end', id: 'reasoning-0' });
+ }
if (isActiveText) {
controller.enqueue({ type: "text-end", id: "0" });
}

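A rough sketch of the stream-part sequence this patch produces when a provider interleaves `reasoning_content` and `content` deltas. This is a standalone simulation of the patched chunk handling, not the SDK code itself; the delta values are made up, and the `text-delta` part shape is an assumption since that branch is truncated in the hunk above.

```ts
// Simulates the patched logic: reasoning_content deltas open a single "reasoning-0" block,
// content deltas open a "0" text block, and flush() closes whichever blocks are still active.

type Delta = { reasoning_content?: string; content?: string }
type StreamPart = { type: string; id: string; delta?: string }

function simulate(deltas: Delta[]): StreamPart[] {
  const parts: StreamPart[] = []
  let isActiveReasoning = false
  let isActiveText = false

  for (const delta of deltas) {
    if (delta.reasoning_content) {
      if (!isActiveReasoning) {
        parts.push({ type: 'reasoning-start', id: 'reasoning-0' })
        isActiveReasoning = true
      }
      parts.push({ type: 'reasoning-delta', id: 'reasoning-0', delta: delta.reasoning_content })
    }
    if (delta.content != null) {
      if (!isActiveText) {
        parts.push({ type: 'text-start', id: '0' })
        isActiveText = true
      }
      // Assumed shape; the patch excerpt only shows the "text-start" enqueue.
      parts.push({ type: 'text-delta', id: '0', delta: delta.content })
    }
  }
  // Mirrors flush(): reasoning is closed before text.
  if (isActiveReasoning) parts.push({ type: 'reasoning-end', id: 'reasoning-0' })
  if (isActiveText) parts.push({ type: 'text-end', id: '0' })
  return parts
}

// Made-up deltas: two reasoning chunks followed by two text chunks.
console.log(simulate([
  { reasoning_content: 'Let me think' },
  { reasoning_content: ' step by step.' },
  { content: 'The answer' },
  { content: ' is 42.' }
]))
```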
@@ -1,5 +1,5 @@
diff --git a/sdk.mjs b/sdk.mjs
index 10162e5b1624f8ce667768943347a6e41089ad2f..32568ae08946590e382270c88d85fba81187568e 100755
index 8cc6aaf0b25bcdf3c579ec95cde12d419fcb2a71..3b3b8beaea5ad2bbac26a15f792058306d0b059f 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6213,7 +6213,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
@@ -11,7 +11,7 @@ index 10162e5b1624f8ce667768943347a6e41089ad2f..32568ae08946590e382270c88d85fba8
import { createInterface } from "readline";

// ../src/utils/fsOperations.ts
@@ -6487,14 +6487,11 @@ class ProcessTransport {
@@ -6505,14 +6505,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}

@@ -1,71 +0,0 @@
diff --git a/dist/utils/tiktoken.cjs b/dist/utils/tiktoken.cjs
index 973b0d0e75aeaf8de579419af31b879b32975413..f23c7caa8b9dc8bd404132725346a4786f6b278b 100644
--- a/dist/utils/tiktoken.cjs
+++ b/dist/utils/tiktoken.cjs
@@ -1,25 +1,14 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.encodingForModel = exports.getEncoding = void 0;
-const lite_1 = require("js-tiktoken/lite");
const async_caller_js_1 = require("./async_caller.cjs");
const cache = {};
const caller = /* #__PURE__ */ new async_caller_js_1.AsyncCaller({});
async function getEncoding(encoding) {
- if (!(encoding in cache)) {
- cache[encoding] = caller
- .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`)
- .then((res) => res.json())
- .then((data) => new lite_1.Tiktoken(data))
- .catch((e) => {
- delete cache[encoding];
- throw e;
- });
- }
- return await cache[encoding];
+ throw new Error("TikToken Not implemented");
}
exports.getEncoding = getEncoding;
async function encodingForModel(model) {
- return getEncoding((0, lite_1.getEncodingNameForModel)(model));
+ throw new Error("TikToken Not implemented");
}
exports.encodingForModel = encodingForModel;
diff --git a/dist/utils/tiktoken.js b/dist/utils/tiktoken.js
index 8e41ee6f00f2f9c7fa2c59fa2b2f4297634b97aa..aa5f314a6349ad0d1c5aea8631a56aad099176e0 100644
--- a/dist/utils/tiktoken.js
+++ b/dist/utils/tiktoken.js
@@ -1,20 +1,9 @@
-import { Tiktoken, getEncodingNameForModel, } from "js-tiktoken/lite";
import { AsyncCaller } from "./async_caller.js";
const cache = {};
const caller = /* #__PURE__ */ new AsyncCaller({});
export async function getEncoding(encoding) {
- if (!(encoding in cache)) {
- cache[encoding] = caller
- .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`)
- .then((res) => res.json())
- .then((data) => new Tiktoken(data))
- .catch((e) => {
- delete cache[encoding];
- throw e;
- });
- }
- return await cache[encoding];
+ throw new Error("TikToken Not implemented");
}
export async function encodingForModel(model) {
- return getEncoding(getEncodingNameForModel(model));
+ throw new Error("TikToken Not implemented");
}
diff --git a/package.json b/package.json
index 36072aecf700fca1bc49832a19be832eca726103..90b8922fba1c3d1b26f78477c891b07816d6238a 100644
--- a/package.json
+++ b/package.json
@@ -37,7 +37,6 @@
"ansi-styles": "^5.0.0",
"camelcase": "6",
"decamelize": "1.2.0",
- "js-tiktoken": "^1.0.12",
"langsmith": ">=0.2.8 <0.4.0",
"mustache": "^4.2.0",
"p-queue": "^6.6.2",

.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch (vendored, new file, +68 lines)
@@ -0,0 +1,68 @@
diff --git a/dist/utils/tiktoken.cjs b/dist/utils/tiktoken.cjs
index c5b41f121d2e3d24c3a4969e31fa1acffdcad3b9..ec724489dcae79ee6c61acf2d4d84bd19daef036 100644
--- a/dist/utils/tiktoken.cjs
+++ b/dist/utils/tiktoken.cjs
@@ -1,6 +1,5 @@
const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
const require_utils_async_caller = require('./async_caller.cjs');
-const js_tiktoken_lite = require_rolldown_runtime.__toESM(require("js-tiktoken/lite"));

//#region src/utils/tiktoken.ts
var tiktoken_exports = {};
@@ -11,14 +10,10 @@ require_rolldown_runtime.__export(tiktoken_exports, {
const cache = {};
const caller = /* @__PURE__ */ new require_utils_async_caller.AsyncCaller({});
async function getEncoding(encoding) {
- if (!(encoding in cache)) cache[encoding] = caller.fetch(`https://tiktoken.pages.dev/js/${encoding}.json`).then((res) => res.json()).then((data) => new js_tiktoken_lite.Tiktoken(data)).catch((e) => {
- delete cache[encoding];
- throw e;
- });
- return await cache[encoding];
+ throw new Error("TikToken Not implemented");
}
async function encodingForModel(model) {
- return getEncoding((0, js_tiktoken_lite.getEncodingNameForModel)(model));
+ throw new Error("TikToken Not implemented");
}

//#endregion
diff --git a/dist/utils/tiktoken.js b/dist/utils/tiktoken.js
index 641acca03cb92f04a6fa5c9c31f1880ce635572e..707389970ad957aa0ff20ef37fa8dd2875be737c 100644
--- a/dist/utils/tiktoken.js
+++ b/dist/utils/tiktoken.js
@@ -1,6 +1,5 @@
import { __export } from "../_virtual/rolldown_runtime.js";
import { AsyncCaller } from "./async_caller.js";
-import { Tiktoken, getEncodingNameForModel } from "js-tiktoken/lite";

//#region src/utils/tiktoken.ts
var tiktoken_exports = {};
@@ -11,14 +10,10 @@ __export(tiktoken_exports, {
const cache = {};
const caller = /* @__PURE__ */ new AsyncCaller({});
async function getEncoding(encoding) {
- if (!(encoding in cache)) cache[encoding] = caller.fetch(`https://tiktoken.pages.dev/js/${encoding}.json`).then((res) => res.json()).then((data) => new Tiktoken(data)).catch((e) => {
- delete cache[encoding];
- throw e;
- });
- return await cache[encoding];
+ throw new Error("TikToken Not implemented");
}
async function encodingForModel(model) {
- return getEncoding(getEncodingNameForModel(model));
+ throw new Error("TikToken Not implemented");
}

//#endregion
diff --git a/package.json b/package.json
index a24f8fc61de58526051999260f2ebee5f136354b..e885359e8966e7730c51772533ce37e01edb3046 100644
--- a/package.json
+++ b/package.json
@@ -20,7 +20,6 @@
"ansi-styles": "^5.0.0",
"camelcase": "6",
"decamelize": "1.2.0",
- "js-tiktoken": "^1.0.12",
"langsmith": "^0.3.64",
"mustache": "^4.2.0",
"p-queue": "^6.6.2",

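Because the patched `getEncoding` and `encodingForModel` now throw unconditionally, any code path that still relies on js-tiktoken-based token counting has to tolerate that failure. A minimal defensive sketch under that assumption follows; the injected `encodingForModel` parameter and the 4-characters-per-token fallback are illustrative only, not part of the patch.

```ts
// Callers of the patched helpers should expect "TikToken Not implemented" and fall back
// to a rough estimate if an exact token count is not essential.

async function countTokensSafely(
  text: string,
  encodingForModel: (model: string) => Promise<{ encode(t: string): number[] }>,
  model = 'gpt-4o-mini'
): Promise<number> {
  try {
    const enc = await encodingForModel(model)
    return enc.encode(text).length
  } catch {
    // Patched build throws; use a crude character-based estimate instead.
    return Math.ceil(text.length / 4)
  }
}
```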
@@ -1,19 +0,0 @@
diff --git a/dist/embeddings.js b/dist/embeddings.js
index 1f8154be3e9c22442a915eb4b85fa6d2a21b0d0c..dc13ef4a30e6c282824a5357bcee9bd0ae222aab 100644
--- a/dist/embeddings.js
+++ b/dist/embeddings.js
@@ -214,10 +214,12 @@ export class OpenAIEmbeddings extends Embeddings {
* @returns Promise that resolves to an embedding for the document.
*/
async embedQuery(text) {
+ const isBaiduCloud = this.clientConfig.baseURL.includes('baidubce.com')
+ const input = this.stripNewLines ? text.replace(/\n/g, ' ') : text
const params = {
model: this.model,
- input: this.stripNewLines ? text.replace(/\n/g, " ") : text,
- };
+ input: isBaiduCloud ? [input] : input
+ }
if (this.dimensions) {
params.dimensions = this.dimensions;
}

.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch (vendored, new file, +17 lines)
@@ -0,0 +1,17 @@
diff --git a/dist/embeddings.js b/dist/embeddings.js
index 6f4b928d3e4717309382e1b5c2e31ab5bc6c5af0..bc79429c88a6d27d4997a2740c4d8ae0707f5991 100644
--- a/dist/embeddings.js
+++ b/dist/embeddings.js
@@ -94,9 +94,11 @@ var OpenAIEmbeddings = class extends Embeddings {
* @returns Promise that resolves to an embedding for the document.
*/
async embedQuery(text) {
+ const isBaiduCloud = this.clientConfig.baseURL.includes('baidubce.com');
+ const input = this.stripNewLines ? text.replace(/\n/g, " ") : text
const params = {
model: this.model,
- input: this.stripNewLines ? text.replace(/\n/g, " ") : text
+ input: isBaiduCloud ? [input] : input
};
if (this.dimensions) params.dimensions = this.dimensions;
if (this.encodingFormat) params.encoding_format = this.encodingFormat;

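A standalone sketch of the request-body difference this patch introduces, mirroring the patched `embedQuery` logic: when the configured base URL points at baidubce.com, the single query string is wrapped in an array (presumably because that endpoint expects `input` as an array). The base URL and model name below are illustrative.

```ts
// Mirrors the patched embedQuery(): wrap the input in an array only for baidubce.com base URLs.

function buildEmbeddingParams(opts: {
  baseURL: string
  model: string
  text: string
  stripNewLines?: boolean
}): { model: string; input: string | string[] } {
  const isBaiduCloud = opts.baseURL.includes('baidubce.com')
  const input = opts.stripNewLines ? opts.text.replace(/\n/g, ' ') : opts.text
  return { model: opts.model, input: isBaiduCloud ? [input] : input }
}

// Illustrative values:
console.log(buildEmbeddingParams({
  baseURL: 'https://qianfan.baidubce.com/v2',
  model: 'embedding-v1',
  text: 'hello\nworld',
  stripNewLines: true
})) // { model: 'embedding-v1', input: [ 'hello world' ] }

console.log(buildEmbeddingParams({
  baseURL: 'https://api.openai.com/v1',
  model: 'text-embedding-3-small',
  text: 'hello world'
})) // { model: 'text-embedding-3-small', input: 'hello world' }
```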
@@ -7,12 +7,10 @@ This file provides guidance to AI coding assistants when working with code in th
- **Keep it clear**: Write code that is easy to read, maintain, and explain.
- **Match the house style**: Reuse existing patterns, naming, and conventions.
- **Search smart**: Prefer `ast-grep` for semantic queries; fall back to `rg`/`grep` when needed.
- **Build with HeroUI**: Use HeroUI for every new UI component; never add `antd` or `styled-components`.
- **Log centrally**: Route all logging through `loggerService` with the right context—no `console.log`.
- **Research via subagent**: Lean on `subagent` for external docs, APIs, news, and references.
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
- **Write conventional commits with emoji**: Commit small, focused changes using emoji-prefixed Conventional Commit messages (e.g., `✨ feat:`, `🐛 fix:`, `♻️ refactor:`, `📝 docs:`).
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).

## Development Commands

@@ -41,7 +39,6 @@ This file provides guidance to AI coding assistants when working with code in th
- **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
- **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
- **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.
- **UI Components**: HeroUI (`@heroui/*`) for all new UI elements.

### Logging
```typescript

@@ -77,7 +77,7 @@ Please review the following critical information before submitting your Pull Req
Our core team is currently focused on significant architectural updates that involve these data structures. To ensure stability and focus during this period, contributions of this nature will be temporarily managed internally.

* **PRs that require changes to Redux state shape or IndexedDB schemas will be closed.**
* **This restriction is temporary and will be lifted with the release of `v2.0.0`.** You can track the progress of `v2.0.0` and its related discussions on issue [#10162](https://github.com/YOUR_ORG/YOUR_REPO/issues/10162) (please replace with your actual repo link).
* **This restriction is temporary and will be lifted with the release of `v2.0.0`.** You can track the progress of `v2.0.0` and its related discussions on issue [#10162](https://github.com/CherryHQ/cherry-studio/pull/10162).

We highly encourage contributions for:
* Bug fixes 🐞

README.md (12 changes)
@@ -82,7 +82,7 @@ Cherry Studio is a desktop client that supports multiple LLM providers, availabl
1. **Diverse LLM Provider Support**:

- ☁️ Major LLM Cloud Services: OpenAI, Gemini, Anthropic, and more
- 🔗 AI Web Service Integration: Claude, Perplexity, Poe, and others
- 🔗 AI Web Service Integration: Claude, Perplexity, [Poe](https://poe.com/), and others
- 💻 Local Model Support with Ollama, LM Studio

2. **AI Assistants & Conversations**:
@@ -238,10 +238,6 @@ The Enterprise Edition addresses core challenges in team collaboration by centra

## ✨ Online Demo

> 🚧 **Public Beta Notice**
>
> The Enterprise Edition is currently in its early public beta stage, and we are actively iterating and optimizing its features. We are aware that it may not be perfectly stable yet. If you encounter any issues or have valuable suggestions during your trial, we would be very grateful if you could contact us via email to provide feedback.

**🔗 [Cherry Studio Enterprise](https://www.cherry-ai.com/enterprise)**

## Version Comparison
@@ -249,7 +245,7 @@ The Enterprise Edition addresses core challenges in team collaboration by centra
| Feature | Community Edition | Enterprise Edition |
| :---------------- | :----------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------- |
| **Open Source** | ✅ Yes | ⭕️ Partially released to customers |
| **Cost** | Free for Personal Use / Commercial License | Buyout / Subscription Fee |
| **Cost** | [AGPL-3.0 License](https://github.com/CherryHQ/cherry-studio?tab=AGPL-3.0-1-ov-file) | Buyout / Subscription Fee |
| **Admin Backend** | — | ● Centralized **Model** Access<br>● **Employee** Management<br>● Shared **Knowledge Base**<br>● **Access** Control<br>● **Data** Backup |
| **Server** | — | ✅ Dedicated Private Deployment |

@@ -262,8 +258,12 @@ We believe the Enterprise Edition will become your team's AI productivity engine

# 🔗 Related Projects

- [new-api](https://github.com/QuantumNous/new-api): The next-generation LLM gateway and AI asset management system supports multiple languages.

- [one-api](https://github.com/songquanpeng/one-api): LLM API management and distribution system supporting mainstream models like OpenAI, Azure, and Anthropic. Features a unified API interface, suitable for key management and secondary distribution.

- [Poe](https://poe.com/): Poe gives you access to the best AI, all in one place. Explore GPT-5, Claude Opus 4.1, DeepSeek-R1, Veo 3, ElevenLabs, and millions of others.

- [ublacklist](https://github.com/iorate/ublacklist): Blocks specific sites from appearing in Google search results

# 🚀 Contributors

@@ -23,7 +23,7 @@
},
"files": {
"ignoreUnknown": false,
"includes": ["**"],
"includes": ["**", "!**/.claude/**"],
"maxSize": 2097152
},
"formatter": {

@@ -1,21 +0,0 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"aliases": {
"components": "@renderer/ui/third-party",
"hooks": "@renderer/hooks",
"lib": "@renderer/lib",
"ui": "@renderer/ui",
"utils": "@renderer/utils"
},
"iconLibrary": "lucide",
"rsc": false,
"style": "new-york",
"tailwind": {
"baseColor": "zinc",
"config": "",
"css": "src/renderer/src/assets/styles/tailwind.css",
"cssVariables": true,
"prefix": ""
},
"tsx": true
}

@@ -81,7 +81,7 @@ git commit --signoff -m "Your commit message"
我们的核心团队目前正专注于涉及这些数据结构的关键架构更新和基础工作。为确保在此期间的稳定性与专注,此类贡献将暂时由内部进行管理。

* **需要更改 Redux 状态结构或 IndexedDB schema 的 PR 将会被关闭。**
* **此限制是临时性的,并将在 `v2.0.0` 版本发布后解除。** 您可以通过 Issue [#10162](https://github.com/YOUR_ORG/YOUR_REPO/issues/10162) (请替换为您的实际仓库链接) 跟踪 `v2.0.0` 的进展及相关讨论。
* **此限制是临时性的,并将在 `v2.0.0` 版本发布后解除。** 您可以通过 Issue [#10162](https://github.com/CherryHQ/cherry-studio/pull/10162) 跟踪 `v2.0.0` 的进展及相关讨论。

我们非常鼓励以下类型的贡献:
* 错误修复 🐞

@@ -18,13 +18,13 @@ yarn

### Setup Node.js

Download and install [Node.js v20.x.x](https://nodejs.org/en/download)
Download and install [Node.js v22.x.x](https://nodejs.org/en/download)

### Setup Yarn

```bash
corepack enable
corepack prepare yarn@4.6.0 --activate
corepack prepare yarn@4.9.1 --activate
```

### Install Dependencies

@@ -11,6 +11,8 @@ The Test Plan is divided into the RC channel and the Beta channel, with the foll

Users can enable the "Test Plan" and select the version channel in the software's `Settings` > `About`. Please note that the versions in the "Test Plan" cannot guarantee data consistency, so be sure to back up your data before using them.

After enabling the RC channel or Beta channel, if a stable version is released, users will still be upgraded to the stable version.

Users are welcome to submit issues or provide feedback through other channels for any bugs encountered during testing. Your feedback is very important to us.

## Developer Guide

@@ -11,6 +11,8 @@

用户可以在软件的`设置`-`关于`中,开启“测试计划”并选择版本通道。请注意“测试计划”的版本无法保证数据的一致性,请使用前一定要备份数据。

用户选择RC版通道或Beta版通道后,若发布了正式版,仍旧会升级到正式版。

用户在测试过程中发现的BUG,欢迎提交issue或通过其他渠道反馈。用户的反馈对我们非常重要。

## 开发者指南

@@ -21,6 +21,8 @@ files:
- "**/*"
- "!**/{.vscode,.yarn,.yarn-lock,.github,.cursorrules,.prettierrc}"
- "!electron.vite.config.{js,ts,mjs,cjs}}"
- "!.*"
- "!components.json"
- "!**/{.eslintignore,.eslintrc.js,.eslintrc.json,.eslintcache,root.eslint.config.js,eslint.config.js,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,eslint.config.mjs,dev-app-update.yml,CHANGELOG.md,README.md,biome.jsonc}"
- "!**/{.env,.env.*,.npmrc,pnpm-lock.yaml}"
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"

@@ -133,60 +135,42 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
What's New in v1.7.0-beta.2
What's New in v1.7.0-beta.6

New Features:
- Session Settings: Manage session-specific settings and model configurations independently
- Notes Full-Text Search: Search across all notes with match highlighting
- Built-in DiDi MCP Server: Integration with DiDi ride-hailing services (China only)
- Intel OV OCR: Hardware-accelerated OCR using Intel NPU
- Auto-start API Server: Automatically starts when agents exist
- Enhanced Input Bar: Completely redesigned input bar with improved responsiveness and functionality
- Better File Handling: Improved drag-and-drop and paste support for images and documents
- Smart Tool Suggestions: Enhanced quick panel with better item selection and keyboard shortcuts

Improvements:
- Agent model selection now requires explicit user choice
- Added Mistral AI provider support
- Added NewAPI generic provider support
- Improved navbar layout consistency across different modes
- Enhanced chat component responsiveness
- Better code block display on small screens
- Updated OVMS to 2025.3 official release
- Added Greek language support
- Smoother Input Experience: Better auto-resizing and text handling in chat input
- Enhanced AI Performance: Improved connection stability and response speed
- More Reliable File Uploads: Better support for various file types and upload scenarios
- Cleaner Interface: Optimized UI elements for better visual consistency

Bug Fixes:
- Fixed GitHub Copilot gpt-5-codex streaming issues
- Fixed assistant creation failures
- Fixed translate auto-copy functionality
- Fixed miniapps external link opening
- Fixed message layout and overflow issues
- Fixed API key parsing to preserve spaces
- Fixed agent display in different navbar layouts
- Fixed image selection issue when adding custom AI providers
- Fixed file upload problems with certain API configurations
- Fixed input bar responsiveness issues
- Fixed quick panel not working properly in some situations

<!--LANG:zh-CN-->
v1.7.0-beta.2 新特性
v1.7.0-beta.6 新特性

新功能:
- 会话设置:独立管理会话特定的设置和模型配置
- 笔记全文搜索:跨所有笔记搜索并高亮匹配内容
- 内置滴滴 MCP 服务器:集成滴滴打车服务(仅限中国地区)
- Intel OV OCR:使用 Intel NPU 的硬件加速 OCR
- 自动启动 API 服务器:当存在 Agent 时自动启动
- 增强输入栏:完全重新设计的输入栏,响应更灵敏,功能更强大
- 更好的文件处理:改进的拖拽和粘贴功能,支持图片和文档
- 智能工具建议:增强的快速面板,更好的项目选择和键盘快捷键

改进:
- Agent 模型选择现在需要用户显式选择
- 添加 Mistral AI 提供商支持
- 添加 NewAPI 通用提供商支持
- 改进不同模式下的导航栏布局一致性
- 增强聊天组件响应式设计
- 优化小屏幕代码块显示
- 更新 OVMS 至 2025.3 正式版
- 添加希腊语支持
- 更流畅的输入体验:聊天输入框的自动调整和文本处理更佳
- 增强 AI 性能:改进连接稳定性和响应速度
- 更可靠的文件上传:更好地支持各种文件类型和上传场景
- 更简洁的界面:优化 UI 元素,视觉一致性更好

问题修复:
- 修复 GitHub Copilot gpt-5-codex 流式传输问题
- 修复助手创建失败
- 修复翻译自动复制功能
- 修复小程序外部链接打开
- 修复消息布局和溢出问题
- 修复 API 密钥解析以保留空格
- 修复不同导航栏布局中的 Agent 显示
- 修复添加自定义 AI 提供商时的图片选择问题
- 修复某些 API 配置下的文件上传问题
- 修复输入栏响应性问题
- 修复快速面板在某些情况下无法正常工作的问题
<!--LANG:END-->

@@ -95,7 +95,8 @@ export default defineConfig({
'@cherrystudio/ai-core/provider': resolve('packages/aiCore/src/core/providers'),
'@cherrystudio/ai-core/built-in/plugins': resolve('packages/aiCore/src/core/plugins/built-in'),
'@cherrystudio/ai-core': resolve('packages/aiCore/src'),
'@cherrystudio/extension-table-plus': resolve('packages/extension-table-plus/src')
'@cherrystudio/extension-table-plus': resolve('packages/extension-table-plus/src'),
'@cherrystudio/ai-sdk-provider': resolve('packages/ai-sdk-provider/src')
}
},
optimizeDeps: {

package.json (50 changes)
@@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "1.7.0-beta.2",
"version": "1.7.0-beta.3",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
@@ -78,10 +78,11 @@
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core npm publish --access public"
},
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.25#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.25-08bbabb5d3.patch",
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.30#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.30-b50a299674.patch",
"@libsql/client": "0.14.0",
"@libsql/win32-x64-msvc": "^0.4.7",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"@paymoapp/electron-shutdown-handler": "^1.1.2",
"@strongtz/win32-arm64-msvc": "^0.4.7",
"express": "^5.1.0",
"font-list": "^2.0.0",
@@ -92,8 +93,10 @@
"node-stream-zip": "^1.15.0",
"officeparser": "^4.2.0",
"os-proxy-config": "^1.1.2",
"qrcode.react": "^4.2.0",
"selection-hook": "^1.0.12",
"sharp": "^0.34.3",
"socket.io": "^4.8.1",
"swagger-jsdoc": "^6.2.8",
"swagger-ui-express": "^5.0.1",
"tesseract.js": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
@@ -103,17 +106,19 @@
"@agentic/exa": "^7.3.3",
"@agentic/searxng": "^7.3.3",
"@agentic/tavily": "^7.3.3",
"@ai-sdk/amazon-bedrock": "^3.0.35",
"@ai-sdk/google-vertex": "^3.0.40",
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.4#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.4-8080836bc1.patch",
"@ai-sdk/mistral": "^2.0.19",
"@ai-sdk/perplexity": "^2.0.13",
"@ai-sdk/amazon-bedrock": "^3.0.53",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.9",
"@ai-sdk/google-vertex": "^3.0.62",
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch",
"@ai-sdk/mistral": "^2.0.23",
"@ai-sdk/perplexity": "^2.0.17",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@anthropic-ai/sdk": "^0.41.0",
"@anthropic-ai/vertex-sdk": "patch:@anthropic-ai/vertex-sdk@npm%3A0.11.4#~/.yarn/patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
"@aws-sdk/client-bedrock": "^3.840.0",
"@aws-sdk/client-bedrock-runtime": "^3.840.0",
"@aws-sdk/client-s3": "^3.840.0",
"@aws-sdk/client-bedrock": "^3.910.0",
"@aws-sdk/client-bedrock-runtime": "^3.910.0",
"@aws-sdk/client-s3": "^3.910.0",
"@biomejs/biome": "2.2.4",
"@cherrystudio/ai-core": "workspace:^1.0.0-alpha.18",
"@cherrystudio/embedjs": "^0.1.31",
@@ -144,9 +149,10 @@
"@eslint/js": "^9.22.0",
"@google/genai": "patch:@google/genai@npm%3A1.0.1#~/.yarn/patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
"@hello-pangea/dnd": "^18.0.1",
"@heroui/react": "^2.8.3",
"@kangfenmao/keyv-storage": "^0.1.0",
"@langchain/community": "^0.3.50",
"@langchain/community": "^1.0.0",
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@mistralai/mistralai": "^1.7.5",
"@modelcontextprotocol/sdk": "^1.17.5",
"@mozilla/readability": "^0.6.0",
@@ -227,7 +233,7 @@
"@viz-js/lang-dot": "^1.0.5",
"@viz-js/viz": "^3.14.0",
"@xyflow/react": "^12.4.4",
"ai": "^5.0.68",
"ai": "^5.0.90",
"antd": "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch",
"archiver": "^7.0.1",
"async-mutex": "^0.5.0",
@@ -237,7 +243,7 @@
"check-disk-space": "3.4.0",
"cheerio": "^1.1.2",
"chokidar": "^4.0.3",
"claude-code-plugins": "1.0.1",
"claude-code-plugins": "1.0.3",
"cli-progress": "^3.12.0",
"clsx": "^2.1.1",
"code-inspector-plugin": "^0.20.14",
@@ -344,6 +350,7 @@
"striptags": "^3.2.0",
"styled-components": "^6.1.11",
"swr": "^2.3.6",
"tailwind-merge": "^3.3.1",
"tailwindcss": "^4.1.13",
"tar": "^7.4.3",
"tiny-pinyin": "^1.3.2",
@@ -369,12 +376,11 @@
"zod": "^4.1.5"
},
"resolutions": {
"@smithy/types": "4.7.1",
"@codemirror/language": "6.11.3",
"@codemirror/lint": "6.8.5",
"@codemirror/view": "6.38.1",
"@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A0.3.44#~/.yarn/patches/@langchain-core-npm-0.3.44-41d5c3cb0a.patch",
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch",
"@langchain/openai@npm:>=0.1.0 <0.4.0": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch",
"@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"app-builder-lib@npm:26.0.13": "patch:app-builder-lib@npm%3A26.0.13#~/.yarn/patches/app-builder-lib-npm-26.0.13-a064c9e1d0.patch",
"app-builder-lib@npm:26.0.15": "patch:app-builder-lib@npm%3A26.0.15#~/.yarn/patches/app-builder-lib-npm-26.0.15-360e5b0476.patch",
"atomically@npm:^1.7.0": "patch:atomically@npm%3A1.7.0#~/.yarn/patches/atomically-npm-1.7.0-e742e5293b.patch",
@@ -390,14 +396,20 @@
"undici": "6.21.2",
"vite": "npm:rolldown-vite@7.1.5",
"tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
"@ai-sdk/google@npm:2.0.20": "patch:@ai-sdk/google@npm%3A2.0.20#~/.yarn/patches/@ai-sdk-google-npm-2.0.20-b9102f9d54.patch",
"@ai-sdk/openai@npm:^2.0.52": "patch:@ai-sdk/openai@npm%3A2.0.52#~/.yarn/patches/@ai-sdk-openai-npm-2.0.52-b36d949c76.patch",
"@img/sharp-darwin-arm64": "0.34.3",
"@img/sharp-darwin-x64": "0.34.3",
"@img/sharp-linux-arm": "0.34.3",
"@img/sharp-linux-arm64": "0.34.3",
"@img/sharp-linux-x64": "0.34.3",
"@img/sharp-win32-x64": "0.34.3",
"openai@npm:5.12.2": "npm:@cherrystudio/openai@6.5.0"
"openai@npm:5.12.2": "npm:@cherrystudio/openai@6.5.0",
"@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:2.0.64": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/google@npm:2.0.31": "patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {

packages/ai-sdk-provider/README.md (new file, +39 lines)
@@ -0,0 +1,39 @@
# @cherrystudio/ai-sdk-provider

CherryIN provider bundle for the [Vercel AI SDK](https://ai-sdk.dev/).
It exposes the CherryIN OpenAI-compatible entrypoints and dynamically routes Anthropic and Gemini model ids to their CherryIN upstream equivalents.

## Installation

```bash
npm install ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
# or
yarn add ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
```

> **Note**: This package requires peer dependencies `ai`, `@ai-sdk/anthropic`, `@ai-sdk/google`, and `@ai-sdk/openai` to be installed.

## Usage

```ts
import { createCherryIn, cherryIn } from '@cherrystudio/ai-sdk-provider'

const cherryInProvider = createCherryIn({
  apiKey: process.env.CHERRYIN_API_KEY,
  // optional overrides:
  // baseURL: 'https://open.cherryin.net/v1',
  // anthropicBaseURL: 'https://open.cherryin.net/anthropic',
  // geminiBaseURL: 'https://open.cherryin.net/gemini/v1beta',
})

// Chat models will auto-route based on the model id prefix:
const openaiModel = cherryInProvider.chat('gpt-4o-mini')
const anthropicModel = cherryInProvider.chat('claude-3-5-sonnet-latest')
const geminiModel = cherryInProvider.chat('gemini-2.0-pro-exp')

const { text } = await openaiModel.invoke('Hello CherryIN!')
```

The provider also exposes `completion`, `responses`, `embedding`, `image`, `transcription`, and `speech` helpers aligned with the upstream APIs.

See [AI SDK docs](https://ai-sdk.dev/providers/community-providers/custom-providers) for configuring custom providers.

64
packages/ai-sdk-provider/package.json
Normal file
64
packages/ai-sdk-provider/package.json
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"name": "@cherrystudio/ai-sdk-provider",
|
||||
"version": "0.1.0",
|
||||
"description": "Cherry Studio AI SDK provider bundle with CherryIN routing.",
|
||||
"keywords": [
|
||||
"ai-sdk",
|
||||
"provider",
|
||||
"cherryin",
|
||||
"vercel-ai-sdk",
|
||||
"cherry-studio"
|
||||
],
|
||||
"author": "Cherry Studio",
|
||||
"license": "MIT",
|
||||
"homepage": "https://github.com/CherryHQ/cherry-studio",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/CherryHQ/cherry-studio.git",
|
||||
"directory": "packages/ai-sdk-provider"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/CherryHQ/cherry-studio/issues"
|
||||
},
|
||||
"type": "module",
|
||||
"main": "dist/index.cjs",
|
||||
"module": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsdown",
|
||||
"dev": "tsc -w",
|
||||
"clean": "rm -rf dist",
|
||||
"test": "vitest run",
|
||||
"test:watch": "vitest"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@ai-sdk/anthropic": "^2.0.29",
|
||||
"@ai-sdk/google": "^2.0.23",
|
||||
"@ai-sdk/openai": "^2.0.64",
|
||||
"ai": "^5.0.26"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.12"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tsdown": "^0.13.3",
|
||||
"typescript": "^5.8.2",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.cjs",
|
||||
"default": "./dist/index.js"
|
||||
}
|
||||
}
|
||||
}
|
||||
319
packages/ai-sdk-provider/src/cherryin-provider.ts
Normal file
319
packages/ai-sdk-provider/src/cherryin-provider.ts
Normal file
@@ -0,0 +1,319 @@
|
||||
import { AnthropicMessagesLanguageModel } from '@ai-sdk/anthropic/internal'
|
||||
import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal'
|
||||
import type { OpenAIProviderSettings } from '@ai-sdk/openai'
|
||||
import {
|
||||
OpenAIChatLanguageModel,
|
||||
OpenAICompletionLanguageModel,
|
||||
OpenAIEmbeddingModel,
|
||||
OpenAIImageModel,
|
||||
OpenAIResponsesLanguageModel,
|
||||
OpenAISpeechModel,
|
||||
OpenAITranscriptionModel
|
||||
} from '@ai-sdk/openai/internal'
|
||||
import {
|
||||
type EmbeddingModelV2,
|
||||
type ImageModelV2,
|
||||
type LanguageModelV2,
|
||||
type ProviderV2,
|
||||
type SpeechModelV2,
|
||||
type TranscriptionModelV2
|
||||
} from '@ai-sdk/provider'
|
||||
import type { FetchFunction } from '@ai-sdk/provider-utils'
|
||||
import { loadApiKey, withoutTrailingSlash } from '@ai-sdk/provider-utils'
|
||||
|
||||
export const CHERRYIN_PROVIDER_NAME = 'cherryin' as const
|
||||
export const DEFAULT_CHERRYIN_BASE_URL = 'https://open.cherryin.net/v1'
|
||||
export const DEFAULT_CHERRYIN_ANTHROPIC_BASE_URL = 'https://open.cherryin.net/v1'
|
||||
export const DEFAULT_CHERRYIN_GEMINI_BASE_URL = 'https://open.cherryin.net/v1beta/models'
|
||||
|
||||
const ANTHROPIC_PREFIX = /^anthropic\//i
|
||||
const GEMINI_PREFIX = /^google\//i
|
||||
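// Model ids prefixed with `anthropic/` or `google/` are routed to the Anthropic and Gemini
// entrypoints by `createChatModel` below; all other ids use the OpenAI-compatible endpoints.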
// const GEMINI_EXCLUDED_SUFFIXES = ['-nothink', '-search']
|
||||
|
||||
type HeaderValue = string | undefined
|
||||
|
||||
type HeadersInput = Record<string, HeaderValue> | (() => Record<string, HeaderValue>)
|
||||
|
||||
export interface CherryInProviderSettings {
|
||||
/**
|
||||
* CherryIN API key.
|
||||
*
|
||||
* If omitted, the provider will read the `CHERRYIN_API_KEY` environment variable.
|
||||
*/
|
||||
apiKey?: string
|
||||
/**
|
||||
* Optional custom fetch implementation.
|
||||
*/
|
||||
fetch?: FetchFunction
|
||||
/**
|
||||
* Base URL for OpenAI-compatible CherryIN endpoints.
|
||||
*
|
||||
* Defaults to `https://open.cherryin.net/v1`.
|
||||
*/
|
||||
baseURL?: string
|
||||
/**
|
||||
* Base URL for Anthropic-compatible endpoints.
|
||||
*
|
||||
* Defaults to `https://open.cherryin.net/anthropic`.
|
||||
*/
|
||||
anthropicBaseURL?: string
|
||||
/**
|
||||
* Base URL for Gemini-compatible endpoints.
|
||||
*
|
||||
* Defaults to `https://open.cherryin.net/gemini/v1beta`.
|
||||
*/
|
||||
geminiBaseURL?: string
|
||||
/**
|
||||
* Optional static headers applied to every request.
|
||||
*/
|
||||
headers?: HeadersInput
|
||||
}
|
||||
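// Usage sketch: `createCherryIn()` with no arguments reads CHERRYIN_API_KEY from the environment;
// pass `apiKey`, `baseURL`, `anthropicBaseURL`, or `geminiBaseURL` here to override the defaults.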
|
||||
export interface CherryInProvider extends ProviderV2 {
|
||||
(modelId: string, settings?: OpenAIProviderSettings): LanguageModelV2
|
||||
languageModel(modelId: string, settings?: OpenAIProviderSettings): LanguageModelV2
|
||||
chat(modelId: string, settings?: OpenAIProviderSettings): LanguageModelV2
|
||||
responses(modelId: string): LanguageModelV2
|
||||
completion(modelId: string, settings?: OpenAIProviderSettings): LanguageModelV2
|
||||
embedding(modelId: string, settings?: OpenAIProviderSettings): EmbeddingModelV2<string>
|
||||
textEmbedding(modelId: string, settings?: OpenAIProviderSettings): EmbeddingModelV2<string>
|
||||
textEmbeddingModel(modelId: string, settings?: OpenAIProviderSettings): EmbeddingModelV2<string>
|
||||
image(modelId: string, settings?: OpenAIProviderSettings): ImageModelV2
|
||||
imageModel(modelId: string, settings?: OpenAIProviderSettings): ImageModelV2
|
||||
transcription(modelId: string): TranscriptionModelV2
|
||||
transcriptionModel(modelId: string): TranscriptionModelV2
|
||||
speech(modelId: string): SpeechModelV2
|
||||
speechModel(modelId: string): SpeechModelV2
|
||||
}
|
||||
|
||||
const resolveApiKey = (options: CherryInProviderSettings): string =>
|
||||
loadApiKey({
|
||||
apiKey: options.apiKey,
|
||||
environmentVariableName: 'CHERRYIN_API_KEY',
|
||||
description: 'CherryIN'
|
||||
})
|
||||
|
||||
const isAnthropicModel = (modelId: string) => ANTHROPIC_PREFIX.test(modelId)
|
||||
const isGeminiModel = (modelId: string) => GEMINI_PREFIX.test(modelId)
|
||||
|
||||
const createCustomFetch = (originalFetch?: any) => {
|
||||
return async (url: string, options: any) => {
|
||||
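// Defensive cleanup of the serialized request body: when `tools` is an empty array but
// `tool_choice` is still set, drop `tool_choice` before forwarding the request.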
if (options?.body) {
|
||||
try {
|
||||
const body = JSON.parse(options.body)
|
||||
if (body.tools && Array.isArray(body.tools) && body.tools.length === 0 && body.tool_choice) {
|
||||
delete body.tool_choice
|
||||
options.body = JSON.stringify(body)
|
||||
}
|
||||
} catch (error) {
|
||||
// ignore JSON parse errors and forward the body unchanged
|
||||
}
|
||||
}
|
||||
|
||||
return originalFetch ? originalFetch(url, options) : fetch(url, options)
|
||||
}
|
||||
}
|
||||
class CherryInOpenAIChatLanguageModel extends OpenAIChatLanguageModel {
|
||||
constructor(modelId: string, settings: any) {
|
||||
super(modelId, {
|
||||
...settings,
|
||||
fetch: createCustomFetch(settings.fetch)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const resolveConfiguredHeaders = (headers?: HeadersInput): Record<string, HeaderValue> => {
|
||||
if (typeof headers === 'function') {
|
||||
return { ...headers() }
|
||||
}
|
||||
return headers ? { ...headers } : {}
|
||||
}
|
||||
|
||||
const toBearerToken = (authorization?: string) => (authorization ? authorization.replace(/^Bearer\s+/i, '') : undefined)
|
||||
|
||||
const createJsonHeadersGetter = (options: CherryInProviderSettings): (() => Record<string, HeaderValue>) => {
|
||||
return () => ({
|
||||
Authorization: `Bearer ${resolveApiKey(options)}`,
|
||||
'Content-Type': 'application/json',
|
||||
...resolveConfiguredHeaders(options.headers)
|
||||
})
|
||||
}
|
||||
|
||||
const createAuthHeadersGetter = (options: CherryInProviderSettings): (() => Record<string, HeaderValue>) => {
|
||||
return () => ({
|
||||
Authorization: `Bearer ${resolveApiKey(options)}`,
|
||||
...resolveConfiguredHeaders(options.headers)
|
||||
})
|
||||
}
|
||||
|
||||
export const createCherryIn = (options: CherryInProviderSettings = {}): CherryInProvider => {
|
||||
const {
|
||||
baseURL = DEFAULT_CHERRYIN_BASE_URL,
|
||||
anthropicBaseURL = DEFAULT_CHERRYIN_ANTHROPIC_BASE_URL,
|
||||
geminiBaseURL = DEFAULT_CHERRYIN_GEMINI_BASE_URL,
|
||||
fetch
|
||||
} = options
|
||||
|
||||
const getJsonHeaders = createJsonHeadersGetter(options)
|
||||
const getAuthHeaders = createAuthHeadersGetter(options)
|
||||
|
||||
const url = ({ path }: { path: string; modelId: string }) => `${withoutTrailingSlash(baseURL)}${path}`
|
||||
|
||||
const createAnthropicModel = (modelId: string) =>
|
||||
new AnthropicMessagesLanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.anthropic`,
|
||||
baseURL: anthropicBaseURL,
|
||||
headers: () => {
|
||||
const headers = getJsonHeaders()
|
||||
const apiKey = toBearerToken(headers.Authorization)
|
||||
return {
|
||||
...headers,
|
||||
'x-api-key': apiKey
|
||||
}
|
||||
},
|
||||
fetch,
|
||||
supportedUrls: () => ({
|
||||
'image/*': [/^https?:\/\/.*$/]
|
||||
})
|
||||
})
|
||||
|
||||
const createGeminiModel = (modelId: string) =>
|
||||
new GoogleGenerativeAILanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.google`,
|
||||
baseURL: geminiBaseURL,
|
||||
headers: () => {
|
||||
const headers = getJsonHeaders()
|
||||
const apiKey = toBearerToken(headers.Authorization)
|
||||
return {
|
||||
...headers,
|
||||
'x-goog-api-key': apiKey
|
||||
}
|
||||
},
|
||||
fetch,
|
||||
generateId: () => `${CHERRYIN_PROVIDER_NAME}-${Date.now()}`,
|
||||
supportedUrls: () => ({})
|
||||
})
|
||||
|
||||
const createOpenAIChatModel = (modelId: string, settings: OpenAIProviderSettings = {}) =>
|
||||
new CherryInOpenAIChatLanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.openai-chat`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders(),
|
||||
...settings.headers
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createChatModel = (modelId: string, settings: OpenAIProviderSettings = {}) => {
|
||||
if (isAnthropicModel(modelId)) {
|
||||
return createAnthropicModel(modelId)
|
||||
}
|
||||
if (isGeminiModel(modelId)) {
|
||||
return createGeminiModel(modelId)
|
||||
}
|
||||
return new OpenAIResponsesLanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.openai`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders(),
|
||||
...settings.headers
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
}
|
||||
|
||||
const createCompletionModel = (modelId: string, settings: OpenAIProviderSettings = {}) =>
|
||||
new OpenAICompletionLanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.completion`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders(),
|
||||
...settings.headers
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createEmbeddingModel = (modelId: string, settings: OpenAIProviderSettings = {}) =>
|
||||
new OpenAIEmbeddingModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.embeddings`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders(),
|
||||
...settings.headers
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createResponsesModel = (modelId: string) =>
|
||||
new OpenAIResponsesLanguageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.responses`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders()
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createImageModel = (modelId: string, settings: OpenAIProviderSettings = {}) =>
|
||||
new OpenAIImageModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.image`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders(),
|
||||
...settings.headers
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createTranscriptionModel = (modelId: string) =>
|
||||
new OpenAITranscriptionModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.transcription`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getAuthHeaders()
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const createSpeechModel = (modelId: string) =>
|
||||
new OpenAISpeechModel(modelId, {
|
||||
provider: `${CHERRYIN_PROVIDER_NAME}.speech`,
|
||||
url,
|
||||
headers: () => ({
|
||||
...getJsonHeaders()
|
||||
}),
|
||||
fetch
|
||||
})
|
||||
|
||||
const provider: CherryInProvider = function (modelId: string, settings?: OpenAIProviderSettings) {
|
||||
if (new.target) {
|
||||
throw new Error('CherryIN provider function cannot be called with the new keyword.')
|
||||
}
|
||||
|
||||
return createChatModel(modelId, settings)
|
||||
}
|
||||
|
||||
provider.languageModel = createChatModel
|
||||
provider.chat = createOpenAIChatModel
|
||||
|
||||
provider.responses = createResponsesModel
|
||||
provider.completion = createCompletionModel
|
||||
|
||||
provider.embedding = createEmbeddingModel
|
||||
provider.textEmbedding = createEmbeddingModel
|
||||
provider.textEmbeddingModel = createEmbeddingModel
|
||||
|
||||
provider.image = createImageModel
|
||||
provider.imageModel = createImageModel
|
||||
|
||||
provider.transcription = createTranscriptionModel
|
||||
provider.transcriptionModel = createTranscriptionModel
|
||||
|
||||
provider.speech = createSpeechModel
|
||||
provider.speechModel = createSpeechModel
|
||||
|
||||
return provider
|
||||
}
|
||||
|
||||
export const cherryIn = createCherryIn()
|
||||
1
packages/ai-sdk-provider/src/index.ts
Normal file
1
packages/ai-sdk-provider/src/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export * from './cherryin-provider'
|
||||
19
packages/ai-sdk-provider/tsconfig.json
Normal file
19
packages/ai-sdk-provider/tsconfig.json
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"allowSyntheticDefaultImports": true,
|
||||
"declaration": true,
|
||||
"esModuleInterop": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"module": "ESNext",
|
||||
"moduleResolution": "bundler",
|
||||
"noEmitOnError": false,
|
||||
"outDir": "./dist",
|
||||
"resolveJsonModule": true,
|
||||
"rootDir": "./src",
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"target": "ES2020"
|
||||
},
|
||||
"exclude": ["node_modules", "dist"],
|
||||
"include": ["src/**/*"]
|
||||
}
|
||||
12
packages/ai-sdk-provider/tsdown.config.ts
Normal file
12
packages/ai-sdk-provider/tsdown.config.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import { defineConfig } from 'tsdown'
|
||||
|
||||
export default defineConfig({
|
||||
entry: {
|
||||
index: 'src/index.ts'
|
||||
},
|
||||
outDir: 'dist',
|
||||
format: ['esm', 'cjs'],
|
||||
clean: true,
|
||||
dts: true,
|
||||
tsconfig: 'tsconfig.json'
|
||||
})
|
||||
@@ -36,14 +36,16 @@
|
||||
"ai": "^5.0.26"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/anthropic": "^2.0.27",
|
||||
"@ai-sdk/azure": "^2.0.49",
|
||||
"@ai-sdk/deepseek": "^1.0.23",
|
||||
"@ai-sdk/openai": "^2.0.48",
|
||||
"@ai-sdk/openai-compatible": "^1.0.22",
|
||||
"@ai-sdk/anthropic": "^2.0.43",
|
||||
"@ai-sdk/azure": "^2.0.66",
|
||||
"@ai-sdk/deepseek": "^1.0.27",
|
||||
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch",
|
||||
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
|
||||
"@ai-sdk/openai-compatible": "^1.0.26",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.12",
|
||||
"@ai-sdk/xai": "^2.0.26",
|
||||
"@ai-sdk/provider-utils": "^3.0.16",
|
||||
"@ai-sdk/xai": "^2.0.31",
|
||||
"@cherrystudio/ai-sdk-provider": "workspace:*",
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Middleware manager
|
||||
* Focused on managing AI SDK middlewares, kept separate from the plugin system
|
||||
*/
|
||||
import { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
|
||||
/**
|
||||
* Create the list of middlewares
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/**
|
||||
* Type definitions for the middleware system
|
||||
*/
|
||||
import { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
|
||||
/**
|
||||
* Named middleware interface
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
* Model wrapping utilities
|
||||
* Used to apply middlewares to a LanguageModel
|
||||
*/
|
||||
import { LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import { wrapLanguageModel } from 'ai'
|
||||
|
||||
/**
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Integrates the special handling logic from ModelCreator
|
||||
*/
|
||||
|
||||
import { EmbeddingModelV2, ImageModelV2, LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { EmbeddingModelV2, ImageModelV2, LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
|
||||
import { wrapModelWithMiddlewares } from '../middleware/wrapper'
|
||||
import { DEFAULT_SEPARATOR, globalRegistryManagement } from '../providers/RegistryManagement'
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/**
|
||||
* Type definitions for the Creation module
|
||||
*/
|
||||
import { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
|
||||
import type { ProviderId, ProviderSettingsMap } from '../providers/types'
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { ExtractProviderOptions, ProviderOptionsMap, TypedProviderOptions } from './types'
|
||||
import type { ExtractProviderOptions, ProviderOptionsMap, TypedProviderOptions } from './types'
|
||||
|
||||
/**
|
||||
* Create options for a specific provider
|
||||
|
||||
@@ -10,7 +10,7 @@ import type { AiRequestContext } from '../../types'
|
||||
import { StreamEventManager } from './StreamEventManager'
|
||||
import { type TagConfig, TagExtractor } from './tagExtraction'
|
||||
import { ToolExecutor } from './ToolExecutor'
|
||||
import { PromptToolUseConfig, ToolUseResult } from './type'
|
||||
import type { PromptToolUseConfig, ToolUseResult } from './type'
|
||||
|
||||
/**
|
||||
* Tool-use tag configuration
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { ToolSet } from 'ai'
|
||||
import type { ToolSet } from 'ai'
|
||||
|
||||
import { AiRequestContext } from '../..'
|
||||
import type { AiRequestContext } from '../..'
|
||||
|
||||
/**
|
||||
* Parse result type
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
import { anthropic } from '@ai-sdk/anthropic'
|
||||
import { google } from '@ai-sdk/google'
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
import { InferToolInput, InferToolOutput, type Tool } from 'ai'
|
||||
import type { InferToolInput, InferToolOutput } from 'ai'
|
||||
import { type Tool } from 'ai'
|
||||
|
||||
import { ProviderOptionsMap } from '../../../options/types'
|
||||
import { OpenRouterSearchConfig } from './openrouter'
|
||||
import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import type { ProviderOptionsMap } from '../../../options/types'
|
||||
import type { OpenRouterSearchConfig } from './openrouter'
|
||||
|
||||
/**
|
||||
* Extract parameter types from the AI SDK tool helpers to ensure type safety.
|
||||
@@ -94,3 +96,56 @@ export type WebSearchToolInputSchema = {
|
||||
google: InferToolInput<GoogleWebSearchTool>
|
||||
'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
|
||||
}
|
||||
|
||||
export const switchWebSearchTool = (providerId: string, config: WebSearchPluginConfig, params: any) => {
|
||||
switch (providerId) {
|
||||
case 'openai': {
|
||||
if (config.openai) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = openai.tools.webSearch(config.openai)
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'openai-chat': {
|
||||
if (config['openai-chat']) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'anthropic': {
|
||||
if (config.anthropic) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'google': {
|
||||
// case 'google-vertex':
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = google.tools.googleSearch(config.google || {})
|
||||
break
|
||||
}
|
||||
|
||||
case 'xai': {
|
||||
if (config.xai) {
|
||||
const searchOptions = createXaiOptions({
|
||||
searchParameters: { ...config.xai, mode: 'on' }
|
||||
})
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'openrouter': {
|
||||
if (config.openrouter) {
|
||||
const searchOptions = createOpenRouterOptions(config.openrouter)
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
@@ -2,14 +2,11 @@
|
||||
* Web Search Plugin
|
||||
* Provides unified web search capabilities across multiple AI providers
|
||||
*/
|
||||
import { anthropic } from '@ai-sdk/anthropic'
|
||||
import { google } from '@ai-sdk/google'
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
|
||||
import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import { definePlugin } from '../../'
|
||||
import type { AiRequestContext } from '../../types'
|
||||
import { DEFAULT_WEB_SEARCH_CONFIG, WebSearchPluginConfig } from './helper'
|
||||
import type { WebSearchPluginConfig } from './helper'
|
||||
import { DEFAULT_WEB_SEARCH_CONFIG, switchWebSearchTool } from './helper'
|
||||
|
||||
/**
|
||||
* Web search plugin
|
||||
@@ -23,56 +20,13 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
|
||||
|
||||
transformParams: async (params: any, context: AiRequestContext) => {
|
||||
const { providerId } = context
|
||||
switch (providerId) {
|
||||
case 'openai': {
|
||||
if (config.openai) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = openai.tools.webSearch(config.openai)
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'openai-chat': {
|
||||
if (config['openai-chat']) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
|
||||
}
|
||||
break
|
||||
}
|
||||
switchWebSearchTool(providerId, config, params)
|
||||
|
||||
case 'anthropic': {
|
||||
if (config.anthropic) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'google': {
|
||||
// case 'google-vertex':
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = google.tools.googleSearch(config.google || {})
|
||||
break
|
||||
}
|
||||
|
||||
case 'xai': {
|
||||
if (config.xai) {
|
||||
const searchOptions = createXaiOptions({
|
||||
searchParameters: { ...config.xai, mode: 'on' }
|
||||
})
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case 'openrouter': {
|
||||
if (config.openrouter) {
|
||||
const searchOptions = createOpenRouterOptions(config.openrouter)
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
break
|
||||
}
|
||||
if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
|
||||
// params.model.provider looks like 'cherryin.google'; re-route using the sub-provider id
|
||||
const _providerId = params.model.provider.split('.')[1]
|
||||
switchWebSearchTool(_providerId, config, params)
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { AiPlugin, AiRequestContext } from './types'
|
||||
import type { AiPlugin, AiRequestContext } from './types'
|
||||
|
||||
/**
|
||||
* 插件管理器
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* Example: aihubmix:anthropic:claude-3.5-sonnet
|
||||
*/
|
||||
|
||||
import { ProviderV2 } from '@ai-sdk/provider'
|
||||
import type { ProviderV2 } from '@ai-sdk/provider'
|
||||
import { customProvider } from 'ai'
|
||||
|
||||
import { globalRegistryManagement } from './RegistryManagement'
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* Built on the AI SDK's native createProviderRegistry
|
||||
*/
|
||||
|
||||
import { EmbeddingModelV2, ImageModelV2, LanguageModelV2, ProviderV2 } from '@ai-sdk/provider'
|
||||
import type { EmbeddingModelV2, ImageModelV2, LanguageModelV2, ProviderV2 } from '@ai-sdk/provider'
|
||||
import { createProviderRegistry, type ProviderRegistryProvider } from 'ai'
|
||||
|
||||
type PROVIDERS = Record<string, ProviderV2>
|
||||
|
||||
@@ -10,10 +10,12 @@ import { createGoogleGenerativeAI } from '@ai-sdk/google'
|
||||
import { createHuggingFace } from '@ai-sdk/huggingface'
|
||||
import { createOpenAI, type OpenAIProviderSettings } from '@ai-sdk/openai'
|
||||
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'
|
||||
import { LanguageModelV2 } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2 } from '@ai-sdk/provider'
|
||||
import { createXai } from '@ai-sdk/xai'
|
||||
import { type CherryInProviderSettings, createCherryIn } from '@cherrystudio/ai-sdk-provider'
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider'
|
||||
import { customProvider, Provider } from 'ai'
|
||||
import type { Provider } from 'ai'
|
||||
import { customProvider } from 'ai'
|
||||
import * as z from 'zod'
|
||||
|
||||
/**
|
||||
@@ -30,6 +32,8 @@ export const baseProviderIds = [
|
||||
'azure-responses',
|
||||
'deepseek',
|
||||
'openrouter',
|
||||
'cherryin',
|
||||
'cherryin-chat',
|
||||
'huggingface'
|
||||
] as const
|
||||
|
||||
@@ -135,6 +139,26 @@ export const baseProviders = [
|
||||
creator: createOpenRouter,
|
||||
supportsImageGeneration: true
|
||||
},
|
||||
{
|
||||
id: 'cherryin',
|
||||
name: 'CherryIN',
|
||||
creator: createCherryIn,
|
||||
supportsImageGeneration: true
|
||||
},
|
||||
{
|
||||
id: 'cherryin-chat',
|
||||
name: 'CherryIN Chat',
|
||||
creator: (options: CherryInProviderSettings) => {
|
||||
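// 'cherryin-chat' overrides languageModel so requests always use the provider's OpenAI chat
// entrypoint instead of the default routed / responses model.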
const provider = createCherryIn(options)
|
||||
return customProvider({
|
||||
fallbackProvider: {
|
||||
...provider,
|
||||
languageModel: (modelId: string) => provider.chat(modelId)
|
||||
}
|
||||
})
|
||||
},
|
||||
supportsImageGeneration: true
|
||||
},
|
||||
{
|
||||
id: 'huggingface',
|
||||
name: 'HuggingFace',
|
||||
|
||||
@@ -4,7 +4,7 @@ import { type DeepSeekProviderSettings } from '@ai-sdk/deepseek'
|
||||
import { type GoogleGenerativeAIProviderSettings } from '@ai-sdk/google'
|
||||
import { type OpenAIProviderSettings } from '@ai-sdk/openai'
|
||||
import { type OpenAICompatibleProviderSettings } from '@ai-sdk/openai-compatible'
|
||||
import {
|
||||
import type {
|
||||
EmbeddingModelV2 as EmbeddingModel,
|
||||
ImageModelV2 as ImageModel,
|
||||
LanguageModelV2 as LanguageModel,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import type { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import { experimental_generateImage as aiGenerateImage, NoImageGeneratedError } from 'ai'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
|
||||
@@ -2,12 +2,12 @@
|
||||
* Runtime executor
|
||||
* Focused on plugin-based handling of AI calls
|
||||
*/
|
||||
import { ImageModelV2, LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { ImageModelV2, LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModel } from 'ai'
|
||||
import {
|
||||
experimental_generateImage as _generateImage,
|
||||
generateObject as _generateObject,
|
||||
generateText as _generateText,
|
||||
LanguageModel,
|
||||
streamObject as _streamObject,
|
||||
streamText as _streamText
|
||||
} from 'ai'
|
||||
|
||||
@@ -11,7 +11,7 @@ export type { RuntimeConfig } from './types'
|
||||
|
||||
// === Convenience factory functions ===
|
||||
|
||||
import { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
import type { LanguageModelV2Middleware } from '@ai-sdk/provider'
|
||||
|
||||
import { type AiPlugin } from '../plugins'
|
||||
import { type ProviderId, type ProviderSettingsMap } from '../providers/types'
|
||||
|
||||
@@ -1,6 +1,13 @@
|
||||
/* eslint-disable @eslint-react/naming-convention/context-name */
|
||||
import { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import { experimental_generateImage, generateObject, generateText, LanguageModel, streamObject, streamText } from 'ai'
|
||||
import type { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import type {
|
||||
experimental_generateImage,
|
||||
generateObject,
|
||||
generateText,
|
||||
LanguageModel,
|
||||
streamObject,
|
||||
streamText
|
||||
} from 'ai'
|
||||
|
||||
import { type AiPlugin, createContext, PluginManager } from '../plugins'
|
||||
import { type ProviderId } from '../providers/types'
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/**
|
||||
* Type definitions for the Runtime layer
|
||||
*/
|
||||
import { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import { experimental_generateImage, generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
import type { ImageModelV2 } from '@ai-sdk/provider'
|
||||
import type { experimental_generateImage, generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
|
||||
import { type ModelConfig } from '../models/types'
|
||||
import { type AiPlugin } from '../plugins'
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Extension, Node } from '@tiptap/core'
|
||||
import type { Node } from '@tiptap/core'
|
||||
import { Extension } from '@tiptap/core'
|
||||
|
||||
import type { TableCellOptions } from '../cell/index.js'
|
||||
import { TableCell } from '../cell/index.js'
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
|
||||
import { ReadableSpan } from '@opentelemetry/sdk-trace-base'
|
||||
import type { ReadableSpan } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
import { SpanEntity } from '../types/config'
|
||||
import type { SpanEntity } from '../types/config'
|
||||
|
||||
/**
|
||||
* convert ReadableSpan to SpanEntity
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { ReadableSpan } from '@opentelemetry/sdk-trace-base'
|
||||
import type { ReadableSpan } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
export interface TraceCache {
|
||||
createSpan: (span: ReadableSpan) => void
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { ExportResult, ExportResultCode } from '@opentelemetry/core'
|
||||
import { ReadableSpan, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import type { ExportResult } from '@opentelemetry/core'
|
||||
import { ExportResultCode } from '@opentelemetry/core'
|
||||
import type { ReadableSpan, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
export type SaveFunction = (spans: ReadableSpan[]) => Promise<void>
|
||||
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import { Context, trace } from '@opentelemetry/api'
|
||||
import { BatchSpanProcessor, BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import type { Context } from '@opentelemetry/api'
|
||||
import { trace } from '@opentelemetry/api'
|
||||
import type { BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
import { TraceCache } from '../core/traceCache'
|
||||
import type { TraceCache } from '../core/traceCache'
|
||||
|
||||
export class CacheBatchSpanProcessor extends BatchSpanProcessor {
|
||||
private cache: TraceCache
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Context } from '@opentelemetry/api'
|
||||
import { BatchSpanProcessor, BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { EventEmitter } from 'stream'
|
||||
import type { Context } from '@opentelemetry/api'
|
||||
import type { BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
import type { EventEmitter } from 'stream'
|
||||
|
||||
import { convertSpanToSpanEntity } from '../core/spanConvert'
|
||||
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import { Context, trace } from '@opentelemetry/api'
|
||||
import { BatchSpanProcessor, BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import type { Context } from '@opentelemetry/api'
|
||||
import { trace } from '@opentelemetry/api'
|
||||
import type { BufferConfig, ReadableSpan, Span, SpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
export type SpanFunction = (span: ReadableSpan) => void
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Link } from '@opentelemetry/api'
|
||||
import { TimedEvent } from '@opentelemetry/sdk-trace-base'
|
||||
import type { Link } from '@opentelemetry/api'
|
||||
import type { TimedEvent } from '@opentelemetry/sdk-trace-base'
|
||||
|
||||
export type AttributeValue =
|
||||
| string
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
import { trace, Tracer } from '@opentelemetry/api'
|
||||
import type { Tracer } from '@opentelemetry/api'
|
||||
import { trace } from '@opentelemetry/api'
|
||||
import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks'
|
||||
import { W3CTraceContextPropagator } from '@opentelemetry/core'
|
||||
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http'
|
||||
import { BatchSpanProcessor, ConsoleSpanExporter, SpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
import type { SpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
import { BatchSpanProcessor, ConsoleSpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node'
|
||||
|
||||
import { defaultConfig, TraceConfig } from '../trace-core/types/config'
|
||||
import type { TraceConfig } from '../trace-core/types/config'
|
||||
import { defaultConfig } from '../trace-core/types/config'
|
||||
|
||||
export class NodeTracer {
|
||||
private static provider: NodeTracerProvider
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Context, ContextManager, ROOT_CONTEXT } from '@opentelemetry/api'
|
||||
import type { Context, ContextManager } from '@opentelemetry/api'
|
||||
import { ROOT_CONTEXT } from '@opentelemetry/api'
|
||||
|
||||
export class TopicContextManager implements ContextManager {
|
||||
private topicContextStack: Map<string, Context[]>
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Context, context } from '@opentelemetry/api'
|
||||
import type { Context } from '@opentelemetry/api'
|
||||
import { context } from '@opentelemetry/api'
|
||||
|
||||
const originalPromise = globalThis.Promise
|
||||
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import { W3CTraceContextPropagator } from '@opentelemetry/core'
|
||||
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http'
|
||||
import { BatchSpanProcessor, ConsoleSpanExporter, SpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
import type { SpanProcessor } from '@opentelemetry/sdk-trace-base'
|
||||
import { BatchSpanProcessor, ConsoleSpanExporter } from '@opentelemetry/sdk-trace-base'
|
||||
import { WebTracerProvider } from '@opentelemetry/sdk-trace-web'
|
||||
|
||||
import { defaultConfig, TraceConfig } from '../trace-core/types/config'
|
||||
import type { TraceConfig } from '../trace-core/types/config'
|
||||
import { defaultConfig } from '../trace-core/types/config'
|
||||
import { TopicContextManager } from './TopicContextManager'
|
||||
|
||||
export const contextManager = new TopicContextManager()
|
||||
|
||||
@@ -189,6 +189,7 @@ export enum IpcChannel {
|
||||
Fs_ReadText = 'fs:readText',
|
||||
File_OpenWithRelativePath = 'file:openWithRelativePath',
|
||||
File_IsTextFile = 'file:isTextFile',
|
||||
File_ListDirectory = 'file:listDirectory',
|
||||
File_GetDirectoryStructure = 'file:getDirectoryStructure',
|
||||
File_CheckFileName = 'file:checkFileName',
|
||||
File_ValidateNotesDirectory = 'file:validateNotesDirectory',
|
||||
@@ -322,6 +323,7 @@ export enum IpcChannel {
|
||||
ApiServer_Stop = 'api-server:stop',
|
||||
ApiServer_Restart = 'api-server:restart',
|
||||
ApiServer_GetStatus = 'api-server:get-status',
|
||||
ApiServer_Ready = 'api-server:ready',
|
||||
// NOTE: This API is not used.
|
||||
ApiServer_GetConfig = 'api-server:get-config',
|
||||
|
||||
@@ -363,5 +365,12 @@ export enum IpcChannel {
|
||||
ClaudeCodePlugin_ListInstalled = 'claudeCodePlugin:list-installed',
|
||||
ClaudeCodePlugin_InvalidateCache = 'claudeCodePlugin:invalidate-cache',
|
||||
ClaudeCodePlugin_ReadContent = 'claudeCodePlugin:read-content',
|
||||
ClaudeCodePlugin_WriteContent = 'claudeCodePlugin:write-content'
|
||||
ClaudeCodePlugin_WriteContent = 'claudeCodePlugin:write-content',
|
||||
|
||||
// WebSocket
|
||||
WebSocket_Start = 'webSocket:start',
|
||||
WebSocket_Stop = 'webSocket:stop',
|
||||
WebSocket_Status = 'webSocket:status',
|
||||
WebSocket_SendFile = 'webSocket:send-file',
|
||||
WebSocket_GetAllCandidates = 'webSocket:get-all-candidates'
|
||||
}
|
||||
|
||||
@@ -9,9 +9,9 @@
|
||||
*/
|
||||
|
||||
import Anthropic from '@anthropic-ai/sdk'
|
||||
import { TextBlockParam } from '@anthropic-ai/sdk/resources'
|
||||
import type { TextBlockParam } from '@anthropic-ai/sdk/resources'
|
||||
import { loggerService } from '@logger'
|
||||
import { Provider } from '@types'
|
||||
import type { Provider } from '@types'
|
||||
import type { ModelMessage } from 'ai'
|
||||
|
||||
const logger = loggerService.withContext('anthropic-sdk')
|
||||
|
||||
@@ -470,3 +470,6 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
|
||||
})
|
||||
}
|
||||
]
|
||||
|
||||
// resources/scripts should be maintained manually
|
||||
export const HOME_CHERRY_DIR = '.cherrystudio'
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { ProcessingStatus } from '@types'
|
||||
import type { ProcessingStatus } from '@types'
|
||||
|
||||
export type LoaderReturn = {
|
||||
entriesAdded: number
|
||||
@@ -31,3 +31,16 @@ export type WebviewKeyEvent = {
|
||||
shift: boolean
|
||||
alt: boolean
|
||||
}
|
||||
|
||||
export interface WebSocketStatusResponse {
|
||||
isRunning: boolean
|
||||
port?: number
|
||||
ip?: string
|
||||
clientConnected: boolean
|
||||
}
|
||||
|
||||
export interface WebSocketCandidatesResponse {
|
||||
host: string
|
||||
interface: string
|
||||
priority: number
|
||||
}
|
||||
|
||||
1
resources/database/drizzle/0002_wealthy_naoko.sql
Normal file
1
resources/database/drizzle/0002_wealthy_naoko.sql
Normal file
@@ -0,0 +1 @@
|
||||
ALTER TABLE `sessions` ADD `slash_commands` text;
|
||||
346
resources/database/drizzle/meta/0002_snapshot.json
Normal file
346
resources/database/drizzle/meta/0002_snapshot.json
Normal file
@@ -0,0 +1,346 @@
|
||||
{
|
||||
"version": "6",
|
||||
"dialect": "sqlite",
|
||||
"id": "0cf3d79e-69bf-4dba-8df4-996b9b67d2e8",
|
||||
"prevId": "dabab6db-a2cd-4e96-b06e-6cb87d445a87",
|
||||
"tables": {
|
||||
"agents": {
|
||||
"name": "agents",
|
||||
"columns": {
|
||||
"id": {
|
||||
"name": "id",
|
||||
"type": "text",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"type": {
|
||||
"name": "type",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"name": {
|
||||
"name": "name",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"description": {
|
||||
"name": "description",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"accessible_paths": {
|
||||
"name": "accessible_paths",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"instructions": {
|
||||
"name": "instructions",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"model": {
|
||||
"name": "model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"plan_model": {
|
||||
"name": "plan_model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"small_model": {
|
||||
"name": "small_model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"mcps": {
|
||||
"name": "mcps",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"allowed_tools": {
|
||||
"name": "allowed_tools",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"configuration": {
|
||||
"name": "configuration",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"created_at": {
|
||||
"name": "created_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"updated_at": {
|
||||
"name": "updated_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
},
|
||||
"session_messages": {
|
||||
"name": "session_messages",
|
||||
"columns": {
|
||||
"id": {
|
||||
"name": "id",
|
||||
"type": "integer",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": true
|
||||
},
|
||||
"session_id": {
|
||||
"name": "session_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"role": {
|
||||
"name": "role",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"content": {
|
||||
"name": "content",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"agent_session_id": {
|
||||
"name": "agent_session_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false,
|
||||
"default": "''"
|
||||
},
|
||||
"metadata": {
|
||||
"name": "metadata",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"created_at": {
|
||||
"name": "created_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"updated_at": {
|
||||
"name": "updated_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
},
|
||||
"migrations": {
|
||||
"name": "migrations",
|
||||
"columns": {
|
||||
"version": {
|
||||
"name": "version",
|
||||
"type": "integer",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"tag": {
|
||||
"name": "tag",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"executed_at": {
|
||||
"name": "executed_at",
|
||||
"type": "integer",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
},
|
||||
"sessions": {
|
||||
"name": "sessions",
|
||||
"columns": {
|
||||
"id": {
|
||||
"name": "id",
|
||||
"type": "text",
|
||||
"primaryKey": true,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"agent_type": {
|
||||
"name": "agent_type",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"agent_id": {
|
||||
"name": "agent_id",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"name": {
|
||||
"name": "name",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"description": {
|
||||
"name": "description",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"accessible_paths": {
|
||||
"name": "accessible_paths",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"instructions": {
|
||||
"name": "instructions",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"model": {
|
||||
"name": "model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"plan_model": {
|
||||
"name": "plan_model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"small_model": {
|
||||
"name": "small_model",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"mcps": {
|
||||
"name": "mcps",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"allowed_tools": {
|
||||
"name": "allowed_tools",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"slash_commands": {
|
||||
"name": "slash_commands",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"configuration": {
|
||||
"name": "configuration",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": false,
|
||||
"autoincrement": false
|
||||
},
|
||||
"created_at": {
|
||||
"name": "created_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
},
|
||||
"updated_at": {
|
||||
"name": "updated_at",
|
||||
"type": "text",
|
||||
"primaryKey": false,
|
||||
"notNull": true,
|
||||
"autoincrement": false
|
||||
}
|
||||
},
|
||||
"indexes": {},
|
||||
"foreignKeys": {},
|
||||
"compositePrimaryKeys": {},
|
||||
"uniqueConstraints": {},
|
||||
"checkConstraints": {}
|
||||
}
|
||||
},
|
||||
"views": {},
|
||||
"enums": {},
|
||||
"_meta": {
|
||||
"schemas": {},
|
||||
"tables": {},
|
||||
"columns": {}
|
||||
},
|
||||
"internal": {
|
||||
"indexes": {}
|
||||
}
|
||||
}
|
||||
@@ -15,6 +15,13 @@
|
||||
"when": 1758187378775,
|
||||
"tag": "0001_woozy_captain_flint",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 2,
|
||||
"version": "6",
|
||||
"when": 1762526423527,
|
||||
"tag": "0002_wealthy_naoko",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
;(() => {
|
||||
let messageId = 0
|
||||
const pendingCalls = new Map()
|
||||
|
||||
function api(method, ...args) {
|
||||
const id = messageId++
|
||||
return new Promise((resolve, reject) => {
|
||||
pendingCalls.set(id, { resolve, reject })
|
||||
window.parent.postMessage({ id, type: 'api-call', method, args }, '*')
|
||||
})
|
||||
}
|
||||
|
||||
window.addEventListener('message', (event) => {
|
||||
if (event.data.type === 'api-response') {
|
||||
const { id, result, error } = event.data
|
||||
const pendingCall = pendingCalls.get(id)
|
||||
if (pendingCall) {
|
||||
if (error) {
|
||||
pendingCall.reject(new Error(error))
|
||||
} else {
|
||||
pendingCall.resolve(result)
|
||||
}
|
||||
pendingCalls.delete(id)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
window.api = new Proxy(
|
||||
{},
|
||||
{
|
||||
get: (target, prop) => {
|
||||
return (...args) => api(prop, ...args)
|
||||
}
|
||||
}
|
||||
)
|
||||
})()
|
||||
@@ -1,5 +0,0 @@
|
||||
export function getQueryParam(paramName) {
|
||||
const url = new URL(window.location.href)
|
||||
const params = new URLSearchParams(url.search)
|
||||
return params.get(paramName)
|
||||
}
|
||||
@@ -7,7 +7,7 @@ const { downloadWithRedirects } = require('./download')
|
||||
|
||||
// Base URL for downloading bun binaries
|
||||
const BUN_RELEASE_BASE_URL = 'https://gitcode.com/CherryHQ/bun/releases/download'
|
||||
const DEFAULT_BUN_VERSION = '1.2.17' // Default fallback version
|
||||
const DEFAULT_BUN_VERSION = '1.3.1' // Default fallback version
|
||||
|
||||
// Mapping of platform+arch to binary package name
|
||||
const BUN_PACKAGES = {
|
||||
|
||||
@@ -7,28 +7,29 @@ const { downloadWithRedirects } = require('./download')
|
||||
|
||||
// Base URL for downloading uv binaries
|
||||
const UV_RELEASE_BASE_URL = 'https://gitcode.com/CherryHQ/uv/releases/download'
|
||||
const DEFAULT_UV_VERSION = '0.7.13'
|
||||
const DEFAULT_UV_VERSION = '0.9.5'
|
||||
|
||||
// Mapping of platform+arch to binary package name
|
||||
const UV_PACKAGES = {
|
||||
'darwin-arm64': 'uv-aarch64-apple-darwin.zip',
|
||||
'darwin-x64': 'uv-x86_64-apple-darwin.zip',
|
||||
'darwin-arm64': 'uv-aarch64-apple-darwin.tar.gz',
|
||||
'darwin-x64': 'uv-x86_64-apple-darwin.tar.gz',
|
||||
'win32-arm64': 'uv-aarch64-pc-windows-msvc.zip',
|
||||
'win32-ia32': 'uv-i686-pc-windows-msvc.zip',
|
||||
'win32-x64': 'uv-x86_64-pc-windows-msvc.zip',
|
||||
'linux-arm64': 'uv-aarch64-unknown-linux-gnu.zip',
|
||||
'linux-ia32': 'uv-i686-unknown-linux-gnu.zip',
|
||||
'linux-ppc64': 'uv-powerpc64-unknown-linux-gnu.zip',
|
||||
'linux-ppc64le': 'uv-powerpc64le-unknown-linux-gnu.zip',
|
||||
'linux-s390x': 'uv-s390x-unknown-linux-gnu.zip',
|
||||
'linux-x64': 'uv-x86_64-unknown-linux-gnu.zip',
|
||||
'linux-armv7l': 'uv-armv7-unknown-linux-gnueabihf.zip',
|
||||
'linux-arm64': 'uv-aarch64-unknown-linux-gnu.tar.gz',
|
||||
'linux-ia32': 'uv-i686-unknown-linux-gnu.tar.gz',
|
||||
'linux-ppc64': 'uv-powerpc64-unknown-linux-gnu.tar.gz',
|
||||
'linux-ppc64le': 'uv-powerpc64le-unknown-linux-gnu.tar.gz',
|
||||
'linux-riscv64': 'uv-riscv64gc-unknown-linux-gnu.tar.gz',
|
||||
'linux-s390x': 'uv-s390x-unknown-linux-gnu.tar.gz',
|
||||
'linux-x64': 'uv-x86_64-unknown-linux-gnu.tar.gz',
|
||||
'linux-armv7l': 'uv-armv7-unknown-linux-gnueabihf.tar.gz',
|
||||
// MUSL variants
|
||||
'linux-musl-arm64': 'uv-aarch64-unknown-linux-musl.zip',
|
||||
'linux-musl-ia32': 'uv-i686-unknown-linux-musl.zip',
|
||||
'linux-musl-x64': 'uv-x86_64-unknown-linux-musl.zip',
|
||||
'linux-musl-armv6l': 'uv-arm-unknown-linux-musleabihf.zip',
|
||||
'linux-musl-armv7l': 'uv-armv7-unknown-linux-musleabihf.zip'
|
||||
'linux-musl-arm64': 'uv-aarch64-unknown-linux-musl.tar.gz',
|
||||
'linux-musl-ia32': 'uv-i686-unknown-linux-musl.tar.gz',
|
||||
'linux-musl-x64': 'uv-x86_64-unknown-linux-musl.tar.gz',
|
||||
'linux-musl-armv6l': 'uv-arm-unknown-linux-musleabihf.tar.gz',
|
||||
'linux-musl-armv7l': 'uv-armv7-unknown-linux-musleabihf.tar.gz'
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -56,6 +57,7 @@ async function downloadUvBinary(platform, arch, version = DEFAULT_UV_VERSION, is
|
||||
const downloadUrl = `${UV_RELEASE_BASE_URL}/${version}/${packageName}`
|
||||
const tempdir = os.tmpdir()
|
||||
const tempFilename = path.join(tempdir, packageName)
|
||||
const isTarGz = packageName.endsWith('.tar.gz')
|
||||
|
||||
try {
|
||||
console.log(`Downloading uv ${version} for ${platformKey}...`)
|
||||
@@ -65,34 +67,58 @@ async function downloadUvBinary(platform, arch, version = DEFAULT_UV_VERSION, is
|
||||
|
||||
console.log(`Extracting ${packageName} to ${binDir}...`)
|
||||
|
||||
const zip = new StreamZip.async({ file: tempFilename })
|
||||
if (isTarGz) {
|
||||
// Use tar command to extract tar.gz files (macOS and Linux)
|
||||
const tempExtractDir = path.join(tempdir, `uv-extract-${Date.now()}`)
|
||||
fs.mkdirSync(tempExtractDir, { recursive: true })
|
||||
|
||||
// Get all entries in the zip file
|
||||
const entries = await zip.entries()
|
||||
execSync(`tar -xzf "${tempFilename}" -C "${tempExtractDir}"`, { stdio: 'inherit' })
|
||||
|
||||
// Extract files directly to binDir, flattening the directory structure
|
||||
for (const entry of Object.values(entries)) {
|
||||
if (!entry.isDirectory) {
|
||||
// Get just the filename without path
|
||||
const filename = path.basename(entry.name)
|
||||
const outputPath = path.join(binDir, filename)
|
||||
|
||||
console.log(`Extracting ${entry.name} -> ${filename}`)
|
||||
await zip.extract(entry.name, outputPath)
|
||||
// Make executable files executable on Unix-like systems
|
||||
if (platform !== 'win32') {
|
||||
try {
|
||||
// Find all files in the extracted directory and move them to binDir
|
||||
const findAndMoveFiles = (dir) => {
|
||||
const entries = fs.readdirSync(dir, { withFileTypes: true })
|
||||
for (const entry of entries) {
|
||||
const fullPath = path.join(dir, entry.name)
|
||||
if (entry.isDirectory()) {
|
||||
findAndMoveFiles(fullPath)
|
||||
} else {
|
||||
const filename = path.basename(entry.name)
|
||||
const outputPath = path.join(binDir, filename)
|
||||
fs.copyFileSync(fullPath, outputPath)
|
||||
console.log(`Extracted ${entry.name} -> ${outputPath}`)
|
||||
// Make executable on Unix-like systems
|
||||
fs.chmodSync(outputPath, 0o755)
|
||||
} catch (chmodError) {
|
||||
console.error(`Warning: Failed to set executable permissions on ${filename}`)
|
||||
return 102
|
||||
}
|
||||
}
|
||||
console.log(`Extracted ${entry.name} -> ${outputPath}`)
|
||||
}
|
||||
|
||||
findAndMoveFiles(tempExtractDir)
|
||||
|
||||
// Clean up temporary extraction directory
|
||||
fs.rmSync(tempExtractDir, { recursive: true })
|
||||
} else {
|
||||
// Use StreamZip for zip files (Windows)
|
||||
const zip = new StreamZip.async({ file: tempFilename })
|
||||
|
||||
// Get all entries in the zip file
|
||||
const entries = await zip.entries()
|
||||
|
||||
// Extract files directly to binDir, flattening the directory structure
|
||||
for (const entry of Object.values(entries)) {
|
||||
if (!entry.isDirectory) {
|
||||
// Get just the filename without path
|
||||
const filename = path.basename(entry.name)
|
||||
const outputPath = path.join(binDir, filename)
|
||||
|
||||
console.log(`Extracting ${entry.name} -> ${filename}`)
|
||||
await zip.extract(entry.name, outputPath)
|
||||
console.log(`Extracted ${entry.name} -> ${outputPath}`)
|
||||
}
|
||||
}
|
||||
|
||||
await zip.close()
|
||||
}
|
||||
|
||||
await zip.close()
|
||||
fs.unlinkSync(tempFilename)
|
||||
console.log(`Successfully installed uv ${version} for ${platform}-${arch}`)
|
||||
return 0
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
const https = require('https')
|
||||
const { loggerService } = require('@logger')
|
||||
|
||||
const logger = loggerService.withContext('IpService')
|
||||
|
||||
/**
|
||||
* Get the country of the user's IP address
|
||||
* @returns {Promise<string>} The country code, defaulting to 'CN'
|
||||
*/
|
||||
async function getIpCountry() {
|
||||
return new Promise((resolve) => {
|
||||
// Add timeout control
|
||||
const timeout = setTimeout(() => {
|
||||
logger.info('IP Address Check Timeout, default to China Mirror')
|
||||
resolve('CN')
|
||||
}, 5000)
|
||||
|
||||
const options = {
|
||||
hostname: 'ipinfo.io',
|
||||
path: '/json',
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'User-Agent':
|
||||
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
|
||||
'Accept-Language': 'en-US,en;q=0.9'
|
||||
}
|
||||
}
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
clearTimeout(timeout)
|
||||
let data = ''
|
||||
|
||||
res.on('data', (chunk) => {
|
||||
data += chunk
|
||||
})
|
||||
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const parsed = JSON.parse(data)
|
||||
const country = parsed.country || 'CN'
|
||||
logger.info(`Detected user IP address country: ${country}`)
|
||||
resolve(country)
|
||||
} catch (error) {
|
||||
logger.error('Failed to parse IP address information:', error.message)
|
||||
resolve('CN')
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
req.on('error', (error) => {
|
||||
clearTimeout(timeout)
|
||||
logger.error('Failed to get IP address information:', error.message)
|
||||
resolve('CN')
|
||||
})
|
||||
|
||||
req.end()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether the user is located in China
|
||||
* @returns {Promise<boolean>} true if the user is in China, otherwise false
|
||||
*/
|
||||
async function isUserInChina() {
|
||||
const country = await getIpCountry()
|
||||
return country.toLowerCase() === 'cn'
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the appropriate npm registry URL based on the user's location
|
||||
* @returns {Promise<string>} The npm registry URL
|
||||
*/
|
||||
async function getNpmRegistryUrl() {
|
||||
const inChina = await isUserInChina()
|
||||
if (inChina) {
|
||||
logger.info('User in China, using Taobao npm mirror')
|
||||
return 'https://registry.npmmirror.com'
|
||||
} else {
|
||||
logger.info('User not in China, using default npm mirror')
|
||||
return 'https://registry.npmjs.org'
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getIpCountry,
|
||||
isUserInChina,
|
||||
getNpmRegistryUrl
|
||||
}
|
||||
@@ -18,8 +18,10 @@ import { sortedObjectByKeys } from './sort'
// ========== SCRIPT CONFIGURATION AREA - MODIFY SETTINGS HERE ==========
const SCRIPT_CONFIG = {
  // 🔧 Concurrency Control Configuration
  MAX_CONCURRENT_TRANSLATIONS: 5, // Max concurrent requests (Make sure the concurrency level does not exceed your provider's limits.)
  TRANSLATION_DELAY_MS: 100, // Delay between requests to avoid rate limiting (Recommended: 100-500ms, Range: 0-5000ms)
  MAX_CONCURRENT_TRANSLATIONS: process.env.TRANSLATION_MAX_CONCURRENT_REQUESTS
    ? parseInt(process.env.TRANSLATION_MAX_CONCURRENT_REQUESTS)
    : 5, // Max concurrent requests (Make sure the concurrency level does not exceed your provider's limits.)
  TRANSLATION_DELAY_MS: process.env.TRANSLATION_DELAY_MS ? parseInt(process.env.TRANSLATION_DELAY_MS) : 500, // Delay between requests to avoid rate limiting (Recommended: 100-500ms, Range: 0-5000ms)

  // 🔑 API Configuration
  API_KEY: process.env.TRANSLATION_API_KEY || '', // API key from environment variable

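A minimal sketch of how these overrides could be parsed more defensively (illustrative only; the envInt helper and the clamp to the documented 0-5000ms range are assumptions, not part of this change):

// Hypothetical helper, not in the diff: guards against unset or malformed values,
// since parseInt('') or parseInt('abc') yields NaN.
function envInt(name: string, fallback: number, min = 0, max = 5000): number {
  const raw = process.env[name]
  const parsed = raw ? parseInt(raw, 10) : NaN
  return Number.isFinite(parsed) ? Math.min(Math.max(parsed, min), max) : fallback
}

// e.g. TRANSLATION_DELAY_MS unset or set to 'abc' -> 500; set to '250' -> 250
const delayMs = envInt('TRANSLATION_DELAY_MS', 500)
const maxConcurrent = envInt('TRANSLATION_MAX_CONCURRENT_REQUESTS', 5, 1, 20)
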
@@ -1,4 +1,4 @@
import { ApiServerConfig } from '@types'
import type { ApiServerConfig } from '@types'
import { v4 as uuidv4 } from 'uuid'

import { loggerService } from '../services/LoggerService'

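For context, a minimal sketch of why the type-only form matters (illustrative; describeServer is a hypothetical function, not from this diff): type-only imports are erased during compilation, so no runtime import of '@types' is emitted, while value imports such as uuid still are.

import type { ApiServerConfig } from '@types'
import { v4 as uuidv4 } from 'uuid'

// ApiServerConfig appears only in a type position, so the '@types' module is
// never loaded at runtime; uuid is, because v4() is actually called.
export function describeServer(config: ApiServerConfig): string {
  return `instance ${uuidv4()}: ${JSON.stringify(config)}`
}
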
@@ -1,5 +1,5 @@
import crypto from 'crypto'
import { NextFunction, Request, Response } from 'express'
import type { NextFunction, Request, Response } from 'express'

import { config } from '../config'

@@ -1,4 +1,4 @@
import { NextFunction, Request, Response } from 'express'
import type { NextFunction, Request, Response } from 'express'

import { loggerService } from '../../services/LoggerService'

@@ -1,4 +1,4 @@
import { Express } from 'express'
import type { Express } from 'express'
import swaggerJSDoc from 'swagger-jsdoc'
import swaggerUi from 'swagger-ui-express'

@@ -171,7 +171,7 @@ const swaggerOptions: swaggerJSDoc.Options = {
      }
    ]
  },
  apis: ['./src/main/apiServer/routes/*.ts', './src/main/apiServer/app.ts']
  apis: ['./src/main/apiServer/routes/**/*.ts', './src/main/apiServer/app.ts']
}

export function setupOpenAPIDocumentation(app: Express) {

@@ -1,7 +1,8 @@
import { loggerService } from '@logger'
import { AgentModelValidationError, agentService, sessionService } from '@main/services/agents'
import { ListAgentsResponse, type ReplaceAgentRequest, type UpdateAgentRequest } from '@types'
import { Request, Response } from 'express'
import type { ListAgentsResponse } from '@types'
import { type ReplaceAgentRequest, type UpdateAgentRequest } from '@types'
import type { Request, Response } from 'express'

import type { ValidationRequest } from '../validators/zodValidator'

@@ -2,7 +2,7 @@ import { loggerService } from '@logger'
import { MESSAGE_STREAM_TIMEOUT_MS } from '@main/apiServer/config/timeouts'
import { createStreamAbortController, STREAM_TIMEOUT_REASON } from '@main/apiServer/utils/createStreamAbortController'
import { agentService, sessionMessageService, sessionService } from '@main/services/agents'
import { Request, Response } from 'express'
import type { Request, Response } from 'express'

const logger = loggerService.withContext('ApiServerMessagesHandlers')

@@ -1,7 +1,8 @@
import { loggerService } from '@logger'
import { AgentModelValidationError, sessionMessageService, sessionService } from '@main/services/agents'
import { ListAgentSessionsResponse, type ReplaceSessionRequest, UpdateSessionResponse } from '@types'
import { Request, Response } from 'express'
import type { ListAgentSessionsResponse, UpdateSessionResponse } from '@types'
import { type ReplaceSessionRequest } from '@types'
import type { Request, Response } from 'express'

import type { ValidationRequest } from '../validators/zodValidator'

@@ -1,4 +1,4 @@
import { Request, Response } from 'express'
import type { Request, Response } from 'express'

import { agentService } from '../../../../services/agents'
import { loggerService } from '../../../../services/LoggerService'

@@ -1,5 +1,6 @@
import { NextFunction, Request, Response } from 'express'
import { ZodError, ZodType } from 'zod'
import type { NextFunction, Request, Response } from 'express'
import type { ZodType } from 'zod'
import { ZodError } from 'zod'

export interface ValidationRequest extends Request {
  validatedBody?: any

@@ -1,5 +1,6 @@
import { ChatCompletionCreateParams } from '@cherrystudio/openai/resources'
import express, { Request, Response } from 'express'
import type { ChatCompletionCreateParams } from '@cherrystudio/openai/resources'
import type { Request, Response } from 'express'
import express from 'express'

import { loggerService } from '../../services/LoggerService'
import {

@@ -1,4 +1,5 @@
import express, { Request, Response } from 'express'
import type { Request, Response } from 'express'
import express from 'express'

import { loggerService } from '../../services/LoggerService'
import { mcpApiService } from '../services/mcp'

@@ -1,7 +1,8 @@
import { MessageCreateParams } from '@anthropic-ai/sdk/resources'
import type { MessageCreateParams } from '@anthropic-ai/sdk/resources'
import { loggerService } from '@logger'
import { Provider } from '@types'
import express, { Request, Response } from 'express'
import type { Provider } from '@types'
import type { Request, Response } from 'express'
import express from 'express'

import { messagesService } from '../services/messages'
import { getProviderById, validateModelId } from '../utils'

@@ -1,5 +1,7 @@
import { ApiModelsFilterSchema, ApiModelsResponse } from '@types'
import express, { Request, Response } from 'express'
import type { ApiModelsResponse } from '@types'
import { ApiModelsFilterSchema } from '@types'
import type { Request, Response } from 'express'
import express from 'express'

import { loggerService } from '../../services/LoggerService'
import { modelsService } from '../services/models'

@@ -1,8 +1,10 @@
import { createServer } from 'node:http'

import { loggerService } from '@logger'
import { IpcChannel } from '@shared/IpcChannel'

import { agentService } from '../services/agents'
import { windowService } from '../services/WindowService'
import { app } from './app'
import { config } from './config'

@@ -43,6 +45,13 @@ export class ApiServer {
    return new Promise((resolve, reject) => {
      this.server!.listen(port, host, () => {
        logger.info('API server started', { host, port })

        // Notify renderer that API server is ready
        const mainWindow = windowService.getMainWindow()
        if (mainWindow && !mainWindow.isDestroyed()) {
          mainWindow.webContents.send(IpcChannel.ApiServer_Ready)
        }

        resolve()
      })

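A hypothetical renderer-side counterpart (not shown in this diff; it assumes the preload script exposes ipcRenderer as window.electron.ipcRenderer, which is a common Electron setup but an assumption here):

import { IpcChannel } from '@shared/IpcChannel'

// Assumed preload bridge; adjust to however the renderer actually receives IPC events.
window.electron.ipcRenderer.on(IpcChannel.ApiServer_Ready, () => {
  console.log('API server is ready; local API requests can be issued now.')
})
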
@@ -1,9 +1,10 @@
import OpenAI from '@cherrystudio/openai'
import { ChatCompletionCreateParams, ChatCompletionCreateParamsStreaming } from '@cherrystudio/openai/resources'
import { Provider } from '@types'
import type { ChatCompletionCreateParams, ChatCompletionCreateParamsStreaming } from '@cherrystudio/openai/resources'
import type { Provider } from '@types'

import { loggerService } from '../../services/LoggerService'
import { ModelValidationError, validateModelId } from '../utils'
import type { ModelValidationError } from '../utils'
import { validateModelId } from '../utils'

const logger = loggerService.withContext('ChatCompletionService')

@@ -1,16 +1,12 @@
import mcpService from '@main/services/MCPService'
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp'
import {
  isJSONRPCRequest,
  JSONRPCMessage,
  JSONRPCMessageSchema,
  MessageExtraInfo
} from '@modelcontextprotocol/sdk/types'
import { MCPServer } from '@types'
import type { JSONRPCMessage, MessageExtraInfo } from '@modelcontextprotocol/sdk/types'
import { isJSONRPCRequest, JSONRPCMessageSchema } from '@modelcontextprotocol/sdk/types'
import type { MCPServer } from '@types'
import { randomUUID } from 'crypto'
import { EventEmitter } from 'events'
import { Request, Response } from 'express'
import { IncomingMessage, ServerResponse } from 'http'
import type { Request, Response } from 'express'
import type { IncomingMessage, ServerResponse } from 'http'

import { loggerService } from '../../services/LoggerService'
import { getMcpServerById, getMCPServersFromRedux } from '../utils/mcp'

@@ -1,10 +1,10 @@
import Anthropic from '@anthropic-ai/sdk'
import { MessageCreateParams, MessageStreamEvent } from '@anthropic-ai/sdk/resources'
import type Anthropic from '@anthropic-ai/sdk'
import type { MessageCreateParams, MessageStreamEvent } from '@anthropic-ai/sdk/resources'
import { loggerService } from '@logger'
import anthropicService from '@main/services/AnthropicService'
import { buildClaudeCodeSystemMessage, getSdkClient } from '@shared/anthropic'
import { Provider } from '@types'
import { Response } from 'express'
import type { Provider } from '@types'
import type { Response } from 'express'

const logger = loggerService.withContext('MessagesService')
const EXCLUDED_FORWARD_HEADERS: ReadonlySet<string> = new Set([

Some files were not shown because too many files have changed in this diff.