Compare commits

...

10 Commits

Author SHA1 Message Date
beyondkmp
04cb96558b update libsql 2025-11-20 15:59:15 +08:00
亢奋猫
0f1a487bb0 refactor: simplify agent creation form (#11369)
* refactor(AgentModal): simplify agent type handling and update default values

- Removed unused agent type options and related logic.
- Updated default agent name from 'Claude Code' to 'Agent'.
- Adjusted padding in button styles and textarea rows for better UI consistency.
- Cleaned up unnecessary imports and code comments for improved readability.

* refactor(AgentSettings): clean up and enhance name setting component

- Removed unused imports and commented-out code in AgentModal and EssentialSettings.
- Updated NameSetting to include an emoji avatar picker for enhanced user experience.
- Simplified the logic for updating the agent's name and avatar.
- Improved overall readability and maintainability of the code.
2025-11-20 10:42:49 +08:00
亢奋猫
2df8bb58df fix: remove light background from MCP NpxUv install alerts (#11372)
- Remove 'banner' prop from Alert components in InstallNpxUv
- Set SettingContainer background to 'inherit' in MCP settings
- Fixes the light background color issue in the NpxUv interface

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude <noreply@anthropic.com>
2025-11-20 10:41:41 +08:00
defi-failure
62976f6fe0 refactor: namespace tool call ids with session id to prevent conflicts (#11319) 2025-11-20 10:35:11 +08:00
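A minimal TypeScript sketch of the namespacing this commit introduces (it mirrors the `buildNamespacedToolCallId` helper added in the diff further down; the session IDs here are illustrative):

```ts
// Tool call IDs are prefixed with the agent session ID so identical raw IDs
// from different sessions (e.g. "WebFetch_0") can no longer collide.
function buildNamespacedToolCallId(sessionId: string, rawToolCallId: string): string {
  return `${sessionId}:${rawToolCallId}`
}

// Illustrative values only; real session IDs come from the agent session.
const a = buildNamespacedToolCallId('session-123', 'WebFetch_0') // 'session-123:WebFetch_0'
const b = buildNamespacedToolCallId('session-456', 'WebFetch_0') // 'session-456:WebFetch_0'
console.log(a !== b) // true: same raw ID, distinct namespaced IDs
```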
MyPrototypeWhat
77529b3cd3 chore: update ai-core release scripts and bump version to 1.0.7 (#11370)
* chore: update ai-core release scripts and bump version to 1.0.7

* chore: update ai-sdk-provider release script to include build step and enhance type exports in webSearchPlugin and providers

* chore: bump @cherrystudio/ai-core version to 1.0.8 and update dependencies in package.json and yarn.lock

* chore: bump @cherrystudio/ai-core version to 1.0.9 and @cherrystudio/ai-sdk-provider version to 0.1.2 in package.json and yarn.lock

---------

Co-authored-by: suyao <sy20010504@gmail.com>
2025-11-19 20:44:22 +08:00
SuYao
c8e9a10190 bump ai core version (#11363)
* bump ai core version

* chore

* chore: add patch for @ai-sdk/openai and update peer dependencies in aiCore

* chore: update installation instructions in README to include @ai-sdk/google and @ai-sdk/openai

* chore: bump @cherrystudio/ai-core version to 1.0.6 in package.json and yarn.lock

---------

Co-authored-by: MyPrototypeWhat <daoquqiexing@gmail.com>
2025-11-19 18:13:33 +08:00
scientia
0e011ff35f fix: fix api-host for vercel ai-gateway provider (#11321)
Co-authored-by: scientia <wangdenghui@xiaomi.com>
2025-11-19 17:11:17 +08:00
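For reference, a sketch of the corrected provider entry this fix targets (the full SYSTEM_PROVIDERS_CONFIG shape is trimmed to the fields relevant here; other fields are simplified):

```ts
// Only the apiHost changes in this fix: the Vercel AI Gateway base URL gains
// the '/ai' suffix.
const aiGatewayProvider = {
  name: 'AI Gateway',
  type: 'ai-gateway',
  apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
  enabled: false
}

console.log(aiGatewayProvider.apiHost)
```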
MyPrototypeWhat
40a64a7c92 feat(options): enhance provider key handling for cherryin in buildPro… (#11361)
feat(options): enhance provider key handling for cherryin in buildProviderOptions function
2025-11-19 16:25:29 +08:00
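A condensed TypeScript sketch of the key handling this commit adds to `buildProviderOptions` (the option-building itself is omitted; only the key resolution is shown, with simplified types):

```ts
// When the raw provider is 'cherryin', the provider-options key is derived from
// the actual provider's type instead, mapping the 'gemini' type to the SDK's
// 'google' key. The other remappings mirror the existing table in the diff below.
type ActualProvider = { type: string }

function resolveProviderKey(rawProviderId: string, actualProvider: ActualProvider): string {
  let rawProviderKey =
    ({
      'google-vertex': 'google',
      'google-vertex-anthropic': 'anthropic',
      'ai-gateway': 'gateway'
    } as Record<string, string>)[rawProviderId] || rawProviderId

  if (rawProviderKey === 'cherryin') {
    rawProviderKey = ({ gemini: 'google' } as Record<string, string>)[actualProvider.type] || actualProvider.type
  }
  return rawProviderKey
}

// e.g. resolveProviderKey('cherryin', { type: 'gemini' }) === 'google'
```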
Phantom
dc9503ef8b feat: support gemini 3 (#11356)
* feat(reasoning): add support for gemini-3-pro-preview model

Update regex pattern to include gemini-3-pro-preview as a supported thinking model
Add tests for new gemini-3 model support and edge cases

* fix(reasoning): update gemini model regex to include stable versions

Add support for stable versions of gemini-3-flash and gemini-3-pro in the model regex pattern. Update tests to verify both preview and stable versions are correctly identified.

* feat(providers): add vertexai provider check function

Add isVertexAiProvider function to consistently check for vertexai provider type and use it in websearch model detection

* feat(websearch): update gemini search regex to include v3 models

Add support for gemini 3.x models in the search regex pattern, including preview versions

* feat(vision): add support for gemini-3 models and add tests

Add regex pattern for gemini-3 models in visionAllowedModels
Create comprehensive test suite for isVisionModel function

* refactor(vision): make vision-related model constants private

Remove unused isNotSupportedImageSizeModel function and change exports to const declarations for internal use only

* chore(deps): update @ai-sdk/google to v2.0.36 and related dependencies

update @ai-sdk/google dependency from v2.0.31 to v2.0.36 to include fixes for model path handling and tool support for newer Gemini models

* chore: remove outdated @ai-sdk-google patch file

* chore: remove outdated @ai-sdk/google patch dependency
2025-11-19 14:05:14 +08:00
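The thinking-model pattern after this PR, copied from the diff below, with a few spot checks (the model objects used elsewhere are reduced here to plain ID strings):

```ts
// Updated pattern: gemini-3 flash/pro (preview and stable) now match alongside
// the existing 2.5 and -latest variants.
const GEMINI_THINKING_MODEL_REGEX =
  /gemini-(?:2\.5.*(?:-latest)?|3-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\w-]+)*$/i

console.log(GEMINI_THINKING_MODEL_REGEX.test('gemini-3-pro-preview')) // true
console.log(GEMINI_THINKING_MODEL_REGEX.test('gemini-3-flash')) // true
console.log(GEMINI_THINKING_MODEL_REGEX.test('gemini-2.5-pro')) // true
console.log(GEMINI_THINKING_MODEL_REGEX.test('gemini-1.5-pro')) // false
```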
beyondkmp
f2c8484c48 feat: enable local crash mini dump file (#11348)
* feat: enable local crash mini dump file

* update version
2025-11-18 18:27:57 +08:00
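The change is a one-time crash reporter setup in the main process; a sketch matching the diff below (uploads disabled, so minidumps are only written locally):

```ts
// Electron's crashReporter is started with an empty submitURL and
// uploadToServer: false, so crash dumps stay on disk instead of being sent
// anywhere. app.getPath('crashDumps') points at where they land.
import { app, crashReporter } from 'electron'

crashReporter.start({
  companyName: 'CherryHQ',
  productName: 'CherryStudio',
  submitURL: '',
  uploadToServer: false
})

app.whenReady().then(() => {
  // Illustrative: log the local dump directory.
  console.log('crash dumps directory:', app.getPath('crashDumps'))
})
```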
35 changed files with 1016 additions and 365 deletions

View File

@@ -1,26 +0,0 @@
diff --git a/dist/index.js b/dist/index.js
index ff305b112779b718f21a636a27b1196125a332d9..cf32ff5086d4d9e56f8fe90c98724559083bafc3 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -471,7 +471,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index 57659290f1cec74878a385626ad75b2a4d5cd3fc..d04e5927ec3725b6ffdb80868bfa1b5a48849537 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -477,7 +477,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts

View File

@@ -0,0 +1,152 @@
diff --git a/dist/index.js b/dist/index.js
index c2ef089c42e13a8ee4a833899a415564130e5d79..75efa7baafb0f019fb44dd50dec1641eee8879e7 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -471,7 +471,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index d75c0cc13c41192408c1f3f2d29d76a7bffa6268..ada730b8cb97d9b7d4cb32883a1d1ff416404d9b 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -477,7 +477,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/internal/index.js b/dist/internal/index.js
index 277cac8dc734bea2fb4f3e9a225986b402b24f48..bb704cd79e602eb8b0cee1889e42497d59ccdb7a 100644
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -432,7 +432,15 @@ function prepareTools({
var _a;
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+ // These changes could be safely removed when @ai-sdk/google v3 released.
+ const isLatest = (
+ [
+ 'gemini-flash-latest',
+ 'gemini-flash-lite-latest',
+ 'gemini-pro-latest',
+ ]
+ ).some(id => id === modelId);
+ const isGemini2OrNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
const supportsFileSearch = modelId.includes("gemini-2.5");
if (tools == null) {
@@ -458,7 +466,7 @@ function prepareTools({
providerDefinedTools.forEach((tool) => {
switch (tool.id) {
case "google.google_search":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ googleSearch: {} });
} else if (supportsDynamicRetrieval) {
googleTools2.push({
@@ -474,7 +482,7 @@ function prepareTools({
}
break;
case "google.url_context":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ urlContext: {} });
} else {
toolWarnings.push({
@@ -485,7 +493,7 @@ function prepareTools({
}
break;
case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ codeExecution: {} });
} else {
toolWarnings.push({
@@ -507,7 +515,7 @@ function prepareTools({
}
break;
case "google.vertex_rag_store":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({
retrieval: {
vertex_rag_store: {
diff --git a/dist/internal/index.mjs b/dist/internal/index.mjs
index 03b7cc591be9b58bcc2e775a96740d9f98862a10..347d2c12e1cee79f0f8bb258f3844fb0522a6485 100644
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -424,7 +424,15 @@ function prepareTools({
var _a;
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+ // These changes could be safely removed when @ai-sdk/google v3 released.
+ const isLatest = (
+ [
+ 'gemini-flash-latest',
+ 'gemini-flash-lite-latest',
+ 'gemini-pro-latest',
+ ]
+ ).some(id => id === modelId);
+ const isGemini2OrNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
const supportsFileSearch = modelId.includes("gemini-2.5");
if (tools == null) {
@@ -450,7 +458,7 @@ function prepareTools({
providerDefinedTools.forEach((tool) => {
switch (tool.id) {
case "google.google_search":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ googleSearch: {} });
} else if (supportsDynamicRetrieval) {
googleTools2.push({
@@ -466,7 +474,7 @@ function prepareTools({
}
break;
case "google.url_context":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ urlContext: {} });
} else {
toolWarnings.push({
@@ -477,7 +485,7 @@ function prepareTools({
}
break;
case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ codeExecution: {} });
} else {
toolWarnings.push({
@@ -499,7 +507,7 @@ function prepareTools({
}
break;
case "google.vertex_rag_store":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({
retrieval: {
vertex_rag_store: {
@@ -1434,9 +1442,7 @@ var googleTools = {
vertexRagStore
};
export {
- GoogleGenerativeAILanguageModel,
getGroundingMetadataSchema,
- getUrlContextMetadataSchema,
- googleTools
+ getUrlContextMetadataSchema, GoogleGenerativeAILanguageModel, googleTools
};
//# sourceMappingURL=index.mjs.map
\ No newline at end of file

View File

@@ -74,14 +74,15 @@
"format:check": "biome format && biome lint",
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
"claude": "dotenv -e .env -- claude",
"release:aicore:alpha": "yarn workspace @cherrystudio/ai-core version prerelease --immediate && yarn workspace @cherrystudio/ai-core npm publish --tag alpha --access public",
"release:aicore:beta": "yarn workspace @cherrystudio/ai-core version prerelease --immediate && yarn workspace @cherrystudio/ai-core npm publish --tag beta --access public",
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core npm publish --access public"
"release:aicore:alpha": "yarn workspace @cherrystudio/ai-core version prerelease --preid alpha --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag alpha --access public",
"release:aicore:beta": "yarn workspace @cherrystudio/ai-core version prerelease --preid beta --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag beta --access public",
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --access public",
"release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
},
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.30#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.30-b50a299674.patch",
"@libsql/client": "0.14.0",
"@libsql/win32-x64-msvc": "^0.4.7",
"@libsql/client": "0.15.15",
"@libsql/win32-x64-msvc": "^0.5.22",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"@paymoapp/electron-shutdown-handler": "^1.1.2",
"@strongtz/win32-arm64-msvc": "^0.4.7",
@@ -111,10 +112,11 @@
"@ai-sdk/anthropic": "^2.0.44",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.9",
"@ai-sdk/google": "^2.0.32",
"@ai-sdk/google-vertex": "^3.0.62",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch",
"@ai-sdk/google-vertex": "^3.0.68",
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch",
"@ai-sdk/mistral": "^2.0.23",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/perplexity": "^2.0.17",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@anthropic-ai/sdk": "^0.41.0",
@@ -123,7 +125,7 @@
"@aws-sdk/client-bedrock-runtime": "^3.910.0",
"@aws-sdk/client-s3": "^3.910.0",
"@biomejs/biome": "2.2.4",
"@cherrystudio/ai-core": "workspace:^1.0.0-alpha.18",
"@cherrystudio/ai-core": "workspace:^1.0.9",
"@cherrystudio/embedjs": "^0.1.31",
"@cherrystudio/embedjs-libsql": "^0.1.31",
"@cherrystudio/embedjs-loader-csv": "^0.1.31",
@@ -384,10 +386,10 @@
"@codemirror/lint": "6.8.5",
"@codemirror/view": "6.38.1",
"@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"@libsql/client": "0.15.15",
"atomically@npm:^1.7.0": "patch:atomically@npm%3A1.7.0#~/.yarn/patches/atomically-npm-1.7.0-e742e5293b.patch",
"esbuild": "^0.25.0",
"file-stream-rotator@npm:^0.6.1": "patch:file-stream-rotator@npm%3A0.6.1#~/.yarn/patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
"libsql@npm:^0.4.4": "patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch",
"node-abi": "4.24.0",
"openai@npm:^4.77.0": "npm:@cherrystudio/openai@6.5.0",
"openai@npm:^4.87.3": "npm:@cherrystudio/openai@6.5.0",
@@ -410,7 +412,7 @@
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:2.0.64": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/google@npm:2.0.31": "patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch"
"@ai-sdk/google@npm:2.0.36": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {

View File

@@ -1,6 +1,6 @@
{
"name": "@cherrystudio/ai-sdk-provider",
"version": "0.1.0",
"version": "0.1.2",
"description": "Cherry Studio AI SDK provider bundle with CherryIN routing.",
"keywords": [
"ai-sdk",

View File

@@ -71,7 +71,7 @@ Cherry Studio AI Core 是一个基于 Vercel AI SDK 的统一 AI Provider 接口
## 安装
```bash
npm install @cherrystudio/ai-core ai
npm install @cherrystudio/ai-core ai @ai-sdk/google @ai-sdk/openai
```
### React Native

View File

@@ -1,6 +1,6 @@
{
"name": "@cherrystudio/ai-core",
"version": "1.0.1",
"version": "1.0.9",
"description": "Cherry Studio AI Core - Unified AI Provider Interface Based on Vercel AI SDK",
"main": "dist/index.js",
"module": "dist/index.mjs",
@@ -33,19 +33,19 @@
},
"homepage": "https://github.com/CherryHQ/cherry-studio#readme",
"peerDependencies": {
"@ai-sdk/google": "^2.0.36",
"@ai-sdk/openai": "^2.0.64",
"@cherrystudio/ai-sdk-provider": "^0.1.2",
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.43",
"@ai-sdk/azure": "^2.0.66",
"@ai-sdk/deepseek": "^1.0.27",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai-compatible": "^1.0.26",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.16",
"@ai-sdk/xai": "^2.0.31",
"@cherrystudio/ai-sdk-provider": "workspace:*",
"zod": "^4.1.5"
},
"devDependencies": {

View File

@@ -4,12 +4,7 @@
*/
export const BUILT_IN_PLUGIN_PREFIX = 'built-in:'
export { googleToolsPlugin } from './googleToolsPlugin'
export { createLoggingPlugin } from './logging'
export { createPromptToolUsePlugin } from './toolUsePlugin/promptToolUsePlugin'
export type {
PromptToolUseConfig,
ToolUseRequestContext,
ToolUseResult
} from './toolUsePlugin/type'
export { webSearchPlugin, type WebSearchPluginConfig } from './webSearchPlugin'
export * from './googleToolsPlugin'
export * from './toolUsePlugin/promptToolUsePlugin'
export * from './toolUsePlugin/type'
export * from './webSearchPlugin'

View File

@@ -32,7 +32,7 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
})
// 导出类型定义供开发者使用
export type { WebSearchPluginConfig, WebSearchToolOutputSchema } from './helper'
export * from './helper'
// 默认导出
export default webSearchPlugin

View File

@@ -44,7 +44,7 @@ export {
// ==================== 基础数据和类型 ====================
// 基础Provider数据源
export { baseProviderIds, baseProviders } from './schemas'
export { baseProviderIds, baseProviders, isBaseProvider } from './schemas'
// 类型定义和Schema
export type {

View File

@@ -7,7 +7,6 @@ import { createAzure } from '@ai-sdk/azure'
import { type AzureOpenAIProviderSettings } from '@ai-sdk/azure'
import { createDeepSeek } from '@ai-sdk/deepseek'
import { createGoogleGenerativeAI } from '@ai-sdk/google'
import { createHuggingFace } from '@ai-sdk/huggingface'
import { createOpenAI, type OpenAIProviderSettings } from '@ai-sdk/openai'
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'
import type { LanguageModelV2 } from '@ai-sdk/provider'
@@ -33,8 +32,7 @@ export const baseProviderIds = [
'deepseek',
'openrouter',
'cherryin',
'cherryin-chat',
'huggingface'
'cherryin-chat'
] as const
/**
@@ -158,12 +156,6 @@ export const baseProviders = [
})
},
supportsImageGeneration: true
},
{
id: 'huggingface',
name: 'HuggingFace',
creator: createHuggingFace,
supportsImageGeneration: true
}
] as const satisfies BaseProvider[]

View File

@@ -41,6 +41,7 @@ export enum IpcChannel {
App_SetFullScreen = 'app:set-full-screen',
App_IsFullScreen = 'app:is-full-screen',
App_GetSystemFonts = 'app:get-system-fonts',
APP_CrashRenderProcess = 'app:crash-render-process',
App_MacIsProcessTrusted = 'app:mac-is-process-trusted',
App_MacRequestProcessTrust = 'app:mac-request-process-trust',

View File

@@ -8,7 +8,7 @@ import '@main/config'
import { loggerService } from '@logger'
import { electronApp, optimizer } from '@electron-toolkit/utils'
import { replaceDevtoolsFont } from '@main/utils/windowUtil'
import { app } from 'electron'
import { app, crashReporter } from 'electron'
import installExtension, { REACT_DEVELOPER_TOOLS, REDUX_DEVTOOLS } from 'electron-devtools-installer'
import { isDev, isLinux, isWin } from './constant'
@@ -37,6 +37,14 @@ import { initWebviewHotkeys } from './services/WebviewService'
const logger = loggerService.withContext('MainEntry')
// enable local crash reports
crashReporter.start({
companyName: 'CherryHQ',
productName: 'CherryStudio',
submitURL: '',
uploadToServer: false
})
/**
* Disable hardware acceleration if setting is enabled
*/

View File

@@ -1038,4 +1038,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ipcMain.handle(IpcChannel.WebSocket_Status, WebSocketService.getStatus)
ipcMain.handle(IpcChannel.WebSocket_SendFile, WebSocketService.sendFile)
ipcMain.handle(IpcChannel.WebSocket_GetAllCandidates, WebSocketService.getAllCandidates)
ipcMain.handle(IpcChannel.APP_CrashRenderProcess, () => {
mainWindow.webContents.forcefullyCrashRenderer()
})
}

View File

@@ -25,7 +25,7 @@ describe('stripLocalCommandTags', () => {
describe('Claude → AiSDK transform', () => {
it('handles tool call streaming lifecycle', () => {
const state = new ClaudeStreamState()
const state = new ClaudeStreamState({ agentSessionId: baseStreamMetadata.session_id })
const parts: ReturnType<typeof transformSDKMessageToStreamParts>[number][] = []
const messages: SDKMessage[] = [
@@ -182,14 +182,14 @@ describe('Claude → AiSDK transform', () => {
(typeof parts)[number],
{ type: 'tool-result' }
>
expect(toolResult.toolCallId).toBe('tool-1')
expect(toolResult.toolCallId).toBe('session-123:tool-1')
expect(toolResult.toolName).toBe('Bash')
expect(toolResult.input).toEqual({ command: 'ls' })
expect(toolResult.output).toBe('ok')
})
it('handles streaming text completion', () => {
const state = new ClaudeStreamState()
const state = new ClaudeStreamState({ agentSessionId: baseStreamMetadata.session_id })
const parts: ReturnType<typeof transformSDKMessageToStreamParts>[number][] = []
const messages: SDKMessage[] = [

View File

@@ -10,8 +10,21 @@
* Every Claude turn gets its own instance. `resetStep` should be invoked once the finish event has
* been emitted to avoid leaking state into the next turn.
*/
import { loggerService } from '@logger'
import type { FinishReason, LanguageModelUsage, ProviderMetadata } from 'ai'
/**
* Builds a namespaced tool call ID by combining session ID with raw tool call ID.
* This ensures tool calls from different sessions don't conflict even if they have
* the same raw ID from the SDK.
*
* @param sessionId - The agent session ID
* @param rawToolCallId - The raw tool call ID from SDK (e.g., "WebFetch_0")
*/
export function buildNamespacedToolCallId(sessionId: string, rawToolCallId: string): string {
return `${sessionId}:${rawToolCallId}`
}
/**
* Shared fields for every block that Claude can stream (text, reasoning, tool).
*/
@@ -34,6 +47,7 @@ type ReasoningBlockState = BaseBlockState & {
type ToolBlockState = BaseBlockState & {
kind: 'tool'
toolCallId: string
rawToolCallId: string
toolName: string
inputBuffer: string
providerMetadata?: ProviderMetadata
@@ -48,12 +62,17 @@ type PendingUsageState = {
}
type PendingToolCall = {
rawToolCallId: string
toolCallId: string
toolName: string
input: unknown
providerMetadata?: ProviderMetadata
}
type ClaudeStreamStateOptions = {
agentSessionId: string
}
/**
* Tracks the lifecycle of Claude streaming blocks (text, thinking, tool calls)
* across individual websocket events. The transformer relies on this class to
@@ -61,12 +80,20 @@ type PendingToolCall = {
* usage/finish metadata once Anthropic closes a message.
*/
export class ClaudeStreamState {
private logger
private readonly agentSessionId: string
private blocksByIndex = new Map<number, BlockState>()
private toolIndexById = new Map<string, number>()
private toolIndexByNamespacedId = new Map<string, number>()
private pendingUsage: PendingUsageState = {}
private pendingToolCalls = new Map<string, PendingToolCall>()
private stepActive = false
constructor(options: ClaudeStreamStateOptions) {
this.logger = loggerService.withContext('ClaudeStreamState')
this.agentSessionId = options.agentSessionId
this.logger.silly('ClaudeStreamState', options)
}
/** Marks the beginning of a new AiSDK step. */
beginStep(): void {
this.stepActive = true
@@ -104,19 +131,21 @@ export class ClaudeStreamState {
/** Caches tool metadata so subsequent input deltas and results can find it. */
openToolBlock(
index: number,
params: { toolCallId: string; toolName: string; providerMetadata?: ProviderMetadata }
params: { rawToolCallId: string; toolName: string; providerMetadata?: ProviderMetadata }
): ToolBlockState {
const toolCallId = buildNamespacedToolCallId(this.agentSessionId, params.rawToolCallId)
const block: ToolBlockState = {
kind: 'tool',
id: params.toolCallId,
id: toolCallId,
index,
toolCallId: params.toolCallId,
toolCallId,
rawToolCallId: params.rawToolCallId,
toolName: params.toolName,
inputBuffer: '',
providerMetadata: params.providerMetadata
}
this.blocksByIndex.set(index, block)
this.toolIndexById.set(params.toolCallId, index)
this.toolIndexByNamespacedId.set(toolCallId, index)
return block
}
@@ -125,13 +154,17 @@ export class ClaudeStreamState {
}
getToolBlockById(toolCallId: string): ToolBlockState | undefined {
const index = this.toolIndexById.get(toolCallId)
const index = this.toolIndexByNamespacedId.get(toolCallId)
if (index === undefined) return undefined
const block = this.blocksByIndex.get(index)
if (!block || block.kind !== 'tool') return undefined
return block
}
getToolBlockByRawId(rawToolCallId: string): ToolBlockState | undefined {
return this.getToolBlockById(buildNamespacedToolCallId(this.agentSessionId, rawToolCallId))
}
/** Appends streamed text to a text block, returning the updated state when present. */
appendTextDelta(index: number, text: string): TextBlockState | undefined {
const block = this.blocksByIndex.get(index)
@@ -158,10 +191,12 @@ export class ClaudeStreamState {
/** Records a tool call to be consumed once its result arrives from the user. */
registerToolCall(
toolCallId: string,
rawToolCallId: string,
payload: { toolName: string; input: unknown; providerMetadata?: ProviderMetadata }
): void {
this.pendingToolCalls.set(toolCallId, {
const toolCallId = buildNamespacedToolCallId(this.agentSessionId, rawToolCallId)
this.pendingToolCalls.set(rawToolCallId, {
rawToolCallId,
toolCallId,
toolName: payload.toolName,
input: payload.input,
@@ -170,10 +205,10 @@ export class ClaudeStreamState {
}
/** Retrieves and clears the buffered tool call metadata for the given id. */
consumePendingToolCall(toolCallId: string): PendingToolCall | undefined {
const entry = this.pendingToolCalls.get(toolCallId)
consumePendingToolCall(rawToolCallId: string): PendingToolCall | undefined {
const entry = this.pendingToolCalls.get(rawToolCallId)
if (entry) {
this.pendingToolCalls.delete(toolCallId)
this.pendingToolCalls.delete(rawToolCallId)
}
return entry
}
@@ -183,12 +218,12 @@ export class ClaudeStreamState {
* completion so that downstream tool results can reference the original call.
*/
completeToolBlock(toolCallId: string, input: unknown, providerMetadata?: ProviderMetadata): void {
const block = this.getToolBlockByRawId(toolCallId)
this.registerToolCall(toolCallId, {
toolName: this.getToolBlockById(toolCallId)?.toolName ?? 'unknown',
toolName: block?.toolName ?? 'unknown',
input,
providerMetadata
})
const block = this.getToolBlockById(toolCallId)
if (block) {
block.resolvedInput = input
}
@@ -200,7 +235,7 @@ export class ClaudeStreamState {
if (!block) return undefined
this.blocksByIndex.delete(index)
if (block.kind === 'tool') {
this.toolIndexById.delete(block.toolCallId)
this.toolIndexByNamespacedId.delete(block.toolCallId)
}
return block
}
@@ -227,7 +262,7 @@ export class ClaudeStreamState {
/** Drops cached block metadata for the currently active message. */
resetBlocks(): void {
this.blocksByIndex.clear()
this.toolIndexById.clear()
this.toolIndexByNamespacedId.clear()
}
/** Resets the entire step lifecycle after emitting a terminal frame. */
@@ -236,6 +271,10 @@ export class ClaudeStreamState {
this.resetPendingUsage()
this.stepActive = false
}
getNamespacedToolCallId(rawToolCallId: string): string {
return buildNamespacedToolCallId(this.agentSessionId, rawToolCallId)
}
}
export type { PendingToolCall }

View File

@@ -13,6 +13,7 @@ import { app } from 'electron'
import type { GetAgentSessionResponse } from '../..'
import type { AgentServiceInterface, AgentStream, AgentStreamEvent } from '../../interfaces/AgentStreamInterface'
import { sessionService } from '../SessionService'
import { buildNamespacedToolCallId } from './claude-stream-state'
import { promptForToolApproval } from './tool-permissions'
import { ClaudeStreamState, transformSDKMessageToStreamParts } from './transform'
@@ -150,7 +151,10 @@ class ClaudeCodeService implements AgentServiceInterface {
return { behavior: 'allow', updatedInput: input }
}
return promptForToolApproval(toolName, input, options)
return promptForToolApproval(toolName, input, {
...options,
toolCallId: buildNamespacedToolCallId(session.id, options.toolUseID)
})
}
// Build SDK options from parameters
@@ -346,7 +350,7 @@ class ClaudeCodeService implements AgentServiceInterface {
const jsonOutput: SDKMessage[] = []
let hasCompleted = false
const startTime = Date.now()
const streamState = new ClaudeStreamState()
const streamState = new ClaudeStreamState({ agentSessionId: sessionId })
try {
for await (const message of query({ prompt: promptStream, options })) {

View File

@@ -37,6 +37,7 @@ type RendererPermissionRequestPayload = {
requestId: string
toolName: string
toolId: string
toolCallId: string
description?: string
requiresPermissions: boolean
input: Record<string, unknown>
@@ -206,10 +207,19 @@ const ensureIpcHandlersRegistered = () => {
})
}
type PromptForToolApprovalOptions = {
signal: AbortSignal
suggestions?: PermissionUpdate[]
// NOTICE: This ID is namespaced with session ID, not the raw SDK tool call ID.
// Format: `${sessionId}:${rawToolCallId}`, e.g., `session_123:WebFetch_0`
toolCallId: string
}
export async function promptForToolApproval(
toolName: string,
input: Record<string, unknown>,
options?: { signal: AbortSignal; suggestions?: PermissionUpdate[] }
options: PromptForToolApprovalOptions
): Promise<PermissionResult> {
if (shouldAutoApproveTools) {
logger.debug('promptForToolApproval auto-approving tool for test', {
@@ -245,6 +255,7 @@ export async function promptForToolApproval(
logger.info('Requesting user approval for tool usage', {
requestId,
toolName,
toolCallId: options.toolCallId,
description: toolMetadata?.description
})
@@ -252,6 +263,7 @@ export async function promptForToolApproval(
requestId,
toolName,
toolId: toolMetadata?.id ?? toolName,
toolCallId: options.toolCallId,
description: toolMetadata?.description,
requiresPermissions: toolMetadata?.requirePermissions ?? false,
input: sanitizedInput,
@@ -266,6 +278,7 @@ export async function promptForToolApproval(
logger.debug('Registering tool permission request', {
requestId,
toolName,
toolCallId: options.toolCallId,
requiresPermissions: requestPayload.requiresPermissions,
timeoutMs: TOOL_APPROVAL_TIMEOUT_MS,
suggestionCount: sanitizedSuggestions.length
@@ -273,7 +286,11 @@ export async function promptForToolApproval(
return new Promise<PermissionResult>((resolve) => {
const timeout = setTimeout(() => {
logger.info('User tool permission request timed out', { requestId, toolName })
logger.info('User tool permission request timed out', {
requestId,
toolName,
toolCallId: options.toolCallId
})
finalizeRequest(requestId, { behavior: 'deny', message: 'Timed out waiting for approval' }, 'timeout')
}, TOOL_APPROVAL_TIMEOUT_MS)
@@ -287,7 +304,11 @@ export async function promptForToolApproval(
if (options?.signal) {
const abortListener = () => {
logger.info('Tool permission request aborted before user responded', { requestId, toolName })
logger.info('Tool permission request aborted before user responded', {
requestId,
toolName,
toolCallId: options.toolCallId
})
finalizeRequest(requestId, defaultDenyUpdate, 'aborted')
}

View File

@@ -243,9 +243,10 @@ function handleAssistantToolUse(
state: ClaudeStreamState,
chunks: AgentStreamPart[]
): void {
const toolCallId = state.getNamespacedToolCallId(block.id)
chunks.push({
type: 'tool-call',
toolCallId: block.id,
toolCallId,
toolName: block.name,
input: block.input,
providerExecuted: true,
@@ -331,10 +332,11 @@ function handleUserMessage(
if (block.type === 'tool_result') {
const toolResult = block as ToolResultContent
const pendingCall = state.consumePendingToolCall(toolResult.tool_use_id)
const toolCallId = pendingCall?.toolCallId ?? state.getNamespacedToolCallId(toolResult.tool_use_id)
if (toolResult.is_error) {
chunks.push({
type: 'tool-error',
toolCallId: toolResult.tool_use_id,
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
error: toolResult.content,
@@ -343,7 +345,7 @@ function handleUserMessage(
} else {
chunks.push({
type: 'tool-result',
toolCallId: toolResult.tool_use_id,
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
output: toolResult.content,
@@ -514,7 +516,7 @@ function handleContentBlockStart(
}
case 'tool_use': {
const block = state.openToolBlock(index, {
toolCallId: contentBlock.id,
rawToolCallId: contentBlock.id,
toolName: contentBlock.name,
providerMetadata
})

View File

@@ -111,6 +111,7 @@ const api = {
setFullScreen: (value: boolean): Promise<void> => ipcRenderer.invoke(IpcChannel.App_SetFullScreen, value),
isFullScreen: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.App_IsFullScreen),
getSystemFonts: (): Promise<string[]> => ipcRenderer.invoke(IpcChannel.App_GetSystemFonts),
mockCrashRenderProcess: () => ipcRenderer.invoke(IpcChannel.APP_CrashRenderProcess),
mac: {
isProcessTrusted: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.App_MacIsProcessTrusted),
requestProcessTrust: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.App_MacRequestProcessTrust)

View File

@@ -99,9 +99,6 @@ export function buildProviderOptions(
serviceTier: serviceTierSetting
}
break
case 'huggingface':
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities)
break
case 'anthropic':
providerSpecificOptions = buildAnthropicProviderOptions(assistant, model, capabilities)
break
@@ -144,6 +141,9 @@ export function buildProviderOptions(
case 'bedrock':
providerSpecificOptions = buildBedrockProviderOptions(assistant, model, capabilities)
break
case 'huggingface':
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities)
break
default:
// 对于其他 provider使用通用的构建逻辑
providerSpecificOptions = {
@@ -162,13 +162,17 @@ export function buildProviderOptions(
...getCustomParameters(assistant)
}
const rawProviderKey =
let rawProviderKey =
{
'google-vertex': 'google',
'google-vertex-anthropic': 'anthropic',
'ai-gateway': 'gateway'
}[rawProviderId] || rawProviderId
if (rawProviderKey === 'cherryin') {
rawProviderKey = { gemini: 'google' }[actualProvider.type] || actualProvider.type
}
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions }
return {
[rawProviderKey]: providerSpecificOptions

View File

@@ -1,5 +1,4 @@
import { loggerService } from '@logger'
import ClaudeIcon from '@renderer/assets/images/models/claude.png'
import { ErrorBoundary } from '@renderer/components/ErrorBoundary'
import { TopView } from '@renderer/components/TopView'
import { permissionModeCards } from '@renderer/config/agent'
@@ -9,7 +8,6 @@ import SelectAgentBaseModelButton from '@renderer/pages/home/components/SelectAg
import type {
AddAgentForm,
AgentEntity,
AgentType,
ApiModel,
BaseAgentForm,
PermissionMode,
@@ -17,30 +15,22 @@ import type {
UpdateAgentForm
} from '@renderer/types'
import { AgentConfigurationSchema, isAgentType } from '@renderer/types'
import { Avatar, Button, Input, Modal, Select } from 'antd'
import { Button, Input, Modal, Select } from 'antd'
import { AlertTriangleIcon } from 'lucide-react'
import type { ChangeEvent, FormEvent } from 'react'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import type { BaseOption } from './shared'
const { TextArea } = Input
const logger = loggerService.withContext('AddAgentPopup')
interface AgentTypeOption extends BaseOption {
type: 'type'
key: AgentEntity['type']
name: AgentEntity['name']
}
type AgentWithTools = AgentEntity & { tools?: Tool[] }
const buildAgentForm = (existing?: AgentWithTools): BaseAgentForm => ({
type: existing?.type ?? 'claude-code',
name: existing?.name ?? 'Claude Code',
name: existing?.name ?? 'Agent',
description: existing?.description,
instructions: existing?.instructions,
model: existing?.model ?? '',
@@ -100,54 +90,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
})
}, [])
// add supported agents type here.
const agentConfig = useMemo(
() =>
[
{
type: 'type',
key: 'claude-code',
label: 'Claude Code',
name: 'Claude Code',
avatar: ClaudeIcon
}
] as const satisfies AgentTypeOption[],
[]
)
const agentOptions = useMemo(
() =>
agentConfig.map((option) => ({
value: option.key,
label: (
<OptionWrapper>
<Avatar src={option.avatar} size={24} />
<span>{option.label}</span>
</OptionWrapper>
)
})),
[agentConfig]
)
const onAgentTypeChange = useCallback(
(value: AgentType) => {
const prevConfig = agentConfig.find((config) => config.key === form.type)
let newName: string | undefined = form.name
if (prevConfig && prevConfig.name === form.name) {
const newConfig = agentConfig.find((config) => config.key === value)
if (newConfig) {
newName = newConfig.name
}
}
setForm((prev) => ({
...prev,
type: value,
name: newName
}))
},
[agentConfig, form.name, form.type]
)
const onNameChange = useCallback((e: ChangeEvent<HTMLInputElement>) => {
setForm((prev) => ({
...prev,
@@ -155,12 +97,12 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
}))
}, [])
const onDescChange = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
setForm((prev) => ({
...prev,
description: e.target.value
}))
}, [])
// const onDescChange = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
// setForm((prev) => ({
// ...prev,
// description: e.target.value
// }))
// }, [])
const onInstChange = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
setForm((prev) => ({
@@ -334,16 +276,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
<StyledForm onSubmit={onSubmit}>
<FormContent>
<FormRow>
<FormItem style={{ flex: 1 }}>
<Label>{t('agent.type.label')}</Label>
<Select
value={form.type}
onChange={onAgentTypeChange}
options={agentOptions}
disabled={isEditing(agent)}
style={{ width: '100%' }}
/>
</FormItem>
<FormItem style={{ flex: 1 }}>
<Label>
{t('common.name')} <RequiredMark>*</RequiredMark>
@@ -363,7 +295,7 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
avatarSize={24}
iconSize={16}
buttonStyle={{
padding: '8px 12px',
padding: '3px 8px',
width: '100%',
border: '1px solid var(--color-border)',
borderRadius: 6,
@@ -382,7 +314,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
onChange={onPermissionModeChange}
style={{ width: '100%' }}
placeholder={t('agent.settings.tooling.permissionMode.placeholder', 'Select permission mode')}
dropdownStyle={{ minWidth: '500px' }}
optionLabelProp="label">
{permissionModeCards.map((item) => (
<Select.Option key={item.mode} value={item.mode} label={t(item.titleKey, item.titleFallback)}>
@@ -438,10 +369,10 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
<TextArea rows={3} value={form.instructions ?? ''} onChange={onInstChange} />
</FormItem>
<FormItem>
{/* <FormItem>
<Label>{t('common.description')}</Label>
<TextArea rows={2} value={form.description ?? ''} onChange={onDescChange} />
</FormItem>
<TextArea rows={1} value={form.description ?? ''} onChange={onDescChange} />
</FormItem> */}
</FormContent>
<FormFooter>
@@ -575,14 +506,7 @@ const FormFooter = styled.div`
display: flex;
justify-content: flex-end;
gap: 8px;
padding-top: 16px;
border-top: 1px solid var(--color-border);
`
const OptionWrapper = styled.div`
display: flex;
align-items: center;
gap: 8px;
padding: 10px;
`
const PermissionOptionWrapper = styled.div`

View File

@@ -1,6 +1,12 @@
import { describe, expect, it, vi } from 'vitest'
import { isDoubaoSeedAfter251015, isDoubaoThinkingAutoModel, isLingReasoningModel } from '../models/reasoning'
import {
isDoubaoSeedAfter251015,
isDoubaoThinkingAutoModel,
isGeminiReasoningModel,
isLingReasoningModel,
isSupportedThinkingTokenGeminiModel
} from '../models/reasoning'
vi.mock('@renderer/store', () => ({
default: {
@@ -231,3 +237,284 @@ describe('Ling Models', () => {
})
})
})
describe('Gemini Models', () => {
describe('isSupportedThinkingTokenGeminiModel', () => {
it('should return true for gemini 2.5 models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-flash-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-pro-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini latest models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-flash-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-pro-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-flash-lite-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini 3 models', () => {
// Preview versions
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'google/gemini-3-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
// Future stable versions
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'google/gemini-3-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'google/gemini-3-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return false for image and tts models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-flash-image',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-flash-preview-tts',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
it('should return false for older gemini models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-1.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-1.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-1.0-pro',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
})
describe('isGeminiReasoningModel', () => {
it('should return true for gemini thinking models', () => {
expect(
isGeminiReasoningModel({
id: 'gemini-2.0-flash-thinking',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'gemini-thinking-exp',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for supported thinking token gemini models', () => {
expect(
isGeminiReasoningModel({
id: 'gemini-2.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'gemini-2.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini-3 models', () => {
// Preview versions
expect(
isGeminiReasoningModel({
id: 'gemini-3-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'google/gemini-3-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
// Future stable versions
expect(
isGeminiReasoningModel({
id: 'gemini-3-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'gemini-3-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'google/gemini-3-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'google/gemini-3-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return false for older gemini models without thinking', () => {
expect(
isGeminiReasoningModel({
id: 'gemini-1.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isGeminiReasoningModel({
id: 'gemini-1.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
it('should return false for undefined model', () => {
expect(isGeminiReasoningModel(undefined)).toBe(false)
})
})
})

View File

@@ -0,0 +1,167 @@
import { describe, expect, it, vi } from 'vitest'
import { isVisionModel } from '../models/vision'
vi.mock('@renderer/store', () => ({
default: {
getState: () => ({
llm: {
settings: {}
}
})
}
}))
// FIXME: Idk why it's imported. Maybe circular dependency somewhere
vi.mock('@renderer/services/AssistantService.ts', () => ({
getDefaultAssistant: () => {
return {
id: 'default',
name: 'default',
emoji: '😀',
prompt: '',
topics: [],
messages: [],
type: 'assistant',
regularPhrases: [],
settings: {}
}
},
getProviderByModel: () => null
}))
describe('isVisionModel', () => {
describe('Gemini Models', () => {
it('should return true for gemini 1.5 models', () => {
expect(
isVisionModel({
id: 'gemini-1.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-1.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini 2.x models', () => {
expect(
isVisionModel({
id: 'gemini-2.0-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-2.0-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-2.5-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-2.5-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini latest models', () => {
expect(
isVisionModel({
id: 'gemini-flash-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-pro-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-flash-lite-latest',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini 3 models', () => {
// Preview versions
expect(
isVisionModel({
id: 'gemini-3-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
// Future stable versions
expect(
isVisionModel({
id: 'gemini-3-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isVisionModel({
id: 'gemini-3-pro',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini exp models', () => {
expect(
isVisionModel({
id: 'gemini-exp-1206',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return false for gemini 1.0 models', () => {
expect(
isVisionModel({
id: 'gemini-1.0-pro',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
})
})

View File

@@ -0,0 +1,64 @@
import { describe, expect, it, vi } from 'vitest'
import { GEMINI_SEARCH_REGEX } from '../models/websearch'
vi.mock('@renderer/store', () => ({
default: {
getState: () => ({
llm: {
settings: {}
}
})
}
}))
// FIXME: Idk why it's imported. Maybe circular dependency somewhere
vi.mock('@renderer/services/AssistantService.ts', () => ({
getDefaultAssistant: () => {
return {
id: 'default',
name: 'default',
emoji: '😀',
prompt: '',
topics: [],
messages: [],
type: 'assistant',
regularPhrases: [],
settings: {}
}
},
getProviderByModel: () => null
}))
describe('Gemini Search Models', () => {
describe('GEMINI_SEARCH_REGEX', () => {
it('should match gemini 2.x models', () => {
expect(GEMINI_SEARCH_REGEX.test('gemini-2.0-flash')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.0-pro')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.5-flash')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.5-pro')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.5-flash-latest')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.5-pro-latest')).toBe(true)
})
it('should match gemini latest models', () => {
expect(GEMINI_SEARCH_REGEX.test('gemini-flash-latest')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-pro-latest')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-flash-lite-latest')).toBe(true)
})
it('should match gemini 3 models', () => {
// Preview versions
expect(GEMINI_SEARCH_REGEX.test('gemini-3-pro-preview')).toBe(true)
// Future stable versions
expect(GEMINI_SEARCH_REGEX.test('gemini-3-flash')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3-pro')).toBe(true)
})
it('should not match older gemini models', () => {
expect(GEMINI_SEARCH_REGEX.test('gemini-1.5-flash')).toBe(false)
expect(GEMINI_SEARCH_REGEX.test('gemini-1.5-pro')).toBe(false)
expect(GEMINI_SEARCH_REGEX.test('gemini-1.0-pro')).toBe(false)
})
})
})

View File

@@ -254,7 +254,7 @@ export function isGeminiReasoningModel(model?: Model): boolean {
// Gemini 支持思考模式的模型正则
export const GEMINI_THINKING_MODEL_REGEX =
/gemini-(?:2\.5.*(?:-latest)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\w-]+)*$/i
/gemini-(?:2\.5.*(?:-latest)?|3-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\w-]+)*$/i
export const isSupportedThinkingTokenGeminiModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id, '/')

View File

@@ -12,6 +12,7 @@ const visionAllowedModels = [
'gemini-1\\.5',
'gemini-2\\.0',
'gemini-2\\.5',
'gemini-3-(?:flash|pro)(?:-preview)?',
'gemini-(flash|pro|flash-lite)-latest',
'gemini-exp',
'claude-3',
@@ -64,13 +65,13 @@ const visionExcludedModels = [
'o1-preview',
'AIDC-AI/Marco-o1'
]
export const VISION_REGEX = new RegExp(
const VISION_REGEX = new RegExp(
`\\b(?!(?:${visionExcludedModels.join('|')})\\b)(${visionAllowedModels.join('|')})\\b`,
'i'
)
// For middleware to identify models that must use the dedicated Image API
export const DEDICATED_IMAGE_MODELS = [
const DEDICATED_IMAGE_MODELS = [
'grok-2-image',
'grok-2-image-1212',
'grok-2-image-latest',
@@ -79,7 +80,7 @@ export const DEDICATED_IMAGE_MODELS = [
'gpt-image-1'
]
export const IMAGE_ENHANCEMENT_MODELS = [
const IMAGE_ENHANCEMENT_MODELS = [
'grok-2-image(?:-[\\w-]+)?',
'qwen-image-edit',
'gpt-image-1',
@@ -90,9 +91,9 @@ export const IMAGE_ENHANCEMENT_MODELS = [
const IMAGE_ENHANCEMENT_MODELS_REGEX = new RegExp(IMAGE_ENHANCEMENT_MODELS.join('|'), 'i')
// Models that should auto-enable image generation button when selected
export const AUTO_ENABLE_IMAGE_MODELS = ['gemini-2.5-flash-image', ...DEDICATED_IMAGE_MODELS]
const AUTO_ENABLE_IMAGE_MODELS = ['gemini-2.5-flash-image', ...DEDICATED_IMAGE_MODELS]
export const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
'o3',
'gpt-4o',
'gpt-4o-mini',
@@ -102,9 +103,9 @@ export const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
'gpt-5'
]
export const OPENAI_IMAGE_GENERATION_MODELS = [...OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS, 'gpt-image-1']
const OPENAI_IMAGE_GENERATION_MODELS = [...OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS, 'gpt-image-1']
export const GENERATE_IMAGE_MODELS = [
const GENERATE_IMAGE_MODELS = [
'gemini-2.0-flash-exp',
'gemini-2.0-flash-exp-image-generation',
'gemini-2.0-flash-preview-image-generation',
@@ -169,22 +170,23 @@ export function isPureGenerateImageModel(model: Model): boolean {
}
// Text to image models
export const TEXT_TO_IMAGE_REGEX = /flux|diffusion|stabilityai|sd-|dall|cogview|janus|midjourney|mj-|image|gpt-image/i
const TEXT_TO_IMAGE_REGEX = /flux|diffusion|stabilityai|sd-|dall|cogview|janus|midjourney|mj-|image|gpt-image/i
export function isTextToImageModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return TEXT_TO_IMAGE_REGEX.test(modelId)
}
export function isNotSupportedImageSizeModel(model?: Model): boolean {
if (!model) {
return false
}
// It's not used now
// export function isNotSupportedImageSizeModel(model?: Model): boolean {
// if (!model) {
// return false
// }
const baseName = getLowerBaseModelName(model.id, '/')
// const baseName = getLowerBaseModelName(model.id, '/')
return baseName.includes('grok-2-image')
}
// return baseName.includes('grok-2-image')
// }
/**
* 判断模型是否支持图片增强(包括编辑、增强、修复等)

View File

@@ -3,7 +3,13 @@ import type { Model } from '@renderer/types'
import { SystemProviderIds } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isGeminiProvider, isNewApiProvider, isOpenAICompatibleProvider, isOpenAIProvider } from '../providers'
import {
isGeminiProvider,
isNewApiProvider,
isOpenAICompatibleProvider,
isOpenAIProvider,
isVertexAiProvider
} from '../providers'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isAnthropicModel } from './utils'
import { isPureGenerateImageModel, isTextToImageModel } from './vision'
@@ -16,7 +22,7 @@ export const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
export const GEMINI_FLASH_MODEL_REGEX = new RegExp('gemini.*-flash.*$')
export const GEMINI_SEARCH_REGEX = new RegExp(
'gemini-(?:2.*(?:-latest)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\\w-]+)*$',
'gemini-(?:2.*(?:-latest)?|3-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\\w-]+)*$',
'i'
)
@@ -107,7 +113,7 @@ export function isWebSearchModel(model: Model): boolean {
}
}
if (isGeminiProvider(provider) || provider.id === SystemProviderIds.vertexai) {
if (isGeminiProvider(provider) || isVertexAiProvider(provider)) {
return GEMINI_SEARCH_REGEX.test(modelId)
}

View File

@@ -686,7 +686,7 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
name: 'AI Gateway',
type: 'ai-gateway',
apiKey: '',
apiHost: 'https://ai-gateway.vercel.sh/v1',
apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
models: [],
isSystem: true,
enabled: false
@@ -1571,6 +1571,10 @@ export function isGeminiProvider(provider: Provider): boolean {
return provider.type === 'gemini'
}
export function isVertexAiProvider(provider: Provider): boolean {
return provider.type === 'vertexai'
}
export function isAIGatewayProvider(provider: Provider): boolean {
return provider.type === 'ai-gateway'
}

View File

@@ -1,7 +1,7 @@
import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
import { loggerService } from '@logger'
import { useAppDispatch, useAppSelector } from '@renderer/store'
import { selectPendingPermissionByToolName, toolPermissionsActions } from '@renderer/store/toolPermissions'
import { selectPendingPermission, toolPermissionsActions } from '@renderer/store/toolPermissions'
import type { NormalToolResponse } from '@renderer/types'
import { Button } from 'antd'
import { ChevronDown, CirclePlay, CircleX } from 'lucide-react'
@@ -17,9 +17,7 @@ interface Props {
export function ToolPermissionRequestCard({ toolResponse }: Props) {
const { t } = useTranslation()
const dispatch = useAppDispatch()
const request = useAppSelector((state) =>
selectPendingPermissionByToolName(state.toolPermissions, toolResponse.tool.name)
)
const request = useAppSelector((state) => selectPendingPermission(state.toolPermissions, toolResponse.toolCallId))
const [now, setNow] = useState(() => Date.now())
const [showDetails, setShowDetails] = useState(false)

View File

@@ -1,21 +1,13 @@
import { getAgentTypeAvatar } from '@renderer/config/agent'
import type { useUpdateAgent } from '@renderer/hooks/agents/useUpdateAgent'
import type { useUpdateSession } from '@renderer/hooks/agents/useUpdateSession'
import { getAgentTypeLabel } from '@renderer/i18n/label'
import type { GetAgentResponse, GetAgentSessionResponse } from '@renderer/types'
import { isAgentEntity } from '@renderer/types'
import { Avatar } from 'antd'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { AccessibleDirsSetting } from './AccessibleDirsSetting'
import { AvatarSetting } from './AvatarSetting'
import { DescriptionSetting } from './DescriptionSetting'
import { ModelSetting } from './ModelSetting'
import { NameSetting } from './NameSetting'
import { SettingsContainer, SettingsItem, SettingsTitle } from './shared'
// const logger = loggerService.withContext('AgentEssentialSettings')
import { SettingsContainer } from './shared'
type EssentialSettingsProps =
| {
@@ -30,26 +22,10 @@ type EssentialSettingsProps =
}
const EssentialSettings: FC<EssentialSettingsProps> = ({ agentBase, update, showModelSetting = true }) => {
const { t } = useTranslation()
if (!agentBase) return null
const isAgent = isAgentEntity(agentBase)
return (
<SettingsContainer>
{isAgent && (
<SettingsItem inline>
<SettingsTitle>{t('agent.type.label')}</SettingsTitle>
<div className="flex items-center gap-2">
<Avatar size={24} src={getAgentTypeAvatar(agentBase.type)} className="h-6 w-6 text-lg" />
<span>{(agentBase?.name ?? agentBase?.type) ? getAgentTypeLabel(agentBase.type) : ''}</span>
</div>
</SettingsItem>
)}
{isAgent && (
<AvatarSetting agent={agentBase} update={update as ReturnType<typeof useUpdateAgent>['updateAgent']} />
)}
<NameSetting base={agentBase} update={update} />
{showModelSetting && <ModelSetting base={agentBase} update={update} />}
<AccessibleDirsSetting base={agentBase} update={update} />
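
After the cleanup, EssentialSettings only branches on isAgentEntity; the agent-type row and the separate AvatarSetting are gone, and the avatar control moves into NameSetting (next file). A rough sketch of the narrowing pattern, using stand-in types rather than the repo's GetAgentResponse/GetAgentSessionResponse:

// Stand-in shapes for illustration; the real union comes from @renderer/types.
type AgentLike = { id: string; name?: string; configuration?: { avatar?: string } }
type SessionLike = { id: string; name?: string }
type EssentialTarget = AgentLike | SessionLike

const isAgentEntityLike = (t: EssentialTarget): t is AgentLike => 'configuration' in t

function settingsFor(target: EssentialTarget): string[] {
  // Sessions get the shared settings; agents additionally expose the avatar via NameSetting.
  const shared = ['name', 'model', 'accessible-dirs']
  return isAgentEntityLike(target) ? ['avatar', ...shared] : shared
}

console.log(settingsFor({ id: 's1', name: 'session' })) // [ 'name', 'model', 'accessible-dirs' ]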

View File

@@ -1,6 +1,8 @@
import { EmojiAvatarWithPicker } from '@renderer/components/Avatar/EmojiAvatarWithPicker'
import type { AgentBaseWithId, UpdateAgentBaseForm, UpdateAgentFunctionUnion } from '@renderer/types'
import { AgentConfigurationSchema, isAgentEntity, isAgentType } from '@renderer/types'
import { Input } from 'antd'
import { useState } from 'react'
import { useCallback, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { SettingsItem, SettingsTitle } from './shared'
@@ -13,26 +15,61 @@ export interface NameSettingsProps {
export const NameSetting = ({ base, update }: NameSettingsProps) => {
const { t } = useTranslation()
const [name, setName] = useState<string | undefined>(base?.name?.trim())
const updateName = async (name: UpdateAgentBaseForm['name']) => {
if (!base) return
return update({ id: base.id, name: name?.trim() })
}
// Avatar logic
const isAgent = isAgentEntity(base)
const isDefault = isAgent ? isAgentType(base.configuration?.avatar) : false
const [emoji, setEmoji] = useState(isAgent && !isDefault ? (base.configuration?.avatar ?? '⭐️') : '⭐️')
const updateAvatar = useCallback(
(avatar: string) => {
if (!isAgent || !base) return
const parsedConfiguration = AgentConfigurationSchema.parse(base.configuration ?? {})
const payload = {
id: base.id,
configuration: {
...parsedConfiguration,
avatar
}
}
update(payload)
},
[base, update, isAgent]
)
if (!base) return null
return (
<SettingsItem inline>
<SettingsTitle>{t('common.name')}</SettingsTitle>
<Input
placeholder={t('common.agent_one') + t('common.name')}
value={name}
onChange={(e) => setName(e.target.value)}
onBlur={() => {
if (name !== base.name) {
updateName(name)
}
}}
className="max-w-70 flex-1"
/>
<div className="flex max-w-70 flex-1 items-center gap-1">
{isAgent && (
<EmojiAvatarWithPicker
emoji={emoji}
onPick={(emoji: string) => {
setEmoji(emoji)
if (isAgent && emoji === base?.configuration?.avatar) return
updateAvatar(emoji)
}}
/>
)}
<Input
placeholder={t('common.agent_one') + t('common.name')}
value={name}
onChange={(e) => setName(e.target.value)}
onBlur={() => {
if (name !== base.name) {
updateName(name)
}
}}
className="flex-1"
/>
</div>
</SettingsItem>
)
}
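
The updateAvatar callback above parses the existing configuration through AgentConfigurationSchema before spreading it, so only the avatar field changes and other configuration keys survive the update. A small zod sketch of that parse-then-spread pattern; the schema here is a stand-in, not the real AgentConfigurationSchema.

import { z } from 'zod'

// Stand-in schema; the real one is AgentConfigurationSchema from @renderer/types.
const ConfigSketch = z.object({ avatar: z.string().optional(), temperature: z.number().optional() })

function withAvatar(configuration: unknown, avatar: string) {
  const parsed = ConfigSketch.parse(configuration ?? {}) // validate/normalise first
  return { ...parsed, avatar } // then overwrite only the avatar
}

console.log(withAvatar({ temperature: 0.7 }, '⭐️')) // { temperature: 0.7, avatar: '⭐️' }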

View File

@@ -109,7 +109,6 @@ const InstallNpxUv: FC<Props> = ({ mini = false }) => {
<Container>
<Alert
type={isUvInstalled ? 'success' : 'warning'}
banner
style={{ borderRadius: 'var(--list-item-border-radius)' }}
description={
<VStack>
@@ -140,7 +139,6 @@ const InstallNpxUv: FC<Props> = ({ mini = false }) => {
/>
<Alert
type={isBunInstalled ? 'success' : 'warning'}
banner
style={{ borderRadius: 'var(--list-item-border-radius)' }}
description={
<VStack>

View File

@@ -140,7 +140,7 @@ const MCPSettings: FC = () => {
<Route
path="mcp-install"
element={
<SettingContainer theme={theme}>
<SettingContainer style={{ backgroundColor: 'inherit' }}>
<InstallNpxUv />
</SettingContainer>
}
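
This hunk appears to pair with the InstallNpxUv one above: the Alert components lose the banner prop and the surrounding SettingContainer inherits the page background, which together remove the light strip behind the install view. A trimmed sketch of the resulting alert usage; props beyond those shown in the diff are omitted, and the description text is placeholder.

import { Alert } from 'antd'

// Trimmed sketch; the real component also renders install buttons inside the description.
export const UvStatusAlert = ({ installed }: { installed: boolean }) => (
  <Alert
    type={installed ? 'success' : 'warning'}
    style={{ borderRadius: 'var(--list-item-border-radius)' }}
    description={installed ? 'uv is installed' : 'uv is not installed'}
  />
)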

View File

@@ -6,6 +6,7 @@ export type ToolPermissionRequestPayload = {
requestId: string
toolName: string
toolId: string
toolCallId: string
description?: string
requiresPermissions: boolean
input: Record<string, unknown>
@@ -82,12 +83,12 @@ export const selectActiveToolPermission = (state: ToolPermissionsState): ToolPer
return activeEntries[0]
}
export const selectPendingPermissionByToolName = (
export const selectPendingPermission = (
state: ToolPermissionsState,
toolName: string
toolCallId: string
): ToolPermissionEntry | undefined => {
const activeEntries = Object.values(state.requests)
.filter((entry) => entry.toolName === toolName)
.filter((entry) => entry.toolCallId === toolCallId)
.filter(
(entry) => entry.status === 'pending' || entry.status === 'submitting-allow' || entry.status === 'submitting-deny'
)
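
On the store side, the request payload now carries toolCallId and the renamed selectPendingPermission filters on it while keeping the same in-flight status filter. A simplified Redux Toolkit-style sketch of that flow; this is not the repo's slice, and the entry shape is reduced to the fields visible in the diff.

import { createSlice, type PayloadAction } from '@reduxjs/toolkit'

type Status = 'pending' | 'submitting-allow' | 'submitting-deny' | 'resolved'
interface Entry { requestId: string; toolName: string; toolCallId: string; status: Status }
interface State { requests: Record<string, Entry> }

const sketchSlice = createSlice({
  name: 'toolPermissionsSketch',
  initialState: { requests: {} } as State,
  reducers: {
    // The incoming payload now includes toolCallId, so it is stored on the entry.
    requestReceived(state, action: PayloadAction<Omit<Entry, 'status'>>) {
      state.requests[action.payload.requestId] = { ...action.payload, status: 'pending' }
    }
  }
})

// Same shape as the new selector: filter by toolCallId, keep only in-flight statuses.
const selectPendingPermissionSketch = (state: State, toolCallId: string) =>
  Object.values(state.requests).filter(
    (e) =>
      e.toolCallId === toolCallId &&
      (e.status === 'pending' || e.status === 'submitting-allow' || e.status === 'submitting-deny')
  )[0]

export { sketchSlice, selectPendingPermissionSketch }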

yarn.lock (216 changed lines)
View File

@@ -102,7 +102,19 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:2.0.44, @ai-sdk/anthropic@npm:^2.0.44":
"@ai-sdk/anthropic@npm:2.0.45":
version: 2.0.45
resolution: "@ai-sdk/anthropic@npm:2.0.45"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/ef0e54f032e3b8324c278f3b25d9b388308204d753404c49fd880709a796c2343aee36d335c99f50e683edd39d5b8b6f42b2e9034e1725d8e0db514e2233d104
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:^2.0.44":
version: 2.0.44
resolution: "@ai-sdk/anthropic@npm:2.0.44"
dependencies:
@@ -179,54 +191,42 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/google-vertex@npm:^3.0.62":
version: 3.0.62
resolution: "@ai-sdk/google-vertex@npm:3.0.62"
"@ai-sdk/google-vertex@npm:^3.0.68":
version: 3.0.68
resolution: "@ai-sdk/google-vertex@npm:3.0.68"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.44"
"@ai-sdk/google": "npm:2.0.31"
"@ai-sdk/anthropic": "npm:2.0.45"
"@ai-sdk/google": "npm:2.0.36"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
google-auth-library: "npm:^9.15.0"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/673bb51e3e0cbe5235ad5e65379b1cb8f099dbc690ab8552e208553a9f1cc6026d2588e956e73468bc6d267066be276e7a9aba98e32e905809dfbeab4ac0e352
checksum: 10c0/6a3f4cb1e649313b46a0c349c717757071f8b012b0a28e59ab7a55fd35d9600f0043f0a4f57417c4cc49e0d3734e89a1e4fb248fc88795b5286c83395d3f617a
languageName: node
linkType: hard
"@ai-sdk/google@npm:2.0.31":
version: 2.0.31
resolution: "@ai-sdk/google@npm:2.0.31"
"@ai-sdk/google@npm:2.0.36":
version: 2.0.36
resolution: "@ai-sdk/google@npm:2.0.36"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/d8f143f058fb62e6e67e30564ec92530d7389c22ad91b1e4bbe781c8570bf718cd417e44dcd4855e347e85c4174538a9a884eac666109e17f20d21467ab3e749
checksum: 10c0/2c6de5e1cf0703b6b932a3f313bf4bc9439897af39c805169ab04bba397185d99b2b1306f3b817f991ca41fdced0365b072ee39e76382c045930256bce47e0e4
languageName: node
linkType: hard
"@ai-sdk/google@npm:^2.0.32":
version: 2.0.32
resolution: "@ai-sdk/google@npm:2.0.32"
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch":
version: 2.0.36
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch::version=2.0.36&hash=2da8c3"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/052de16f1f66188e126168c8a9cc903448104528c7e44d6867bbf555c9067b9d6d44a4c4e0e014838156ba39095cb417f1b76363eb65212ca4d005f3651e58d2
languageName: node
linkType: hard
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch":
version: 2.0.31
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch::version=2.0.31&hash=9f3835"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/dd37dfb7abf402caaae3edb2f1a8dab018fddad6ba3190376723e03a2a0c352329c8e41e60df3fb8436b717d9c2ee4b82dff091848f50d026f62565cbdb158f8
checksum: 10c0/ce99a497360377d2917cf3a48278eb6f4337623ce3738ba743cf048c8c2a7731ec4fc27605a50e461e716ed49b3690206ca8e4078f27cb7be162b684bfc2fc22
languageName: node
linkType: hard
@@ -1891,30 +1891,30 @@ __metadata:
languageName: node
linkType: hard
"@cherrystudio/ai-core@workspace:^1.0.0-alpha.18, @cherrystudio/ai-core@workspace:packages/aiCore":
"@cherrystudio/ai-core@workspace:^1.0.9, @cherrystudio/ai-core@workspace:packages/aiCore":
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-core@workspace:packages/aiCore"
dependencies:
"@ai-sdk/anthropic": "npm:^2.0.43"
"@ai-sdk/azure": "npm:^2.0.66"
"@ai-sdk/deepseek": "npm:^1.0.27"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.31#~/.yarn/patches/@ai-sdk-google-npm-2.0.31-b0de047210.patch"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch"
"@ai-sdk/openai-compatible": "npm:^1.0.26"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.16"
"@ai-sdk/xai": "npm:^2.0.31"
"@cherrystudio/ai-sdk-provider": "workspace:*"
tsdown: "npm:^0.12.9"
typescript: "npm:^5.0.0"
vitest: "npm:^3.2.4"
zod: "npm:^4.1.5"
peerDependencies:
"@ai-sdk/google": ^2.0.36
"@ai-sdk/openai": ^2.0.64
"@cherrystudio/ai-sdk-provider": ^0.1.2
ai: ^5.0.26
languageName: unknown
linkType: soft
"@cherrystudio/ai-sdk-provider@workspace:*, @cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider":
"@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider":
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider"
dependencies:
@@ -4558,38 +4558,38 @@ __metadata:
languageName: node
linkType: hard
"@libsql/client@npm:0.14.0, @libsql/client@npm:^0.14.0":
version: 0.14.0
resolution: "@libsql/client@npm:0.14.0"
"@libsql/client@npm:0.15.15":
version: 0.15.15
resolution: "@libsql/client@npm:0.15.15"
dependencies:
"@libsql/core": "npm:^0.14.0"
"@libsql/core": "npm:^0.15.14"
"@libsql/hrana-client": "npm:^0.7.0"
js-base64: "npm:^3.7.5"
libsql: "npm:^0.4.4"
libsql: "npm:^0.5.22"
promise-limit: "npm:^2.7.0"
checksum: 10c0/9c6bab468453df765f647422c772af3578f1e108b663a80b99063f47ed3542db26ae0fcdba2e153d72e6d5089c5caeba947a167a6c065b0191a0832621539335
checksum: 10c0/1ae67280ebe27903ff142b07e2a256c22ef5ada65185286a72823e8eae8d9d2602e0d72e423d3bd64ae57494791bfffff946aa0fc7c2378b55a227ff63f8df69
languageName: node
linkType: hard
"@libsql/core@npm:^0.14.0":
version: 0.14.0
resolution: "@libsql/core@npm:0.14.0"
"@libsql/core@npm:^0.15.14":
version: 0.15.15
resolution: "@libsql/core@npm:0.15.15"
dependencies:
js-base64: "npm:^3.7.5"
checksum: 10c0/327bb991cf191d5a9a9fc0cc1a17123f7ca88f222187a3bde845fbad8ceaeaa1f139882080e4b2969da57b83e576c52702572e2838d1743c6bff75f95e6f774a
checksum: 10c0/0a619689c9504f4239d9745882a128b81e2f6c0547352bbb0d36932261c053bbcbea4435a17f91abe61556bb791f2f1203b36c36b2d4b4f369953d7949bdc40e
languageName: node
linkType: hard
"@libsql/darwin-arm64@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/darwin-arm64@npm:0.4.7"
"@libsql/darwin-arm64@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/darwin-arm64@npm:0.5.22"
conditions: os=darwin & cpu=arm64
languageName: node
linkType: hard
"@libsql/darwin-x64@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/darwin-x64@npm:0.4.7"
"@libsql/darwin-x64@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/darwin-x64@npm:0.5.22"
conditions: os=darwin & cpu=x64
languageName: node
linkType: hard
@@ -4623,38 +4623,52 @@ __metadata:
languageName: node
linkType: hard
"@libsql/linux-arm64-gnu@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/linux-arm64-gnu@npm:0.4.7"
"@libsql/linux-arm-gnueabihf@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-arm-gnueabihf@npm:0.5.22"
conditions: os=linux & cpu=arm
languageName: node
linkType: hard
"@libsql/linux-arm-musleabihf@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-arm-musleabihf@npm:0.5.22"
conditions: os=linux & cpu=arm
languageName: node
linkType: hard
"@libsql/linux-arm64-gnu@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-arm64-gnu@npm:0.5.22"
conditions: os=linux & cpu=arm64
languageName: node
linkType: hard
"@libsql/linux-arm64-musl@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/linux-arm64-musl@npm:0.4.7"
"@libsql/linux-arm64-musl@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-arm64-musl@npm:0.5.22"
conditions: os=linux & cpu=arm64
languageName: node
linkType: hard
"@libsql/linux-x64-gnu@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/linux-x64-gnu@npm:0.4.7"
"@libsql/linux-x64-gnu@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-x64-gnu@npm:0.5.22"
conditions: os=linux & cpu=x64
languageName: node
linkType: hard
"@libsql/linux-x64-musl@npm:0.4.7":
version: 0.4.7
resolution: "@libsql/linux-x64-musl@npm:0.4.7"
"@libsql/linux-x64-musl@npm:0.5.22":
version: 0.5.22
resolution: "@libsql/linux-x64-musl@npm:0.5.22"
conditions: os=linux & cpu=x64
languageName: node
linkType: hard
"@libsql/win32-x64-msvc@npm:0.4.7, @libsql/win32-x64-msvc@npm:^0.4.7":
version: 0.4.7
resolution: "@libsql/win32-x64-msvc@npm:0.4.7"
checksum: 10c0/2fcb8715b6f0571dec145eaaf3fd53c7c5aa5bf408fe1be9d84b10adc8a909bb6ee60b45e0d7052b0c1722c30ac212356a3f1adcdf7f57d5a59b48f36ca5bdf5
"@libsql/win32-x64-msvc@npm:0.5.22, @libsql/win32-x64-msvc@npm:^0.5.22":
version: 0.5.22
resolution: "@libsql/win32-x64-msvc@npm:0.5.22"
checksum: 10c0/1bb2730563c603c03a229faa352897685648659d85ba0872dda60cc02abc469fbd55539ffd8b86c81d00230d76292e5a4d2a763fe44c05694612ce6db6e929aa
conditions: os=win32 & cpu=x64
languageName: node
linkType: hard
@@ -9906,10 +9920,11 @@ __metadata:
"@ai-sdk/anthropic": "npm:^2.0.44"
"@ai-sdk/cerebras": "npm:^1.0.31"
"@ai-sdk/gateway": "npm:^2.0.9"
"@ai-sdk/google": "npm:^2.0.32"
"@ai-sdk/google-vertex": "npm:^3.0.62"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch"
"@ai-sdk/google-vertex": "npm:^3.0.68"
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch"
"@ai-sdk/mistral": "npm:^2.0.23"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch"
"@ai-sdk/perplexity": "npm:^2.0.17"
"@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.30#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.30-b50a299674.patch"
@@ -9919,7 +9934,7 @@ __metadata:
"@aws-sdk/client-bedrock-runtime": "npm:^3.910.0"
"@aws-sdk/client-s3": "npm:^3.910.0"
"@biomejs/biome": "npm:2.2.4"
"@cherrystudio/ai-core": "workspace:^1.0.0-alpha.18"
"@cherrystudio/ai-core": "workspace:^1.0.9"
"@cherrystudio/embedjs": "npm:^0.1.31"
"@cherrystudio/embedjs-libsql": "npm:^0.1.31"
"@cherrystudio/embedjs-loader-csv": "npm:^0.1.31"
@@ -9952,8 +9967,8 @@ __metadata:
"@langchain/community": "npm:^1.0.0"
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch"
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch"
"@libsql/client": "npm:0.14.0"
"@libsql/win32-x64-msvc": "npm:^0.4.7"
"@libsql/client": "npm:0.15.15"
"@libsql/win32-x64-msvc": "npm:^0.5.22"
"@mistralai/mistralai": "npm:^1.7.5"
"@modelcontextprotocol/sdk": "npm:^1.17.5"
"@mozilla/readability": "npm:^0.6.0"
@@ -17266,17 +17281,19 @@ __metadata:
languageName: node
linkType: hard
"libsql@npm:0.4.7":
version: 0.4.7
resolution: "libsql@npm:0.4.7"
"libsql@npm:^0.5.22":
version: 0.5.22
resolution: "libsql@npm:0.5.22"
dependencies:
"@libsql/darwin-arm64": "npm:0.4.7"
"@libsql/darwin-x64": "npm:0.4.7"
"@libsql/linux-arm64-gnu": "npm:0.4.7"
"@libsql/linux-arm64-musl": "npm:0.4.7"
"@libsql/linux-x64-gnu": "npm:0.4.7"
"@libsql/linux-x64-musl": "npm:0.4.7"
"@libsql/win32-x64-msvc": "npm:0.4.7"
"@libsql/darwin-arm64": "npm:0.5.22"
"@libsql/darwin-x64": "npm:0.5.22"
"@libsql/linux-arm-gnueabihf": "npm:0.5.22"
"@libsql/linux-arm-musleabihf": "npm:0.5.22"
"@libsql/linux-arm64-gnu": "npm:0.5.22"
"@libsql/linux-arm64-musl": "npm:0.5.22"
"@libsql/linux-x64-gnu": "npm:0.5.22"
"@libsql/linux-x64-musl": "npm:0.5.22"
"@libsql/win32-x64-msvc": "npm:0.5.22"
"@neon-rs/load": "npm:^0.0.4"
detect-libc: "npm:2.0.2"
dependenciesMeta:
@@ -17284,6 +17301,10 @@ __metadata:
optional: true
"@libsql/darwin-x64":
optional: true
"@libsql/linux-arm-gnueabihf":
optional: true
"@libsql/linux-arm-musleabihf":
optional: true
"@libsql/linux-arm64-gnu":
optional: true
"@libsql/linux-arm64-musl":
@@ -17294,41 +17315,8 @@ __metadata:
optional: true
"@libsql/win32-x64-msvc":
optional: true
checksum: 10c0/351952440e6bad3477e5f1bb1b9d6570d16e403b894f4a13c5c7e183a1307b2fb04a2fa902728cb8594a259e1726c51c61b822d545bbc88319b126ad15468a87
conditions: (os=darwin | os=linux | os=win32) & (cpu=x64 | cpu=arm64 | cpu=wasm32)
languageName: node
linkType: hard
"libsql@patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch":
version: 0.4.7
resolution: "libsql@patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch::version=0.4.7&hash=972e11"
dependencies:
"@libsql/darwin-arm64": "npm:0.4.7"
"@libsql/darwin-x64": "npm:0.4.7"
"@libsql/linux-arm64-gnu": "npm:0.4.7"
"@libsql/linux-arm64-musl": "npm:0.4.7"
"@libsql/linux-x64-gnu": "npm:0.4.7"
"@libsql/linux-x64-musl": "npm:0.4.7"
"@libsql/win32-x64-msvc": "npm:0.4.7"
"@neon-rs/load": "npm:^0.0.4"
detect-libc: "npm:2.0.2"
dependenciesMeta:
"@libsql/darwin-arm64":
optional: true
"@libsql/darwin-x64":
optional: true
"@libsql/linux-arm64-gnu":
optional: true
"@libsql/linux-arm64-musl":
optional: true
"@libsql/linux-x64-gnu":
optional: true
"@libsql/linux-x64-musl":
optional: true
"@libsql/win32-x64-msvc":
optional: true
checksum: 10c0/6098770dc6c31ae0dbfe0821719d184d9bb353ac92553923096f6e3420d3786f240f0b3858f519af0aeada93beb4aa83cb9a9a1a6aa18d625511b484dcb53d07
conditions: (os=darwin | os=linux | os=win32) & (cpu=x64 | cpu=arm64 | cpu=wasm32)
checksum: 10c0/6c34f08fc7408ebee16708ba12e5def9d1b2a4fa166070c956a120133ba9be68ec532e2d0b76bdc7005ef9ef69bf70d2ba7208ed824c4288c2a3d881edd5eaf6
conditions: (os=darwin | os=linux | os=win32) & (cpu=x64 | cpu=arm64 | cpu=wasm32 | cpu=arm)
languageName: node
linkType: hard
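
The lockfile changes above are version bumps: @libsql/client 0.14.0 to 0.15.15 (pulling libsql 0.5.22 with the new linux-arm builds), the patched @ai-sdk/google 2.0.36, @ai-sdk/google-vertex 3.0.68, @ai-sdk/anthropic 2.0.45, and the @cherrystudio/ai-core ^1.0.9 workspace range. A quick local smoke test for the libsql bump, assuming the basic createClient/execute API is unchanged between these client versions; the database path below is made up.

import { createClient } from '@libsql/client'

async function main() {
  // Local file database just for the probe; delete it afterwards.
  const client = createClient({ url: 'file:libsql-bump-probe.db' })
  await client.execute('CREATE TABLE IF NOT EXISTS probe (id INTEGER PRIMARY KEY, note TEXT)')
  await client.execute({ sql: 'INSERT INTO probe (note) VALUES (?)', args: ['libsql 0.5.22 bindings ok'] })
  const result = await client.execute('SELECT COUNT(*) AS n FROM probe')
  console.log(result.rows[0]) // e.g. { n: 1 }
  client.close()
}

main().catch((err) => {
  console.error(err)
  process.exit(1)
})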