Merge branch 'main' of github.com:CherryHQ/cherry-studio

fullex
2025-11-26 13:17:09 +08:00
10 changed files with 935 additions and 119 deletions

View File

@@ -1,8 +1,8 @@
diff --git a/dist/index.js b/dist/index.js
-index dc7b74ba55337c491cdf1ab3e39ca68cc4187884..ace8c90591288e42c2957e93c9bf7984f1b22444 100644
+index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
--- a/dist/index.js
+++ b/dist/index.js
-@@ -472,7 +472,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
+@@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
@@ -12,10 +12,10 @@ index dc7b74ba55337c491cdf1ab3e39ca68cc4187884..ace8c90591288e42c2957e93c9bf7984
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
-index 8390439c38cb7eaeb52080862cd6f4c58509e67c..a7647f2e11700dff7e1c8d4ae8f99d3637010733 100644
+index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
-@@ -478,7 +478,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
+@@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {

View File

@@ -1,5 +1,5 @@
diff --git a/dist/index.js b/dist/index.js
-index 7481f3b3511078068d87d03855b568b20bb86971..8ac5ec28d2f7ad1b3b0d3f8da945c75674e59637 100644
+index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(

View File

@@ -109,15 +109,15 @@
"@agentic/exa": "^7.3.3",
"@agentic/searxng": "^7.3.3",
"@agentic/tavily": "^7.3.3",
"@ai-sdk/amazon-bedrock": "^3.0.56",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/amazon-bedrock": "^3.0.61",
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.13",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/google-vertex": "^3.0.72",
"@ai-sdk/gateway": "^2.0.15",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
"@ai-sdk/google-vertex": "^3.0.79",
"@ai-sdk/huggingface": "^0.0.10",
"@ai-sdk/mistral": "^2.0.24",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
"@ai-sdk/perplexity": "^2.0.20",
"@ai-sdk/test-server": "^0.0.1",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -171,7 +171,7 @@
"@opentelemetry/sdk-trace-base": "^2.0.0",
"@opentelemetry/sdk-trace-node": "^2.0.0",
"@opentelemetry/sdk-trace-web": "^2.0.0",
"@opeoginni/github-copilot-openai-compatible": "0.1.21",
"@opeoginni/github-copilot-openai-compatible": "^0.1.21",
"@playwright/test": "^1.52.0",
"@radix-ui/react-context-menu": "^2.2.16",
"@reduxjs/toolkit": "^2.2.5",
@@ -412,12 +412,9 @@
"@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:2.0.64": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/google@npm:2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai@npm:2.0.71": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/openai-compatible@npm:1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/openai-compatible@npm:^1.0.19": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
"@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {

View File

@@ -39,13 +39,13 @@
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/azure": "^2.0.73",
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/azure": "^2.0.74",
"@ai-sdk/deepseek": "^1.0.29",
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17",
"@ai-sdk/xai": "^2.0.34",
"@ai-sdk/xai": "^2.0.36",
"zod": "^4.1.5"
},
"devDependencies": {

View File

@@ -106,7 +106,7 @@ export async function buildStreamTextParams(
searchWithTime: store.getState().websearch.searchWithTime
}
const providerOptions = buildProviderOptions(assistant, model, provider, {
const { providerOptions, standardParams } = buildProviderOptions(assistant, model, provider, {
enableReasoning,
enableWebSearch,
enableGenerateImage
@@ -181,11 +181,16 @@ export async function buildStreamTextParams(
}
// Build the base parameters
// Note: standardParams (topK, frequencyPenalty, presencePenalty, stopSequences, seed)
// are extracted from custom parameters and passed directly to streamText()
// instead of being placed in providerOptions
const params: StreamTextParams = {
messages: sdkMessages,
maxOutputTokens: getMaxTokens(assistant, model),
temperature: getTemperature(assistant, model),
topP: getTopP(assistant, model),
// Include AI SDK standard params extracted from custom parameters
...standardParams,
abortSignal: options.requestOptions?.signal,
headers,
providerOptions,
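
For context, a minimal sketch of the call shape this change produces, assuming placeholder values; the literals stand in for the getMaxTokens/getTemperature/getTopP helpers and for real provider options, and are illustrative only:

import { type LanguageModel, type ModelMessage, streamText } from 'ai'

// Placeholders for values produced earlier in buildStreamTextParams.
declare const languageModel: LanguageModel
declare const sdkMessages: ModelMessage[]

// Output of buildProviderOptions(): provider-specific options plus the extracted standard params.
const providerOptions = { openai: { reasoningEffort: 'medium' } }
const standardParams = { topK: 40, frequencyPenalty: 0.5, seed: 42 }

const result = streamText({
  model: languageModel,
  messages: sdkMessages,
  maxOutputTokens: 4096,
  temperature: 0.7,
  topP: 0.9,
  // Standard params are spread into the top-level streamText() arguments
  // instead of being nested under providerOptions.
  ...standardParams,
  providerOptions
})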

View File

@@ -0,0 +1,652 @@
/**
* extractAiSdkStandardParams Unit Tests
* Tests for extracting AI SDK standard parameters from custom parameters
*/
import { describe, expect, it, vi } from 'vitest'
import { extractAiSdkStandardParams } from '../options'
// Mock logger to prevent errors
vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
debug: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
info: vi.fn()
})
}
}))
// Mock settings store
vi.mock('@renderer/store/settings', () => ({
default: (state = { settings: {} }) => state
}))
// Mock hooks to prevent uuid errors
vi.mock('@renderer/hooks/useSettings', () => ({
getStoreSetting: vi.fn(() => ({}))
}))
// Mock uuid to prevent errors
vi.mock('uuid', () => ({
v4: vi.fn(() => 'test-uuid')
}))
// Mock AssistantService to prevent uuid errors
vi.mock('@renderer/services/AssistantService', () => ({
getDefaultAssistant: vi.fn(() => ({
id: 'test-assistant',
name: 'Test Assistant',
settings: {}
})),
getDefaultTopic: vi.fn(() => ({
id: 'test-topic',
assistantId: 'test-assistant',
createdAt: new Date().toISOString()
}))
}))
// Mock provider service
vi.mock('@renderer/services/ProviderService', () => ({
getProviderById: vi.fn(() => ({
id: 'test-provider',
name: 'Test Provider'
}))
}))
// Mock config modules
vi.mock('@renderer/config/models', () => ({
isOpenAIModel: vi.fn(() => false),
isQwenMTModel: vi.fn(() => false),
isSupportFlexServiceTierModel: vi.fn(() => false),
isSupportVerbosityModel: vi.fn(() => false),
getModelSupportedVerbosity: vi.fn(() => [])
}))
vi.mock('@renderer/config/translate', () => ({
mapLanguageToQwenMTModel: vi.fn()
}))
vi.mock('@renderer/utils/provider', () => ({
isSupportServiceTierProvider: vi.fn(() => false),
isSupportVerbosityProvider: vi.fn(() => false)
}))
describe('extractAiSdkStandardParams', () => {
describe('Positive cases - Standard parameters extraction', () => {
it('should extract all AI SDK standard parameters', () => {
const customParams = {
maxOutputTokens: 1000,
temperature: 0.7,
topP: 0.9,
topK: 40,
presencePenalty: 0.5,
frequencyPenalty: 0.3,
stopSequences: ['STOP', 'END'],
seed: 42
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 1000,
temperature: 0.7,
topP: 0.9,
topK: 40,
presencePenalty: 0.5,
frequencyPenalty: 0.3,
stopSequences: ['STOP', 'END'],
seed: 42
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract single standard parameter', () => {
const customParams = {
temperature: 0.8
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.8
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract topK parameter', () => {
const customParams = {
topK: 50
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topK: 50
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract frequencyPenalty parameter', () => {
const customParams = {
frequencyPenalty: 0.6
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
frequencyPenalty: 0.6
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract presencePenalty parameter', () => {
const customParams = {
presencePenalty: 0.4
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
presencePenalty: 0.4
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract stopSequences parameter', () => {
const customParams = {
stopSequences: ['HALT', 'TERMINATE']
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
stopSequences: ['HALT', 'TERMINATE']
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract seed parameter', () => {
const customParams = {
seed: 12345
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
seed: 12345
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract maxOutputTokens parameter', () => {
const customParams = {
maxOutputTokens: 2048
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 2048
})
expect(result.providerParams).toStrictEqual({})
})
it('should extract topP parameter', () => {
const customParams = {
topP: 0.95
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topP: 0.95
})
expect(result.providerParams).toStrictEqual({})
})
})
describe('Negative cases - Provider-specific parameters', () => {
it('should place all non-standard parameters in providerParams', () => {
const customParams = {
customParam: 'value',
anotherParam: 123,
thirdParam: true
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
customParam: 'value',
anotherParam: 123,
thirdParam: true
})
})
it('should place single provider-specific parameter in providerParams', () => {
const customParams = {
reasoningEffort: 'high'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
reasoningEffort: 'high'
})
})
it('should place model-specific parameter in providerParams', () => {
const customParams = {
thinking: { type: 'enabled', budgetTokens: 5000 }
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
thinking: { type: 'enabled', budgetTokens: 5000 }
})
})
it('should place serviceTier in providerParams', () => {
const customParams = {
serviceTier: 'auto'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
serviceTier: 'auto'
})
})
it('should place textVerbosity in providerParams', () => {
const customParams = {
textVerbosity: 'high'
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
textVerbosity: 'high'
})
})
})
describe('Mixed parameters', () => {
it('should correctly separate mixed standard and provider-specific parameters', () => {
const customParams = {
temperature: 0.7,
topK: 40,
customParam: 'custom_value',
reasoningEffort: 'medium',
frequencyPenalty: 0.5,
seed: 999
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40,
frequencyPenalty: 0.5,
seed: 999
})
expect(result.providerParams).toStrictEqual({
customParam: 'custom_value',
reasoningEffort: 'medium'
})
})
it('should handle complex mixed parameters with nested objects', () => {
const customParams = {
topP: 0.9,
presencePenalty: 0.3,
thinking: { type: 'enabled', budgetTokens: 5000 },
stopSequences: ['STOP'],
serviceTier: 'auto',
maxOutputTokens: 4096
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topP: 0.9,
presencePenalty: 0.3,
stopSequences: ['STOP'],
maxOutputTokens: 4096
})
expect(result.providerParams).toStrictEqual({
thinking: { type: 'enabled', budgetTokens: 5000 },
serviceTier: 'auto'
})
})
it('should handle all standard params with some provider params', () => {
const customParams = {
maxOutputTokens: 2000,
temperature: 0.8,
topP: 0.95,
topK: 50,
presencePenalty: 0.6,
frequencyPenalty: 0.4,
stopSequences: ['END', 'DONE'],
seed: 777,
customApiParam: 'value',
anotherCustomParam: 123
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 2000,
temperature: 0.8,
topP: 0.95,
topK: 50,
presencePenalty: 0.6,
frequencyPenalty: 0.4,
stopSequences: ['END', 'DONE'],
seed: 777
})
expect(result.providerParams).toStrictEqual({
customApiParam: 'value',
anotherCustomParam: 123
})
})
})
describe('Edge cases', () => {
it('should handle empty object', () => {
const customParams = {}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({})
})
it('should handle zero values for numeric parameters', () => {
const customParams = {
temperature: 0,
topK: 0,
seed: 0
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0,
topK: 0,
seed: 0
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle negative values for numeric parameters', () => {
const customParams = {
presencePenalty: -0.5,
frequencyPenalty: -0.3,
seed: -1
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
presencePenalty: -0.5,
frequencyPenalty: -0.3,
seed: -1
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle empty arrays for stopSequences', () => {
const customParams = {
stopSequences: []
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
stopSequences: []
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle null values in mixed parameters', () => {
const customParams = {
temperature: 0.7,
customNull: null,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customNull: null
})
})
it('should handle undefined values in mixed parameters', () => {
const customParams = {
temperature: 0.7,
customUndefined: undefined,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customUndefined: undefined
})
})
it('should handle boolean values for standard parameters', () => {
const customParams = {
temperature: 0.7,
customBoolean: false,
topK: 40
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
customBoolean: false
})
})
it('should handle very large numeric values', () => {
const customParams = {
maxOutputTokens: 999999,
seed: 2147483647,
topK: 10000
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
maxOutputTokens: 999999,
seed: 2147483647,
topK: 10000
})
expect(result.providerParams).toStrictEqual({})
})
it('should handle decimal values with high precision', () => {
const customParams = {
temperature: 0.123456789,
topP: 0.987654321,
presencePenalty: 0.111111111
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.123456789,
topP: 0.987654321,
presencePenalty: 0.111111111
})
expect(result.providerParams).toStrictEqual({})
})
})
describe('Case sensitivity', () => {
it('should NOT extract parameters with incorrect case - uppercase first letter', () => {
const customParams = {
Temperature: 0.7,
TopK: 40,
FrequencyPenalty: 0.5
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
Temperature: 0.7,
TopK: 40,
FrequencyPenalty: 0.5
})
})
it('should NOT extract parameters with incorrect case - all uppercase', () => {
const customParams = {
TEMPERATURE: 0.7,
TOPK: 40,
SEED: 42
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
TEMPERATURE: 0.7,
TOPK: 40,
SEED: 42
})
})
it('should NOT extract parameters with incorrect case - all lowercase', () => {
const customParams = {
maxoutputtokens: 1000,
frequencypenalty: 0.5,
stopsequences: ['STOP']
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
maxoutputtokens: 1000,
frequencypenalty: 0.5,
stopsequences: ['STOP']
})
})
it('should correctly extract exact case match while rejecting incorrect case', () => {
const customParams = {
temperature: 0.7,
Temperature: 0.8,
TEMPERATURE: 0.9,
topK: 40,
TopK: 50
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
temperature: 0.7,
topK: 40
})
expect(result.providerParams).toStrictEqual({
Temperature: 0.8,
TEMPERATURE: 0.9,
TopK: 50
})
})
})
describe('Parameter name variations', () => {
it('should NOT extract similar but incorrect parameter names', () => {
const customParams = {
temp: 0.7, // should not match temperature
top_k: 40, // should not match topK
max_tokens: 1000, // should not match maxOutputTokens
freq_penalty: 0.5 // should not match frequencyPenalty
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
temp: 0.7,
top_k: 40,
max_tokens: 1000,
freq_penalty: 0.5
})
})
it('should NOT extract snake_case versions of standard parameters', () => {
const customParams = {
top_k: 40,
top_p: 0.9,
presence_penalty: 0.5,
frequency_penalty: 0.3,
stop_sequences: ['STOP'],
max_output_tokens: 1000
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({})
expect(result.providerParams).toStrictEqual({
top_k: 40,
top_p: 0.9,
presence_penalty: 0.5,
frequency_penalty: 0.3,
stop_sequences: ['STOP'],
max_output_tokens: 1000
})
})
it('should extract exact camelCase parameters only', () => {
const customParams = {
topK: 40, // correct
top_k: 50, // incorrect
topP: 0.9, // correct
top_p: 0.8, // incorrect
frequencyPenalty: 0.5, // correct
frequency_penalty: 0.4 // incorrect
}
const result = extractAiSdkStandardParams(customParams)
expect(result.standardParams).toStrictEqual({
topK: 40,
topP: 0.9,
frequencyPenalty: 0.5
})
expect(result.providerParams).toStrictEqual({
top_k: 50,
top_p: 0.8,
frequency_penalty: 0.4
})
})
})
})

View File

@@ -128,7 +128,20 @@ vi.mock('../reasoning', () => ({
reasoningConfig: { type: 'enabled', budgetTokens: 5000 }
})),
getReasoningEffort: vi.fn(() => ({ reasoningEffort: 'medium' })),
getCustomParameters: vi.fn(() => ({}))
getCustomParameters: vi.fn(() => ({})),
extractAiSdkStandardParams: vi.fn((customParams: Record<string, any>) => {
const AI_SDK_STANDARD_PARAMS = ['topK', 'frequencyPenalty', 'presencePenalty', 'stopSequences', 'seed']
const standardParams: Record<string, any> = {}
const providerParams: Record<string, any> = {}
for (const [key, value] of Object.entries(customParams)) {
if (AI_SDK_STANDARD_PARAMS.includes(key)) {
standardParams[key] = value
} else {
providerParams[key] = value
}
}
return { standardParams, providerParams }
})
}))
vi.mock('../image', () => ({
@@ -184,8 +197,9 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('openai')
expect(result.openai).toBeDefined()
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toBeDefined()
expect(result.standardParams).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -195,8 +209,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.openai).toHaveProperty('reasoningEffort')
expect(result.openai.reasoningEffort).toBe('medium')
expect(result.providerOptions.openai).toHaveProperty('reasoningEffort')
expect(result.providerOptions.openai.reasoningEffort).toBe('medium')
})
it('should include service tier when supported', () => {
@@ -211,8 +225,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.openai).toHaveProperty('serviceTier')
expect(result.openai.serviceTier).toBe(OpenAIServiceTiers.auto)
expect(result.providerOptions.openai).toHaveProperty('serviceTier')
expect(result.providerOptions.openai.serviceTier).toBe(OpenAIServiceTiers.auto)
})
})
@@ -239,8 +253,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('anthropic')
expect(result.anthropic).toBeDefined()
expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions.anthropic).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -250,8 +264,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.anthropic).toHaveProperty('thinking')
expect(result.anthropic.thinking).toEqual({
expect(result.providerOptions.anthropic).toHaveProperty('thinking')
expect(result.providerOptions.anthropic.thinking).toEqual({
type: 'enabled',
budgetTokens: 5000
})
@@ -282,8 +296,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('google')
expect(result.google).toBeDefined()
expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions.google).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -293,8 +307,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.google).toHaveProperty('thinkingConfig')
expect(result.google.thinkingConfig).toEqual({
expect(result.providerOptions.google).toHaveProperty('thinkingConfig')
expect(result.providerOptions.google.thinkingConfig).toEqual({
include_thoughts: true
})
})
@@ -306,8 +320,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.google).toHaveProperty('responseModalities')
expect(result.google.responseModalities).toEqual(['TEXT', 'IMAGE'])
expect(result.providerOptions.google).toHaveProperty('responseModalities')
expect(result.providerOptions.google.responseModalities).toEqual(['TEXT', 'IMAGE'])
})
})
@@ -335,8 +349,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('xai')
expect(result.xai).toBeDefined()
expect(result.providerOptions).toHaveProperty('xai')
expect(result.providerOptions.xai).toBeDefined()
})
it('should include reasoning parameters when enabled', () => {
@@ -346,8 +360,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.xai).toHaveProperty('reasoningEffort')
expect(result.xai.reasoningEffort).toBe('high')
expect(result.providerOptions.xai).toHaveProperty('reasoningEffort')
expect(result.providerOptions.xai.reasoningEffort).toBe('high')
})
})
@@ -374,8 +388,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('deepseek')
expect(result.deepseek).toBeDefined()
expect(result.providerOptions).toHaveProperty('deepseek')
expect(result.providerOptions.deepseek).toBeDefined()
})
})
@@ -402,8 +416,8 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('openrouter')
expect(result.openrouter).toBeDefined()
expect(result.providerOptions).toHaveProperty('openrouter')
expect(result.providerOptions.openrouter).toBeDefined()
})
it('should include web search parameters when enabled', () => {
@@ -413,12 +427,12 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result.openrouter).toHaveProperty('enable_search')
expect(result.providerOptions.openrouter).toHaveProperty('enable_search')
})
})
describe('Custom parameters', () => {
it('should merge custom parameters', async () => {
it('should merge custom provider-specific parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
@@ -443,10 +457,88 @@ describe('options utils', () => {
}
)
expect(result.openai).toHaveProperty('custom_param')
expect(result.openai.custom_param).toBe('custom_value')
expect(result.openai).toHaveProperty('another_param')
expect(result.openai.another_param).toBe(123)
expect(result.providerOptions.openai).toHaveProperty('custom_param')
expect(result.providerOptions.openai.custom_param).toBe('custom_value')
expect(result.providerOptions.openai).toHaveProperty('another_param')
expect(result.providerOptions.openai.another_param).toBe(123)
})
it('should extract AI SDK standard params from custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
topK: 5,
frequencyPenalty: 0.5,
presencePenalty: 0.3,
seed: 42,
custom_param: 'custom_value'
})
const result = buildProviderOptions(
mockAssistant,
mockModel,
{
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com'
} as Provider,
{
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
}
)
// Standard params should be extracted and returned separately
expect(result.standardParams).toEqual({
topK: 5,
frequencyPenalty: 0.5,
presencePenalty: 0.3,
seed: 42
})
// Provider-specific params should still be in providerOptions
expect(result.providerOptions.google).toHaveProperty('custom_param')
expect(result.providerOptions.google.custom_param).toBe('custom_value')
// Standard params should NOT be in providerOptions
expect(result.providerOptions.google).not.toHaveProperty('topK')
expect(result.providerOptions.google).not.toHaveProperty('frequencyPenalty')
expect(result.providerOptions.google).not.toHaveProperty('presencePenalty')
expect(result.providerOptions.google).not.toHaveProperty('seed')
})
it('should handle stopSequences in custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
stopSequences: ['STOP', 'END'],
custom_param: 'value'
})
const result = buildProviderOptions(
mockAssistant,
mockModel,
{
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com'
} as Provider,
{
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
}
)
expect(result.standardParams).toEqual({
stopSequences: ['STOP', 'END']
})
expect(result.providerOptions.google).not.toHaveProperty('stopSequences')
})
})
@@ -474,8 +566,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.google).toHaveProperty('thinkingConfig')
expect(result.google).toHaveProperty('responseModalities')
expect(result.providerOptions.google).toHaveProperty('thinkingConfig')
expect(result.providerOptions.google).toHaveProperty('responseModalities')
})
it('should handle all capabilities enabled', () => {
@@ -485,8 +577,8 @@ describe('options utils', () => {
enableGenerateImage: true
})
expect(result.google).toBeDefined()
expect(Object.keys(result.google).length).toBeGreaterThan(0)
expect(result.providerOptions.google).toBeDefined()
expect(Object.keys(result.providerOptions.google).length).toBeGreaterThan(0)
})
})
@@ -513,7 +605,7 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('google')
expect(result.providerOptions).toHaveProperty('google')
})
it('should map google-vertex-anthropic to anthropic', () => {
@@ -538,7 +630,7 @@ describe('options utils', () => {
enableGenerateImage: false
})
expect(result).toHaveProperty('anthropic')
expect(result.providerOptions).toHaveProperty('anthropic')
})
})
})

View File

@@ -31,7 +31,7 @@ import {
type Provider,
type ServiceTier
} from '@renderer/types'
import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider'
import type { JSONValue } from 'ai'
import { t } from 'i18next'
@@ -96,10 +96,39 @@ function getVerbosity(): OpenAIVerbosity {
return openAI.verbosity
}
/**
* Extract AI SDK standard parameters from custom parameters
* These parameters should be passed directly to streamText() instead of providerOptions
*/
export function extractAiSdkStandardParams(customParams: Record<string, any>): {
standardParams: Partial<Record<AiSdkParam, any>>
providerParams: Record<string, any>
} {
const standardParams: Partial<Record<AiSdkParam, any>> = {}
const providerParams: Record<string, any> = {}
for (const [key, value] of Object.entries(customParams)) {
if (isAiSdkParam(key)) {
standardParams[key] = value
} else {
providerParams[key] = value
}
}
return { standardParams, providerParams }
}
/**
* Build providerOptions for the AI SDK
* Split by provider type, keeping it type-safe
* Return format: { 'providerId': providerOptions }
* Return format: {
* providerOptions: { 'providerId': providerOptions },
* standardParams: { topK, frequencyPenalty, presencePenalty, stopSequences, seed }
* }
*
* Custom parameters are split into two categories:
* 1. AI SDK standard parameters (topK, frequencyPenalty, etc.) - returned separately to be passed to streamText()
* 2. Provider-specific parameters - merged into providerOptions
*/
export function buildProviderOptions(
assistant: Assistant,
@@ -110,7 +139,10 @@ export function buildProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, Record<string, JSONValue>> {
): {
providerOptions: Record<string, Record<string, JSONValue>>
standardParams: Partial<Record<AiSdkParam, any>>
} {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider)
// Build provider-specific options
@@ -202,10 +234,14 @@ export function buildProviderOptions(
}
}
// Merge custom parameters into the provider-specific options
// Get the custom parameters and split them into standard params and provider-specific params
const customParams = getCustomParameters(assistant)
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
// Merge provider-specific custom parameters into providerSpecificOptions
providerSpecificOptions = {
...providerSpecificOptions,
...getCustomParameters(assistant)
...providerParams
}
let rawProviderKey =
@@ -220,9 +256,12 @@ export function buildProviderOptions(
rawProviderKey = { gemini: 'google' }[actualProvider.type] || actualProvider.type
}
// Return the format required by the AI Core SDK: { 'providerId': providerOptions }
// Return the format required by the AI Core SDK: { 'providerId': providerOptions }, plus the extracted standard params
return {
[rawProviderKey]: providerSpecificOptions
providerOptions: {
[rawProviderKey]: providerSpecificOptions
},
standardParams
}
}

View File

@@ -2,6 +2,7 @@ import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
import * as z from 'zod'
export type StreamTextParams = Omit<Parameters<typeof streamText>[0], 'model' | 'messages'> &
(
@@ -42,3 +43,20 @@ export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
// I pick undefined as the unique falsy type since they seem to share the same meaning according to the OpenAI API docs.
// The parameter will not be passed into the request if it is undefined.
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>
const AiSdkParamsSchema = z.enum([
'maxOutputTokens',
'temperature',
'topP',
'topK',
'presencePenalty',
'frequencyPenalty',
'stopSequences',
'seed'
])
export type AiSdkParam = z.infer<typeof AiSdkParamsSchema>
export const isAiSdkParam = (param: string): param is AiSdkParam => {
return AiSdkParamsSchema.safeParse(param).success
}
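
As a quick illustration of the new guard (a sketch assuming only the exports above; the keys are illustrative), matching is an exact, case-sensitive check against the Zod enum, which is why the unit tests treat snake_case or differently cased keys as provider-specific parameters:

import { isAiSdkParam } from '@renderer/types/aiCoreTypes'

console.log(isAiSdkParam('topK')) // true (exact camelCase match)
console.log(isAiSdkParam('seed')) // true
console.log(isAiSdkParam('top_k')) // false (snake_case is not an AI SDK standard param name)
console.log(isAiSdkParam('TopK')) // false (wrong case)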

yarn.lock
View File

@@ -74,11 +74,11 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/amazon-bedrock@npm:^3.0.56":
version: 3.0.56
resolution: "@ai-sdk/amazon-bedrock@npm:3.0.56"
"@ai-sdk/amazon-bedrock@npm:^3.0.61":
version: 3.0.61
resolution: "@ai-sdk/amazon-bedrock@npm:3.0.61"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.45"
"@ai-sdk/anthropic": "npm:2.0.49"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@smithy/eventstream-codec": "npm:^4.0.1"
@@ -86,32 +86,32 @@ __metadata:
aws4fetch: "npm:^1.0.20"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/1d5607de6b7a450bbdbf4e704f5f5690c6cda861e0f9c99d715f893fa5eab13ca534d63eebe58b42856e3c5c65d795ad5238bf5d0187b6f50343c8dc9a3e8b2b
checksum: 10c0/10514edcaae942a43e8990685d99dc8e66025b54d4ed6e983229867c7be2d4ac6d6aebe7bf8fcea550525ae86946b8439e1702999778a21e0ad7bc3ca6ee40c7
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:2.0.45, @ai-sdk/anthropic@npm:^2.0.45":
version: 2.0.45
resolution: "@ai-sdk/anthropic@npm:2.0.45"
"@ai-sdk/anthropic@npm:2.0.49, @ai-sdk/anthropic@npm:^2.0.49":
version: 2.0.49
resolution: "@ai-sdk/anthropic@npm:2.0.49"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/ef0e54f032e3b8324c278f3b25d9b388308204d753404c49fd880709a796c2343aee36d335c99f50e683edd39d5b8b6f42b2e9034e1725d8e0db514e2233d104
checksum: 10c0/7aedd5b4be51c4601f54361de47db60fa03a3d1926957771294961cc6c88aac6eddcd1529324bc2a71734d36f35103225619f2aedf0c01c99f678e97e2bfff50
languageName: node
linkType: hard
"@ai-sdk/azure@npm:^2.0.73":
version: 2.0.73
resolution: "@ai-sdk/azure@npm:2.0.73"
"@ai-sdk/azure@npm:^2.0.74":
version: 2.0.74
resolution: "@ai-sdk/azure@npm:2.0.74"
dependencies:
"@ai-sdk/openai": "npm:2.0.71"
"@ai-sdk/openai": "npm:2.0.72"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/e21ca310d23fcbf485ea2e2a6ec3daf29d36fcc827a31f961a06b4ab0d8cfbf19b58a9172e741a1311f88b663d6fb0608b584dbaa3bbddf08215bab3255b0e39
checksum: 10c0/dccd1959ef43034a0559cdc862af7f351c0a997a56dbeb68b1c844f67d3ff7920f43890e1d18546600eeaac1c54f0c94943b6ce0b43ba4d44ddc3a829b8a71dd
languageName: node
linkType: hard
@@ -141,7 +141,7 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/gateway@npm:2.0.13, @ai-sdk/gateway@npm:^2.0.13":
"@ai-sdk/gateway@npm:2.0.13":
version: 2.0.13
resolution: "@ai-sdk/gateway@npm:2.0.13"
dependencies:
@@ -154,42 +154,55 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/google-vertex@npm:^3.0.72":
version: 3.0.72
resolution: "@ai-sdk/google-vertex@npm:3.0.72"
"@ai-sdk/gateway@npm:^2.0.15":
version: 2.0.15
resolution: "@ai-sdk/gateway@npm:2.0.15"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.45"
"@ai-sdk/google": "npm:2.0.40"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@vercel/oidc": "npm:3.0.5"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/36cbd8a23d3d9c70f274f808b4efc23867b96476a1195b96d9a65e4b19bd19a2120b70d790338734cab91941bab4bca5613a11236b999706103053d7e6992082
languageName: node
linkType: hard
"@ai-sdk/google-vertex@npm:^3.0.79":
version: 3.0.79
resolution: "@ai-sdk/google-vertex@npm:3.0.79"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.49"
"@ai-sdk/google": "npm:2.0.43"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
google-auth-library: "npm:^9.15.0"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/ac3f2465f911ba0872a6b3616bda9b80d6ccdde8e56de3ce8395be798614a6cd01957f779d9519f5edd8d2597345162c5c08c489d7b146f21f13647691f961f5
checksum: 10c0/a86949b8d4a855409acdf7dc8d93ad9ea8ccf2bc3849acbe1ecbe4d6d66f06bcb5242f0df8eea24214e78732618b71ec8a019cbbeab16366f9ad3c860c5d8d30
languageName: node
linkType: hard
"@ai-sdk/google@npm:2.0.40":
version: 2.0.40
resolution: "@ai-sdk/google@npm:2.0.40"
"@ai-sdk/google@npm:2.0.43":
version: 2.0.43
resolution: "@ai-sdk/google@npm:2.0.43"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/e0a22f24aac9475148177c725ade25ce8a6e4531dd6e51d811d2cee484770f97df876066ce75342b37191e5d7efcc3e0224450ba3c05eb48276e8f2899c6a1e5
checksum: 10c0/5a421a9746cf8cbdf3bb7fb49426453a4fe0e354ea55a0123e628afb7acf9bb19959d512c0f8e6d7dbefbfa7e1cef4502fc146149007258a8eeb57743ac5e9e5
languageName: node
linkType: hard
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch":
version: 2.0.40
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch::version=2.0.40&hash=c2a2ca"
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch":
version: 2.0.43
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch::version=2.0.43&hash=4dde1e"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/dec9d156ed9aeb129521f8d03158edbdbbdfc487d5f117c097123398f13670407b0ab03f6602487811d2334cd65377b72aca348cb39a48e149a71c4f728e8436
checksum: 10c0/4cfd17e9c47f2b742d8a0b1ca3532b4dc48753088363b74b01a042f63652174fa9a3fbf655a23f823974c673121dffbd2d192bb0c1bf158da4e2bf498fc76527
languageName: node
linkType: hard
@@ -218,7 +231,7 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@npm:1.0.27":
"@ai-sdk/openai-compatible@npm:1.0.27, @ai-sdk/openai-compatible@npm:^1.0.19":
version: 1.0.27
resolution: "@ai-sdk/openai-compatible@npm:1.0.27"
dependencies:
@@ -242,27 +255,27 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/openai@npm:2.0.71":
version: 2.0.71
resolution: "@ai-sdk/openai@npm:2.0.71"
"@ai-sdk/openai@npm:2.0.72":
version: 2.0.72
resolution: "@ai-sdk/openai@npm:2.0.72"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/19a0a1648df074ba1c1836bf7b5cd874a3e4e5c2d4efad3bec1ecdcd7f013008b1f573685be2f5d8b6b326a91309f4f6c8b556755d62e6b03c840f9030ad7a5f
checksum: 10c0/64fb8b7b2627b16e1fdcb3a7dd8d26f34d054b3f7bba5de6ef579f1c12c91246d0682caa36c5dae5ed2f29b462cc6013a38d9e80234819030fbf1730e7f8da50
languageName: node
linkType: hard
"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch":
version: 2.0.71
resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch::version=2.0.71&hash=78bebe"
"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch":
version: 2.0.72
resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch::version=2.0.72&hash=126b76"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/a68ba6b32a940e48daa6354667108648ff6a99646eb413ead7a70ca82289874de98206322b5704326f2d9579fcc92f50a1cdf1328368cc337f28213c0da90f5c
checksum: 10c0/fec21ab02aff999b487abdd02c32d526580d47cdf83dd74b02f8faf1423b63ab7da3c374b7a98a15bb94fdcb6deb2851381ce0f52b92c9c030dee06ff2dcf71d
languageName: node
linkType: hard
@@ -318,16 +331,16 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/xai@npm:^2.0.34":
version: 2.0.34
resolution: "@ai-sdk/xai@npm:2.0.34"
"@ai-sdk/xai@npm:^2.0.36":
version: 2.0.36
resolution: "@ai-sdk/xai@npm:2.0.36"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/e6d5a02edcc8ea8f1b6423faf27a56bd02d4664a021c9b13e18c3f393bc9b64f7781a86782fcfd3b559aacfbea248a1176d7511d03ee184fd1146b84833c9514
checksum: 10c0/f4c84050a96b58442e69f7048253989233da27ca53b54b9372c16adf745806a761d1b0b507516ef003f58cae06e327d30c95a22cbcbb5ec6252d3c4d645f1828
languageName: node
linkType: hard
@@ -1815,13 +1828,13 @@ __metadata:
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-core@workspace:packages/aiCore"
dependencies:
"@ai-sdk/anthropic": "npm:^2.0.45"
"@ai-sdk/azure": "npm:^2.0.73"
"@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/azure": "npm:^2.0.74"
"@ai-sdk/deepseek": "npm:^1.0.29"
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
"@ai-sdk/xai": "npm:^2.0.34"
"@ai-sdk/xai": "npm:^2.0.36"
tsdown: "npm:^0.12.9"
typescript: "npm:^5.0.0"
vitest: "npm:^3.2.4"
@@ -5250,7 +5263,7 @@ __metadata:
languageName: node
linkType: hard
"@opeoginni/github-copilot-openai-compatible@npm:0.1.21":
"@opeoginni/github-copilot-openai-compatible@npm:^0.1.21":
version: 0.1.21
resolution: "@opeoginni/github-copilot-openai-compatible@npm:0.1.21"
dependencies:
@@ -9978,15 +9991,15 @@ __metadata:
"@agentic/exa": "npm:^7.3.3"
"@agentic/searxng": "npm:^7.3.3"
"@agentic/tavily": "npm:^7.3.3"
"@ai-sdk/amazon-bedrock": "npm:^3.0.56"
"@ai-sdk/anthropic": "npm:^2.0.45"
"@ai-sdk/amazon-bedrock": "npm:^3.0.61"
"@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/cerebras": "npm:^1.0.31"
"@ai-sdk/gateway": "npm:^2.0.13"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch"
"@ai-sdk/google-vertex": "npm:^3.0.72"
"@ai-sdk/gateway": "npm:^2.0.15"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch"
"@ai-sdk/google-vertex": "npm:^3.0.79"
"@ai-sdk/huggingface": "npm:^0.0.10"
"@ai-sdk/mistral": "npm:^2.0.24"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch"
"@ai-sdk/perplexity": "npm:^2.0.20"
"@ai-sdk/test-server": "npm:^0.0.1"
"@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
@@ -10044,7 +10057,7 @@ __metadata:
"@opentelemetry/sdk-trace-base": "npm:^2.0.0"
"@opentelemetry/sdk-trace-node": "npm:^2.0.0"
"@opentelemetry/sdk-trace-web": "npm:^2.0.0"
"@opeoginni/github-copilot-openai-compatible": "npm:0.1.21"
"@opeoginni/github-copilot-openai-compatible": "npm:^0.1.21"
"@paymoapp/electron-shutdown-handler": "npm:^1.1.2"
"@playwright/test": "npm:^1.52.0"
"@radix-ui/react-context-menu": "npm:^2.2.16"