Compare commits

..

3 Commits

Author SHA1 Message Date
copilot-swe-agent[bot]
b14e48dd78 Fix custom parameters placement for Vercel AI Gateway
For AI Gateway provider, custom parameters are now placed at the body level
instead of being nested inside providerOptions.gateway. This fixes the issue
where parameters like 'tools' were being incorrectly added to
providerOptions.gateway when they should be at the same level as providerOptions.

Fixes #4197

Co-authored-by: DeJeune <67425183+DeJeune@users.noreply.github.com>
2025-12-01 05:13:55 +00:00
copilot-swe-agent[bot]
64fde27f9e Initial plan 2025-12-01 05:05:10 +00:00
kangfenmao
b3a58ec321 chore: update release notes for v1.7.1 2025-11-30 19:57:44 +08:00
11 changed files with 152 additions and 317 deletions

View File

@@ -134,9 +134,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
A New Era of Intelligence with Cherry Studio 1.7.0
A New Era of Intelligence with Cherry Studio 1.7.1
Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
@@ -187,9 +187,9 @@ releaseInfo:
The Agent Era is here. We can't wait to see what you'll create.
<!--LANG:zh-CN-->
Cherry Studio 1.7.0:开启智能新纪元
Cherry Studio 1.7.1:开启智能新纪元
今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。

View File

@@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "1.7.0",
"version": "1.7.1",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",

View File

@@ -7,10 +7,10 @@
* 2. 暂时保持接口兼容性
*/
import type { GatewayLanguageModelEntry } from '@ai-sdk/gateway'
import { createExecutor } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
import { normalizeGatewayModels, normalizeSdkModels } from '@renderer/services/models/ModelAdapter'
import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
import { type Assistant, type GenerateImageParams, type Model, type Provider, SystemProviderIds } from '@renderer/types'
@@ -481,11 +481,18 @@ export default class ModernAiProvider {
// 代理其他方法到原有实现
public async models() {
if (this.actualProvider.id === SystemProviderIds['ai-gateway']) {
const gatewayModels = (await gateway.getAvailableModels()).models
return normalizeGatewayModels(this.actualProvider, gatewayModels)
const formatModel = function (models: GatewayLanguageModelEntry[]): Model[] {
return models.map((m) => ({
id: m.id,
name: m.name,
provider: 'gateway',
group: m.id.split('/')[0],
description: m.description ?? undefined
}))
}
return formatModel((await gateway.getAvailableModels()).models)
}
const sdkModels = await this.legacyProvider.models()
return normalizeSdkModels(this.actualProvider, sdkModels)
return this.legacyProvider.models()
}
public async getEmbeddingDimensions(model: Model): Promise<number> {

View File

@@ -107,7 +107,7 @@ export async function buildStreamTextParams(
searchWithTime: store.getState().websearch.searchWithTime
}
const { providerOptions, standardParams } = buildProviderOptions(assistant, model, provider, {
const { providerOptions, standardParams, bodyParams } = buildProviderOptions(assistant, model, provider, {
enableReasoning,
enableWebSearch,
enableGenerateImage
@@ -185,6 +185,7 @@ export async function buildStreamTextParams(
// Note: standardParams (topK, frequencyPenalty, presencePenalty, stopSequences, seed)
// are extracted from custom parameters and passed directly to streamText()
// instead of being placed in providerOptions
// Note: bodyParams are custom parameters for AI Gateway that should be at body level
const params: StreamTextParams = {
messages: sdkMessages,
maxOutputTokens: getMaxTokens(assistant, model),
@@ -192,6 +193,8 @@ export async function buildStreamTextParams(
topP: getTopP(assistant, model),
// Include AI SDK standard params extracted from custom parameters
...standardParams,
// Include body-level params for AI Gateway custom parameters
...bodyParams,
abortSignal: options.requestOptions?.signal,
headers,
providerOptions,

View File

@@ -37,7 +37,7 @@ vi.mock('@cherrystudio/ai-core/provider', async (importOriginal) => {
},
customProviderIdSchema: {
safeParse: vi.fn((id) => {
const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock']
const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock', 'ai-gateway']
if (customProviders.includes(id)) {
return { success: true, data: id }
}
@@ -56,7 +56,8 @@ vi.mock('../provider/factory', () => ({
[SystemProviderIds.anthropic]: 'anthropic',
[SystemProviderIds.grok]: 'xai',
[SystemProviderIds.deepseek]: 'deepseek',
[SystemProviderIds.openrouter]: 'openrouter'
[SystemProviderIds.openrouter]: 'openrouter',
[SystemProviderIds['ai-gateway']]: 'ai-gateway'
}
return mapping[provider.id] || provider.id
})
@@ -204,6 +205,8 @@ describe('options utils', () => {
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toBeDefined()
expect(result.standardParams).toBeDefined()
expect(result.bodyParams).toBeDefined()
expect(result.bodyParams).toEqual({})
})
it('should include reasoning parameters when enabled', () => {
@@ -696,5 +699,90 @@ describe('options utils', () => {
})
})
})
describe('AI Gateway provider', () => {
const aiGatewayProvider: Provider = {
id: SystemProviderIds['ai-gateway'],
name: 'AI Gateway',
type: 'ai-gateway',
apiKey: 'test-key',
apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
isSystem: true,
models: [] as Model[]
} as Provider
const aiGatewayModel: Model = {
id: 'openai/gpt-4',
name: 'GPT-4',
provider: SystemProviderIds['ai-gateway']
} as Model
it('should build basic AI Gateway options with empty bodyParams', () => {
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('gateway')
expect(result.providerOptions.gateway).toBeDefined()
expect(result.bodyParams).toEqual({})
})
it('should place custom parameters in bodyParams for AI Gateway instead of providerOptions', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
tools: [{ id: 'openai.image_generation' }],
custom_param: 'custom_value'
})
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Custom parameters should be in bodyParams, NOT in providerOptions.gateway
expect(result.bodyParams).toHaveProperty('tools')
expect(result.bodyParams.tools).toEqual([{ id: 'openai.image_generation' }])
expect(result.bodyParams).toHaveProperty('custom_param')
expect(result.bodyParams.custom_param).toBe('custom_value')
// providerOptions.gateway should NOT contain custom parameters
expect(result.providerOptions.gateway).not.toHaveProperty('tools')
expect(result.providerOptions.gateway).not.toHaveProperty('custom_param')
})
it('should still extract AI SDK standard params from custom parameters for AI Gateway', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
topK: 5,
frequencyPenalty: 0.5,
tools: [{ id: 'openai.image_generation' }]
})
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Standard params should be extracted and returned separately
expect(result.standardParams).toEqual({
topK: 5,
frequencyPenalty: 0.5
})
// Custom params (non-standard) should be in bodyParams
expect(result.bodyParams).toHaveProperty('tools')
expect(result.bodyParams.tools).toEqual([{ id: 'openai.image_generation' }])
// Neither should be in providerOptions.gateway
expect(result.providerOptions.gateway).not.toHaveProperty('topK')
expect(result.providerOptions.gateway).not.toHaveProperty('tools')
})
})
})
})

View File

@@ -155,6 +155,7 @@ export function buildProviderOptions(
): {
providerOptions: Record<string, Record<string, JSONValue>>
standardParams: Partial<Record<AiSdkParam, any>>
bodyParams: Record<string, any>
} {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider)
@@ -253,12 +254,6 @@ export function buildProviderOptions(
const customParams = getCustomParameters(assistant)
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
// 合并 provider 特定的自定义参数到 providerSpecificOptions
providerSpecificOptions = {
...providerSpecificOptions,
...providerParams
}
let rawProviderKey =
{
'google-vertex': 'google',
@@ -273,12 +268,27 @@ export function buildProviderOptions(
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type
}
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数
// For AI Gateway, custom parameters should be placed at body level, not inside providerOptions.gateway
// See: https://github.com/CherryHQ/cherry-studio/issues/4197
let bodyParams: Record<string, any> = {}
if (rawProviderKey === 'gateway') {
// Custom parameters go to body level for AI Gateway
bodyParams = providerParams
} else {
// For other providers, merge custom parameters into providerSpecificOptions
providerSpecificOptions = {
...providerSpecificOptions,
...providerParams
}
}
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数和 body 参数
return {
providerOptions: {
[rawProviderKey]: providerSpecificOptions
},
standardParams
standardParams,
bodyParams
}
}

View File

@@ -18,7 +18,7 @@ import NewApiAddModelPopup from '@renderer/pages/settings/ProviderSettings/Model
import NewApiBatchAddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/NewApiBatchAddModelPopup'
import { fetchModels } from '@renderer/services/ApiService'
import type { Model, Provider } from '@renderer/types'
import { filterModelsByKeywords, getFancyProviderName } from '@renderer/utils'
import { filterModelsByKeywords, getDefaultGroupName, getFancyProviderName } from '@renderer/utils'
import { isFreeModel } from '@renderer/utils/model'
import { isNewApiProvider } from '@renderer/utils/provider'
import { Button, Empty, Flex, Modal, Spin, Tabs, Tooltip } from 'antd'
@@ -183,7 +183,25 @@ const PopupContainer: React.FC<Props> = ({ providerId, resolve }) => {
setLoadingModels(true)
try {
const models = await fetchModels(provider)
const filteredModels = models.filter((model) => !isEmpty(model.name))
// TODO: More robust conversion
const filteredModels = models
.map((model) => ({
// @ts-ignore modelId
id: model?.id || model?.name,
// @ts-ignore name
name: model?.display_name || model?.displayName || model?.name || model?.id,
provider: provider.id,
// @ts-ignore group
group: getDefaultGroupName(model?.id || model?.name, provider.id),
// @ts-ignore description
description: model?.description || '',
// @ts-ignore owned_by
owned_by: model?.owned_by || '',
// @ts-ignore supported_endpoint_types
supported_endpoint_types: model?.supported_endpoint_types
}))
.filter((model) => !isEmpty(model.name))
setListModels(filteredModels)
} catch (error) {
logger.error(`Failed to load models for provider ${getFancyProviderName(provider)}`, error as Error)

View File

@@ -13,6 +13,7 @@ import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/t
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { type Chunk, ChunkType } from '@renderer/types/chunk'
import type { Message, ResponseError } from '@renderer/types/newMessage'
import type { SdkModel } from '@renderer/types/sdk'
import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
import { isToolUseModeFunction } from '@renderer/utils/assistant'
@@ -423,7 +424,7 @@ export function hasApiKey(provider: Provider) {
// return undefined
// }
export async function fetchModels(provider: Provider): Promise<Model[]> {
export async function fetchModels(provider: Provider): Promise<SdkModel[]> {
const AI = new AiProviderNew(provider)
try {

View File

@@ -1,102 +0,0 @@
import type { GatewayLanguageModelEntry } from '@ai-sdk/gateway'
import { normalizeGatewayModels, normalizeSdkModels } from '@renderer/services/models/ModelAdapter'
import type { Model, Provider } from '@renderer/types'
import type { EndpointType } from '@renderer/types/index'
import type { SdkModel } from '@renderer/types/sdk'
import { describe, expect, it } from 'vitest'
const createProvider = (overrides: Partial<Provider> = {}): Provider => ({
id: 'openai',
type: 'openai',
name: 'OpenAI',
apiKey: 'test-key',
apiHost: 'https://example.com/v1',
models: [],
...overrides
})
describe('ModelAdapter', () => {
it('adapts generic SDK models into internal models', () => {
const provider = createProvider({ id: 'openai' })
const models = normalizeSdkModels(provider, [
{
id: 'gpt-4o-mini',
display_name: 'GPT-4o mini',
description: 'General purpose model',
owned_by: 'openai'
} as unknown as SdkModel
])
expect(models).toHaveLength(1)
expect(models[0]).toMatchObject({
id: 'gpt-4o-mini',
name: 'GPT-4o mini',
provider: 'openai',
group: 'gpt-4o',
description: 'General purpose model',
owned_by: 'openai'
} as Partial<Model>)
})
it('preserves supported endpoint types for New API models', () => {
const provider = createProvider({ id: 'new-api' })
const endpointTypes: EndpointType[] = ['openai', 'image-generation']
const [model] = normalizeSdkModels(provider, [
{
id: 'new-api-model',
name: 'New API Model',
supported_endpoint_types: endpointTypes
} as unknown as SdkModel
])
expect(model.supported_endpoint_types).toEqual(endpointTypes)
})
it('filters unsupported endpoint types while keeping valid ones', () => {
const provider = createProvider({ id: 'new-api' })
const [model] = normalizeSdkModels(provider, [
{
id: 'another-model',
name: 'Another Model',
supported_endpoint_types: ['openai', 'unknown-endpoint', 'gemini']
} as unknown as SdkModel
])
expect(model.supported_endpoint_types).toEqual(['openai', 'gemini'])
})
it('adapts ai-gateway entries through the same adapter', () => {
const provider = createProvider({ id: 'ai-gateway', type: 'ai-gateway' })
const [model] = normalizeGatewayModels(provider, [
{
id: 'openai/gpt-4o',
name: 'OpenAI GPT-4o',
description: 'Gateway entry',
specification: {
specificationVersion: 'v2',
provider: 'openai',
modelId: 'gpt-4o'
}
} as GatewayLanguageModelEntry
])
expect(model).toMatchObject({
id: 'openai/gpt-4o',
group: 'openai',
provider: 'ai-gateway',
description: 'Gateway entry'
})
})
it('drops invalid entries without ids or names', () => {
const provider = createProvider()
const models = normalizeSdkModels(provider, [
{
id: '',
name: ''
} as unknown as SdkModel
])
expect(models).toHaveLength(0)
})
})

View File

@@ -1,180 +0,0 @@
import type { GatewayLanguageModelEntry } from '@ai-sdk/gateway'
import { loggerService } from '@logger'
import { type EndpointType, EndPointTypeSchema, type Model, type Provider } from '@renderer/types'
import type { NewApiModel, SdkModel } from '@renderer/types/sdk'
import { getDefaultGroupName } from '@renderer/utils/naming'
import * as z from 'zod'
const logger = loggerService.withContext('ModelAdapter')
const EndpointTypeArraySchema = z.array(EndPointTypeSchema).nonempty()
const NormalizedModelSchema = z.object({
id: z.string().trim().min(1),
name: z.string().trim().min(1),
provider: z.string().trim().min(1),
group: z.string().trim().min(1),
description: z.string().optional(),
owned_by: z.string().optional(),
supported_endpoint_types: EndpointTypeArraySchema.optional()
})
type NormalizedModelInput = z.input<typeof NormalizedModelSchema>
export function normalizeSdkModels(provider: Provider, models: SdkModel[]): Model[] {
return normalizeModels(models, (entry) => adaptSdkModel(provider, entry))
}
export function normalizeGatewayModels(provider: Provider, models: GatewayLanguageModelEntry[]): Model[] {
return normalizeModels(models, (entry) => adaptGatewayModel(provider, entry))
}
function normalizeModels<T>(models: T[], transformer: (entry: T) => Model | null): Model[] {
const uniqueModels: Model[] = []
const seen = new Set<string>()
for (const entry of models) {
const normalized = transformer(entry)
if (!normalized) continue
if (seen.has(normalized.id)) continue
seen.add(normalized.id)
uniqueModels.push(normalized)
}
return uniqueModels
}
function adaptSdkModel(provider: Provider, model: SdkModel): Model | null {
const id = pickPreferredString([(model as any)?.id, (model as any)?.modelId])
const name = pickPreferredString([
(model as any)?.display_name,
(model as any)?.displayName,
(model as any)?.name,
id
])
if (!id || !name) {
logger.warn('Skip SDK model with missing id or name', {
providerId: provider.id,
modelSnippet: summarizeModel(model)
})
return null
}
const candidate: NormalizedModelInput = {
id,
name,
provider: provider.id,
group: getDefaultGroupName(id, provider.id),
description: pickPreferredString([(model as any)?.description, (model as any)?.summary]),
owned_by: pickPreferredString([(model as any)?.owned_by, (model as any)?.publisher])
}
const supportedEndpointTypes = pickSupportedEndpointTypes(provider.id, model)
if (supportedEndpointTypes) {
candidate.supported_endpoint_types = supportedEndpointTypes
}
return validateModel(candidate, model)
}
function adaptGatewayModel(provider: Provider, model: GatewayLanguageModelEntry): Model | null {
const id = model?.id?.trim()
const name = model?.name?.trim() || id
if (!id || !name) {
logger.warn('Skip gateway model with missing id or name', {
providerId: provider.id,
modelSnippet: summarizeModel(model)
})
return null
}
const candidate: NormalizedModelInput = {
id,
name,
provider: provider.id,
group: getDefaultGroupName(id, provider.id),
description: model.description ?? undefined
}
return validateModel(candidate, model)
}
function pickPreferredString(values: Array<unknown>): string | undefined {
for (const value of values) {
if (typeof value === 'string') {
const trimmed = value.trim()
if (trimmed.length > 0) {
return trimmed
}
}
}
return undefined
}
function pickSupportedEndpointTypes(providerId: string, model: SdkModel): EndpointType[] | undefined {
const candidate =
(model as Partial<NewApiModel>).supported_endpoint_types ??
((model as Record<string, unknown>).supported_endpoint_types as EndpointType[] | undefined)
if (!Array.isArray(candidate) || candidate.length === 0) {
return undefined
}
const supported: EndpointType[] = []
const unsupported: unknown[] = []
for (const value of candidate) {
const parsed = EndPointTypeSchema.safeParse(value)
if (parsed.success) {
supported.push(parsed.data)
} else {
unsupported.push(value)
}
}
if (unsupported.length > 0) {
logger.warn('Pruned unsupported endpoint types', {
providerId,
values: unsupported,
modelSnippet: summarizeModel(model)
})
}
return supported.length > 0 ? supported : undefined
}
function validateModel(candidate: NormalizedModelInput, source: unknown): Model | null {
const parsed = NormalizedModelSchema.safeParse(candidate)
if (!parsed.success) {
logger.warn('Discard invalid model entry', {
providerId: candidate.provider,
issues: parsed.error.issues,
modelSnippet: summarizeModel(source)
})
return null
}
return parsed.data
}
function summarizeModel(model: unknown) {
if (!model || typeof model !== 'object') {
return model
}
const { id, name, display_name, displayName, description, owned_by, supported_endpoint_types } = model as Record<
string,
unknown
>
return {
id,
name,
display_name,
displayName,
description,
owned_by,
supported_endpoint_types
}
}

View File

@@ -7,8 +7,6 @@ import type { CSSProperties } from 'react'
export * from './file'
export * from './note'
import * as z from 'zod'
import type { StreamTextParams } from './aiCoreTypes'
import type { Chunk } from './chunk'
import type { FileMetadata } from './file'
@@ -242,15 +240,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio
export type ModelTag = Exclude<ModelType, 'text'> | 'free'
// "image-generation" is also openai endpoint, but specifically for image generation.
export const EndPointTypeSchema = z.enum([
'openai',
'openai-response',
'anthropic',
'gemini',
'image-generation',
'jina-rerank'
])
export type EndpointType = z.infer<typeof EndPointTypeSchema>
export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
export type ModelPricing = {
input_per_million_tokens: number