Compare commits

...

4 Commits

Author SHA1 Message Date
icarus
18777b3872 style: format 2025-12-02 12:04:49 +08:00
icarus
8e09da1ca0 fix(verbosity): fix wrong verbosity type definition and handling in #11281 2025-12-02 12:04:39 +08:00
icarus
b588a43405 feat(models): add null as supported verbosity level for OpenAI models
Update model utils and types to include null as a valid verbosity level option alongside undefined. This provides more flexibility in controlling verbosity behavior, with null representing an explicit "off" state. Tests and UI components are updated to reflect this change.
2025-12-02 11:51:18 +08:00
icarus
5d0ad18e45 fix(settings): fix wrong type caused by as assertion and migration
Add migration step 180 to properly handle 'undefined' string values in OpenAI settings
Update selector components to use value conversion helpers for summaryText and verbosity
2025-12-02 11:16:52 +08:00
8 changed files with 59 additions and 43 deletions

View File

@@ -9,6 +9,7 @@ import {
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
import { getAssistantSettings } from '@renderer/services/AssistantService'
import type { RootState } from '@renderer/store'
import type {
Assistant,
GenerateImageParams,
@@ -245,23 +246,20 @@ export abstract class BaseApiClient<
protected getVerbosity(model?: Model): OpenAIVerbosity {
try {
const state = window.store?.getState()
const state = window.store?.getState() as RootState
const verbosity = state?.settings?.openAI?.verbosity
if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) {
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
} catch (error) {
logger.warn('Failed to get verbosity from state:', error as Error)
logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error)
return undefined
}
return 'medium'
}
protected getTimeout(model: Model) {

View File

@@ -32,7 +32,6 @@ import {
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel,
isSupportVerbosityModel,
isVisionModel,
MODEL_SUPPORTED_REASONING_EFFORT,
ZHIPU_RESULT_TOKENS
@@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
...modalities,
// groq 有不同的 service tier 配置,不符合 openai 接口类型
service_tier: this.getServiceTier(model) as OpenAIServiceTier,
...(isSupportVerbosityModel(model)
? {
text: {
verbosity: this.getVerbosity(model)
}
}
: {}),
// verbosity. getVerbosity ensures the returned value is valid.
verbosity: this.getVerbosity(model),
...this.getProviderSpecificParameters(assistant, model),
...reasoningEffort,
// ...getOpenAIWebSearchParams(model, enableWebSearch),

View File

@@ -222,18 +222,22 @@ describe('model utils', () => {
describe('getModelSupportedVerbosity', () => {
it('returns only "high" for GPT-5 Pro models', () => {
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([
undefined,
null,
'high'
])
})
it('returns all levels for non-Pro GPT-5 models', () => {
const previewModel = createModel({ id: 'gpt-5-preview' })
expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high'])
})
it('returns all levels for GPT-5.1 models', () => {
const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high'])
})
it('returns only undefined for non-GPT-5 models', () => {

View File

@@ -10,7 +10,8 @@ import {
isGPT51SeriesModel,
isOpenAIChatCompletionOnlyModel,
isOpenAIOpenWeightModel,
isOpenAIReasoningModel
isOpenAIReasoningModel,
isSupportVerbosityModel
} from './openai'
import { isQwenMTModel } from './qwen'
import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
@@ -154,10 +155,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly {
* For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
* For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
* @param model - The model to check
* @returns An array of supported verbosity levels, always including `undefined` as the first element
* @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable
*/
export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
if (!model) {
if (!model || !isSupportVerbosityModel(model)) {
return [undefined]
}
@@ -165,7 +166,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope
for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
if (validator(model)) {
supportedValues = [...values]
supportedValues = [null, ...values]
break
}
}

View File

@@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
type VerbosityOption = {
value: NonNullable<OpenAIVerbosity> | 'undefined'
value: NonNullable<OpenAIVerbosity> | 'undefined' | 'null'
label: string
}
type SummaryTextOption = {
value: NonNullable<OpenAISummaryText> | 'undefined'
value: NonNullable<OpenAISummaryText> | 'undefined' | 'null'
label: string
}
@@ -85,6 +85,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'auto',
label: t('settings.openai.summary_text_mode.auto')
@@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'low',
label: t('settings.openai.verbosity.low')
@@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={summaryText}
value={toOptionValue(summaryText)}
onChange={(value) => {
setSummaryText(value as OpenAISummaryText)
setSummaryText(toRealValue(value))
}}
options={summaryTextOptions}
/>
@@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={verbosity}
value={toOptionValue(verbosity)}
onChange={(value) => {
setVerbosity(value as OpenAIVerbosity)
setVerbosity(toRealValue(value))
}}
options={verbosityOptions}
/>

View File

@@ -2906,6 +2906,23 @@ const migrateConfig = {
logger.error('migrate 179 error', error as Error)
return state
}
},
'180': (state: RootState) => {
  try {
    // Earlier versions persisted the literal string 'undefined' (from a Selector
    // option value) instead of the real undefined; normalize both settings here.
    // @ts-expect-error -- legacy persisted state may hold the string 'undefined'
    if (state.settings.openAI.summaryText === 'undefined') {
      state.settings.openAI.summaryText = undefined
    }
    // @ts-expect-error -- legacy persisted state may hold the string 'undefined'
    if (state.settings.openAI.verbosity === 'undefined') {
      // Bug fix: the original assigned to summaryText here (copy-paste error),
      // leaving the stale 'undefined' string in verbosity forever.
      state.settings.openAI.verbosity = undefined
    }
    logger.info('migrate 180 success')
    return state
  } catch (error) {
    // Migrations must never throw: log and return state unchanged on failure.
    logger.error('migrate 180 error', error as Error)
    return state
  }
}
}

View File

@@ -1,5 +1,5 @@
import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types'
import type { NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
import * as z from 'zod'
@@ -32,17 +32,15 @@ export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'm
export type AiSdkModel = LanguageModel | ImageModel
// The original type unites both undefined and null.
// I picked undefined as the sole falsy type since they appear to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it is undefined.
export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']>
export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity']
export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>
export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
// The original type unites both undefined and null.
// I picked undefined as the sole falsy type since they appear to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it is undefined.
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>
export type OpenAISummaryText = OpenAI.Reasoning['summary']
const AiSdkParamsSchema = z.enum([
'maxOutputTokens',

View File

@@ -128,10 +128,6 @@ export type OpenAIExtraBody = {
source_lang: 'auto'
target_lang: string
}
// for gpt-5 series models verbosity control
text?: {
verbosity?: 'low' | 'medium' | 'high'
}
}
// image is for openrouter. audio is ignored for now
export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'