Compare commits
4 Commits: feat/backu... → feat/qwen3

| Author | SHA1 | Date |
|---|---|---|
|  | d618cdf19e |  |
|  | 9120136f45 |  |
|  | 8747974359 |  |
|  | 69e9b9855e |  |
@@ -11,3 +11,47 @@ export const StreamlineGoodHealthAndWellBeing = (props: SVGProps<SVGSVGElement>)
    </svg>
  )
}

export function MdiLightbulbOffOutline(props: SVGProps<SVGSVGElement>) {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
      {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
      <path
        fill="currentColor"
        d="M12 2C9.76 2 7.78 3.05 6.5 4.68l1.43 1.43C8.84 4.84 10.32 4 12 4a5 5 0 0 1 5 5c0 1.68-.84 3.16-2.11 4.06l1.42 1.44C17.94 13.21 19 11.24 19 9a7 7 0 0 0-7-7M3.28 4L2 5.27L5.04 8.3C5 8.53 5 8.76 5 9c0 2.38 1.19 4.47 3 5.74V17a1 1 0 0 0 1 1h5.73l4 4L20 20.72zm3.95 6.5l5.5 5.5H10v-2.42a5 5 0 0 1-2.77-3.08M9 20v1a1 1 0 0 0 1 1h4a1 1 0 0 0 1-1v-1z"></path>
    </svg>
  )
}

export function MdiLightbulbOn10(props: SVGProps<SVGSVGElement>) {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
      {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
      <path
        fill="currentColor"
        d="M1 11h3v2H1zm18.1-7.5L17 5.6L18.4 7l2.1-2.1zM11 1h2v3h-2zM4.9 3.5L3.5 4.9L5.6 7L7 5.6zM10 22c0 .6.4 1 1 1h2c.6 0 1-.4 1-1v-1h-4zm2-16c-3.3 0-6 2.7-6 6c0 2.2 1.2 4.2 3 5.2V19c0 .6.4 1 1 1h4c.6 0 1-.4 1-1v-1.8c1.8-1 3-3 3-5.2c0-3.3-2.7-6-6-6m1 9.9V17h-2v-1.1c-1.7-.4-3-2-3-3.9c0-2.2 1.8-4 4-4s4 1.8 4 4c0 1.9-1.3 3.4-3 3.9m7-4.9h3v2h-3z"></path>
    </svg>
  )
}

export function MdiLightbulbOn50(props: SVGProps<SVGSVGElement>) {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
      {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
      <path
        fill="currentColor"
        d="M1 11h3v2H1zm9 11c0 .6.4 1 1 1h2c.6 0 1-.4 1-1v-1h-4zm3-21h-2v3h2zM4.9 3.5L3.5 4.9L5.6 7L7 5.6zM20 11v2h3v-2zm-.9-7.5L17 5.6L18.4 7l2.1-2.1zM18 12c0 2.2-1.2 4.2-3 5.2V19c0 .6-.4 1-1 1h-4c-.6 0-1-.4-1-1v-1.8c-1.8-1-3-3-3-5.2c0-3.3 2.7-6 6-6s6 2.7 6 6M8 12c0 .35.05.68.14 1h7.72c.09-.32.14-.65.14-1c0-2.21-1.79-4-4-4s-4 1.79-4 4"></path>
    </svg>
  )
}

export function MdiLightbulbOn90(props: SVGProps<SVGSVGElement>) {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
      {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
      <path
        fill="currentColor"
        d="M7 5.6L5.6 7L3.5 4.9l1.4-1.4zM10 22c0 .6.4 1 1 1h2c.6 0 1-.4 1-1v-1h-4zm-9-9h3v-2H1zM13 1h-2v3h2zm7 10v2h3v-2zm-.9-7.5L17 5.6L18.4 7l2.1-2.1zM18 12c0 2.2-1.2 4.2-3 5.2V19c0 .6-.4 1-1 1h-4c-.6 0-1-.4-1-1v-1.8c-1.8-1-3-3-3-5.2c0-3.3 2.7-6 6-6s6 2.7 6 6m-6-4c-1 0-1.91.38-2.61 1h5.22C13.91 8.38 13 8 12 8"></path>
    </svg>
  )
}
|
||||
src/renderer/src/components/ThinkingPanel/ThinkingSelect.tsx (new file, 81 lines)
@@ -0,0 +1,81 @@
import { isSupportedReasoningEffortGrokModel } from '@renderer/config/models'
import { Assistant, Model } from '@renderer/types'
import { List } from 'antd'
import { useMemo } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'

import { ReasoningEffortOptions } from './index'

interface ThinkingSelectProps {
  model: Model
  assistant: Assistant
  value: ReasoningEffortOptions
  onChange: (value: ReasoningEffortOptions) => void
}

interface OptionType {
  label: string
  value: ReasoningEffortOptions
}

export default function ThinkingSelect({ model, value, onChange }: ThinkingSelectProps) {
  const { t } = useTranslation()

  const baseOptions = useMemo(
    () =>
      [
        { label: t('assistants.settings.reasoning_effort.low'), value: 'low' },
        { label: t('assistants.settings.reasoning_effort.medium'), value: 'medium' },
        { label: t('assistants.settings.reasoning_effort.high'), value: 'high' }
      ] as OptionType[],
    // eslint-disable-next-line react-hooks/exhaustive-deps
    []
  )

  const options = useMemo(
    () =>
      isSupportedReasoningEffortGrokModel(model)
        ? baseOptions.filter((option) => option.value === 'low' || option.value === 'high')
        : baseOptions,
    [model, baseOptions]
  )

  return (
    <List
      dataSource={options}
      renderItem={(option) => (
        <StyledListItem $isSelected={value === option.value} onClick={() => onChange(option.value)}>
          <ReasoningEffortLabel>{option.label}</ReasoningEffortLabel>
        </StyledListItem>
      )}
    />
  )
}

const ReasoningEffortLabel = styled.div`
  font-size: 16px;
  font-family: Ubuntu;
`

const StyledListItem = styled(List.Item)<{ $isSelected: boolean }>`
  cursor: pointer;
  padding: 8px 16px;
  margin: 4px 0;
  font-family: Ubuntu;
  border-radius: var(--list-item-border-radius);
  font-size: 16px;
  display: flex;
  flex-direction: column;
  justify-content: space-between;
  transition: all 0.3s;
  background-color: ${(props) => (props.$isSelected ? 'var(--color-background-soft)' : 'transparent')};

  .ant-list-item {
    border: none !important;
  }

  &:hover {
    background-color: var(--color-background-soft);
  }
`
|
||||
src/renderer/src/components/ThinkingPanel/ThinkingSlider.tsx (new file, 172 lines)
@@ -0,0 +1,172 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import { Model } from '@renderer/types'
import { Button, InputNumber, Slider, Tooltip } from 'antd'
import { useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'

import { isSupportedThinkingTokenGeminiModel } from '../../config/models'

interface ThinkingSliderProps {
  model: Model
  value: number | null
  min: number
  max: number
  onChange: (value: number | null) => void
}

export default function ThinkingSlider({ model, value, min, max, onChange }: ThinkingSliderProps) {
  const [mode, setMode] = useState<'default' | 'custom'>(value === null ? 'default' : 'custom')
  const [customValue, setCustomValue] = useState<number>(value === null ? 0 : value)
  const { t } = useTranslation()
  useEffect(() => {
    if (value === null) {
      setMode('default')
    } else {
      setMode('custom')
      setCustomValue(value)
    }
  }, [value])

  const handleModeChange = (newMode: 'default' | 'custom') => {
    setMode(newMode)
    if (newMode === 'default') {
      onChange(null)
    } else {
      onChange(customValue)
    }
  }

  const handleCustomValueChange = (newValue: number | null) => {
    if (newValue !== null) {
      setCustomValue(newValue)
      onChange(newValue)
    }
  }

  return (
    <Container>
      {isSupportedThinkingTokenGeminiModel(model) && (
        <ButtonGroup>
          <Tooltip title={t('chat.input.thinking.mode.default.tip')}>
            <ModeButton type={mode === 'default' ? 'primary' : 'text'} onClick={() => handleModeChange('default')}>
              {t('chat.input.thinking.mode.default')}
            </ModeButton>
          </Tooltip>
          <Tooltip title={t('chat.input.thinking.mode.custom.tip')}>
            <ModeButton type={mode === 'custom' ? 'primary' : 'text'} onClick={() => handleModeChange('custom')}>
              {t('chat.input.thinking.mode.custom')}
            </ModeButton>
          </Tooltip>
        </ButtonGroup>
      )}

      {mode === 'custom' && (
        <CustomControls>
          <SliderContainer>
            <Slider
              min={min}
              max={max}
              value={customValue}
              onChange={handleCustomValueChange}
              tooltip={{ formatter: null }}
            />
            <SliderMarks>
              <span>0</span>
              <span>{max.toLocaleString()}</span>
            </SliderMarks>
          </SliderContainer>

          <InputContainer>
            <StyledInputNumber
              min={min}
              max={max}
              value={customValue}
              onChange={(value) => handleCustomValueChange(Number(value))}
              controls={false}
            />
            <Tooltip title={t('chat.input.thinking.mode.tokens.tip')}>
              <InfoCircleOutlined style={{ color: 'var(--color-text-2)' }} />
            </Tooltip>
          </InputContainer>
        </CustomControls>
      )}
    </Container>
  )
}

const Container = styled.div`
  display: flex;
  flex-direction: column;
  gap: 12px;
  width: 100%;
  min-width: 320px;
  padding: 4px;
`

const ButtonGroup = styled.div`
  display: flex;
  gap: 8px;
  justify-content: center;
  margin-bottom: 4px;
`

const ModeButton = styled(Button)`
  min-width: 90px;
  height: 28px;
  border-radius: 14px;
  padding: 0 16px;
  font-size: 13px;

  &:hover {
    background-color: var(--color-background-soft);
  }

  &.ant-btn-primary {
    background-color: var(--color-primary);
    border-color: var(--color-primary);

    &:hover {
      background-color: var(--color-primary);
      opacity: 0.9;
    }
  }
`

const CustomControls = styled.div`
  display: flex;
  align-items: center;
  gap: 12px;
`

const SliderContainer = styled.div`
  flex: 1;
  display: flex;
  flex-direction: column;
  gap: 4px;
  min-width: 180px;
`

const SliderMarks = styled.div`
  display: flex;
  justify-content: space-between;
  color: var(--color-text-2);
  font-size: 12px;
`

const InputContainer = styled.div`
  display: flex;
  align-items: center;
  gap: 8px;
`

const StyledInputNumber = styled(InputNumber)`
  width: 70px;

  .ant-input-number-input {
    height: 28px;
    text-align: center;
    font-size: 13px;
    padding: 0 8px;
  }
`
|
||||
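A minimal usage sketch of the slider above, assuming a budget-capable model is passed in; the parent component and its state are illustrative, not part of this PR:

```ts
import { useState } from 'react'
import { Model } from '@renderer/types'
import ThinkingSlider from '@renderer/components/ThinkingPanel/ThinkingSlider'

// Hypothetical parent: keeps the thinking budget in local state.
// null means "default" mode (the model decides); a number is an explicit cap.
function ThinkingBudgetDemo({ model }: { model: Model }) {
  const [budget, setBudget] = useState<number | null>(null)

  return (
    <ThinkingSlider
      model={model}
      value={budget}
      min={0}
      max={24576} // Gemini limit from THINKING_TOKEN_MAP below
      onChange={setBudget}
    />
  )
}
```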
src/renderer/src/components/ThinkingPanel/index.tsx (new file, 120 lines)
@@ -0,0 +1,120 @@
import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import {
  isSupportedReasoningEffortModel,
  isSupportedThinkingTokenClaudeModel,
  isSupportedThinkingTokenModel
} from '@renderer/config/models'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { Assistant, Model } from '@renderer/types'
import { useCallback, useEffect, useMemo } from 'react'
import { useTranslation } from 'react-i18next'

import ThinkingSelect from './ThinkingSelect'
import ThinkingSlider from './ThinkingSlider'

const THINKING_TOKEN_MAP: Record<string, { min: number; max: number }> = {
  // Gemini models
  'gemini-.*$': { min: 0, max: 24576 },

  // Qwen models
  'qwen-plus-.*$': { min: 0, max: 38912 },
  'qwen-turbo-.*$': { min: 0, max: 38912 },
  'qwen3-0\\.6b$': { min: 0, max: 30720 },
  'qwen3-1\\.7b$': { min: 0, max: 30720 },
  'qwen3-.*$': { min: 0, max: 38912 },

  // Claude models
  'claude-3[.-]7.*sonnet.*$': { min: 0, max: 64000 }
}

export type ReasoningEffortOptions = 'low' | 'medium' | 'high'

// Helper function to find matching token limit
const findTokenLimit = (modelId: string): { min: number; max: number } | undefined => {
  for (const [pattern, limits] of Object.entries(THINKING_TOKEN_MAP)) {
    if (new RegExp(pattern).test(modelId)) {
      return limits
    }
  }
  return undefined
}

interface ThinkingPanelProps {
  model: Model
  assistant: Assistant
}

export default function ThinkingPanel({ model, assistant }: ThinkingPanelProps) {
  const { updateAssistantSettings } = useAssistant(assistant.id)
  const isSupportedThinkingToken = isSupportedThinkingTokenModel(model)
  const isSupportedReasoningEffort = isSupportedReasoningEffortModel(model)
  const thinkingTokenRange = findTokenLimit(model.id)
  const { t } = useTranslation()

  // Read the current thinking_budget value.
  // If thinking_budget is not set, use null to represent the default behavior.
  const currentThinkingBudget =
    assistant.settings?.thinking_budget !== undefined ? assistant.settings.thinking_budget : null

  // Read the maxTokens value
  const maxTokens = assistant.settings?.maxTokens || DEFAULT_MAX_TOKENS

  // Check whether the thinking budget exceeds maxTokens
  const isBudgetExceedingMax = useMemo(() => {
    if (currentThinkingBudget === null) return false
    return currentThinkingBudget > maxTokens
  }, [currentThinkingBudget, maxTokens])

  useEffect(() => {
    if (isBudgetExceedingMax && isSupportedThinkingTokenClaudeModel(model)) {
      window.message.error(t('chat.input.thinking.budget_exceeds_max'))
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [isBudgetExceedingMax, model])

  const onTokenChange = useCallback(
    (value: number | null) => {
      // If the value is null, clear the thinking_budget setting and use the default behavior
      if (value === null) {
        updateAssistantSettings({ thinking_budget: undefined })
      } else {
        updateAssistantSettings({ thinking_budget: value })
      }
    },
    [updateAssistantSettings]
  )

  const onReasoningEffortChange = useCallback(
    (value: ReasoningEffortOptions) => {
      updateAssistantSettings({ reasoning_effort: value })
    },
    [updateAssistantSettings]
  )

  if (isSupportedThinkingToken) {
    return (
      <>
        <ThinkingSlider
          model={model}
          value={currentThinkingBudget}
          min={thinkingTokenRange?.min ?? 0}
          max={thinkingTokenRange?.max ?? 0}
          onChange={onTokenChange}
        />
      </>
    )
  }

  if (isSupportedReasoningEffort) {
    return (
      <ThinkingSelect
        assistant={assistant}
        model={model}
        value={assistant.settings?.reasoning_effort || 'medium'}
        onChange={onReasoningEffortChange}
      />
    )
  }

  return null
}
|
||||
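A small standalone sketch of how `findTokenLimit` resolves a model id against `THINKING_TOKEN_MAP` (the map is abbreviated here; the example ids are only illustrative):

```ts
// Standalone illustration of the lookup logic used above.
const THINKING_TOKEN_MAP: Record<string, { min: number; max: number }> = {
  'gemini-.*$': { min: 0, max: 24576 },
  'qwen3-0\\.6b$': { min: 0, max: 30720 },
  'qwen3-.*$': { min: 0, max: 38912 },
  'claude-3[.-]7.*sonnet.*$': { min: 0, max: 64000 }
}

const findTokenLimit = (modelId: string) => {
  // Object.entries preserves insertion order, so the more specific
  // 'qwen3-0\\.6b$' pattern is tried before the catch-all 'qwen3-.*$'.
  for (const [pattern, limits] of Object.entries(THINKING_TOKEN_MAP)) {
    if (new RegExp(pattern).test(modelId)) return limits
  }
  return undefined
}

console.log(findTokenLimit('qwen3-0.6b'))        // { min: 0, max: 30720 }
console.log(findTokenLimit('qwen3-235b-a22b'))   // { min: 0, max: 38912 }
console.log(findTokenLimit('claude-3-7-sonnet')) // { min: 0, max: 64000 }
console.log(findTokenLimit('gpt-4o'))            // undefined
```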
@@ -210,6 +210,7 @@ export const FUNCTION_CALLING_MODELS = [
  'o1(?:-[\\w-]+)?',
  'claude',
  'qwen',
  'qwen3',
  'hunyuan',
  'deepseek',
  'glm-4(?:-[\\w-]+)?',
@@ -2218,30 +2219,40 @@ export function isVisionModel(model: Model): boolean {
  return VISION_REGEX.test(model.id) || model.type?.includes('vision') || false
}

export function isOpenAIoSeries(model: Model): boolean {
export function isOpenAIReasoningModel(model: Model): boolean {
  return model.id.includes('o1') || model.id.includes('o3') || model.id.includes('o4')
}

export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
  return (
    (model.id.includes('o1') && !(model.id.includes('o1-preview') || model.id.includes('o1-mini'))) ||
    model.id.includes('o3') ||
    model.id.includes('o4')
  )
}

export function isOpenAIWebSearch(model: Model): boolean {
  return model.id.includes('gpt-4o-search-preview') || model.id.includes('gpt-4o-mini-search-preview')
}

export function isSupportedThinkingTokenModel(model?: Model): boolean {
  if (!model) {
    return false
  }

  return (
    isSupportedThinkingTokenGeminiModel(model) ||
    isSupportedThinkingTokenQwenModel(model) ||
    isSupportedThinkingTokenClaudeModel(model)
  )
}

export function isSupportedReasoningEffortModel(model?: Model): boolean {
  if (!model) {
    return false
  }

  if (
    model.id.includes('claude-3-7-sonnet') ||
    model.id.includes('claude-3.7-sonnet') ||
    isOpenAIoSeries(model) ||
    isGrokReasoningModel(model) ||
    isGemini25ReasoningModel(model)
  ) {
    return true
  }

  return false
  return isSupportedReasoningEffortOpenAIModel(model) || isSupportedReasoningEffortGrokModel(model)
}

export function isGrokModel(model?: Model): boolean {
@@ -2263,7 +2274,9 @@ export function isGrokReasoningModel(model?: Model): boolean {
  return false
}

export function isGemini25ReasoningModel(model?: Model): boolean {
export const isSupportedReasoningEffortGrokModel = isGrokReasoningModel

export function isGeminiReasoningModel(model?: Model): boolean {
  if (!model) {
    return false
  }
@@ -2275,6 +2288,51 @@ export function isGemini25ReasoningModel(model?: Model): boolean {
  return false
}

export const isSupportedThinkingTokenGeminiModel = isGeminiReasoningModel

export function isQwenReasoningModel(model?: Model): boolean {
  if (!model) {
    return false
  }

  if (isSupportedThinkingTokenQwenModel(model)) {
    return true
  }

  if (model.id.includes('qwq') || model.id.includes('qvq')) {
    return true
  }

  return false
}

export function isSupportedThinkingTokenQwenModel(model?: Model): boolean {
  if (!model) {
    return false
  }

  return (
    model.id.includes('qwen3') ||
    [
      'qwen-plus-latest',
      'qwen-plus-0428',
      'qwen-plus-2025-04-28',
      'qwen-turbo-latest',
      'qwen-turbo-0428',
      'qwen-turbo-2025-04-28'
    ].includes(model.id)
  )
}

export function isClaudeReasoningModel(model?: Model): boolean {
  if (!model) {
    return false
  }
  return model.id.includes('claude-3-7-sonnet') || model.id.includes('claude-3.7-sonnet')
}

export const isSupportedThinkingTokenClaudeModel = isClaudeReasoningModel

export function isReasoningModel(model?: Model): boolean {
  if (!model) {
    return false
@@ -2284,15 +2342,14 @@ export function isReasoningModel(model?: Model): boolean {
    return REASONING_REGEX.test(model.name) || model.type?.includes('reasoning') || false
  }

  if (model.id.includes('claude-3-7-sonnet') || model.id.includes('claude-3.7-sonnet') || isOpenAIoSeries(model)) {
    return true
  }

  if (isGemini25ReasoningModel(model)) {
    return true
  }

  if (model.id.includes('glm-z1')) {
  if (
    isClaudeReasoningModel(model) ||
    isOpenAIReasoningModel(model) ||
    isGeminiReasoningModel(model) ||
    isQwenReasoningModel(model) ||
    isGrokReasoningModel(model) ||
    model.id.includes('glm-z1')
  ) {
    return true
  }
|
||||
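A quick sketch of how the reworked predicates are meant to compose; the model objects are abbreviated (only `id` is inspected by these helpers) and the ids are illustrative:

```ts
import {
  isSupportedReasoningEffortOpenAIModel,
  isSupportedThinkingTokenClaudeModel,
  isSupportedThinkingTokenQwenModel
} from '@renderer/config/models'
import { Model } from '@renderer/types'

// Abbreviated Model objects for illustration only.
const qwen = { id: 'qwen-plus-latest' } as Model
const o3 = { id: 'o3-mini' } as Model
const claude = { id: 'claude-3-7-sonnet-20250219' } as Model

isSupportedThinkingTokenQwenModel(qwen)     // true → configured via thinking_budget
isSupportedReasoningEffortOpenAIModel(o3)   // true → configured via reasoning_effort
isSupportedThinkingTokenClaudeModel(claude) // true → configured via thinking_budget
```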
@@ -56,11 +56,10 @@
    "settings.preset_messages": "Preset Messages",
    "settings.prompt": "Prompt Settings",
    "settings.reasoning_effort": "Reasoning effort",
    "settings.reasoning_effort.high": "high",
    "settings.reasoning_effort.low": "low",
    "settings.reasoning_effort.medium": "medium",
    "settings.reasoning_effort.off": "off",
    "settings.reasoning_effort.tip": "Only supported by OpenAI o-series, Anthropic, and Grok reasoning models",
    "settings.reasoning_effort.off": "Off",
    "settings.reasoning_effort.high": "Think harder",
    "settings.reasoning_effort.low": "Think less",
    "settings.reasoning_effort.medium": "Think normally",
    "settings.more": "Assistant Settings"
  },
  "auth": {
@@ -250,7 +249,14 @@
    "input.upload.upload_from_local": "Upload local file...",
    "input.web_search.builtin": "Model Built-in",
    "input.web_search.builtin.enabled_content": "Use the built-in web search function of the model",
    "input.web_search.builtin.disabled_content": "The current model does not support web search"
    "input.web_search.builtin.disabled_content": "The current model does not support web search",
    "input.thinking": "Thinking",
    "input.thinking.mode.default": "Default",
    "input.thinking.mode.default.tip": "The model will automatically determine the number of tokens to think",
    "input.thinking.mode.custom": "Custom",
    "input.thinking.mode.custom.tip": "The maximum number of tokens the model can think. Need to consider the context limit of the model, otherwise an error will be reported",
    "input.thinking.mode.tokens.tip": "Set the number of thinking tokens to use.",
    "input.thinking.budget_exceeds_max": "Thinking budget exceeds the maximum token number"
  },
  "code_block": {
    "collapse": "Collapse",
|
||||
@@ -56,11 +56,10 @@
    "settings.preset_messages": "プリセットメッセージ",
    "settings.prompt": "プロンプト設定",
    "settings.reasoning_effort": "思考連鎖の長さ",
    "settings.reasoning_effort.high": "長い",
    "settings.reasoning_effort.low": "短い",
    "settings.reasoning_effort.medium": "中程度",
    "settings.reasoning_effort.off": "オフ",
    "settings.reasoning_effort.tip": "OpenAI o-series、Anthropic、および Grok の推論モデルのみサポート",
    "settings.reasoning_effort.high": "最大限の思考",
    "settings.reasoning_effort.low": "少しの思考",
    "settings.reasoning_effort.medium": "普通の思考",
    "settings.more": "アシスタント設定"
  },
  "auth": {
@@ -250,7 +249,14 @@
    "input.upload.upload_from_local": "ローカルファイルをアップロード...",
    "input.web_search.builtin": "モデル内蔵",
    "input.web_search.builtin.enabled_content": "モデル内蔵のウェブ検索機能を使用",
    "input.web_search.builtin.disabled_content": "現在のモデルはウェブ検索をサポートしていません"
    "input.web_search.builtin.disabled_content": "現在のモデルはウェブ検索をサポートしていません",
    "input.thinking": "思考",
    "input.thinking.mode.default": "デフォルト",
    "input.thinking.mode.custom": "カスタム",
    "input.thinking.mode.custom.tip": "モデルが最大で思考できるトークン数。モデルのコンテキスト制限を考慮する必要があります。そうしないとエラーが発生します",
    "input.thinking.mode.default.tip": "モデルが自動的に思考のトークン数を決定します",
    "input.thinking.mode.tokens.tip": "思考のトークン数を設定します",
    "input.thinking.budget_exceeds_max": "思考予算が最大トークン数を超えました"
  },
  "code_block": {
    "collapse": "折りたたむ",
@@ -1507,7 +1513,7 @@
    "title": "プライバシー設定",
    "enable_privacy_mode": "匿名エラーレポートとデータ統計の送信"
  },
  "input.show_translate_confirm": "[to be translated]:显示翻译确认对话框"
  "input.show_translate_confirm": "翻訳確認ダイアログを表示"
  },
  "translate": {
    "any.language": "任意の言語",
|
||||
@@ -55,12 +55,10 @@
    "settings.model": "Настройки модели",
    "settings.preset_messages": "Предустановленные сообщения",
    "settings.prompt": "Настройки промптов",
    "settings.reasoning_effort": "Длина цепочки рассуждений",
    "settings.reasoning_effort.high": "Длинная",
    "settings.reasoning_effort.low": "Короткая",
    "settings.reasoning_effort.medium": "Средняя",
    "settings.reasoning_effort.off": "Выключено",
    "settings.reasoning_effort.tip": "Поддерживается только моделями рассуждений OpenAI o-series, Anthropic и Grok",
    "settings.reasoning_effort.off": "Выключить",
    "settings.reasoning_effort.high": "Стараюсь думать",
    "settings.reasoning_effort.low": "Меньше думать",
    "settings.reasoning_effort.medium": "Среднее",
    "settings.more": "Настройки ассистента"
  },
  "auth": {
@@ -250,7 +248,14 @@
    "input.upload.upload_from_local": "Загрузить локальный файл...",
    "input.web_search.builtin": "Модель встроена",
    "input.web_search.builtin.enabled_content": "Используйте встроенную функцию веб-поиска модели",
    "input.web_search.builtin.disabled_content": "Текущая модель не поддерживает веб-поиск"
    "input.web_search.builtin.disabled_content": "Текущая модель не поддерживает веб-поиск",
    "input.thinking": "Мыслим",
    "input.thinking.mode.default": "По умолчанию",
    "input.thinking.mode.default.tip": "Модель автоматически определяет количество токенов для размышления",
    "input.thinking.mode.custom": "Пользовательский",
    "input.thinking.mode.custom.tip": "Модель может максимально размышлять количество токенов. Необходимо учитывать ограничение контекста модели, иначе будет ошибка",
    "input.thinking.mode.tokens.tip": "Установите количество токенов для размышления",
    "input.thinking.budget_exceeds_max": "Бюджет размышления превышает максимальное количество токенов"
  },
  "code_block": {
    "collapse": "Свернуть",
@@ -1507,7 +1512,7 @@
    "title": "Настройки приватности",
    "enable_privacy_mode": "Анонимная отправка отчетов об ошибках и статистики"
  },
  "input.show_translate_confirm": "[to be translated]:显示翻译确认对话框"
  "input.show_translate_confirm": "Показать диалоговое окно подтверждения перевода"
  },
  "translate": {
    "any.language": "Любой язык",
|
||||
@@ -56,11 +56,10 @@
    "settings.preset_messages": "预设消息",
    "settings.prompt": "提示词设置",
    "settings.reasoning_effort": "思维链长度",
    "settings.reasoning_effort.high": "长",
    "settings.reasoning_effort.low": "短",
    "settings.reasoning_effort.medium": "中",
    "settings.reasoning_effort.off": "关",
    "settings.reasoning_effort.tip": "仅支持 OpenAI o-series、Anthropic、Grok 推理模型",
    "settings.reasoning_effort.off": "关闭",
    "settings.reasoning_effort.low": "浮想",
    "settings.reasoning_effort.medium": "斟酌",
    "settings.reasoning_effort.high": "沉思",
    "settings.more": "助手设置"
  },
  "auth": {
@@ -133,6 +132,13 @@
    "input.translating": "翻译中...",
    "input.send": "发送",
    "input.settings": "设置",
    "input.thinking": "思考",
    "input.thinking.mode.default": "默认",
    "input.thinking.mode.default.tip": "模型会自动确定思考的 token 数",
    "input.thinking.mode.custom": "自定义",
    "input.thinking.mode.custom.tip": "模型最多可以思考的 token 数。需要考虑模型的上下文限制,否则会报错",
    "input.thinking.mode.tokens.tip": "设置思考的 token 数",
    "input.thinking.budget_exceeds_max": "思考预算超过最大 token 数",
    "input.topics": " 话题 ",
    "input.translate": "翻译成{{target_language}}",
    "input.upload": "上传图片或文档",
|
||||
@@ -56,11 +56,10 @@
    "settings.preset_messages": "預設訊息",
    "settings.prompt": "提示詞設定",
    "settings.reasoning_effort": "思維鏈長度",
    "settings.reasoning_effort.high": "長",
    "settings.reasoning_effort.low": "短",
    "settings.reasoning_effort.medium": "中",
    "settings.reasoning_effort.off": "關",
    "settings.reasoning_effort.tip": "僅支援 OpenAI o-series、Anthropic 和 Grok 推理模型",
    "settings.reasoning_effort.off": "關閉",
    "settings.reasoning_effort.high": "盡力思考",
    "settings.reasoning_effort.low": "稍微思考",
    "settings.reasoning_effort.medium": "正常思考",
    "settings.more": "助手設定"
  },
  "auth": {
@@ -250,7 +249,14 @@
    "input.upload.upload_from_local": "上傳本地文件...",
    "input.web_search.builtin": "模型內置",
    "input.web_search.builtin.enabled_content": "使用模型內置的網路搜尋功能",
    "input.web_search.builtin.disabled_content": "當前模型不支持網路搜尋功能"
    "input.web_search.builtin.disabled_content": "當前模型不支持網路搜尋功能",
    "input.thinking": "思考",
    "input.thinking.mode.default": "預設",
    "input.thinking.mode.default.tip": "模型會自動確定思考的 token 數",
    "input.thinking.mode.custom": "自定義",
    "input.thinking.mode.custom.tip": "模型最多可以思考的 token 數。需要考慮模型的上下文限制,否則會報錯",
    "input.thinking.mode.tokens.tip": "設置思考的 token 數",
    "input.thinking.budget_exceeds_max": "思考預算超過最大 token 數"
  },
  "code_block": {
    "collapse": "折疊",
|
||||
src/renderer/src/locales/zh/translation.json (new file, 1 line)
@@ -0,0 +1 @@
|
||||
@@ -1,7 +1,7 @@
import { HolderOutlined } from '@ant-design/icons'
import { QuickPanelListItem, QuickPanelView, useQuickPanel } from '@renderer/components/QuickPanel'
import TranslateButton from '@renderer/components/TranslateButton'
import { isGenerateImageModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
import { isGenerateImageModel, isReasoningModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
import db from '@renderer/databases'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useKnowledgeBases } from '@renderer/hooks/useKnowledge'
@@ -65,6 +65,7 @@ import MentionModelsInput from './MentionModelsInput'
import NewContextButton from './NewContextButton'
import QuickPhrasesButton, { QuickPhrasesButtonRef } from './QuickPhrasesButton'
import SendMessageButton from './SendMessageButton'
import ThinkingButton, { ThinkingButtonRef } from './ThinkingButton'
import TokenCount from './TokenCount'
import WebSearchButton, { WebSearchButtonRef } from './WebSearchButton'
@@ -130,7 +131,8 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
  const knowledgeBaseButtonRef = useRef<KnowledgeBaseButtonRef>(null)
  const mcpToolsButtonRef = useRef<MCPToolsButtonRef>(null)
  const attachmentButtonRef = useRef<AttachmentButtonRef>(null)
  const webSearchButtonRef = useRef<WebSearchButtonRef>(null)
  const webSearchButtonRef = useRef<WebSearchButtonRef | null>(null)
  const thinkingButtonRef = useRef<ThinkingButtonRef | null>(null)

  // eslint-disable-next-line react-hooks/exhaustive-deps
  const debouncedEstimate = useCallback(
@@ -918,6 +920,14 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
              setFiles={setFiles}
              ToolbarButton={ToolbarButton}
            />
            {isReasoningModel(model) && (
              <ThinkingButton
                ref={thinkingButtonRef}
                model={model}
                assistant={assistant}
                ToolbarButton={ToolbarButton}
              />
            )}
            <WebSearchButton ref={webSearchButtonRef} assistant={assistant} ToolbarButton={ToolbarButton} />
            {showKnowledgeIcon && (
              <KnowledgeBaseButton
@@ -1103,7 +1113,8 @@ const ToolbarButton = styled(Button)`
  &.active {
    background-color: var(--color-primary) !important;
    .anticon,
    .iconfont {
    .iconfont,
    .chevron-icon {
      color: var(--color-white-soft);
    }
    &:hover {
|
||||
src/renderer/src/pages/home/Inputbar/ThinkingButton.tsx (new file, 245 lines)
@@ -0,0 +1,245 @@
import {
  MdiLightbulbOffOutline,
  MdiLightbulbOn10,
  MdiLightbulbOn50,
  MdiLightbulbOn90
} from '@renderer/components/Icons/SVGIcon'
import { QuickPanelListItem, useQuickPanel } from '@renderer/components/QuickPanel'
import {
  isSupportedReasoningEffortGrokModel,
  isSupportedReasoningEffortModel,
  isSupportedThinkingTokenModel
} from '@renderer/config/models'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { Assistant, Model } from '@renderer/types'
import { Tooltip } from 'antd'
import { FC, ReactElement, useCallback, useImperativeHandle, useMemo } from 'react'
import { useTranslation } from 'react-i18next'

export type ReasoningEffortOptions = 'low' | 'medium' | 'high'

const THINKING_TOKEN_MAP: Record<string, { min: number; max: number }> = {
  // Gemini models
  'gemini-.*$': { min: 0, max: 24576 },

  // Qwen models
  'qwen-plus-.*$': { min: 0, max: 38912 },
  'qwen-turbo-.*$': { min: 0, max: 38912 },
  'qwen3-0\\.6b$': { min: 0, max: 30720 },
  'qwen3-1\\.7b$': { min: 0, max: 30720 },
  'qwen3-.*$': { min: 0, max: 38912 },

  // Claude models
  'claude-3[.-]7.*sonnet.*$': { min: 0, max: 64000 }
}

// Helper function to find matching token limit
const findTokenLimit = (modelId: string): { min: number; max: number } | undefined => {
  for (const [pattern, limits] of Object.entries(THINKING_TOKEN_MAP)) {
    if (new RegExp(pattern).test(modelId)) {
      return limits
    }
  }
  return undefined
}

// Compute the thinking_budget value from the model and the selected thinking level
const calculateThinkingBudget = (model: Model, option: ReasoningEffortOptions | null): number | undefined => {
  if (!option || !isSupportedThinkingTokenModel(model)) {
    return undefined
  }

  const tokenLimits = findTokenLimit(model.id)
  if (!tokenLimits) return undefined

  const { min, max } = tokenLimits

  switch (option) {
    case 'low':
      return Math.floor(min + (max - min) * 0.25)
    case 'medium':
      return Math.floor(min + (max - min) * 0.5)
    case 'high':
      return Math.floor(min + (max - min) * 0.75)
    default:
      return undefined
  }
}

export interface ThinkingButtonRef {
  openQuickPanel: () => void
}

interface Props {
  ref?: React.RefObject<ThinkingButtonRef | null>
  model: Model
  assistant: Assistant
  ToolbarButton: any
}

const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): ReactElement => {
  const { t } = useTranslation()
  const quickPanel = useQuickPanel()
  const { updateAssistantSettings } = useAssistant(assistant.id)

  const supportedThinkingToken = isSupportedThinkingTokenModel(model)
  const supportedReasoningEffort = isSupportedReasoningEffortModel(model)
  const isGrokModel = isSupportedReasoningEffortGrokModel(model)

  // Infer the thinking level back from thinking_budget
  const inferReasoningEffortFromBudget = useCallback(
    (model: Model, budget: number | undefined): ReasoningEffortOptions | null => {
      if (!budget || !supportedThinkingToken) return null

      const tokenLimits = findTokenLimit(model.id)
      if (!tokenLimits) return null

      const { min, max } = tokenLimits
      const range = max - min

      // Compute where the budget falls as a fraction of the range
      const normalizedBudget = (budget - min) / range

      // Map that fraction back to a level
      if (normalizedBudget <= 0.33) return 'low'
      if (normalizedBudget <= 0.66) return 'medium'
      return 'high'
    },
    [supportedThinkingToken]
  )

  const currentReasoningEffort = useMemo(() => {
    // Prefer an explicitly set reasoning_effort
    if (assistant.settings?.reasoning_effort) {
      return assistant.settings.reasoning_effort
    }

    // If there is a thinking_budget but no reasoning_effort, derive the level from the budget
    if (assistant.settings?.thinking_budget) {
      return inferReasoningEffortFromBudget(model, assistant.settings.thinking_budget)
    }

    return null
  }, [assistant.settings?.reasoning_effort, assistant.settings?.thinking_budget, inferReasoningEffortFromBudget, model])

  const createThinkingIcon = useCallback((option: ReasoningEffortOptions | null, isActive: boolean = false) => {
    const iconColor = isActive ? 'var(--color-link)' : 'var(--color-icon)'

    switch (true) {
      case option === 'low':
        return <MdiLightbulbOn10 width={18} height={18} style={{ color: iconColor, marginTop: -2 }} />
      case option === 'medium':
        return <MdiLightbulbOn50 width={18} height={18} style={{ color: iconColor, marginTop: -2 }} />
      case option === 'high':
        return <MdiLightbulbOn90 width={18} height={18} style={{ color: iconColor, marginTop: -2 }} />
      default:
        return <MdiLightbulbOffOutline width={18} height={18} style={{ color: iconColor }} />
    }
  }, [])

  const onThinkingChange = useCallback(
    (option: ReasoningEffortOptions | null) => {
      if (!option) {
        // Disable thinking
        updateAssistantSettings({
          reasoning_effort: undefined,
          thinking_budget: undefined
        })
        return
      }

      // Enable thinking
      if (supportedReasoningEffort) {
        updateAssistantSettings({
          reasoning_effort: option
        })
      }

      if (supportedThinkingToken) {
        const budget = calculateThinkingBudget(model, option)
        updateAssistantSettings({
          reasoning_effort: option,
          thinking_budget: budget
        })
      }
    },
    [model, supportedReasoningEffort, supportedThinkingToken, updateAssistantSettings]
  )

  const baseOptions = useMemo(
    () => [
      {
        level: null,
        label: t('assistants.settings.reasoning_effort.off'),
        description: '',
        icon: createThinkingIcon(null),
        isSelected: currentReasoningEffort === null,
        action: () => onThinkingChange(null)
      },
      {
        level: 'low',
        label: t('assistants.settings.reasoning_effort.low'),
        description: '',
        icon: createThinkingIcon('low'),
        isSelected: currentReasoningEffort === 'low',
        action: () => onThinkingChange('low')
      },
      {
        level: 'medium',
        label: t('assistants.settings.reasoning_effort.medium'),
        description: '',
        icon: createThinkingIcon('medium'),
        isSelected: currentReasoningEffort === 'medium',
        action: () => onThinkingChange('medium')
      },
      {
        level: 'high',
        label: t('assistants.settings.reasoning_effort.high'),
        description: '',
        icon: createThinkingIcon('high'),
        isSelected: currentReasoningEffort === 'high',
        action: () => onThinkingChange('high')
      }
    ],
    [currentReasoningEffort, onThinkingChange, t, createThinkingIcon]
  )

  const panelItems = useMemo<QuickPanelListItem[]>(() => {
    return isGrokModel ? baseOptions.filter((option) => option.level === 'low' || option.level === 'high') : baseOptions
  }, [baseOptions, isGrokModel])

  const openQuickPanel = useCallback(() => {
    quickPanel.open({
      title: t('chat.input.thinking'),
      list: panelItems,
      symbol: 'thinking'
    })
  }, [quickPanel, panelItems, t])

  const handleOpenQuickPanel = useCallback(() => {
    if (quickPanel.isVisible && quickPanel.symbol === 'thinking') {
      quickPanel.close()
    } else {
      openQuickPanel()
    }
  }, [openQuickPanel, quickPanel])

  // Resolve the icon to show for the current state
  const getThinkingIcon = useCallback(() => {
    return createThinkingIcon(currentReasoningEffort, currentReasoningEffort !== null)
  }, [createThinkingIcon, currentReasoningEffort])

  useImperativeHandle(ref, () => ({
    openQuickPanel
  }))

  return (
    <Tooltip placement="top" title={t('assistants.settings.reasoning_effort')} arrow>
      <ToolbarButton type="text" onClick={handleOpenQuickPanel}>
        {getThinkingIcon()}
      </ToolbarButton>
    </Tooltip>
  )
}

export default ThinkingButton
|
||||
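The budget arithmetic above, worked through for the Claude entry in `THINKING_TOKEN_MAP` (`{ min: 0, max: 64000 }`); with the 0.33/0.66 thresholds used by `inferReasoningEffortFromBudget`, the three levels round-trip cleanly:

```ts
// Worked example for the Claude range { min: 0, max: 64000 }.
const { min, max } = { min: 0, max: 64000 }

Math.floor(min + (max - min) * 0.25) // 16000 → normalized 0.25 ≤ 0.33 → inferred back as 'low'
Math.floor(min + (max - min) * 0.5)  // 32000 → normalized 0.50 ≤ 0.66 → 'medium'
Math.floor(min + (max - min) * 0.75) // 48000 → normalized 0.75 > 0.66 → 'high'
```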
@@ -8,13 +8,11 @@ import {
  isMac,
  isWindows
} from '@renderer/config/constant'
import { isGrokReasoningModel, isSupportedReasoningEffortModel } from '@renderer/config/models'
import { codeThemes } from '@renderer/context/SyntaxHighlighterProvider'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useSettings } from '@renderer/hooks/useSettings'
import { SettingDivider, SettingRow, SettingRowTitle, SettingSubtitle } from '@renderer/pages/settings'
import AssistantSettingsPopup from '@renderer/pages/settings/AssistantSettings'
import { getDefaultModel } from '@renderer/services/AssistantService'
import { useAppDispatch } from '@renderer/store'
import {
  SendMessageShortcut,
@@ -52,9 +50,9 @@ import {
  TranslateLanguageVarious
} from '@renderer/types'
import { modalConfirm } from '@renderer/utils'
import { Button, Col, InputNumber, Row, Segmented, Select, Slider, Switch, Tooltip } from 'antd'
import { Button, Col, InputNumber, Row, Select, Slider, Switch, Tooltip } from 'antd'
import { CircleHelp, RotateCcw, Settings2 } from 'lucide-react'
import { FC, useCallback, useEffect, useState } from 'react'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
@@ -72,7 +70,6 @@ const SettingsTab: FC<Props> = (props) => {
  const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
  const [fontSizeValue, setFontSizeValue] = useState(fontSize)
  const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
  const [reasoningEffort, setReasoningEffort] = useState(assistant?.settings?.reasoning_effort)
  const { t } = useTranslation()

  const dispatch = useAppDispatch()
@@ -127,17 +124,9 @@ const SettingsTab: FC<Props> = (props) => {
    }
  }

  const onReasoningEffortChange = useCallback(
    (value?: 'low' | 'medium' | 'high') => {
      updateAssistantSettings({ reasoning_effort: value })
    },
    [updateAssistantSettings]
  )

  const onReset = () => {
    setTemperature(DEFAULT_TEMPERATURE)
    setContextCount(DEFAULT_CONTEXTCOUNT)
    setReasoningEffort(undefined)
    updateAssistant({
      ...assistant,
      settings: {
@@ -148,7 +137,6 @@ const SettingsTab: FC<Props> = (props) => {
        maxTokens: DEFAULT_MAX_TOKENS,
        streamOutput: true,
        hideMessages: false,
        reasoning_effort: undefined,
        customParameters: []
      }
    })
@@ -160,25 +148,8 @@ const SettingsTab: FC<Props> = (props) => {
    setEnableMaxTokens(assistant?.settings?.enableMaxTokens ?? false)
    setMaxTokens(assistant?.settings?.maxTokens ?? DEFAULT_MAX_TOKENS)
    setStreamOutput(assistant?.settings?.streamOutput ?? true)
    setReasoningEffort(assistant?.settings?.reasoning_effort)
  }, [assistant])

  useEffect(() => {
    // When the model is a Grok model, normalize the reasoning_effort setting.
    // For Grok models, only 'low' and 'high' reasoning efforts are supported.
    // This ensures compatibility with the model's capabilities and avoids unsupported configurations.
    if (isGrokReasoningModel(assistant?.model || getDefaultModel())) {
      const currentEffort = assistant?.settings?.reasoning_effort
      if (!currentEffort || currentEffort === 'low') {
        setReasoningEffort('low') // Default to 'low' if no effort is set or if it's already 'low'.
        onReasoningEffortChange('low')
      } else if (currentEffort === 'medium' || currentEffort === 'high') {
        setReasoningEffort('high') // Force 'high' for 'medium' or 'high' to simplify the configuration.
        onReasoningEffortChange('high')
      }
    }
  }, [assistant?.model, assistant?.settings?.reasoning_effort, onReasoningEffortChange])

  const formatSliderTooltip = (value?: number) => {
    if (value === undefined) return ''
    return value === 20 ? '∞' : value.toString()
@@ -294,46 +265,6 @@ const SettingsTab: FC<Props> = (props) => {
          </Col>
        </Row>
      )}
      {isSupportedReasoningEffortModel(assistant?.model || getDefaultModel()) && (
        <>
          <SettingDivider />
          <Row align="middle">
            <Label>{t('assistants.settings.reasoning_effort')}</Label>
            <Tooltip title={t('assistants.settings.reasoning_effort.tip')}>
              <CircleHelp size={14} color="var(--color-text-2)" />
            </Tooltip>
          </Row>
          <Row align="middle" gutter={10}>
            <Col span={24}>
              <SegmentedContainer>
                <Segmented
                  value={reasoningEffort || 'off'}
                  onChange={(value) => {
                    const typedValue = value === 'off' ? undefined : (value as 'low' | 'medium' | 'high')
                    setReasoningEffort(typedValue)
                    onReasoningEffortChange(typedValue)
                  }}
                  options={
                    isGrokReasoningModel(assistant?.model || getDefaultModel())
                      ? [
                          { value: 'low', label: t('assistants.settings.reasoning_effort.low') },
                          { value: 'high', label: t('assistants.settings.reasoning_effort.high') }
                        ]
                      : [
                          { value: 'low', label: t('assistants.settings.reasoning_effort.low') },
                          { value: 'medium', label: t('assistants.settings.reasoning_effort.medium') },
                          { value: 'high', label: t('assistants.settings.reasoning_effort.high') },
                          { value: 'off', label: t('assistants.settings.reasoning_effort.off') }
                        ]
                  }
                  name="group"
                  block
                />
              </SegmentedContainer>
            </Col>
          </Row>
        </>
      )}
    </SettingGroup>
    <SettingGroup>
      <SettingSubtitle style={{ marginTop: 0 }}>{t('settings.messages.title')}</SettingSubtitle>
@@ -706,27 +637,6 @@ export const SettingGroup = styled.div<{ theme?: ThemeMode }>`
  margin-bottom: 10px;
`

// Define the styled component with hover state styling
const SegmentedContainer = styled.div`
  margin-top: 5px;
  .ant-segmented-item {
    font-size: 12px;
  }
  .ant-segmented-item-selected {
    background-color: var(--color-primary) !important;
    color: white !important;
  }

  .ant-segmented-item:hover:not(.ant-segmented-item-selected) {
    background-color: var(--color-primary-bg) !important;
    color: var(--color-primary) !important;
  }

  .ant-segmented-thumb {
    background-color: var(--color-primary) !important;
  }
`

const StyledSelect = styled(Select)`
  .ant-select-selector {
    border-radius: 15px !important;
|
||||
@@ -6,7 +6,7 @@ import { DEFAULT_CONTEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/cons
import { SettingRow } from '@renderer/pages/settings'
import { Assistant, AssistantSettingCustomParameters, AssistantSettings } from '@renderer/types'
import { modalConfirm } from '@renderer/utils'
import { Button, Col, Divider, Input, InputNumber, Radio, Row, Select, Slider, Switch, Tooltip } from 'antd'
import { Button, Col, Divider, Input, InputNumber, Row, Select, Slider, Switch, Tooltip } from 'antd'
import { isNull } from 'lodash'
import { FC, useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
@@ -23,7 +23,6 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
  const [contextCount, setContextCount] = useState(assistant?.settings?.contextCount ?? DEFAULT_CONTEXTCOUNT)
  const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
  const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
  const [reasoningEffort, setReasoningEffort] = useState(assistant?.settings?.reasoning_effort)
  const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
  const [defaultModel, setDefaultModel] = useState(assistant?.defaultModel)
  const [topP, setTopP] = useState(assistant?.settings?.topP ?? 1)
@@ -43,10 +42,6 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
    }
  }

  const onReasoningEffortChange = (value) => {
    updateAssistantSettings({ reasoning_effort: value })
  }

  const onContextCountChange = (value) => {
    if (!isNaN(value as number)) {
      updateAssistantSettings({ contextCount: value })
@@ -153,7 +148,6 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
    setMaxTokens(0)
    setStreamOutput(true)
    setTopP(1)
    setReasoningEffort(undefined)
    setCustomParameters([])
    updateAssistantSettings({
      temperature: DEFAULT_TEMPERATURE,
@@ -162,7 +156,6 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
      maxTokens: 0,
      streamOutput: true,
      topP: 1,
      reasoning_effort: undefined,
      customParameters: []
    })
  }
@@ -383,27 +376,6 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
        />
      </SettingRow>
      <Divider style={{ margin: '10px 0' }} />
      <SettingRow style={{ minHeight: 30 }}>
        <Label>
          {t('assistants.settings.reasoning_effort')}{' '}
          <Tooltip title={t('assistants.settings.reasoning_effort.tip')}>
            <QuestionIcon />
          </Tooltip>
        </Label>
        <Radio.Group
          value={reasoningEffort}
          buttonStyle="solid"
          onChange={(e) => {
            setReasoningEffort(e.target.value)
            onReasoningEffortChange(e.target.value)
          }}>
          <Radio.Button value="low">{t('assistants.settings.reasoning_effort.low')}</Radio.Button>
          <Radio.Button value="medium">{t('assistants.settings.reasoning_effort.medium')}</Radio.Button>
          <Radio.Button value="high">{t('assistants.settings.reasoning_effort.high')}</Radio.Button>
          <Radio.Button value={undefined}>{t('assistants.settings.reasoning_effort.off')}</Radio.Button>
        </Radio.Group>
      </SettingRow>
      <Divider style={{ margin: '10px 0' }} />
      <SettingRow style={{ minHeight: 30 }}>
        <Label>{t('models.custom_parameters')}</Label>
        <Button icon={<PlusOutlined />} onClick={onAddCustomParameter}>
|
||||
@@ -23,8 +23,6 @@ import OpenAI from 'openai'
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'

type ReasoningEffort = 'high' | 'medium' | 'low'

interface ReasoningConfig {
  type: 'enabled' | 'disabled'
  budget_tokens?: number
@@ -124,32 +122,17 @@ export default class AnthropicProvider extends BaseProvider {
   * @param model - The model
   * @returns The reasoning effort
   */
  private getReasoningEffort(assistant: Assistant, model: Model): ReasoningConfig | undefined {
  private getBudgetToken(assistant: Assistant, model: Model): ReasoningConfig | undefined {
    if (!isReasoningModel(model)) {
      return undefined
    }

    const effortRatios: Record<ReasoningEffort, number> = {
      high: 0.8,
      medium: 0.5,
      low: 0.2
    }

    const effort = assistant?.settings?.reasoning_effort as ReasoningEffort
    const effortRatio = effortRatios[effort]

    if (!effortRatio) {
      return undefined
    }

    const isClaude37Sonnet = model.id.includes('claude-3-7-sonnet') || model.id.includes('claude-3.7-sonnet')

    if (!isClaude37Sonnet) {
      return undefined
    }

    const maxTokens = assistant?.settings?.maxTokens || DEFAULT_MAX_TOKENS
    const budgetTokens = Math.trunc(Math.max(Math.min(maxTokens * effortRatio, 32000), 1024))
    const budgetTokens = assistant?.settings?.thinking_budget || maxTokens

    if (budgetTokens > maxTokens) {
      return undefined
    }

    return {
      type: 'enabled',
@@ -200,7 +183,7 @@ export default class AnthropicProvider extends BaseProvider {
      top_p: this.getTopP(assistant, model),
      system: systemPrompt,
      // @ts-ignore thinking
      thinking: this.getReasoningEffort(assistant, model),
      thinking: this.getBudgetToken(assistant, model),
      ...this.getCustomParameters(assistant)
    }
|
||||
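A rough sketch of the two outcomes the new `getBudgetToken` produces for a Claude 3.7 Sonnet assistant (the 10000/16000 numbers are illustrative, and the logic is condensed from the diff above):

```ts
// Illustrative only: how thinking_budget maps onto the Anthropic `thinking` block.
const settings = { thinking_budget: 10000, maxTokens: 16000 }

const thinking =
  settings.thinking_budget > settings.maxTokens
    ? undefined // over budget → the field is effectively omitted; the UI also warns via budget_exceeds_max
    : { type: 'enabled', budget_tokens: settings.thinking_budget }
// → { type: 'enabled', budget_tokens: 10000 }
```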
@@ -14,7 +14,7 @@ import {
  ToolListUnion
} from '@google/genai'
import {
  isGemini25ReasoningModel,
  isGeminiReasoningModel,
  isGemmaModel,
  isGenerateImageModel,
  isVisionModel,
@@ -54,8 +54,6 @@ import OpenAI from 'openai'
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'

type ReasoningEffort = 'low' | 'medium' | 'high'

export default class GeminiProvider extends BaseProvider {
  private sdk: GoogleGenAI
@@ -213,32 +211,25 @@ export default class GeminiProvider extends BaseProvider {
   * @param model - The model
   * @returns The reasoning effort
   */
  private getReasoningEffort(assistant: Assistant, model: Model) {
    if (isGemini25ReasoningModel(model)) {
      const effortRatios: Record<ReasoningEffort, number> = {
        high: 1,
        medium: 0.5,
        low: 0.2
      }
      const effort = assistant?.settings?.reasoning_effort as ReasoningEffort
      const effortRatio = effortRatios[effort]
      const maxBudgetToken = 24576 // https://ai.google.dev/gemini-api/docs/thinking
      const budgetTokens = Math.max(1024, Math.trunc(maxBudgetToken * effortRatio))
      if (!effortRatio) {
        return {
          thinkingConfig: {
            thinkingBudget: 0
          } as ThinkingConfig
        }
  private getBudgetToken(assistant: Assistant, model: Model) {
    if (isGeminiReasoningModel(model)) {
      // Check whether thinking_budget has been set explicitly
      const thinkingBudget = assistant?.settings?.thinking_budget

      // If thinking_budget is undefined, use the model's default behavior
      if (thinkingBudget === undefined) {
        return {} // Return an empty object so the model default applies
      }

      // If thinking_budget is an explicitly set value (including 0), use it
      return {
        thinkingConfig: {
          thinkingBudget: budgetTokens,
          thinkingBudget: thinkingBudget,
          includeThoughts: true
        } as ThinkingConfig
      }
    }

    return {}
  }
@@ -310,7 +301,7 @@ export default class GeminiProvider extends BaseProvider {
      topP: assistant?.settings?.topP,
      maxOutputTokens: maxTokens,
      tools: tools,
      ...this.getReasoningEffort(assistant, model),
      ...this.getBudgetToken(assistant, model),
      ...this.getCustomParameters(assistant)
    }
|
||||
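A sketch of the three cases the new Gemini `getBudgetToken` distinguishes (the mapping is reproduced standalone; budget values are illustrative):

```ts
// Sketch of the mapping, not the provider code itself.
function geminiThinkingConfig(thinkingBudget?: number) {
  if (thinkingBudget === undefined) return {} // let the model use its default thinking behavior
  return { thinkingConfig: { thinkingBudget, includeThoughts: true } }
}

geminiThinkingConfig()     // {}  → default thinking
geminiThinkingConfig(0)    // budget 0 → thinking effectively disabled
geminiThinkingConfig(8192) // explicit 8192-token thinking budget
```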
@@ -1,15 +1,17 @@
import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import {
  getOpenAIWebSearchParams,
  isGrokReasoningModel,
  isHunyuanSearchModel,
  isOpenAIoSeries,
  isOpenAIWebSearch,
  isReasoningModel,
  isSupportedModel,
  isSupportedReasoningEffortGrokModel,
  isSupportedReasoningEffortModel,
  isSupportedReasoningEffortOpenAIModel,
  isSupportedThinkingTokenClaudeModel,
  isSupportedThinkingTokenModel,
  isSupportedThinkingTokenQwenModel,
  isVisionModel,
  isZhipuModel,
  OPENAI_NO_SUPPORT_DEV_ROLE_MODELS
  isZhipuModel
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import i18n from '@renderer/i18n'
@@ -57,8 +59,6 @@ import {
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'

type ReasoningEffort = 'low' | 'medium' | 'high'

export default class OpenAIProvider extends BaseProvider {
  private sdk: OpenAI
@@ -262,46 +262,67 @@ export default class OpenAIProvider extends BaseProvider {

    if (isReasoningModel(model)) {
      if (model.provider === 'openrouter') {
        return {
          reasoning: {
            effort: assistant?.settings?.reasoning_effort
        if (isSupportedReasoningEffortModel(model)) {
          return {
            reasoning: {
              effort: assistant?.settings?.reasoning_effort
            }
          }
        } else if (isSupportedThinkingTokenModel(model)) {
          return {
            reasoning: {
              max_tokens: assistant?.settings?.thinking_budget
            }
          }
        }
      }
      const enableThinking = assistant?.enableThinking
      if (isSupportedThinkingTokenQwenModel(model)) {
        if (enableThinking) {
          return {
            enable_thinking: true,
            thinking_budget: assistant?.settings?.thinking_budget
          }
        } else {
          return {
            enable_thinking: false
          }
        }
      }

      if (isGrokReasoningModel(model)) {
        return {
          reasoning_effort: assistant?.settings?.reasoning_effort
        }
      }

      if (isOpenAIoSeries(model)) {
        return {
          reasoning_effort: assistant?.settings?.reasoning_effort
        }
      }

      if (model.id.includes('claude-3.7-sonnet') || model.id.includes('claude-3-7-sonnet')) {
        const effortRatios: Record<ReasoningEffort, number> = {
          high: 0.8,
          medium: 0.5,
          low: 0.2
        }

        const effort = assistant?.settings?.reasoning_effort as ReasoningEffort
        const effortRatio = effortRatios[effort]

        if (!effortRatio) {
      if (isSupportedReasoningEffortGrokModel(model)) {
        if (enableThinking) {
          return {
            reasoning_effort: assistant?.settings?.reasoning_effort
          }
        } else {
          return {}
        }
      }

        const maxTokens = assistant?.settings?.maxTokens || DEFAULT_MAX_TOKENS
        const budgetTokens = Math.trunc(Math.max(Math.min(maxTokens * effortRatio, 32000), 1024))
      if (isSupportedReasoningEffortOpenAIModel(model)) {
        if (enableThinking) {
          return {
            reasoning_effort: assistant?.settings?.reasoning_effort
          }
        } else {
          return {}
        }
      }

        return {
          thinking: {
            type: 'enabled',
            budget_tokens: budgetTokens
      if (isSupportedThinkingTokenClaudeModel(model)) {
        if (enableThinking) {
          return {
            thinking: {
              type: 'enabled',
              budget_tokens: assistant?.settings?.thinking_budget
            }
          }
        } else {
          return {
            thinking: {
              type: 'disabled'
            }
          }
        }
      }
@@ -341,7 +362,7 @@ export default class OpenAIProvider extends BaseProvider {
    const isEnabledWebSearch = assistant.enableWebSearch || !!assistant.webSearchProviderId
    messages = addImageFileToContents(messages)
    let systemMessage = { role: 'system', content: assistant.prompt || '' }
    if (isOpenAIoSeries(model) && !OPENAI_NO_SUPPORT_DEV_ROLE_MODELS.includes(model.id)) {
    if (isSupportedReasoningEffortOpenAIModel(model)) {
      systemMessage = {
        role: 'developer',
        content: `Formatting re-enabled${systemMessage ? '\n' + systemMessage.content : ''}`
|
||||
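A condensed summary of the per-family payloads the rewritten branch above produces; parameter names follow the code in the hunk, the example values are illustrative:

```ts
// Condensed sketch, not the provider code.
// OpenRouter + effort-style model : { reasoning: { effort: 'medium' } }
// OpenRouter + budget-style model : { reasoning: { max_tokens: 8192 } }
// Qwen3 (thinking enabled)        : { enable_thinking: true, thinking_budget: 8192 }
// Qwen3 (thinking disabled)       : { enable_thinking: false }
// Grok / OpenAI o-series          : { reasoning_effort: 'high' } — or {} when thinking is off
// Claude 3.7 Sonnet               : { thinking: { type: 'enabled', budget_tokens: 8192 } }
//                                   — or { thinking: { type: 'disabled' } } when off
```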
@@ -22,6 +22,7 @@ export type Assistant = {
  enableWebSearch?: boolean
  webSearchProviderId?: WebSearchProvider['id']
  enableGenerateImage?: boolean
  enableThinking?: boolean
  mcpServers?: MCPServer[]
}
@@ -47,6 +48,7 @@ export type AssistantSettings = {
  defaultModel?: Model
  customParameters?: AssistantSettingCustomParameters[]
  reasoning_effort?: 'low' | 'medium' | 'high'
  thinking_budget?: number
}

export type Agent = Omit<Assistant, 'model'> & {
|
||||
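An example of how the new fields sit alongside the existing settings; the surrounding values are illustrative and `Partial<...>` is used so the sketch does not assume which other fields are required:

```ts
import { Assistant, AssistantSettings } from '@renderer/types'

// Illustrative assistant configuration using the new fields.
const settings: Partial<AssistantSettings> = {
  temperature: 0.7,
  streamOutput: true,
  reasoning_effort: 'medium', // effort-style models (OpenAI o-series, Grok)
  thinking_budget: 8192 // budget-style models (Gemini, Qwen3, Claude 3.7)
}

const assistant: Partial<Assistant> = {
  enableThinking: true, // new flag consulted by OpenAIProvider for Qwen/Grok/o-series/Claude
  settings: settings as AssistantSettings
}
```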