Compare commits
15 Commits
feat/copy-
...
copilot/fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b14e48dd78 | ||
|
|
64fde27f9e | ||
|
|
b3a58ec321 | ||
|
|
0097ca80e2 | ||
|
|
d968df4612 | ||
|
|
2bd680361a | ||
|
|
cc676d4bef | ||
|
|
3b1155b538 | ||
|
|
03ff6e1ca6 | ||
|
|
706fac898a | ||
|
|
f5c144404d | ||
|
|
50a217a638 | ||
|
|
444c13e1e3 | ||
|
|
255b19d6ee | ||
|
|
f1f4831157 |
10
CLAUDE.md
10
CLAUDE.md
@@ -12,7 +12,15 @@ This file provides guidance to AI coding assistants when working with code in th
|
||||
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
|
||||
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
|
||||
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
|
||||
- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.
|
||||
|
||||
## Pull Request Workflow (CRITICAL)
|
||||
|
||||
When creating a Pull Request, you MUST:
|
||||
|
||||
1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
|
||||
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
|
||||
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
|
||||
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
|
||||
|
||||
## Development Commands
|
||||
|
||||
|
||||
@@ -134,9 +134,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
|
||||
releaseInfo:
|
||||
releaseNotes: |
|
||||
<!--LANG:en-->
|
||||
A New Era of Intelligence with Cherry Studio 1.7.0
|
||||
A New Era of Intelligence with Cherry Studio 1.7.1
|
||||
|
||||
Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
|
||||
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
|
||||
|
||||
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
|
||||
|
||||
@@ -187,9 +187,9 @@ releaseInfo:
|
||||
The Agent Era is here. We can't wait to see what you'll create.
|
||||
|
||||
<!--LANG:zh-CN-->
|
||||
Cherry Studio 1.7.0:开启智能新纪元
|
||||
Cherry Studio 1.7.1:开启智能新纪元
|
||||
|
||||
今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
|
||||
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
|
||||
|
||||
多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "CherryStudio",
|
||||
"version": "1.7.0",
|
||||
"version": "1.7.1",
|
||||
"private": true,
|
||||
"description": "A powerful AI assistant for producer.",
|
||||
"main": "./out/main/index.js",
|
||||
|
||||
@@ -69,6 +69,7 @@ export interface CherryInProviderSettings {
|
||||
headers?: HeadersInput
|
||||
/**
|
||||
* Optional endpoint type to distinguish different endpoint behaviors.
|
||||
* "image-generation" is also openai endpoint, but specifically for image generation.
|
||||
*/
|
||||
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// src/main/services/agents/services/claudecode/index.ts
|
||||
import { EventEmitter } from 'node:events'
|
||||
import { createRequire } from 'node:module'
|
||||
import path from 'node:path'
|
||||
|
||||
import type {
|
||||
CanUseTool,
|
||||
@@ -121,7 +122,11 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
// TODO: support set small model in UI
|
||||
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
|
||||
ELECTRON_RUN_AS_NODE: '1',
|
||||
ELECTRON_NO_ATTACH_CONSOLE: '1'
|
||||
ELECTRON_NO_ATTACH_CONSOLE: '1',
|
||||
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
|
||||
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
|
||||
// This prevents the SDK from using the user's home directory which may have encoding problems
|
||||
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
|
||||
}
|
||||
|
||||
const errorChunks: string[] = []
|
||||
|
||||
@@ -107,7 +107,7 @@ export async function buildStreamTextParams(
|
||||
searchWithTime: store.getState().websearch.searchWithTime
|
||||
}
|
||||
|
||||
const { providerOptions, standardParams } = buildProviderOptions(assistant, model, provider, {
|
||||
const { providerOptions, standardParams, bodyParams } = buildProviderOptions(assistant, model, provider, {
|
||||
enableReasoning,
|
||||
enableWebSearch,
|
||||
enableGenerateImage
|
||||
@@ -185,6 +185,7 @@ export async function buildStreamTextParams(
|
||||
// Note: standardParams (topK, frequencyPenalty, presencePenalty, stopSequences, seed)
|
||||
// are extracted from custom parameters and passed directly to streamText()
|
||||
// instead of being placed in providerOptions
|
||||
// Note: bodyParams are custom parameters for AI Gateway that should be at body level
|
||||
const params: StreamTextParams = {
|
||||
messages: sdkMessages,
|
||||
maxOutputTokens: getMaxTokens(assistant, model),
|
||||
@@ -192,6 +193,8 @@ export async function buildStreamTextParams(
|
||||
topP: getTopP(assistant, model),
|
||||
// Include AI SDK standard params extracted from custom parameters
|
||||
...standardParams,
|
||||
// Include body-level params for AI Gateway custom parameters
|
||||
...bodyParams,
|
||||
abortSignal: options.requestOptions?.signal,
|
||||
headers,
|
||||
providerOptions,
|
||||
|
||||
@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
|
||||
'gen_ai.usage.output_tokens'
|
||||
]
|
||||
|
||||
const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
|
||||
const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
|
||||
const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
|
||||
const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
|
||||
|
||||
if (completionTokens !== undefined || promptTokens !== undefined) {
|
||||
const usage: TokenUsage = {
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
import type { Span } from '@opentelemetry/api'
|
||||
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'
|
||||
|
||||
vi.mock('@logger', () => ({
|
||||
loggerService: {
|
||||
withContext: () => ({
|
||||
debug: vi.fn(),
|
||||
error: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn()
|
||||
})
|
||||
}
|
||||
}))
|
||||
|
||||
describe('AiSdkSpanAdapter', () => {
|
||||
const createMockSpan = (attributes: Record<string, unknown>): Span => {
|
||||
const span = {
|
||||
spanContext: () => ({
|
||||
traceId: 'trace-id',
|
||||
spanId: 'span-id'
|
||||
}),
|
||||
_attributes: attributes,
|
||||
_events: [],
|
||||
name: 'test span',
|
||||
status: { code: SpanStatusCode.OK },
|
||||
kind: SpanKind.CLIENT,
|
||||
startTime: [0, 0] as [number, number],
|
||||
endTime: [0, 1] as [number, number],
|
||||
ended: true,
|
||||
parentSpanId: '',
|
||||
links: []
|
||||
}
|
||||
return span as unknown as Span
|
||||
}
|
||||
|
||||
it('maps prompt and completion usage tokens to the correct fields', () => {
|
||||
const attributes = {
|
||||
'ai.usage.promptTokens': 321,
|
||||
'ai.usage.completionTokens': 654
|
||||
}
|
||||
|
||||
const span = createMockSpan(attributes)
|
||||
const result = AiSdkSpanAdapter.convertToSpanEntity({ span })
|
||||
|
||||
expect(result.usage).toBeDefined()
|
||||
expect(result.usage?.prompt_tokens).toBe(321)
|
||||
expect(result.usage?.completion_tokens).toBe(654)
|
||||
expect(result.usage?.total_tokens).toBe(975)
|
||||
})
|
||||
})
|
||||
@@ -37,7 +37,7 @@ vi.mock('@cherrystudio/ai-core/provider', async (importOriginal) => {
|
||||
},
|
||||
customProviderIdSchema: {
|
||||
safeParse: vi.fn((id) => {
|
||||
const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock']
|
||||
const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock', 'ai-gateway']
|
||||
if (customProviders.includes(id)) {
|
||||
return { success: true, data: id }
|
||||
}
|
||||
@@ -56,7 +56,8 @@ vi.mock('../provider/factory', () => ({
|
||||
[SystemProviderIds.anthropic]: 'anthropic',
|
||||
[SystemProviderIds.grok]: 'xai',
|
||||
[SystemProviderIds.deepseek]: 'deepseek',
|
||||
[SystemProviderIds.openrouter]: 'openrouter'
|
||||
[SystemProviderIds.openrouter]: 'openrouter',
|
||||
[SystemProviderIds['ai-gateway']]: 'ai-gateway'
|
||||
}
|
||||
return mapping[provider.id] || provider.id
|
||||
})
|
||||
@@ -204,6 +205,8 @@ describe('options utils', () => {
|
||||
expect(result.providerOptions).toHaveProperty('openai')
|
||||
expect(result.providerOptions.openai).toBeDefined()
|
||||
expect(result.standardParams).toBeDefined()
|
||||
expect(result.bodyParams).toBeDefined()
|
||||
expect(result.bodyParams).toEqual({})
|
||||
})
|
||||
|
||||
it('should include reasoning parameters when enabled', () => {
|
||||
@@ -696,5 +699,90 @@ describe('options utils', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('AI Gateway provider', () => {
|
||||
const aiGatewayProvider: Provider = {
|
||||
id: SystemProviderIds['ai-gateway'],
|
||||
name: 'AI Gateway',
|
||||
type: 'ai-gateway',
|
||||
apiKey: 'test-key',
|
||||
apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
|
||||
isSystem: true,
|
||||
models: [] as Model[]
|
||||
} as Provider
|
||||
|
||||
const aiGatewayModel: Model = {
|
||||
id: 'openai/gpt-4',
|
||||
name: 'GPT-4',
|
||||
provider: SystemProviderIds['ai-gateway']
|
||||
} as Model
|
||||
|
||||
it('should build basic AI Gateway options with empty bodyParams', () => {
|
||||
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
|
||||
enableReasoning: false,
|
||||
enableWebSearch: false,
|
||||
enableGenerateImage: false
|
||||
})
|
||||
|
||||
expect(result.providerOptions).toHaveProperty('gateway')
|
||||
expect(result.providerOptions.gateway).toBeDefined()
|
||||
expect(result.bodyParams).toEqual({})
|
||||
})
|
||||
|
||||
it('should place custom parameters in bodyParams for AI Gateway instead of providerOptions', async () => {
|
||||
const { getCustomParameters } = await import('../reasoning')
|
||||
|
||||
vi.mocked(getCustomParameters).mockReturnValue({
|
||||
tools: [{ id: 'openai.image_generation' }],
|
||||
custom_param: 'custom_value'
|
||||
})
|
||||
|
||||
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
|
||||
enableReasoning: false,
|
||||
enableWebSearch: false,
|
||||
enableGenerateImage: false
|
||||
})
|
||||
|
||||
// Custom parameters should be in bodyParams, NOT in providerOptions.gateway
|
||||
expect(result.bodyParams).toHaveProperty('tools')
|
||||
expect(result.bodyParams.tools).toEqual([{ id: 'openai.image_generation' }])
|
||||
expect(result.bodyParams).toHaveProperty('custom_param')
|
||||
expect(result.bodyParams.custom_param).toBe('custom_value')
|
||||
|
||||
// providerOptions.gateway should NOT contain custom parameters
|
||||
expect(result.providerOptions.gateway).not.toHaveProperty('tools')
|
||||
expect(result.providerOptions.gateway).not.toHaveProperty('custom_param')
|
||||
})
|
||||
|
||||
it('should still extract AI SDK standard params from custom parameters for AI Gateway', async () => {
|
||||
const { getCustomParameters } = await import('../reasoning')
|
||||
|
||||
vi.mocked(getCustomParameters).mockReturnValue({
|
||||
topK: 5,
|
||||
frequencyPenalty: 0.5,
|
||||
tools: [{ id: 'openai.image_generation' }]
|
||||
})
|
||||
|
||||
const result = buildProviderOptions(mockAssistant, aiGatewayModel, aiGatewayProvider, {
|
||||
enableReasoning: false,
|
||||
enableWebSearch: false,
|
||||
enableGenerateImage: false
|
||||
})
|
||||
|
||||
// Standard params should be extracted and returned separately
|
||||
expect(result.standardParams).toEqual({
|
||||
topK: 5,
|
||||
frequencyPenalty: 0.5
|
||||
})
|
||||
|
||||
// Custom params (non-standard) should be in bodyParams
|
||||
expect(result.bodyParams).toHaveProperty('tools')
|
||||
expect(result.bodyParams.tools).toEqual([{ id: 'openai.image_generation' }])
|
||||
|
||||
// Neither should be in providerOptions.gateway
|
||||
expect(result.providerOptions.gateway).not.toHaveProperty('topK')
|
||||
expect(result.providerOptions.gateway).not.toHaveProperty('tools')
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
|
||||
it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
|
||||
const { isReasoningModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@@ -161,6 +161,29 @@ describe('reasoning utils', () => {
|
||||
settings: {}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
|
||||
const { isReasoningModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
|
||||
const model: Model = {
|
||||
id: 'anthropic/claude-sonnet-4',
|
||||
name: 'Claude Sonnet 4',
|
||||
provider: SystemProviderIds.openrouter
|
||||
} as Model
|
||||
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
|
||||
})
|
||||
@@ -269,7 +292,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
|
||||
@@ -155,6 +155,7 @@ export function buildProviderOptions(
|
||||
): {
|
||||
providerOptions: Record<string, Record<string, JSONValue>>
|
||||
standardParams: Partial<Record<AiSdkParam, any>>
|
||||
bodyParams: Record<string, any>
|
||||
} {
|
||||
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
|
||||
const rawProviderId = getAiSdkProviderId(actualProvider)
|
||||
@@ -253,12 +254,6 @@ export function buildProviderOptions(
|
||||
const customParams = getCustomParameters(assistant)
|
||||
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
|
||||
|
||||
// 合并 provider 特定的自定义参数到 providerSpecificOptions
|
||||
providerSpecificOptions = {
|
||||
...providerSpecificOptions,
|
||||
...providerParams
|
||||
}
|
||||
|
||||
let rawProviderKey =
|
||||
{
|
||||
'google-vertex': 'google',
|
||||
@@ -273,12 +268,27 @@ export function buildProviderOptions(
|
||||
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type
|
||||
}
|
||||
|
||||
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数
|
||||
// For AI Gateway, custom parameters should be placed at body level, not inside providerOptions.gateway
|
||||
// See: https://github.com/CherryHQ/cherry-studio/issues/4197
|
||||
let bodyParams: Record<string, any> = {}
|
||||
if (rawProviderKey === 'gateway') {
|
||||
// Custom parameters go to body level for AI Gateway
|
||||
bodyParams = providerParams
|
||||
} else {
|
||||
// For other providers, merge custom parameters into providerSpecificOptions
|
||||
providerSpecificOptions = {
|
||||
...providerSpecificOptions,
|
||||
...providerParams
|
||||
}
|
||||
}
|
||||
|
||||
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数和 body 参数
|
||||
return {
|
||||
providerOptions: {
|
||||
[rawProviderKey]: providerSpecificOptions
|
||||
},
|
||||
standardParams
|
||||
standardParams,
|
||||
bodyParams
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,10 +16,8 @@ import {
|
||||
isGPT5SeriesModel,
|
||||
isGPT51SeriesModel,
|
||||
isGrok4FastReasoningModel,
|
||||
isGrokReasoningModel,
|
||||
isOpenAIDeepResearchModel,
|
||||
isOpenAIModel,
|
||||
isOpenAIReasoningModel,
|
||||
isQwenAlwaysThinkModel,
|
||||
isQwenReasoningModel,
|
||||
isReasoningModel,
|
||||
@@ -64,30 +62,22 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
// Handle undefined and 'none' reasoningEffort.
|
||||
// TODO: They should be separated.
|
||||
if (!reasoningEffort || reasoningEffort === 'none') {
|
||||
// reasoningEffort is not set, no extra reasoning setting
|
||||
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
|
||||
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
|
||||
if (!reasoningEffort) {
|
||||
return {}
|
||||
}
|
||||
|
||||
// Handle 'none' reasoningEffort. It's explicitly off.
|
||||
if (reasoningEffort === 'none') {
|
||||
// openrouter: use reasoning
|
||||
if (model.provider === SystemProviderIds.openrouter) {
|
||||
// Don't disable reasoning for Gemini models that support thinking tokens
|
||||
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
||||
return {}
|
||||
}
|
||||
// 'none' is not an available value for effort for now.
|
||||
// I think they should resolve this issue soon, so I'll just go ahead and use this value.
|
||||
if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
|
||||
return { reasoning: { effort: 'none' } }
|
||||
}
|
||||
// Don't disable reasoning for models that require it
|
||||
if (
|
||||
isGrokReasoningModel(model) ||
|
||||
isOpenAIReasoningModel(model) ||
|
||||
isQwenAlwaysThinkModel(model) ||
|
||||
model.id.includes('seed-oss') ||
|
||||
model.id.includes('minimax-m2')
|
||||
) {
|
||||
return {}
|
||||
}
|
||||
return { reasoning: { enabled: false, exclude: true } }
|
||||
}
|
||||
|
||||
@@ -101,11 +91,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
return { enable_thinking: false }
|
||||
}
|
||||
|
||||
// claude
|
||||
if (isSupportedThinkingTokenClaudeModel(model)) {
|
||||
return {}
|
||||
}
|
||||
|
||||
// gemini
|
||||
if (isSupportedThinkingTokenGeminiModel(model)) {
|
||||
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
||||
@@ -118,8 +103,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
|
||||
return {}
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
// use thinking, doubao, zhipu, etc.
|
||||
@@ -139,6 +126,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
}
|
||||
|
||||
logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
|
||||
return {}
|
||||
}
|
||||
|
||||
@@ -293,6 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
|
||||
// OpenRouter models, use reasoning
|
||||
// FIXME: duplicated openrouter handling. remove one
|
||||
if (model.provider === SystemProviderIds.openrouter) {
|
||||
if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
|
||||
return {
|
||||
|
||||
@@ -215,6 +215,10 @@
|
||||
border-top: none !important;
|
||||
}
|
||||
|
||||
.ant-collapse-header-text {
|
||||
overflow-x: hidden;
|
||||
}
|
||||
|
||||
.ant-slider .ant-slider-handle::after {
|
||||
box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import { convertImageToPng } from '@renderer/utils/image'
|
||||
import type { ImageProps as AntImageProps } from 'antd'
|
||||
import { Dropdown, Image as AntImage, Space } from 'antd'
|
||||
import { Base64 } from 'js-base64'
|
||||
import { DownloadIcon } from 'lucide-react'
|
||||
import { DownloadIcon, ImageIcon } from 'lucide-react'
|
||||
import mime from 'mime'
|
||||
import React from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
@@ -72,15 +72,9 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
|
||||
|
||||
const getContextMenuItems = (src: string, size: number = 14) => {
|
||||
return [
|
||||
{
|
||||
key: 'copy-image',
|
||||
label: t('common.copy'),
|
||||
icon: <CopyIcon size={size} />,
|
||||
onClick: () => handleCopyImage(src)
|
||||
},
|
||||
{
|
||||
key: 'copy-url',
|
||||
label: t('preview.copy.src'),
|
||||
label: t('common.copy'),
|
||||
icon: <CopyIcon size={size} />,
|
||||
onClick: () => {
|
||||
navigator.clipboard.writeText(src)
|
||||
@@ -92,6 +86,12 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
|
||||
label: t('common.download'),
|
||||
icon: <DownloadIcon size={size} />,
|
||||
onClick: () => download(src)
|
||||
},
|
||||
{
|
||||
key: 'copy-image',
|
||||
label: t('preview.copy.image'),
|
||||
icon: <ImageIcon size={size} />,
|
||||
onClick: () => handleCopyImage(src)
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -460,16 +460,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
|
||||
}
|
||||
|
||||
export const isDeepSeekHybridInferenceModel = (model: Model) => {
|
||||
const modelId = getLowerBaseModelName(model.id)
|
||||
// deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
|
||||
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
|
||||
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
|
||||
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
|
||||
// until the end of the string.
|
||||
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
|
||||
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
|
||||
// TODO: move to utils and add test cases
|
||||
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
|
||||
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
|
||||
const modelId = getLowerBaseModelName(model.id)
|
||||
// deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
|
||||
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
|
||||
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
|
||||
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
|
||||
// until the end of the string.
|
||||
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
|
||||
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
|
||||
// TODO: move to utils and add test cases
|
||||
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
|
||||
})
|
||||
return idResult || nameResult
|
||||
}
|
||||
|
||||
export const isLingReasoningModel = (model?: Model): boolean => {
|
||||
@@ -523,7 +526,6 @@ export function isReasoningModel(model?: Model): boolean {
|
||||
REASONING_REGEX.test(model.name) ||
|
||||
isSupportedThinkingTokenDoubaoModel(model) ||
|
||||
isDeepSeekHybridInferenceModel(model) ||
|
||||
isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
|
||||
false
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { throttle } from 'lodash'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useEffect, useMemo, useRef } from 'react'
|
||||
|
||||
import { useTimer } from './useTimer'
|
||||
|
||||
@@ -12,13 +12,18 @@ import { useTimer } from './useTimer'
|
||||
*/
|
||||
export default function useScrollPosition(key: string, throttleWait?: number) {
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
const scrollKey = `scroll:${key}`
|
||||
const scrollKey = useMemo(() => `scroll:${key}`, [key])
|
||||
const scrollKeyRef = useRef(scrollKey)
|
||||
const { setTimeoutTimer } = useTimer()
|
||||
|
||||
useEffect(() => {
|
||||
scrollKeyRef.current = scrollKey
|
||||
}, [scrollKey])
|
||||
|
||||
const handleScroll = throttle(() => {
|
||||
const position = containerRef.current?.scrollTop ?? 0
|
||||
window.requestAnimationFrame(() => {
|
||||
window.keyv.set(scrollKey, position)
|
||||
window.keyv.set(scrollKeyRef.current, position)
|
||||
})
|
||||
}, throttleWait ?? 100)
|
||||
|
||||
@@ -28,5 +33,9 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
|
||||
setTimeoutTimer('scrollEffect', scroll, 50)
|
||||
}, [scrollKey, setTimeoutTimer])
|
||||
|
||||
useEffect(() => {
|
||||
return () => handleScroll.cancel()
|
||||
}, [handleScroll])
|
||||
|
||||
return { containerRef, handleScroll }
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
|
||||
/**
|
||||
* 定时器管理 Hook,用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
|
||||
@@ -43,10 +43,38 @@ export const useTimer = () => {
|
||||
const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
|
||||
const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setTimeout 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearTimeoutTimer = useCallback((key: string) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
timeoutMapRef.current.delete(key)
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setInterval 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearIntervalTimer = useCallback((key: string) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
intervalMapRef.current.delete(key)
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除所有定时器,包括 setTimeout 和 setInterval
|
||||
*/
|
||||
const clearAllTimers = useCallback(() => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
intervalMapRef.current.clear()
|
||||
}, [])
|
||||
|
||||
// 组件卸载时自动清理所有定时器
|
||||
useEffect(() => {
|
||||
return () => clearAllTimers()
|
||||
}, [])
|
||||
}, [clearAllTimers])
|
||||
|
||||
/**
|
||||
* 设置一个 setTimeout 定时器
|
||||
@@ -65,12 +93,15 @@ export const useTimer = () => {
|
||||
* cleanup();
|
||||
* ```
|
||||
*/
|
||||
const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
const timer = setTimeout(...args)
|
||||
timeoutMapRef.current.set(key, timer)
|
||||
return () => clearTimeoutTimer(key)
|
||||
}
|
||||
const setTimeoutTimer = useCallback(
|
||||
(key: string, ...args: Parameters<typeof setTimeout>) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
const timer = setTimeout(...args)
|
||||
timeoutMapRef.current.set(key, timer)
|
||||
return () => clearTimeoutTimer(key)
|
||||
},
|
||||
[clearTimeoutTimer]
|
||||
)
|
||||
|
||||
/**
|
||||
* 设置一个 setInterval 定时器
|
||||
@@ -89,56 +120,31 @@ export const useTimer = () => {
|
||||
* cleanup();
|
||||
* ```
|
||||
*/
|
||||
const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
const timer = setInterval(...args)
|
||||
intervalMapRef.current.set(key, timer)
|
||||
return () => clearIntervalTimer(key)
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setTimeout 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearTimeoutTimer = (key: string) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
timeoutMapRef.current.delete(key)
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setInterval 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearIntervalTimer = (key: string) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
intervalMapRef.current.delete(key)
|
||||
}
|
||||
const setIntervalTimer = useCallback(
|
||||
(key: string, ...args: Parameters<typeof setInterval>) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
const timer = setInterval(...args)
|
||||
intervalMapRef.current.set(key, timer)
|
||||
return () => clearIntervalTimer(key)
|
||||
},
|
||||
[clearIntervalTimer]
|
||||
)
|
||||
|
||||
/**
|
||||
* 清除所有 setTimeout 定时器
|
||||
*/
|
||||
const clearAllTimeoutTimers = () => {
|
||||
const clearAllTimeoutTimers = useCallback(() => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
}
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除所有 setInterval 定时器
|
||||
*/
|
||||
const clearAllIntervalTimers = () => {
|
||||
const clearAllIntervalTimers = useCallback(() => {
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
intervalMapRef.current.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除所有定时器,包括 setTimeout 和 setInterval
|
||||
*/
|
||||
const clearAllTimers = () => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
intervalMapRef.current.clear()
|
||||
}
|
||||
}, [])
|
||||
|
||||
return {
|
||||
setTimeoutTimer,
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Tool request was denied.",
|
||||
"timeout": "Tool request timed out before receiving approval."
|
||||
},
|
||||
"toolPendingFallback": "Tool",
|
||||
"waiting": "Waiting for tool permission decision..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Image Generation",
|
||||
"image-generation": "Image Generation (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Copy as image",
|
||||
"src": "Copy Image Source"
|
||||
"image": "Copy as image"
|
||||
},
|
||||
"dialog": "Open Dialog",
|
||||
"label": "Preview",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "工具请求已被拒绝。",
|
||||
"timeout": "工具请求在收到批准前超时。"
|
||||
},
|
||||
"toolPendingFallback": "工具",
|
||||
"waiting": "等待工具权限决定..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "图片生成",
|
||||
"image-generation": "图像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina 重排序",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "复制为图片",
|
||||
"src": "复制图片源"
|
||||
"image": "复制为图片"
|
||||
},
|
||||
"dialog": "打开预览窗口",
|
||||
"label": "预览",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "工具請求已被拒絕。",
|
||||
"timeout": "工具請求在收到核准前逾時。"
|
||||
},
|
||||
"toolPendingFallback": "工具",
|
||||
"waiting": "等待工具權限決定..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "圖片生成",
|
||||
"image-generation": "圖像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "複製為圖片",
|
||||
"src": "複製圖片來源"
|
||||
"image": "複製為圖片"
|
||||
},
|
||||
"dialog": "開啟預覽窗口",
|
||||
"label": "預覽",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Tool-Anfrage wurde abgelehnt.",
|
||||
"timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist."
|
||||
},
|
||||
"toolPendingFallback": "Werkzeug",
|
||||
"waiting": "Warten auf Entscheidung über Tool-Berechtigung..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Bildgenerierung",
|
||||
"image-generation": "Bilderzeugung (OpenAI)",
|
||||
"jina-rerank": "Jina Reranking",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Als Bild kopieren",
|
||||
"src": "Bildquelle kopieren"
|
||||
"image": "Als Bild kopieren"
|
||||
},
|
||||
"dialog": "Vorschaufenster öffnen",
|
||||
"label": "Vorschau",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Το αίτημα για εργαλείο απορρίφθηκε.",
|
||||
"timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση."
|
||||
},
|
||||
"toolPendingFallback": "Εργαλείο",
|
||||
"waiting": "Αναμονή για απόφαση άδειας εργαλείου..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Δημιουργία Εικόνας",
|
||||
"image-generation": "Δημιουργία Εικόνων (OpenAI)",
|
||||
"jina-rerank": "Επαναταξινόμηση Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Απάντηση OpenAI"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Αντιγραφή ως εικόνα",
|
||||
"src": "Αντιγραφή πηγής εικόνας"
|
||||
"image": "Αντιγραφή ως εικόνα"
|
||||
},
|
||||
"dialog": "Άνοιγμα παραθύρου προεπισκόπησης",
|
||||
"label": "Προεπισκόπηση",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "La solicitud de herramienta fue denegada.",
|
||||
"timeout": "La solicitud de herramienta expiró antes de recibir la aprobación."
|
||||
},
|
||||
"toolPendingFallback": "Herramienta",
|
||||
"waiting": "Esperando la decisión de permiso de la herramienta..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Generación de imágenes",
|
||||
"image-generation": "Generación de Imágenes (OpenAI)",
|
||||
"jina-rerank": "Reordenamiento Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Respuesta de OpenAI"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Copiar como imagen",
|
||||
"src": "Copia la fuente de la imagen"
|
||||
"image": "Copiar como imagen"
|
||||
},
|
||||
"dialog": "Abrir la ventana de vista previa",
|
||||
"label": "Vista previa",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "La demande d'outil a été refusée.",
|
||||
"timeout": "La demande d'outil a expiré avant d'obtenir l'approbation."
|
||||
},
|
||||
"toolPendingFallback": "Outil",
|
||||
"waiting": "En attente de la décision d'autorisation de l'outil..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Génération d'images",
|
||||
"image-generation": "Génération d'images (OpenAI)",
|
||||
"jina-rerank": "Reclassement Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Réponse OpenAI"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Copier en tant qu'image",
|
||||
"src": "Copier la source de l'image"
|
||||
"image": "Copier en tant qu'image"
|
||||
},
|
||||
"dialog": "Ouvrir la fenêtre d'aperçu",
|
||||
"label": "Aperçu",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "ツールリクエストは拒否されました。",
|
||||
"timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。"
|
||||
},
|
||||
"toolPendingFallback": "ツール",
|
||||
"waiting": "ツールの許可決定を待っています..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "画像生成",
|
||||
"image-generation": "画像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "画像としてコピー",
|
||||
"src": "画像ソースをコピー"
|
||||
"image": "画像としてコピー"
|
||||
},
|
||||
"dialog": "ダイアログを開く",
|
||||
"label": "プレビュー",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"error": {
|
||||
"failed": "Falha ao excluir o agente"
|
||||
},
|
||||
"title": "删除代理"
|
||||
"title": "Excluir Agente"
|
||||
},
|
||||
"edit": {
|
||||
"title": "Agent Editor"
|
||||
@@ -111,7 +111,7 @@
|
||||
"label": "Modo de permissão",
|
||||
"options": {
|
||||
"acceptEdits": "Aceitar edições automaticamente",
|
||||
"bypassPermissions": "忽略检查 de permissão",
|
||||
"bypassPermissions": "Ignorar verificações de permissão",
|
||||
"default": "Padrão (perguntar antes de continuar)",
|
||||
"plan": "Modo de planejamento (plano sujeito a aprovação)"
|
||||
},
|
||||
@@ -150,7 +150,7 @@
|
||||
},
|
||||
"success": {
|
||||
"install": "Plugin instalado com sucesso",
|
||||
"uninstall": "插件 desinstalado com sucesso"
|
||||
"uninstall": "Plugin desinstalado com sucesso"
|
||||
},
|
||||
"tab": "plug-in",
|
||||
"type": {
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Solicitação de ferramenta foi negada.",
|
||||
"timeout": "A solicitação da ferramenta expirou antes de receber aprovação."
|
||||
},
|
||||
"toolPendingFallback": "Ferramenta",
|
||||
"waiting": "Aguardando decisão de permissão da ferramenta..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1134,7 +1135,7 @@
|
||||
"duplicate": "Duplicar",
|
||||
"edit": "Editar",
|
||||
"enabled": "Ativado",
|
||||
"error": "错误",
|
||||
"error": "Erro",
|
||||
"errors": {
|
||||
"create_message": "Falha ao criar mensagem",
|
||||
"validation": "Falha na verificação"
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Geração de Imagem",
|
||||
"image-generation": "Geração de Imagens (OpenAI)",
|
||||
"jina-rerank": "Jina Reordenar",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Resposta OpenAI"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Copiar como imagem",
|
||||
"src": "Copiar Origem da Imagem"
|
||||
"image": "Copiar como imagem"
|
||||
},
|
||||
"dialog": "Abrir janela de pré-visualização",
|
||||
"label": "Pré-visualização",
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Запрос на инструмент был отклонён.",
|
||||
"timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения."
|
||||
},
|
||||
"toolPendingFallback": "Инструмент",
|
||||
"waiting": "Ожидание решения о разрешении на использование инструмента..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Изображение",
|
||||
"image-generation": "Генерация изображений (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
@@ -2509,8 +2510,7 @@
|
||||
},
|
||||
"preview": {
|
||||
"copy": {
|
||||
"image": "Скопировать как изображение",
|
||||
"src": "Копировать источник изображения"
|
||||
"image": "Скопировать как изображение"
|
||||
},
|
||||
"dialog": "Открыть диалог",
|
||||
"label": "Предварительный просмотр",
|
||||
|
||||
@@ -9,6 +9,7 @@ import { getModel } from '@renderer/hooks/useModel'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import { CacheService } from '@renderer/services/CacheService'
|
||||
import { pauseTrace } from '@renderer/services/SpanManagerService'
|
||||
import { estimateUserPromptUsage } from '@renderer/services/TokenService'
|
||||
import { useAppDispatch, useAppSelector } from '@renderer/store'
|
||||
@@ -41,19 +42,10 @@ import { getInputbarConfig } from './registry'
|
||||
import { TopicType } from './types'
|
||||
|
||||
const logger = loggerService.withContext('AgentSessionInputbar')
|
||||
const agentSessionDraftCache = new Map<string, string>()
|
||||
|
||||
const readDraftFromCache = (key: string): string => {
|
||||
return agentSessionDraftCache.get(key) ?? ''
|
||||
}
|
||||
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
|
||||
|
||||
const writeDraftToCache = (key: string, value: string) => {
|
||||
if (!value) {
|
||||
agentSessionDraftCache.delete(key)
|
||||
} else {
|
||||
agentSessionDraftCache.set(key, value)
|
||||
}
|
||||
}
|
||||
const getAgentDraftCacheKey = (agentId: string) => `agent-session-draft-${agentId}`
|
||||
|
||||
type Props = {
|
||||
agentId: string
|
||||
@@ -170,16 +162,15 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
|
||||
const scope = TopicType.Session
|
||||
const config = getInputbarConfig(scope)
|
||||
|
||||
// Use shared hooks for text and textarea management
|
||||
const initialDraft = useMemo(() => readDraftFromCache(agentId), [agentId])
|
||||
const persistDraft = useCallback((next: string) => writeDraftToCache(agentId, next), [agentId])
|
||||
// Use shared hooks for text and textarea management with draft persistence
|
||||
const draftCacheKey = getAgentDraftCacheKey(agentId)
|
||||
const {
|
||||
text,
|
||||
setText,
|
||||
isEmpty: inputEmpty
|
||||
} = useInputText({
|
||||
initialValue: initialDraft,
|
||||
onChange: persistDraft
|
||||
initialValue: CacheService.get<string>(draftCacheKey) ?? '',
|
||||
onChange: (value) => CacheService.set(draftCacheKey, value, DRAFT_CACHE_TTL)
|
||||
})
|
||||
const {
|
||||
textareaRef,
|
||||
@@ -431,6 +422,7 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
|
||||
})
|
||||
)
|
||||
|
||||
// Clear text after successful send (draft is cleared automatically via onChange)
|
||||
setText('')
|
||||
setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
|
||||
} catch (error) {
|
||||
|
||||
@@ -14,7 +14,6 @@ import { useInputText } from '@renderer/hooks/useInputText'
|
||||
import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useShortcut } from '@renderer/hooks/useShortcuts'
|
||||
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
|
||||
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import {
|
||||
@@ -24,6 +23,7 @@ import {
|
||||
useInputbarToolsState
|
||||
} from '@renderer/pages/home/Inputbar/context/InputbarToolsProvider'
|
||||
import { getDefaultTopic } from '@renderer/services/AssistantService'
|
||||
import { CacheService } from '@renderer/services/CacheService'
|
||||
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
|
||||
import FileManager from '@renderer/services/FileManager'
|
||||
import { checkRateLimit, getUserMessage } from '@renderer/services/MessagesService'
|
||||
@@ -39,7 +39,7 @@ import { getSendMessageShortcutLabel } from '@renderer/utils/input'
|
||||
import { documentExts, imageExts, textExts } from '@shared/config/constant'
|
||||
import { debounce } from 'lodash'
|
||||
import type { FC } from 'react'
|
||||
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import React, { useCallback, useEffect, useEffectEvent, useMemo, useRef, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
import { InputbarCore } from './components/InputbarCore'
|
||||
@@ -51,6 +51,17 @@ import TokenCount from './TokenCount'
|
||||
|
||||
const logger = loggerService.withContext('Inputbar')
|
||||
|
||||
const INPUTBAR_DRAFT_CACHE_KEY = 'inputbar-draft'
|
||||
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
|
||||
|
||||
const getMentionedModelsCacheKey = (assistantId: string) => `inputbar-mentioned-models-${assistantId}`
|
||||
|
||||
const getValidatedCachedModels = (assistantId: string): Model[] => {
|
||||
const cached = CacheService.get<Model[]>(getMentionedModelsCacheKey(assistantId))
|
||||
if (!Array.isArray(cached)) return []
|
||||
return cached.filter((model) => model?.id && model?.name)
|
||||
}
|
||||
|
||||
interface Props {
|
||||
assistant: Assistant
|
||||
setActiveTopic: (topic: Topic) => void
|
||||
@@ -80,16 +91,18 @@ const Inputbar: FC<Props> = ({ assistant: initialAssistant, setActiveTopic, topi
|
||||
toggleExpanded: () => {}
|
||||
})
|
||||
|
||||
const [initialMentionedModels] = useState(() => getValidatedCachedModels(initialAssistant.id))
|
||||
|
||||
const initialState = useMemo(
|
||||
() => ({
|
||||
files: [] as FileType[],
|
||||
mentionedModels: [] as Model[],
|
||||
mentionedModels: initialMentionedModels,
|
||||
selectedKnowledgeBases: initialAssistant.knowledge_bases ?? [],
|
||||
isExpanded: false,
|
||||
couldAddImageFile: false,
|
||||
extensions: [] as string[]
|
||||
}),
|
||||
[initialAssistant.knowledge_bases]
|
||||
[initialMentionedModels, initialAssistant.knowledge_bases]
|
||||
)
|
||||
|
||||
return (
|
||||
@@ -121,7 +134,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
|
||||
const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()
|
||||
|
||||
const { text, setText } = useInputText()
|
||||
const { text, setText } = useInputText({
|
||||
initialValue: CacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
|
||||
onChange: (value) => CacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
|
||||
})
|
||||
const {
|
||||
textareaRef,
|
||||
resize: resizeTextArea,
|
||||
@@ -133,7 +149,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
minHeight: 30
|
||||
})
|
||||
|
||||
const showKnowledgeIcon = useSidebarIconShow('knowledge')
|
||||
const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
|
||||
const { sendMessageShortcut, showInputEstimatedTokens, enableQuickPanelTriggers } = useSettings()
|
||||
const [estimateTokenCount, setEstimateTokenCount] = useState(0)
|
||||
@@ -190,6 +205,15 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
setCouldAddImageFile(canAddImageFile)
|
||||
}, [canAddImageFile, setCouldAddImageFile])
|
||||
|
||||
const onUnmount = useEffectEvent((id: string) => {
|
||||
CacheService.set(getMentionedModelsCacheKey(id), mentionedModels, DRAFT_CACHE_TTL)
|
||||
})
|
||||
|
||||
useEffect(() => {
|
||||
return () => onUnmount(assistant.id)
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [assistant.id])
|
||||
|
||||
const placeholderText = enableQuickPanelTriggers
|
||||
? t('chat.input.placeholder', { key: getSendMessageShortcutLabel(sendMessageShortcut) })
|
||||
: t('chat.input.placeholder_without_triggers', {
|
||||
@@ -381,9 +405,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
focusTextarea
|
||||
])
|
||||
|
||||
// TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
|
||||
useEffect(() => {
|
||||
setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
|
||||
}, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
|
||||
setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
|
||||
}, [assistant.knowledge_bases, setSelectedKnowledgeBases])
|
||||
|
||||
useEffect(() => {
|
||||
// Disable web search if model doesn't support it
|
||||
|
||||
@@ -156,11 +156,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
|
||||
|
||||
const setText = useCallback<React.Dispatch<React.SetStateAction<string>>>(
|
||||
(value) => {
|
||||
if (typeof value === 'function') {
|
||||
onTextChange(value(textRef.current))
|
||||
} else {
|
||||
onTextChange(value)
|
||||
}
|
||||
const newText = typeof value === 'function' ? value(textRef.current) : value
|
||||
onTextChange(newText)
|
||||
},
|
||||
[onTextChange]
|
||||
)
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { useAssistant } from '@renderer/hooks/useAssistant'
|
||||
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
|
||||
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
|
||||
import type { KnowledgeBase } from '@renderer/types'
|
||||
import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
|
||||
@@ -30,7 +29,6 @@ const knowledgeBaseTool = defineTool({
|
||||
render: function KnowledgeBaseToolRender(context) {
|
||||
const { assistant, state, actions, quickPanel } = context
|
||||
|
||||
const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
|
||||
const { updateAssistant } = useAssistant(assistant.id)
|
||||
|
||||
const handleSelect = useCallback(
|
||||
@@ -41,10 +39,6 @@ const knowledgeBaseTool = defineTool({
|
||||
[updateAssistant, actions]
|
||||
)
|
||||
|
||||
if (!knowledgeSidebarEnabled) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<KnowledgeBaseButton
|
||||
quickPanel={quickPanel}
|
||||
|
||||
@@ -102,10 +102,12 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
|
||||
)
|
||||
}
|
||||
|
||||
const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)
|
||||
|
||||
const ThinkingTimeSeconds = memo(
|
||||
({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
|
||||
const { t } = useTranslation()
|
||||
const [displayTime, setDisplayTime] = useState(blockThinkingTime)
|
||||
const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))
|
||||
|
||||
const timer = useRef<NodeJS.Timeout | null>(null)
|
||||
|
||||
@@ -121,7 +123,7 @@ const ThinkingTimeSeconds = memo(
|
||||
clearInterval(timer.current)
|
||||
timer.current = null
|
||||
}
|
||||
setDisplayTime(blockThinkingTime)
|
||||
setDisplayTime(normalizeThinkingTime(blockThinkingTime))
|
||||
}
|
||||
|
||||
return () => {
|
||||
@@ -132,10 +134,10 @@ const ThinkingTimeSeconds = memo(
|
||||
}
|
||||
}, [isThinking, blockThinkingTime])
|
||||
|
||||
const thinkingTimeSeconds = useMemo(
|
||||
() => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
|
||||
[displayTime]
|
||||
)
|
||||
const thinkingTimeSeconds = useMemo(() => {
|
||||
const safeTime = normalizeThinkingTime(displayTime)
|
||||
return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
|
||||
}, [displayTime])
|
||||
|
||||
return isThinking
|
||||
? t('chat.thinking', {
|
||||
|
||||
@@ -255,6 +255,20 @@ describe('ThinkingBlock', () => {
|
||||
unmount()
|
||||
})
|
||||
})
|
||||
|
||||
it('should clamp invalid thinking times to a safe default', () => {
|
||||
const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]
|
||||
|
||||
testCases.forEach((thinking_millsec) => {
|
||||
const block = createThinkingBlock({
|
||||
thinking_millsec: thinking_millsec as any,
|
||||
status: MessageBlockStatus.SUCCESS
|
||||
})
|
||||
const { unmount } = renderThinkingBlock(block)
|
||||
expect(getThinkingTimeText()).toHaveTextContent('0.1s')
|
||||
unmount()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('collapse behavior', () => {
|
||||
|
||||
@@ -10,6 +10,7 @@ import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import type { RootState } from '@renderer/store'
|
||||
// import { selectCurrentTopicId } from '@renderer/store/newMessage'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { Button, Drawer, Tooltip } from 'antd'
|
||||
import type { FC } from 'react'
|
||||
import { useCallback, useEffect, useRef, useState } from 'react'
|
||||
@@ -118,7 +119,8 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
|
||||
}
|
||||
|
||||
const scrollToMessage = (element: HTMLElement) => {
|
||||
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
// Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
|
||||
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
}
|
||||
|
||||
const scrollToTop = () => {
|
||||
|
||||
@@ -15,6 +15,7 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
|
||||
import type { Assistant, Topic } from '@renderer/types'
|
||||
import type { Message, MessageBlock } from '@renderer/types/newMessage'
|
||||
import { classNames, cn } from '@renderer/utils'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
|
||||
import { Divider } from 'antd'
|
||||
import type { Dispatch, FC, SetStateAction } from 'react'
|
||||
@@ -79,9 +80,10 @@ const MessageItem: FC<Props> = ({
|
||||
|
||||
useEffect(() => {
|
||||
if (isEditing && messageContainerRef.current) {
|
||||
messageContainerRef.current.scrollIntoView({
|
||||
scrollIntoView(messageContainerRef.current, {
|
||||
behavior: 'smooth',
|
||||
block: 'center'
|
||||
block: 'center',
|
||||
container: 'nearest'
|
||||
})
|
||||
}
|
||||
}, [isEditing])
|
||||
@@ -124,7 +126,7 @@ const MessageItem: FC<Props> = ({
|
||||
const messageHighlightHandler = useCallback(
|
||||
(highlight: boolean = true) => {
|
||||
if (messageContainerRef.current) {
|
||||
messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
|
||||
scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
|
||||
if (highlight) {
|
||||
setTimeoutTimer(
|
||||
'messageHighlightHandler',
|
||||
|
||||
@@ -12,6 +12,7 @@ import { newMessagesActions } from '@renderer/store/newMessage'
|
||||
// import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
|
||||
import type { Message } from '@renderer/types/newMessage'
|
||||
import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { getMainTextContent } from '@renderer/utils/messageUtils/find'
|
||||
import { Avatar } from 'antd'
|
||||
import { CircleChevronDown } from 'lucide-react'
|
||||
@@ -119,7 +120,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
|
||||
() => {
|
||||
const messageElement = document.getElementById(`message-${message.id}`)
|
||||
if (messageElement) {
|
||||
messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
|
||||
scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
|
||||
}
|
||||
},
|
||||
100
|
||||
@@ -141,7 +142,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
|
||||
return
|
||||
}
|
||||
|
||||
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
},
|
||||
[setSelectedMessage]
|
||||
)
|
||||
|
||||
@@ -10,6 +10,7 @@ import type { MultiModelMessageStyle } from '@renderer/store/settings'
 import type { Topic } from '@renderer/types'
 import type { Message } from '@renderer/types/newMessage'
 import { classNames } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { Popover } from 'antd'
 import type { ComponentProps } from 'react'
 import { memo, useCallback, useEffect, useMemo, useState } from 'react'
@@ -73,7 +74,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
       () => {
         const messageElement = document.getElementById(`message-${message.id}`)
         if (messageElement) {
-          messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
+          scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
         }
       },
       200
@@ -132,7 +133,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
       setSelectedMessage(message)
     } else {
       // Scroll directly
-      element.scrollIntoView({ behavior: 'smooth', block: 'start' })
+      scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
     }
   }
 }
@@ -3,6 +3,7 @@ import type { RootState } from '@renderer/store'
 import { messageBlocksSelectors } from '@renderer/store/messageBlock'
 import type { Message } from '@renderer/types/newMessage'
 import { MessageBlockType } from '@renderer/types/newMessage'
+import { scrollIntoView } from '@renderer/utils/dom'
 import type { FC } from 'react'
 import React, { useMemo, useRef } from 'react'
 import { useSelector } from 'react-redux'
@@ -72,10 +73,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
     const parent = messageOutlineContainerRef.current?.parentElement
     const messageContentContainer = parent?.querySelector('.message-content-container')
     if (messageContentContainer) {
-      const headingElement = messageContentContainer.querySelector(`#${id}`)
+      const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
       if (headingElement) {
         const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
-        headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
+        scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
       }
     }
   }
@@ -5,8 +5,6 @@ import { Terminal } from 'lucide-react'
 import { ToolTitle } from './GenericTools'
 import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'
 
-const MAX_TAG_LENGTH = 100
-
 export function BashTool({
   input,
   output
@@ -17,12 +15,10 @@ export function BashTool({
   // If there is output, count the number of output lines
   const outputLines = output ? output.split('\n').length : 0
 
-  // Handle truncation of the command string, with a null check
+  // Handle the command string, with a null check
   const command = input?.command ?? ''
-  const needsTruncate = command.length > MAX_TAG_LENGTH
-  const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command
 
-  const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
+  const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>
 
   return {
     key: 'tool',
@@ -34,16 +30,12 @@ export function BashTool({
           params={input?.description}
           stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
         />
-        <div className="mt-1">
-          {needsTruncate ? (
-            <Popover
-              content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
-              trigger="hover">
-              {tagContent}
-            </Popover>
-          ) : (
-            tagContent
-          )}
+        <div className="mt-1 max-w-full">
+          <Popover
+            content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
+            trigger="hover">
+            {tagContent}
+          </Popover>
         </div>
       </>
     ),
@@ -18,9 +18,9 @@ export function ToolTitle({
 }) {
   return (
     <div className={`flex items-center gap-1 ${className}`}>
-      {icon}
-      {label && <span className="font-medium text-sm">{label}</span>}
-      {params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>}
+      {icon && <span className="flex flex-shrink-0">{icon}</span>}
+      {label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
+      {params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
       {stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
     </div>
   )
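Note on the ToolTitle change above: a flex child keeps `min-width: auto` by default, so it refuses to shrink below its content width and `truncate` never takes effect; giving `min-w-0` to the one span that may shrink and `flex-shrink-0` to the spans that must not is what makes the params text ellipsize. A minimal sketch of the same pattern in isolation (component and prop names are illustrative, not from this repository):

// Sketch: ellipsize one flex child while its siblings keep their natural width.
// `min-w-0` overrides the implicit `min-width: auto` so `truncate`
// (overflow-hidden + text-ellipsis + whitespace-nowrap) can apply.
export function TruncatedRow({ label, detail, stats }: { label: string; detail: string; stats?: string }) {
  return (
    <div className="flex items-center gap-1">
      <span className="flex-shrink-0 font-medium text-sm">{label}</span>
      <span className="min-w-0 truncate text-xs">{detail}</span>
      {stats && <span className="flex-shrink-0 text-xs">{stats}</span>}
    </div>
  )
}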
@@ -1,7 +1,10 @@
 import { loggerService } from '@logger'
+import { useAppSelector } from '@renderer/store'
+import { selectPendingPermission } from '@renderer/store/toolPermissions'
 import type { NormalToolResponse } from '@renderer/types'
 import type { CollapseProps } from 'antd'
-import { Collapse } from 'antd'
+import { Collapse, Spin } from 'antd'
+import { useTranslation } from 'react-i18next'
 
 // Export all types
 export * from './types'
@@ -83,17 +86,41 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
 // Unified component rendering entry point
 export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
   const { arguments: args, response, tool, status } = toolResponse
-  logger.info('Rendering agent tool response', {
+  logger.debug('Rendering agent tool response', {
     tool: tool,
     arguments: args,
     status,
     response
   })
+
+  const pendingPermission = useAppSelector((state) =>
+    selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
+  )
+
   if (status === 'pending') {
-    return <ToolPermissionRequestCard toolResponse={toolResponse} />
+    if (pendingPermission) {
+      return <ToolPermissionRequestCard toolResponse={toolResponse} />
+    }
+    return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
   }
 
   return (
     <ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
   )
 }
+
+function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
+  const { t } = useTranslation()
+  const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
+  const detail = description?.trim() || t('agent.toolPermission.executing')
+
+  return (
+    <div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
+      <Spin size="small" />
+      <div className="flex flex-col gap-1">
+        <span className="font-semibold text-default-700 text-sm">{label}</span>
+        <span className="text-default-500 text-xs">{detail}</span>
+      </div>
+    </div>
+  )
+}
@@ -12,7 +12,7 @@ import type { FetchChatCompletionParams } from '@renderer/types'
 import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
 import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
 import { type Chunk, ChunkType } from '@renderer/types/chunk'
-import type { Message } from '@renderer/types/newMessage'
+import type { Message, ResponseError } from '@renderer/types/newMessage'
 import type { SdkModel } from '@renderer/types/sdk'
 import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
 import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
@@ -476,7 +476,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
   } else {
     const abortId = uuid()
     const signal = readyToAbort(abortId)
-    let chunkError
+    let streamError: ResponseError | undefined
     const params: StreamTextParams = {
       system: assistant.prompt,
       prompt: 'hi',
@@ -495,19 +495,18 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
       callType: 'check',
       onChunk: (chunk: Chunk) => {
         if (chunk.type === ChunkType.ERROR) {
-          chunkError = chunk.error
+          streamError = chunk.error
         } else {
           abortCompletion(abortId)
         }
       }
     }
 
     // Try streaming check
     try {
       await ai.completions(model.id, params, config)
     } catch (e) {
-      if (!isAbortError(e) && !isAbortError(chunkError)) {
-        throw e
+      if (!isAbortError(e) && !isAbortError(streamError)) {
+        throw streamError ?? e
       }
     }
   }
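Note on the checkApi hunks above: the probe strategy is unchanged (stream a throwaway prompt, abort as soon as any non-error chunk arrives, and remember any ChunkType.ERROR payload); what changes is that a genuine failure now rethrows the typed stream error instead of the generic exception raised by the deliberate abort. A small sketch of that pattern with the collaborators injected (the parameter names below are stand-ins, not Cherry Studio's real helpers):

// Sketch: verify a provider works by streaming a tiny prompt and aborting
// on the first good chunk; only genuine failures are rethrown.
type ProbeChunk = { type: 'error' | 'data'; error?: unknown }

async function probeProvider(
  streamCompletion: (prompt: string, onChunk: (chunk: ProbeChunk) => void, signal: AbortSignal) => Promise<void>,
  isAbortError: (value: unknown) => boolean
): Promise<void> {
  const controller = new AbortController()
  let streamError: unknown

  try {
    await streamCompletion(
      'hi',
      (chunk) => {
        if (chunk.type === 'error') {
          streamError = chunk.error // remember the provider-reported failure
        } else {
          controller.abort() // one good chunk is enough; stop streaming
        }
      },
      controller.signal
    )
  } catch (e) {
    // The abort we triggered ourselves is expected; anything else is a real
    // failure, reported preferably as the structured error from the stream.
    if (!isAbortError(e) && !isAbortError(streamError)) {
      throw streamError ?? e
    }
  }
}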
@@ -239,6 +239,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio
 
 export type ModelTag = Exclude<ModelType, 'text'> | 'free'
 
+// "image-generation" is also openai endpoint, but specifically for image generation.
 export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
 
 export type ModelPricing = {
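Note on the EndpointType hunk above: because the alias is a closed string union, call sites can switch over it exhaustively and let the compiler complain when a new member is added. A hedged sketch, assuming EndpointType is exported from '@renderer/types' and with purely illustrative description strings:

import type { EndpointType } from '@renderer/types'

// Sketch: exhaustive handling of EndpointType; the `never` assignment in the
// default branch fails to compile if a new endpoint kind is left unhandled.
function describeEndpoint(endpoint: EndpointType): string {
  switch (endpoint) {
    case 'openai':
      return 'OpenAI-compatible chat completions'
    case 'openai-response':
      return 'OpenAI responses-style endpoint'
    case 'anthropic':
      return 'Anthropic messages endpoint'
    case 'gemini':
      return 'Gemini endpoint'
    case 'image-generation':
      return 'OpenAI-style image generation'
    case 'jina-rerank':
      return 'Jina rerank endpoint'
    default: {
      const unhandled: never = endpoint
      return unhandled
    }
  }
}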
@@ -234,6 +234,7 @@ export interface Response {
   error?: ResponseError
 }
 
+// FIXME: Weak type safety. It may be a specific class instance which inherits Error in runtime.
 export type ResponseError = Record<string, any>
 
 export interface MessageInputBaseParams {
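Note on the ResponseError hunk above: the new FIXME records that the alias is a loose record even though runtime values may actually be Error subclasses. One hedged way to bridge that gap before persisting errors into serializable state (the helper below is hypothetical, not part of this diff):

// Sketch: flatten an unknown thrown value into a plain serializable record.
type ResponseError = Record<string, any>

function toResponseError(value: unknown): ResponseError {
  if (value instanceof Error) {
    // Error's own fields are non-enumerable, so a spread would produce {};
    // copy the useful ones explicitly instead.
    return { name: value.name, message: value.message, stack: value.stack }
  }
  if (typeof value === 'object' && value !== null) {
    return { ...(value as Record<string, any>) }
  }
  return { message: String(value) }
}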
@@ -1,3 +1,15 @@
+import { loggerService } from '@logger'
+
+const logger = loggerService.withContext('utils/dom')
+
+interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
+  /**
+   * @see https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView#container
+   * @see https://github.com/microsoft/TypeScript/issues/62803
+   */
+  container?: 'all' | 'nearest'
+}
+
 /**
  * Simple wrapper for scrollIntoView with common default options.
  * Provides a unified interface with sensible defaults.
@@ -5,7 +17,12 @@
  * @param element - The target element to scroll into view
  * @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' }
  */
-export function scrollIntoView(element: HTMLElement, options?: ScrollIntoViewOptions): void {
+export function scrollIntoView(element: HTMLElement, options?: ChromiumScrollIntoViewOptions): void {
+  if (!element) {
+    logger.warn('[scrollIntoView] Unexpected falsy element. Do nothing as fallback.')
+    return
+  }
+
   const defaultOptions: ScrollIntoViewOptions = {
     behavior: 'smooth',
     block: 'center',
|
||||
|
||||
let blockId: string | null = null
|
||||
let thinkingBlockId: string | null = null
|
||||
let thinkingStartTime: number | null = null
|
||||
|
||||
const resolveThinkingDuration = (duration?: number) => {
|
||||
if (typeof duration === 'number' && Number.isFinite(duration)) {
|
||||
return duration
|
||||
}
|
||||
if (thinkingStartTime !== null) {
|
||||
return Math.max(0, performance.now() - thinkingStartTime)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
setIsLoading(true)
|
||||
setIsOutputted(false)
|
||||
@@ -291,6 +302,7 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
|
||||
case ChunkType.THINKING_START:
|
||||
{
|
||||
setIsOutputted(true)
|
||||
thinkingStartTime = performance.now()
|
||||
if (thinkingBlockId) {
|
||||
store.dispatch(
|
||||
updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
|
||||
@@ -315,9 +327,13 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
|
||||
{
|
||||
setIsOutputted(true)
|
||||
if (thinkingBlockId) {
|
||||
if (thinkingStartTime === null) {
|
||||
thinkingStartTime = performance.now()
|
||||
}
|
||||
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
|
||||
throttledBlockUpdate(thinkingBlockId, {
|
||||
content: chunk.text,
|
||||
thinking_millsec: chunk.thinking_millsec
|
||||
thinking_millsec: thinkingDuration
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -325,14 +341,17 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
|
||||
case ChunkType.THINKING_COMPLETE:
|
||||
{
|
||||
if (thinkingBlockId) {
|
||||
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
|
||||
cancelThrottledBlockUpdate(thinkingBlockId)
|
||||
store.dispatch(
|
||||
updateOneBlock({
|
||||
id: thinkingBlockId,
|
||||
changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: chunk.thinking_millsec }
|
||||
changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: thinkingDuration }
|
||||
})
|
||||
)
|
||||
}
|
||||
thinkingStartTime = null
|
||||
thinkingBlockId = null
|
||||
}
|
||||
break
|
||||
case ChunkType.TEXT_START:
|
||||
@@ -404,6 +423,8 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
|
||||
if (!isAborted) {
|
||||
throw new Error(chunk.error.message)
|
||||
}
|
||||
thinkingStartTime = null
|
||||
thinkingBlockId = null
|
||||
}
|
||||
//fall through
|
||||
case ChunkType.BLOCK_COMPLETE:
|
||||
|
||||
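Note on the HomeWindow hunks above (the processMessages changes below follow the same pattern): the UI now prefers the provider-reported thinking_millsec and falls back to a locally measured duration when the stream omits it. The pattern in isolation, as a small sketch:

// Sketch: measure thinking time locally so a duration is still shown
// when streaming chunks carry no thinking_millsec of their own.
let thinkingStartTime: number | null = null

function onThinkingStart(): void {
  thinkingStartTime = performance.now()
}

function resolveThinkingDuration(reported?: number): number {
  if (typeof reported === 'number' && Number.isFinite(reported)) {
    return reported // trust the provider when it reports a duration
  }
  if (thinkingStartTime !== null) {
    return Math.max(0, performance.now() - thinkingStartTime) // local fallback
  }
  return 0 // no signal at all
}

function onThinkingComplete(reported?: number): number {
  const duration = resolveThinkingDuration(reported)
  thinkingStartTime = null // reset for the next thinking block
  return duration
}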
@@ -41,8 +41,19 @@ export const processMessages = async (
 
   let textBlockId: string | null = null
   let thinkingBlockId: string | null = null
+  let thinkingStartTime: number | null = null
   let textBlockContent: string = ''
 
+  const resolveThinkingDuration = (duration?: number) => {
+    if (typeof duration === 'number' && Number.isFinite(duration)) {
+      return duration
+    }
+    if (thinkingStartTime !== null) {
+      return Math.max(0, performance.now() - thinkingStartTime)
+    }
+    return 0
+  }
+
   const assistantMessage = getAssistantMessage({
     assistant,
     topic
@@ -79,6 +90,7 @@ export const processMessages = async (
       switch (chunk.type) {
         case ChunkType.THINKING_START:
           {
+            thinkingStartTime = performance.now()
             if (thinkingBlockId) {
               store.dispatch(
                 updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -102,9 +114,13 @@ export const processMessages = async (
         case ChunkType.THINKING_DELTA:
           {
             if (thinkingBlockId) {
+              if (thinkingStartTime === null) {
+                thinkingStartTime = performance.now()
+              }
+              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
               throttledBlockUpdate(thinkingBlockId, {
                 content: chunk.text,
-                thinking_millsec: chunk.thinking_millsec
+                thinking_millsec: thinkingDuration
               })
             }
             onStream()
@@ -113,6 +129,7 @@ export const processMessages = async (
         case ChunkType.THINKING_COMPLETE:
           {
             if (thinkingBlockId) {
+              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
               cancelThrottledBlockUpdate(thinkingBlockId)
               store.dispatch(
                 updateOneBlock({
@@ -120,12 +137,13 @@ export const processMessages = async (
                   changes: {
                     content: chunk.text,
                     status: MessageBlockStatus.SUCCESS,
-                    thinking_millsec: chunk.thinking_millsec
+                    thinking_millsec: thinkingDuration
                   }
                 })
               )
               thinkingBlockId = null
             }
+            thinkingStartTime = null
           }
           break
         case ChunkType.TEXT_START:
@@ -190,6 +208,7 @@ export const processMessages = async (
         case ChunkType.ERROR:
           {
             const blockId = textBlockId || thinkingBlockId
+            thinkingStartTime = null
             if (blockId) {
               store.dispatch(
                 updateOneBlock({
@@ -284,6 +284,54 @@ describe('processMessages', () => {
     })
   })
 
+  describe('thinking timer fallback', () => {
+    it('should use local timer when thinking_millsec is missing', async () => {
+      const nowValues = [1000, 1500, 2000]
+      let nowIndex = 0
+      const performanceSpy = vi.spyOn(performance, 'now').mockImplementation(() => {
+        const value = nowValues[Math.min(nowIndex, nowValues.length - 1)]
+        nowIndex += 1
+        return value
+      })
+
+      const mockChunks = [
+        { type: ChunkType.THINKING_START },
+        { type: ChunkType.THINKING_DELTA, text: 'Thinking...' },
+        { type: ChunkType.THINKING_COMPLETE, text: 'Done thinking' },
+        { type: ChunkType.TEXT_START },
+        { type: ChunkType.TEXT_COMPLETE, text: 'Final answer' },
+        { type: ChunkType.BLOCK_COMPLETE }
+      ]
+
+      vi.mocked(fetchChatCompletion).mockImplementation(async ({ onChunkReceived }: any) => {
+        for (const chunk of mockChunks) {
+          await onChunkReceived(chunk)
+        }
+      })
+
+      await processMessages(
+        mockAssistant,
+        mockTopic,
+        'test prompt',
+        mockSetAskId,
+        mockOnStream,
+        mockOnFinish,
+        mockOnError
+      )
+
+      const thinkingDeltaCall = vi.mocked(throttledBlockUpdate).mock.calls.find(([id]) => id === 'thinking-block-1')
+      const deltaPayload = thinkingDeltaCall?.[1] as { thinking_millsec?: number } | undefined
+      expect(deltaPayload?.thinking_millsec).toBe(500)
+
+      const thinkingCompleteUpdate = vi
+        .mocked(updateOneBlock)
+        .mock.calls.find(([payload]) => (payload as any)?.changes?.thinking_millsec !== undefined)
+      expect((thinkingCompleteUpdate?.[0] as any)?.changes?.thinking_millsec).toBe(1000)
+
+      performanceSpy.mockRestore()
+    })
+  })
+
   describe('stream with exceptions', () => {
     it('should handle error chunks properly', async () => {
       const mockError = new Error('Stream processing error')
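Note on the test above: the asserted values follow directly from the mocked clock. performance.now() returns 1000 when THINKING_START records the start time, 1500 when THINKING_DELTA resolves its fallback duration (1500 - 1000 = 500), and 2000 when THINKING_COMPLETE does the same (2000 - 1000 = 1000), which is why the delta update expects 500 and the completion update expects 1000.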