Merge remote-tracking branch 'origin/v2' into fix/v2/inputbar-cache

suyao
2025-12-01 13:53:27 +08:00
50 changed files with 670 additions and 199 deletions

View File

@@ -13,7 +13,15 @@ This file provides guidance to AI coding assistants when working with code in th
 - **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
 - **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
 - **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
+- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.
+
+## Pull Request Workflow (CRITICAL)
+
+When creating a Pull Request, you MUST:
+
+1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
+2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
+3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
+4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
 
 ## Development Commands

View File

@@ -135,9 +135,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
   releaseNotes: |
     <!--LANG:en-->
-    A New Era of Intelligence with Cherry Studio 1.7.0
+    A New Era of Intelligence with Cherry Studio 1.7.1
-    Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
+    Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
     For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
@@ -188,9 +188,9 @@ releaseInfo:
     The Agent Era is here. We can't wait to see what you'll create.
     <!--LANG:zh-CN-->
-    Cherry Studio 1.7.0:开启智能新纪元
+    Cherry Studio 1.7.1:开启智能新纪元
-    今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent能够自主思考、规划和行动的 AI。
+    今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent能够自主思考、规划和行动的 AI。
     多年来AI 助手一直是被动的——等待你的指令回应你的问题。Agent 改变了这一切。现在AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。

View File

@@ -69,6 +69,7 @@ export interface CherryInProviderSettings {
   headers?: HeadersInput
   /**
    * Optional endpoint type to distinguish different endpoint behaviors.
+   * "image-generation" is also an OpenAI endpoint, but specifically for image generation.
    */
   endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
 }
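For context, a minimal TypeScript sketch of how a caller might branch on the new union member; the narrowed `ProviderSettingsLike` shape and the `usesOpenAICompatibleTransport` helper are illustrative, not part of this change.

```ts
// Illustrative only: a reduced settings shape and a guard for the image-generation endpoint.
type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'

interface ProviderSettingsLike {
  endpointType?: EndpointType
}

// Treat "image-generation" as an OpenAI-style endpoint dedicated to image output.
function usesOpenAICompatibleTransport(settings: ProviderSettingsLike): boolean {
  return (
    settings.endpointType === undefined ||
    settings.endpointType === 'openai' ||
    settings.endpointType === 'openai-response' ||
    settings.endpointType === 'image-generation'
  )
}
```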

View File

@@ -1,6 +1,7 @@
 // src/main/services/agents/services/claudecode/index.ts
 import { EventEmitter } from 'node:events'
 import { createRequire } from 'node:module'
+import path from 'node:path'
 import type {
   CanUseTool,
@@ -121,7 +122,11 @@ class ClaudeCodeService implements AgentServiceInterface {
       // TODO: support set small model in UI
       ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
       ELECTRON_RUN_AS_NODE: '1',
-      ELECTRON_NO_ATTACH_CONSOLE: '1'
+      ELECTRON_NO_ATTACH_CONSOLE: '1',
+      // Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
+      // on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
+      // This prevents the SDK from using the user's home directory which may have encoding problems
+      CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
     }
     const errorChunks: string[] = []
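Illustration only: a self-contained sketch of how such an environment block could be assembled in an Electron main process. `buildAgentEnv` is an invented name; `app.getPath('userData')` and `path.join` are real APIs used exactly as in the hunk above.

```ts
import path from 'node:path'
import { app } from 'electron'

// Sketch: build the child-process environment the Claude Code SDK would receive.
function buildAgentEnv(haikuModelId: string): NodeJS.ProcessEnv {
  return {
    ...process.env,
    ANTHROPIC_DEFAULT_HAIKU_MODEL: haikuModelId,
    ELECTRON_RUN_AS_NODE: '1',
    ELECTRON_NO_ATTACH_CONSOLE: '1',
    // Keep Claude's config under userData so non-ASCII Windows usernames don't break paths.
    CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
  }
}
```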

View File

@@ -212,8 +212,9 @@ export class ToolCallChunkHandler {
         description: toolName,
         type: 'builtin'
       } as BaseTool
-    } else if ((mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool)) {
+    } else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
       // 如果是客户端执行的 MCP 工具,沿用现有逻辑
+      // toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
       logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
       // mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
       // if (!mcpTool) {
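A reduced sketch of the lookup change, assuming (as the added comment states) that client-side MCP tools are registered keyed by `id`; the `MCPToolLike` shape below is trimmed to the two relevant fields and is not the project's full type.

```ts
// Reduced shape for illustration; the real MCPTool type carries more fields.
interface MCPToolLike {
  id: string
  name: string
}

// Tool calls arrive keyed by the id used at registration time, so match on id, not name.
function findClientSideTool(tools: MCPToolLike[], toolCallName: string): MCPToolLike | undefined {
  return tools.find((t) => t.id === toolCallName)
}
```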

View File

@@ -46,6 +46,7 @@ import type {
   GeminiSdkRawOutput,
   GeminiSdkToolCall
 } from '@renderer/types/sdk'
+import { getTrailingApiVersion, withoutTrailingApiVersion } from '@renderer/utils'
 import { isToolUseModeFunction } from '@renderer/utils/assistant'
 import {
   geminiFunctionCallToMcpTool,
@@ -163,6 +164,10 @@ export class GeminiAPIClient extends BaseApiClient<
     return models
   }
 
+  override getBaseURL(): string {
+    return withoutTrailingApiVersion(super.getBaseURL())
+  }
+
   override async getSdkInstance() {
     if (this.sdkInstance) {
       return this.sdkInstance
@@ -188,6 +193,13 @@
     if (this.provider.isVertex) {
       return 'v1'
     }
+    // Extract trailing API version from the URL
+    const trailingVersion = getTrailingApiVersion(this.provider.apiHost || '')
+    if (trailingVersion) {
+      return trailingVersion
+    }
     return 'v1beta'
   }
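The helpers imported from `@renderer/utils` are not shown in this diff; the sketch below is an assumed implementation of their documented behavior (extract or strip a trailing `v1` / `v1beta` style segment), not the project's actual code.

```ts
// Assumed behavior of getTrailingApiVersion / withoutTrailingApiVersion; the real helpers may differ.
const TRAILING_VERSION_RE = /\/(v\d+(?:beta|alpha)?)\/?$/i

// "https://example.com/gemini/v1beta" -> "v1beta"; undefined when no version suffix is present.
export function getTrailingApiVersion(url: string): string | undefined {
  const match = url.match(TRAILING_VERSION_RE)
  return match ? match[1] : undefined
}

// "https://example.com/gemini/v1beta" -> "https://example.com/gemini"
export function withoutTrailingApiVersion(url: string): string {
  return url.replace(TRAILING_VERSION_RE, '')
}

// Example: an apiHost of "https://example.com/gemini/v1" would yield apiVersion "v1"
// and a base URL of "https://example.com/gemini".
```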

View File

@@ -7,7 +7,7 @@ import { isAwsBedrockProvider, isVertexProvider } from '@renderer/utils/provider
 // https://docs.claude.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking
 const INTERLEAVED_THINKING_HEADER = 'interleaved-thinking-2025-05-14'
 // https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window
-const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
+// const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
 // https://docs.cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/web-search
 const WEBSEARCH_HEADER = 'web-search-2025-03-05'
@@ -25,7 +25,9 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
   if (isVertexProvider(provider) && assistant.enableWebSearch) {
     anthropicHeaders.push(WEBSEARCH_HEADER)
   }
-  anthropicHeaders.push(CONTEXT_100M_HEADER)
+  // We may add it by user preference in assistant.settings instead of always adding it.
+  // See #11540, #11397
+  // anthropicHeaders.push(CONTEXT_100M_HEADER)
 }
 return anthropicHeaders
}
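The hunk only comments the header out; one possible follow-up, gating it on a user preference as the added comment suggests, might look like the sketch below. The `enable1MContext` setting name is invented for illustration and does not exist in the codebase.

```ts
// Hypothetical follow-up: gate the 1M-context beta header on an assistant setting.
const CONTEXT_1M_HEADER = 'context-1m-2025-08-07'

interface AssistantSettingsLike {
  // Invented setting name, shown only to illustrate the preference-gated approach.
  enable1MContext?: boolean
}

function maybeAddContextHeader(headers: string[], settings: AssistantSettingsLike): string[] {
  if (settings.enable1MContext) {
    headers.push(CONTEXT_1M_HEADER)
  }
  return headers
}
```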

View File

@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
       'gen_ai.usage.output_tokens'
     ]
-    const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
-    const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
+    const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
+    const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
     if (completionTokens !== undefined || promptTokens !== undefined) {
       const usage: TokenUsage = {

View File

@@ -0,0 +1,53 @@
import type { Span } from '@opentelemetry/api'
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
import { describe, expect, it, vi } from 'vitest'

import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'

vi.mock('@logger', () => ({
  loggerService: {
    withContext: () => ({
      debug: vi.fn(),
      error: vi.fn(),
      info: vi.fn(),
      warn: vi.fn()
    })
  }
}))

describe('AiSdkSpanAdapter', () => {
  const createMockSpan = (attributes: Record<string, unknown>): Span => {
    const span = {
      spanContext: () => ({
        traceId: 'trace-id',
        spanId: 'span-id'
      }),
      _attributes: attributes,
      _events: [],
      name: 'test span',
      status: { code: SpanStatusCode.OK },
      kind: SpanKind.CLIENT,
      startTime: [0, 0] as [number, number],
      endTime: [0, 1] as [number, number],
      ended: true,
      parentSpanId: '',
      links: []
    }
    return span as unknown as Span
  }

  it('maps prompt and completion usage tokens to the correct fields', () => {
    const attributes = {
      'ai.usage.promptTokens': 321,
      'ai.usage.completionTokens': 654
    }
    const span = createMockSpan(attributes)

    const result = AiSdkSpanAdapter.convertToSpanEntity({ span })

    expect(result.usage).toBeDefined()
    expect(result.usage?.prompt_tokens).toBe(321)
    expect(result.usage?.completion_tokens).toBe(654)
    expect(result.usage?.total_tokens).toBe(975)
  })
})

View File

@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({})
   })

-  it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
+  it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
     const { isReasoningModel } = await import('@renderer/config/models')
     vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -161,6 +161,29 @@
       settings: {}
     } as Assistant

+    const result = getReasoningEffort(assistant, model)
+
+    expect(result).toEqual({})
+  })
+
+  it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
+    const { isReasoningModel } = await import('@renderer/config/models')
+    vi.mocked(isReasoningModel).mockReturnValue(true)
+
+    const model: Model = {
+      id: 'anthropic/claude-sonnet-4',
+      name: 'Claude Sonnet 4',
+      provider: SystemProviderIds.openrouter
+    } as Model
+
+    const assistant: Assistant = {
+      id: 'test',
+      name: 'Test',
+      settings: {
+        reasoning_effort: 'none'
+      }
+    } as Assistant
+
     const result = getReasoningEffort(assistant, model)

     expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
   })
@@ -269,7 +292,9 @@
     const assistant: Assistant = {
       id: 'test',
       name: 'Test',
-      settings: {}
+      settings: {
+        reasoning_effort: 'none'
+      }
     } as Assistant

     const result = getReasoningEffort(assistant, model)

View File

@@ -16,10 +16,8 @@ import {
   isGPT5SeriesModel,
   isGPT51SeriesModel,
   isGrok4FastReasoningModel,
-  isGrokReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
-  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -64,30 +62,22 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   }

   const reasoningEffort = assistant?.settings?.reasoning_effort

-  // Handle undefined and 'none' reasoningEffort.
-  // TODO: They should be separated.
-  if (!reasoningEffort || reasoningEffort === 'none') {
+  // reasoningEffort is not set, no extra reasoning setting
+  // Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
+  // It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
+  if (!reasoningEffort) {
+    return {}
+  }
+
+  // Handle 'none' reasoningEffort. It's explicitly off.
+  if (reasoningEffort === 'none') {
     // openrouter: use reasoning
     if (model.provider === SystemProviderIds.openrouter) {
-      // Don't disable reasoning for Gemini models that support thinking tokens
-      if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
-        return {}
-      }
       // 'none' is not an available value for effort for now.
       // I think they should resolve this issue soon, so I'll just go ahead and use this value.
       if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
         return { reasoning: { effort: 'none' } }
       }
-      // Don't disable reasoning for models that require it
-      if (
-        isGrokReasoningModel(model) ||
-        isOpenAIReasoningModel(model) ||
-        isQwenAlwaysThinkModel(model) ||
-        model.id.includes('seed-oss') ||
-        model.id.includes('minimax-m2')
-      ) {
-        return {}
-      }
       return { reasoning: { enabled: false, exclude: true } }
     }
@@ -101,11 +91,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
       return { enable_thinking: false }
     }

-    // claude
-    if (isSupportedThinkingTokenClaudeModel(model)) {
-      return {}
-    }
-
     // gemini
     if (isSupportedThinkingTokenGeminiModel(model)) {
       if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -118,8 +103,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
           }
         }
       }
+    } else {
+      logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
+      return {}
     }
+    return {}
   }
@@ -139,6 +126,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
     }
   }

+  logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
   return {}
 }
@@ -293,6 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   }

   // OpenRouter models, use reasoning
+  // FIXME: duplicated openrouter handling. remove one
   if (model.provider === SystemProviderIds.openrouter) {
     if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
       return {
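A condensed sketch of the control flow this refactor separates: `undefined` means leave reasoning parameters untouched, while `'none'` means explicitly disable reasoning. Types are narrowed for illustration, and the provider-specific branches are reduced to the OpenRouter shape shown in the diff.

```ts
type ReasoningEffort = 'none' | 'low' | 'medium' | 'high' | undefined

// Sketch only: the two cases the refactor keeps apart.
function reasoningParamsFor(effort: ReasoningEffort): Record<string, unknown> {
  // Not set: the model either has no reasoning control (e.g. deepseek reasoner)
  // or should keep its default behavior, so send nothing extra.
  if (effort === undefined) {
    return {}
  }
  // Explicitly off: emit a provider-appropriate "disable reasoning" payload
  // (OpenRouter's shape shown here as one example from the diff).
  if (effort === 'none') {
    return { reasoning: { enabled: false, exclude: true } }
  }
  // Otherwise pass the requested effort through.
  return { reasoning: { effort } }
}
```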

View File

@@ -215,6 +215,10 @@
   border-top: none !important;
 }

+.ant-collapse-header-text {
+  overflow-x: hidden;
+}
+
 .ant-slider .ant-slider-handle::after {
   box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
 }

View File

@@ -10,6 +10,7 @@ import {
 } from '@ant-design/icons'
 import { loggerService } from '@logger'
 import { download } from '@renderer/utils/download'
+import { convertImageToPng } from '@renderer/utils/image'
 import type { ImageProps as AntImageProps } from 'antd'
 import { Dropdown, Image as AntImage, Space } from 'antd'
 import { Base64 } from 'js-base64'
@@ -33,39 +34,38 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
   // 复制图片到剪贴板
   const handleCopyImage = async (src: string) => {
     try {
+      let blob: Blob
       if (src.startsWith('data:')) {
         // 处理 base64 格式的图片
         const match = src.match(/^data:(image\/\w+);base64,(.+)$/)
         if (!match) throw new Error('Invalid base64 image format')
         const mimeType = match[1]
         const byteArray = Base64.toUint8Array(match[2])
-        const blob = new Blob([byteArray as unknown as BlobPart], { type: mimeType })
-        await navigator.clipboard.write([new ClipboardItem({ [mimeType]: blob })])
+        blob = new Blob([byteArray as unknown as BlobPart], { type: mimeType })
       } else if (src.startsWith('file://')) {
         // 处理本地文件路径
         const bytes = await window.api.fs.read(src)
         const mimeType = mime.getType(src) || 'application/octet-stream'
-        const blob = new Blob([bytes], { type: mimeType })
-        await navigator.clipboard.write([
-          new ClipboardItem({
-            [mimeType]: blob
-          })
-        ])
+        blob = new Blob([bytes], { type: mimeType })
       } else {
         // 处理 URL 格式的图片
         const response = await fetch(src)
-        const blob = await response.blob()
-        await navigator.clipboard.write([
-          new ClipboardItem({
-            [blob.type]: blob
-          })
-        ])
+        blob = await response.blob()
       }
+      // 统一转换为 PNG 以确保兼容性(剪贴板 API 不支持 JPEG)
+      const pngBlob = await convertImageToPng(blob)
+      const item = new ClipboardItem({
+        'image/png': pngBlob
+      })
+      await navigator.clipboard.write([item])
       window.toast.success(t('message.copy.success'))
     } catch (error) {
-      logger.error('Failed to copy image:', error as Error)
+      const err = error as Error
+      logger.error(`Failed to copy image: ${err.message}`, { stack: err.stack })
       window.toast.error(t('message.copy.failed'))
     }
   }
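`convertImageToPng` itself is not shown in this diff; a plausible browser-side implementation, offered as an assumption, decodes the blob and re-encodes it through a canvas.

```ts
// Assumed sketch of '@renderer/utils/image' convertImageToPng; the real helper may differ.
export async function convertImageToPng(blob: Blob): Promise<Blob> {
  // Already PNG: nothing to do.
  if (blob.type === 'image/png') {
    return blob
  }
  // Decode the source image, draw it onto a canvas, and re-encode as PNG,
  // since the async Clipboard API only reliably accepts image/png.
  const bitmap = await createImageBitmap(blob)
  try {
    const canvas = document.createElement('canvas')
    canvas.width = bitmap.width
    canvas.height = bitmap.height
    const ctx = canvas.getContext('2d')
    if (!ctx) throw new Error('Canvas 2D context unavailable')
    ctx.drawImage(bitmap, 0, 0)
    return await new Promise<Blob>((resolve, reject) => {
      canvas.toBlob((png) => (png ? resolve(png) : reject(new Error('PNG encoding failed'))), 'image/png')
    })
  } finally {
    bitmap.close()
  }
}
```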

View File

@@ -460,16 +460,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
 }

 export const isDeepSeekHybridInferenceModel = (model: Model) => {
-  const modelId = getLowerBaseModelName(model.id)
-  // deepseek官方使用chat和reasoner做推理控制其他provider需要单独判断id可能会有所差别
-  // openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型这里有风险
-  // Matches: "deepseek-v3" followed by ".digit" or "-digit".
-  // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
-  // until the end of the string.
-  // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
-  // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
-  // TODO: move to utils and add test cases
-  return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
+  const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
+    const modelId = getLowerBaseModelName(model.id)
+    // deepseek官方使用chat和reasoner做推理控制其他provider需要单独判断id可能会有所差别
+    // openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型这里有风险
+    // Matches: "deepseek-v3" followed by ".digit" or "-digit".
+    // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
+    // until the end of the string.
+    // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
+    // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
+    // TODO: move to utils and add test cases
+    return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
+  })
+  return idResult || nameResult
 }

 export const isLingReasoningModel = (model?: Model): boolean => {
@@ -523,7 +526,6 @@ export function isReasoningModel(model?: Model): boolean {
     REASONING_REGEX.test(model.name) ||
     isSupportedThinkingTokenDoubaoModel(model) ||
     isDeepSeekHybridInferenceModel(model) ||
-    isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
     false
   )
 }
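Since the TODO above notes the regex still lacks dedicated tests, here is a vitest-style sketch of the matching behavior documented in the comments (illustrative only, not part of the diff):

```ts
import { describe, expect, it } from 'vitest'

// Mirrors the regex documented above; kept local so the sketch is self-contained.
const DEEPSEEK_V3_HYBRID_RE = /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/

describe('deepseek hybrid inference id matching (sketch)', () => {
  it('matches the documented positive examples', () => {
    for (const id of ['deepseek-v3.1', 'deepseek-v3-1', 'deepseek-v3.1.2', 'deepseek-v3.1-alpha']) {
      expect(DEEPSEEK_V3_HYBRID_RE.test(id)).toBe(true)
    }
  })

  it('rejects the documented negative examples', () => {
    for (const id of ['deepseek-v3.123', 'deepseek-v3.x']) {
      expect(DEEPSEEK_V3_HYBRID_RE.test(id)).toBe(false)
    }
  })
})
```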

View File

@@ -1,6 +1,6 @@
 import { cacheService } from '@data/CacheService'
 import { throttle } from 'lodash'
-import { useEffect, useRef } from 'react'
+import { useEffect, useMemo, useRef } from 'react'

 import { useTimer } from './useTimer'
@@ -13,13 +13,18 @@ import { useTimer } from './useTimer'
  */
 export default function useScrollPosition(key: string, throttleWait?: number) {
   const containerRef = useRef<HTMLDivElement>(null)
-  const scrollKey = `scroll:${key}`
+  const scrollKey = useMemo(() => `scroll:${key}`, [key])
+  const scrollKeyRef = useRef(scrollKey)
   const { setTimeoutTimer } = useTimer()

+  useEffect(() => {
+    scrollKeyRef.current = scrollKey
+  }, [scrollKey])
+
   const handleScroll = throttle(() => {
     const position = containerRef.current?.scrollTop ?? 0
     window.requestAnimationFrame(() => {
-      cacheService.set(scrollKey, position)
+      cacheService.set(scrollKeyRef.current, position)
     })
   }, throttleWait ?? 100)
@@ -29,5 +34,9 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
     setTimeoutTimer('scrollEffect', scroll, 50)
   }, [scrollKey, setTimeoutTimer])

+  useEffect(() => {
+    return () => handleScroll.cancel()
+  }, [handleScroll])
+
   return { containerRef, handleScroll }
 }
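A usage sketch of the hook as it now behaves; the component name, cache key, and import path are invented for illustration. The ref indirection means the throttled handler always writes to the latest `scroll:` key even if `key` changes between throttle ticks.

```tsx
import React from 'react'
// Import path assumed from the hook's location in the renderer hooks directory.
import useScrollPosition from '@renderer/hooks/useScrollPosition'

const TopicList: React.FC = () => {
  const { containerRef, handleScroll } = useScrollPosition('my-topic')

  // The container's scroll offset is throttled into the cache and restored on mount.
  return (
    <div ref={containerRef} onScroll={handleScroll} style={{ overflowY: 'auto', height: 400 }}>
      {/* list items */}
    </div>
  )
}

export default TopicList
```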

View File

@@ -1,4 +1,4 @@
-import { useEffect, useRef } from 'react'
+import { useCallback, useEffect, useRef } from 'react'

 /**
  * 定时器管理 Hook用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
@@ -43,10 +43,38 @@
   const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
   const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())

+  /**
+   * 清除指定 key 的 setTimeout 定时器
+   * @param key - 定时器标识符
+   */
+  const clearTimeoutTimer = useCallback((key: string) => {
+    clearTimeout(timeoutMapRef.current.get(key))
+    timeoutMapRef.current.delete(key)
+  }, [])
+
+  /**
+   * 清除指定 key 的 setInterval 定时器
+   * @param key - 定时器标识符
+   */
+  const clearIntervalTimer = useCallback((key: string) => {
+    clearInterval(intervalMapRef.current.get(key))
+    intervalMapRef.current.delete(key)
+  }, [])
+
+  /**
+   * 清除所有定时器,包括 setTimeout 和 setInterval
+   */
+  const clearAllTimers = useCallback(() => {
+    timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
+    intervalMapRef.current.forEach((timer) => clearInterval(timer))
+    timeoutMapRef.current.clear()
+    intervalMapRef.current.clear()
+  }, [])
+
   // 组件卸载时自动清理所有定时器
   useEffect(() => {
     return () => clearAllTimers()
-  }, [])
+  }, [clearAllTimers])

   /**
    * 设置一个 setTimeout 定时器
@@ -65,12 +93,15 @@
    * cleanup();
    * ```
    */
-  const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
-    clearTimeout(timeoutMapRef.current.get(key))
-    const timer = setTimeout(...args)
-    timeoutMapRef.current.set(key, timer)
-    return () => clearTimeoutTimer(key)
-  }
+  const setTimeoutTimer = useCallback(
+    (key: string, ...args: Parameters<typeof setTimeout>) => {
+      clearTimeout(timeoutMapRef.current.get(key))
+      const timer = setTimeout(...args)
+      timeoutMapRef.current.set(key, timer)
+      return () => clearTimeoutTimer(key)
+    },
+    [clearTimeoutTimer]
+  )

   /**
    * 设置一个 setInterval 定时器
@@ -89,56 +120,31 @@
    * cleanup();
    * ```
    */
-  const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
-    clearInterval(intervalMapRef.current.get(key))
-    const timer = setInterval(...args)
-    intervalMapRef.current.set(key, timer)
-    return () => clearIntervalTimer(key)
-  }
-
-  /**
-   * 清除指定 key 的 setTimeout 定时器
-   * @param key - 定时器标识符
-   */
-  const clearTimeoutTimer = (key: string) => {
-    clearTimeout(timeoutMapRef.current.get(key))
-    timeoutMapRef.current.delete(key)
-  }
-
-  /**
-   * 清除指定 key 的 setInterval 定时器
-   * @param key - 定时器标识符
-   */
-  const clearIntervalTimer = (key: string) => {
-    clearInterval(intervalMapRef.current.get(key))
-    intervalMapRef.current.delete(key)
-  }
+  const setIntervalTimer = useCallback(
+    (key: string, ...args: Parameters<typeof setInterval>) => {
+      clearInterval(intervalMapRef.current.get(key))
+      const timer = setInterval(...args)
+      intervalMapRef.current.set(key, timer)
+      return () => clearIntervalTimer(key)
+    },
+    [clearIntervalTimer]
+  )

   /**
    * 清除所有 setTimeout 定时器
    */
-  const clearAllTimeoutTimers = () => {
+  const clearAllTimeoutTimers = useCallback(() => {
     timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
     timeoutMapRef.current.clear()
-  }
+  }, [])

   /**
    * 清除所有 setInterval 定时器
    */
-  const clearAllIntervalTimers = () => {
+  const clearAllIntervalTimers = useCallback(() => {
     intervalMapRef.current.forEach((timer) => clearInterval(timer))
     intervalMapRef.current.clear()
-  }
-
-  /**
-   * 清除所有定时器,包括 setTimeout 和 setInterval
-   */
-  const clearAllTimers = () => {
-    timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
-    intervalMapRef.current.forEach((timer) => clearInterval(timer))
-    timeoutMapRef.current.clear()
-    intervalMapRef.current.clear()
-  }
+  }, [])

   return {
     setTimeoutTimer,
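With the setters wrapped in `useCallback`, they are stable and safe to list in dependency arrays; a small usage sketch follows (component, timer key, and import path are invented for illustration).

```tsx
import React, { useEffect } from 'react'
// Import path assumed from the hook's location in the renderer hooks directory.
import { useTimer } from '@renderer/hooks/useTimer'

const AutoDismissBanner: React.FC<{ onDismiss: () => void }> = ({ onDismiss }) => {
  const { setTimeoutTimer, clearTimeoutTimer } = useTimer()

  useEffect(() => {
    // Re-arming the same key replaces the previous timer; everything is cleared on unmount.
    setTimeoutTimer('auto-dismiss', onDismiss, 3000)
    return () => clearTimeoutTimer('auto-dismiss')
  }, [setTimeoutTimer, clearTimeoutTimer, onDismiss])

  return <div>Saved!</div>
}

export default AutoDismissBanner
```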

View File

@@ -280,6 +280,7 @@
"denied": "Tool request was denied.", "denied": "Tool request was denied.",
"timeout": "Tool request timed out before receiving approval." "timeout": "Tool request timed out before receiving approval."
}, },
"toolPendingFallback": "Tool",
"waiting": "Waiting for tool permission decision..." "waiting": "Waiting for tool permission decision..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Image Generation", "image-generation": "Image Generation (OpenAI)",
"jina-rerank": "Jina Rerank", "jina-rerank": "Jina Rerank",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -280,6 +280,7 @@
"denied": "工具请求已被拒绝。", "denied": "工具请求已被拒绝。",
"timeout": "工具请求在收到批准前超时。" "timeout": "工具请求在收到批准前超时。"
}, },
"toolPendingFallback": "工具",
"waiting": "等待工具权限决定..." "waiting": "等待工具权限决定..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "图生成", "image-generation": "图生成 (OpenAI)",
"jina-rerank": "Jina 重排序", "jina-rerank": "Jina 重排序",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -280,6 +280,7 @@
"denied": "工具請求已被拒絕。", "denied": "工具請求已被拒絕。",
"timeout": "工具請求在收到核准前逾時。" "timeout": "工具請求在收到核准前逾時。"
}, },
"toolPendingFallback": "工具",
"waiting": "等待工具權限決定..." "waiting": "等待工具權限決定..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "圖生成", "image-generation": "圖生成 (OpenAI)",
"jina-rerank": "Jina Rerank", "jina-rerank": "Jina Rerank",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -280,6 +280,7 @@
"denied": "Tool-Anfrage wurde abgelehnt.", "denied": "Tool-Anfrage wurde abgelehnt.",
"timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist." "timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist."
}, },
"toolPendingFallback": "Werkzeug",
"waiting": "Warten auf Entscheidung über Tool-Berechtigung..." "waiting": "Warten auf Entscheidung über Tool-Berechtigung..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Bildgenerierung", "image-generation": "Bilderzeugung (OpenAI)",
"jina-rerank": "Jina Reranking", "jina-rerank": "Jina Reranking",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -280,6 +280,7 @@
"denied": "Το αίτημα για εργαλείο απορρίφθηκε.", "denied": "Το αίτημα για εργαλείο απορρίφθηκε.",
"timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση." "timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση."
}, },
"toolPendingFallback": "Εργαλείο",
"waiting": "Αναμονή για απόφαση άδειας εργαλείου..." "waiting": "Αναμονή για απόφαση άδειας εργαλείου..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Δημιουργία Εικόνας", "image-generation": "Δημιουργία Εικόνων (OpenAI)",
"jina-rerank": "Επαναταξινόμηση Jina", "jina-rerank": "Επαναταξινόμηση Jina",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "Απάντηση OpenAI" "openai-response": "Απάντηση OpenAI"

View File

@@ -280,6 +280,7 @@
"denied": "La solicitud de herramienta fue denegada.", "denied": "La solicitud de herramienta fue denegada.",
"timeout": "La solicitud de herramienta expiró antes de recibir la aprobación." "timeout": "La solicitud de herramienta expiró antes de recibir la aprobación."
}, },
"toolPendingFallback": "Herramienta",
"waiting": "Esperando la decisión de permiso de la herramienta..." "waiting": "Esperando la decisión de permiso de la herramienta..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Generación de imágenes", "image-generation": "Generación de Imágenes (OpenAI)",
"jina-rerank": "Reordenamiento Jina", "jina-rerank": "Reordenamiento Jina",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "Respuesta de OpenAI" "openai-response": "Respuesta de OpenAI"

View File

@@ -280,6 +280,7 @@
"denied": "La demande d'outil a été refusée.", "denied": "La demande d'outil a été refusée.",
"timeout": "La demande d'outil a expiré avant d'obtenir l'approbation." "timeout": "La demande d'outil a expiré avant d'obtenir l'approbation."
}, },
"toolPendingFallback": "Outil",
"waiting": "En attente de la décision d'autorisation de l'outil..." "waiting": "En attente de la décision d'autorisation de l'outil..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Génération d'images", "image-generation": "Génération d'images (OpenAI)",
"jina-rerank": "Reclassement Jina", "jina-rerank": "Reclassement Jina",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "Réponse OpenAI" "openai-response": "Réponse OpenAI"

View File

@@ -280,6 +280,7 @@
"denied": "ツールリクエストは拒否されました。", "denied": "ツールリクエストは拒否されました。",
"timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。" "timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。"
}, },
"toolPendingFallback": "ツール",
"waiting": "ツールの許可決定を待っています..." "waiting": "ツールの許可決定を待っています..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "画像生成", "image-generation": "画像生成 (OpenAI)",
"jina-rerank": "Jina Rerank", "jina-rerank": "Jina Rerank",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -16,7 +16,7 @@
"error": { "error": {
"failed": "Falha ao excluir o agente" "failed": "Falha ao excluir o agente"
}, },
"title": "删除代理" "title": "Excluir Agente"
}, },
"edit": { "edit": {
"title": "Agent Editor" "title": "Agent Editor"
@@ -111,7 +111,7 @@
"label": "Modo de permissão", "label": "Modo de permissão",
"options": { "options": {
"acceptEdits": "Aceitar edições automaticamente", "acceptEdits": "Aceitar edições automaticamente",
"bypassPermissions": "忽略检查 de permissão", "bypassPermissions": "Ignorar verificações de permissão",
"default": "Padrão (perguntar antes de continuar)", "default": "Padrão (perguntar antes de continuar)",
"plan": "Modo de planejamento (plano sujeito a aprovação)" "plan": "Modo de planejamento (plano sujeito a aprovação)"
}, },
@@ -150,7 +150,7 @@
}, },
"success": { "success": {
"install": "Plugin instalado com sucesso", "install": "Plugin instalado com sucesso",
"uninstall": "插件 desinstalado com sucesso" "uninstall": "Plugin desinstalado com sucesso"
}, },
"tab": "plug-in", "tab": "plug-in",
"type": { "type": {
@@ -280,6 +280,7 @@
"denied": "Solicitação de ferramenta foi negada.", "denied": "Solicitação de ferramenta foi negada.",
"timeout": "A solicitação da ferramenta expirou antes de receber aprovação." "timeout": "A solicitação da ferramenta expirou antes de receber aprovação."
}, },
"toolPendingFallback": "Ferramenta",
"waiting": "Aguardando decisão de permissão da ferramenta..." "waiting": "Aguardando decisão de permissão da ferramenta..."
}, },
"type": { "type": {
@@ -1134,7 +1135,7 @@
"duplicate": "Duplicar", "duplicate": "Duplicar",
"edit": "Editar", "edit": "Editar",
"enabled": "Ativado", "enabled": "Ativado",
"error": "错误", "error": "Erro",
"errors": { "errors": {
"create_message": "Falha ao criar mensagem", "create_message": "Falha ao criar mensagem",
"validation": "Falha na verificação" "validation": "Falha na verificação"
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Geração de Imagem", "image-generation": "Geração de Imagens (OpenAI)",
"jina-rerank": "Jina Reordenar", "jina-rerank": "Jina Reordenar",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "Resposta OpenAI" "openai-response": "Resposta OpenAI"

View File

@@ -280,6 +280,7 @@
"denied": "Запрос на инструмент был отклонён.", "denied": "Запрос на инструмент был отклонён.",
"timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения." "timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения."
}, },
"toolPendingFallback": "Инструмент",
"waiting": "Ожидание решения о разрешении на использование инструмента..." "waiting": "Ожидание решения о разрешении на использование инструмента..."
}, },
"type": { "type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": { "endpoint_type": {
"anthropic": "Anthropic", "anthropic": "Anthropic",
"gemini": "Gemini", "gemini": "Gemini",
"image-generation": "Изображение", "image-generation": "Генерация изображений (OpenAI)",
"jina-rerank": "Jina Rerank", "jina-rerank": "Jina Rerank",
"openai": "OpenAI", "openai": "OpenAI",
"openai-response": "OpenAI-Response" "openai-response": "OpenAI-Response"

View File

@@ -1,8 +1,8 @@
+import { cacheService } from '@data/CacheService'
 import { loggerService } from '@logger'
 import type { QuickPanelTriggerInfo } from '@renderer/components/QuickPanel'
 import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
 import { isGenerateImageModel, isVisionModel } from '@renderer/config/models'
-import { cacheService } from '@renderer/data/CacheService'
 import { useSession } from '@renderer/hooks/agents/useSession'
 import { useInputText } from '@renderer/hooks/useInputText'
 import { selectNewTopicLoading } from '@renderer/hooks/useMessageOperations'
@@ -422,6 +422,7 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
       })
     )

+      // Clear text after successful send (draft is cleared automatically via onChange)
       setText('')
       setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
     } catch (error) {

View File

@@ -1,3 +1,4 @@
+import { cacheService } from '@data/CacheService'
 import { usePreference } from '@data/hooks/usePreference'
 import { loggerService } from '@logger'
 import {
@@ -9,13 +10,11 @@ import {
   isVisionModels,
   isWebSearchModel
 } from '@renderer/config/models'
-import { cacheService } from '@renderer/data/CacheService'
 import db from '@renderer/databases'
 import { useAssistant } from '@renderer/hooks/useAssistant'
 import { useInputText } from '@renderer/hooks/useInputText'
 import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
 import { useShortcut } from '@renderer/hooks/useShortcuts'
-import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
 import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
 import { useTimer } from '@renderer/hooks/useTimer'
 import {
@@ -135,6 +134,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
   const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
   const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()

+  const { text, setText } = useInputText({
+    initialValue: cacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
+    onChange: (value) => cacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
+  })
   const { text, setText } = useInputText({
     initialValue: cacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
     onChange: (value) => cacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
   })
@@ -150,7 +153,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     minHeight: 30
   })

-  const showKnowledgeIcon = useSidebarIconShow('knowledge')
   const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
   const [showInputEstimatedTokens] = usePreference('chat.input.show_estimated_tokens')
   const [sendMessageShortcut] = usePreference('chat.input.send_message_shortcut')
@@ -408,9 +410,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     focusTextarea
   ])

+  // TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
   useEffect(() => {
-    setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
-  }, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
+    setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
+  }, [assistant.knowledge_bases, setSelectedKnowledgeBases])

   useEffect(() => {
     // Disable web search if model doesn't support it

View File

@@ -1,5 +1,4 @@
 import { useAssistant } from '@renderer/hooks/useAssistant'
-import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
 import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
 import type { KnowledgeBase } from '@renderer/types'
 import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
@@ -30,7 +29,6 @@ const knowledgeBaseTool = defineTool({
   render: function KnowledgeBaseToolRender(context) {
     const { assistant, state, actions, quickPanel } = context

-    const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
     const { updateAssistant } = useAssistant(assistant.id)

     const handleSelect = useCallback(
@@ -41,10 +39,6 @@ const knowledgeBaseTool = defineTool({
       [updateAssistant, actions]
     )

-    if (!knowledgeSidebarEnabled) {
-      return null
-    }
-
     return (
       <KnowledgeBaseButton
         quickPanel={quickPanel}

View File

@@ -105,10 +105,12 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
   )
 }

+const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)
+
 const ThinkingTimeSeconds = memo(
   ({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
     const { t } = useTranslation()
-    const [displayTime, setDisplayTime] = useState(blockThinkingTime)
+    const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))
     const timer = useRef<NodeJS.Timeout | null>(null)
@@ -124,7 +126,7 @@ const ThinkingTimeSeconds = memo(
         clearInterval(timer.current)
         timer.current = null
       }
-      setDisplayTime(blockThinkingTime)
+      setDisplayTime(normalizeThinkingTime(blockThinkingTime))
     }

     return () => {
@@ -135,10 +137,10 @@ const ThinkingTimeSeconds = memo(
       }
     }, [isThinking, blockThinkingTime])

-    const thinkingTimeSeconds = useMemo(
-      () => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
-      [displayTime]
-    )
+    const thinkingTimeSeconds = useMemo(() => {
+      const safeTime = normalizeThinkingTime(displayTime)
+      return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
+    }, [displayTime])

     return isThinking
       ? t('chat.thinking', {

View File

@@ -287,6 +287,20 @@ describe('ThinkingBlock', () => {
       unmount()
     })
   })

+  it('should clamp invalid thinking times to a safe default', () => {
+    const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]
+
+    testCases.forEach((thinking_millsec) => {
+      const block = createThinkingBlock({
+        thinking_millsec: thinking_millsec as any,
+        status: MessageBlockStatus.SUCCESS
+      })
+      const { unmount } = renderThinkingBlock(block)
+
+      expect(getThinkingTimeText()).toHaveTextContent('0.1s')
+
+      unmount()
+    })
+  })
 })

 describe('collapse behavior', () => {

View File

@@ -11,6 +11,7 @@ import { usePreference } from '@data/hooks/usePreference'
 import { useTimer } from '@renderer/hooks/useTimer'
 import type { RootState } from '@renderer/store'
 // import { selectCurrentTopicId } from '@renderer/store/newMessage'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { Drawer } from 'antd'
 import type { FC } from 'react'
 import { useCallback, useEffect, useRef, useState } from 'react'
@@ -120,7 +121,8 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
   }

   const scrollToMessage = (element: HTMLElement) => {
-    element.scrollIntoView({ behavior: 'smooth', block: 'start' })
+    // Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
+    scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
   }

   const scrollToTop = () => {
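The `scrollIntoView` wrapper in `@renderer/utils/dom` is not included in this diff; presumably it forwards to the native API while widening the options type to accept Chromium's non-standard `container` field. A rough sketch under that assumption:

```ts
// Assumed shape of the '@renderer/utils/dom' helper; the real implementation is not in this diff.
type ScrollIntoViewOpts = ScrollIntoViewOptions & {
  // Chromium-only extension: limit scrolling to the nearest scrollable ancestor.
  container?: 'all' | 'nearest'
}

export function scrollIntoView(element: HTMLElement, options: ScrollIntoViewOpts): void {
  // Engines that don't support the extra `container` field simply ignore it,
  // so passing the widened options object through is safe.
  element.scrollIntoView(options as ScrollIntoViewOptions)
}
```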

View File

@@ -15,6 +15,7 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
 import type { Assistant, Topic } from '@renderer/types'
 import type { Message, MessageBlock } from '@renderer/types/newMessage'
 import { classNames, cn } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
 import { Divider } from 'antd'
 import type { Dispatch, FC, SetStateAction } from 'react'
@@ -84,9 +85,10 @@ const MessageItem: FC<Props> = ({
   useEffect(() => {
     if (isEditing && messageContainerRef.current) {
-      messageContainerRef.current.scrollIntoView({
+      scrollIntoView(messageContainerRef.current, {
         behavior: 'smooth',
-        block: 'center'
+        block: 'center',
+        container: 'nearest'
       })
     }
   }, [isEditing])
@@ -129,7 +131,7 @@ const MessageItem: FC<Props> = ({
   const messageHighlightHandler = useCallback(
     (highlight: boolean = true) => {
       if (messageContainerRef.current) {
-        messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
+        scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
         if (highlight) {
           setTimeoutTimer(
             'messageHighlightHandler',

View File

@@ -12,6 +12,7 @@ import { newMessagesActions } from '@renderer/store/newMessage'
 // import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
 import type { Message } from '@renderer/types/newMessage'
 import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import { getMainTextContent } from '@renderer/utils/messageUtils/find'
 import { CircleChevronDown } from 'lucide-react'
 import { type FC, useCallback, useEffect, useRef, useState } from 'react'
@@ -118,7 +119,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
       () => {
         const messageElement = document.getElementById(`message-${message.id}`)
         if (messageElement) {
-          messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
+          scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
         }
       },
       100
@@ -140,7 +141,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
         return
       }

-      messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
+      scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
     },
     [setSelectedMessage]
   )

View File

@@ -9,6 +9,7 @@ import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
 import type { Topic } from '@renderer/types'
 import type { Message } from '@renderer/types/newMessage'
 import { classNames } from '@renderer/utils'
+import { scrollIntoView } from '@renderer/utils/dom'
 import type { MultiModelMessageStyle } from '@shared/data/preference/preferenceTypes'
 import { Popover } from 'antd'
 import type { ComponentProps } from 'react'
@@ -75,7 +76,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
       () => {
         const messageElement = document.getElementById(`message-${message.id}`)
         if (messageElement) {
-          messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
+          scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
         }
       },
       200
@@ -134,7 +135,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
         setSelectedMessage(message)
       } else {
         // 直接滚动
-        element.scrollIntoView({ behavior: 'smooth', block: 'start' })
+        scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
       }
     }
   }

View File

@@ -3,6 +3,7 @@ import type { RootState } from '@renderer/store'
 import { messageBlocksSelectors } from '@renderer/store/messageBlock'
 import type { Message } from '@renderer/types/newMessage'
 import { MessageBlockType } from '@renderer/types/newMessage'
+import { scrollIntoView } from '@renderer/utils/dom'
 import type { FC } from 'react'
 import React, { useMemo, useRef } from 'react'
 import { useSelector } from 'react-redux'
@@ -72,10 +73,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
     const parent = messageOutlineContainerRef.current?.parentElement
     const messageContentContainer = parent?.querySelector('.message-content-container')
     if (messageContentContainer) {
-      const headingElement = messageContentContainer.querySelector(`#${id}`)
+      const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
       if (headingElement) {
         const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
-        headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
+        scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
       }
     }
   }

View File

@@ -5,8 +5,6 @@ import { Terminal } from 'lucide-react'
 import { ToolTitle } from './GenericTools'
 import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'

-const MAX_TAG_LENGTH = 100
-
 export function BashTool({
   input,
   output
@@ -17,12 +15,10 @@
   // 如果有输出,计算输出行数
   const outputLines = output ? output.split('\n').length : 0

-  // 处理命令字符串的截断,添加空值检查
+  // 处理命令字符串,添加空值检查
   const command = input?.command ?? ''
-  const needsTruncate = command.length > MAX_TAG_LENGTH
-  const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command

-  const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
+  const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>

   return {
     key: 'tool',
@@ -34,16 +30,12 @@
           params={input?.description}
           stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
         />
-        <div className="mt-1">
-          {needsTruncate ? (
-            <Popover
-              content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
-              trigger="hover">
-              {tagContent}
-            </Popover>
-          ) : (
-            tagContent
-          )}
+        <div className="mt-1 max-w-full">
+          <Popover
+            content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
+            trigger="hover">
+            {tagContent}
+          </Popover>
         </div>
       </>
     ),

View File

@@ -18,9 +18,9 @@ export function ToolTitle({
}) { }) {
return ( return (
<div className={`flex items-center gap-1 ${className}`}> <div className={`flex items-center gap-1 ${className}`}>
{icon} {icon && <span className="flex flex-shrink-0">{icon}</span>}
{label && <span className="font-medium text-sm">{label}</span>} {label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
{params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>} {params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
{stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>} {stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
</div> </div>
) )
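The important detail here is the `min-w-0` + `truncate` pair: flex items default to `min-width: auto`, so without `min-w-0` the params span never shrinks and the ellipsis never appears. A minimal sketch (Tailwind class names assumed):

```tsx
// Hypothetical row: only the middle span may shrink and truncate; the others keep their size.
export function TruncatingRow({ label, detail, stats }: { label: string; detail: string; stats?: string }) {
  return (
    <div className="flex items-center gap-1">
      <span className="flex-shrink-0 font-medium text-sm">{label}</span>
      <span className="min-w-0 truncate text-muted-foreground text-xs">{detail}</span>
      {stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
    </div>
  )
}
```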

View File

@@ -1,7 +1,10 @@
import { loggerService } from '@logger' import { loggerService } from '@logger'
import { useAppSelector } from '@renderer/store'
import { selectPendingPermission } from '@renderer/store/toolPermissions'
import type { NormalToolResponse } from '@renderer/types' import type { NormalToolResponse } from '@renderer/types'
import type { CollapseProps } from 'antd' import type { CollapseProps } from 'antd'
import { Collapse } from 'antd' import { Collapse, Spin } from 'antd'
import { useTranslation } from 'react-i18next'
// Export all types // Export all types
export * from './types' export * from './types'
@@ -83,17 +86,41 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
// 统一的组件渲染入口 // 统一的组件渲染入口
export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) { export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
const { arguments: args, response, tool, status } = toolResponse const { arguments: args, response, tool, status } = toolResponse
logger.info('Rendering agent tool response', { logger.debug('Rendering agent tool response', {
tool: tool, tool: tool,
arguments: args, arguments: args,
status,
response response
}) })
const pendingPermission = useAppSelector((state) =>
selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
)
if (status === 'pending') { if (status === 'pending') {
return <ToolPermissionRequestCard toolResponse={toolResponse} /> if (pendingPermission) {
return <ToolPermissionRequestCard toolResponse={toolResponse} />
}
return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
} }
return ( return (
<ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} /> <ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
) )
} }
function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
const { t } = useTranslation()
const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
const detail = description?.trim() || t('agent.toolPermission.executing')
return (
<div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
<Spin size="small" />
<div className="flex flex-col gap-1">
<span className="font-semibold text-default-700 text-sm">{label}</span>
<span className="text-default-500 text-xs">{detail}</span>
</div>
</div>
)
}
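A minimal sketch of the branching this hunk introduces, written as a pure function with assumed status values so the decision is easy to test in isolation:

```ts
type ToolCallStatus = 'pending' | 'invoking' | 'done' | 'error' // assumed values
type ToolView = 'permission-card' | 'pending-indicator' | 'tool-content'

// A pending call with an outstanding permission request shows the permission card;
// a pending call without one only shows the spinner indicator.
function resolveToolView(status: ToolCallStatus, hasPendingPermission: boolean): ToolView {
  if (status === 'pending') {
    return hasPendingPermission ? 'permission-card' : 'pending-indicator'
  }
  return 'tool-content'
}
```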

View File

@@ -12,7 +12,7 @@ import type { FetchChatCompletionParams } from '@renderer/types'
import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types' import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes' import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { type Chunk, ChunkType } from '@renderer/types/chunk' import { type Chunk, ChunkType } from '@renderer/types/chunk'
import type { Message } from '@renderer/types/newMessage' import type { Message, ResponseError } from '@renderer/types/newMessage'
import type { SdkModel } from '@renderer/types/sdk' import type { SdkModel } from '@renderer/types/sdk'
import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils' import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
import { abortCompletion, readyToAbort } from '@renderer/utils/abortController' import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
@@ -476,7 +476,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
} else { } else {
const abortId = uuid() const abortId = uuid()
const signal = readyToAbort(abortId) const signal = readyToAbort(abortId)
let chunkError let streamError: ResponseError | undefined
const params: StreamTextParams = { const params: StreamTextParams = {
system: assistant.prompt, system: assistant.prompt,
prompt: 'hi', prompt: 'hi',
@@ -495,19 +495,18 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
callType: 'check', callType: 'check',
onChunk: (chunk: Chunk) => { onChunk: (chunk: Chunk) => {
if (chunk.type === ChunkType.ERROR) { if (chunk.type === ChunkType.ERROR) {
chunkError = chunk.error streamError = chunk.error
} else { } else {
abortCompletion(abortId) abortCompletion(abortId)
} }
} }
} }
// Try streaming check
try { try {
await ai.completions(model.id, params, config) await ai.completions(model.id, params, config)
} catch (e) { } catch (e) {
if (!isAbortError(e) && !isAbortError(chunkError)) { if (!isAbortError(e) && !isAbortError(streamError)) {
throw e throw streamError ?? e
} }
} }
} }
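The point of the rename is that a health check aborts its own stream on the first good chunk, so an abort is the expected outcome; a captured stream error should win over the abort exception. A minimal sketch with assumed signatures, not the project API:

```ts
// Hypothetical probe helper mirroring the pattern above.
async function probeStream(
  run: (onChunk: (chunk: { type: 'error' | 'data'; error?: unknown }) => void, signal: AbortSignal) => Promise<void>,
  isAbortError: (e: unknown) => boolean
): Promise<void> {
  const controller = new AbortController()
  let streamError: unknown
  try {
    await run((chunk) => {
      if (chunk.type === 'error') {
        streamError = chunk.error // remember the error carried by the stream itself
      } else {
        controller.abort() // first real chunk is enough for a health check
      }
    }, controller.signal)
  } catch (e) {
    // Ignore the abort we caused; otherwise surface the stream error over the thrown one.
    if (!isAbortError(e) && !isAbortError(streamError)) {
      throw streamError ?? e
    }
  }
}
```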

View File

@@ -239,6 +239,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio
export type ModelTag = Exclude<ModelType, 'text'> | 'free' export type ModelTag = Exclude<ModelType, 'text'> | 'free'
// "image-generation" is also openai endpoint, but specifically for image generation.
export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank' export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
export type ModelPricing = { export type ModelPricing = {

View File

@@ -234,6 +234,7 @@ export interface Response {
error?: ResponseError error?: ResponseError
} }
// FIXME: Weak type safety. At runtime this may be a specific class instance that inherits from Error.
export type ResponseError = Record<string, any> export type ResponseError = Record<string, any>
export interface MessageInputBaseParams { export interface MessageInputBaseParams {

View File

@@ -7,11 +7,13 @@ import {
formatApiKeys, formatApiKeys,
formatAzureOpenAIApiHost, formatAzureOpenAIApiHost,
formatVertexApiHost, formatVertexApiHost,
getTrailingApiVersion,
hasAPIVersion, hasAPIVersion,
maskApiKey, maskApiKey,
routeToEndpoint, routeToEndpoint,
splitApiKeyString, splitApiKeyString,
validateApiHost validateApiHost,
withoutTrailingApiVersion
} from '../api' } from '../api'
vi.mock('@renderer/store', () => { vi.mock('@renderer/store', () => {
@@ -316,4 +318,90 @@ describe('api', () => {
) )
}) })
}) })
describe('getTrailingApiVersion', () => {
it('extracts trailing API version from URL', () => {
expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1')
expect(getTrailingApiVersion('https://api.example.com/v2')).toBe('v2')
})
it('extracts trailing API version with alpha/beta suffix', () => {
expect(getTrailingApiVersion('https://api.example.com/v2alpha')).toBe('v2alpha')
expect(getTrailingApiVersion('https://api.example.com/v3beta')).toBe('v3beta')
})
it('extracts trailing API version with trailing slash', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/')).toBe('v1')
expect(getTrailingApiVersion('https://api.example.com/v2beta/')).toBe('v2beta')
})
it('returns undefined when API version is in the middle of path', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/chat')).toBeUndefined()
expect(getTrailingApiVersion('https://api.example.com/v1/completions')).toBeUndefined()
})
it('returns undefined when no trailing version exists', () => {
expect(getTrailingApiVersion('https://api.example.com')).toBeUndefined()
expect(getTrailingApiVersion('https://api.example.com/api')).toBeUndefined()
})
it('extracts trailing version from complex URLs', () => {
expect(getTrailingApiVersion('https://api.example.com/service/v1')).toBe('v1')
expect(getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/google-ai-studio/v1beta')).toBe('v1beta')
})
it('only extracts the trailing version when multiple versions exist', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe('v2')
expect(
getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxxxxx/google-ai-studio/google-ai-studio/v1beta')
).toBe('v1beta')
})
it('returns undefined for empty string', () => {
expect(getTrailingApiVersion('')).toBeUndefined()
})
})
describe('withoutTrailingApiVersion', () => {
it('removes trailing API version from URL', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v2')).toBe('https://api.example.com')
})
it('removes trailing API version with alpha/beta suffix', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v2alpha')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v3beta')).toBe('https://api.example.com')
})
it('removes trailing API version with trailing slash', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v2beta/')).toBe('https://api.example.com')
})
it('does not remove API version in the middle of path', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/chat')).toBe('https://api.example.com/v1/chat')
expect(withoutTrailingApiVersion('https://api.example.com/v1/completions')).toBe(
'https://api.example.com/v1/completions'
)
})
it('returns URL unchanged when no trailing version exists', () => {
expect(withoutTrailingApiVersion('https://api.example.com')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/api')).toBe('https://api.example.com/api')
})
it('handles complex URLs with version at the end', () => {
expect(withoutTrailingApiVersion('https://api.example.com/service/v1')).toBe('https://api.example.com/service')
})
it('handles URLs with multiple versions but only removes the trailing one', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe(
'https://api.example.com/v1/service'
)
})
it('returns empty string unchanged', () => {
expect(withoutTrailingApiVersion('')).toBe('')
})
})
}) })

View File

@@ -12,6 +12,19 @@ export function formatApiKeys(value: string): string {
return value.replaceAll('', ',').replaceAll('\n', ',') return value.replaceAll('', ',').replaceAll('\n', ',')
} }
/**
* Matches a version segment in a path that starts with `/v<number>` and optionally
* continues with `alpha` or `beta`. The segment may be followed by `/` or the end
* of the string (useful for cases like `/v3alpha/resources`).
*/
const VERSION_REGEX_PATTERN = '\\/v\\d+(?:alpha|beta)?(?=\\/|$)'
/**
* Matches an API version at the end of a URL (with optional trailing slash).
* Used to detect and extract versions only from the trailing position.
*/
const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i
/** /**
* Determines whether the host's path contains a version-like segment (e.g. /v1, /v2beta), * Determines whether the host's path contains a version-like segment (e.g. /v1, /v2beta),
* *
@@ -21,16 +34,14 @@ export function formatApiKeys(value: string): string {
export function hasAPIVersion(host?: string): boolean { export function hasAPIVersion(host?: string): boolean {
if (!host) return false if (!host) return false
// Matches a version segment in the path that starts with `/v<number>` and may optionally continue with `alpha` or `beta`; const regex = new RegExp(VERSION_REGEX_PATTERN, 'i')
// the segment can be followed by `/` or the end of the string (to match cases like `/v3alpha/resources`).
const versionRegex = /\/v\d+(?:alpha|beta)?(?=\/|$)/i
try { try {
const url = new URL(host) const url = new URL(host)
return versionRegex.test(url.pathname) return regex.test(url.pathname)
} catch { } catch {
// If it cannot be parsed as a full URL, test it directly as a path // If it cannot be parsed as a full URL, test it directly as a path
return versionRegex.test(host) return regex.test(host)
} }
} }
@@ -55,7 +66,7 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
* Formats an API host URL by normalizing it and optionally appending an API version. * Formats an API host URL by normalizing it and optionally appending an API version.
* *
* @param host - The API host URL to format. Leading/trailing whitespace will be trimmed and trailing slashes removed. * @param host - The API host URL to format. Leading/trailing whitespace will be trimmed and trailing slashes removed.
* @param isSupportedAPIVerion - Whether the API version is supported. Defaults to `true`. * @param supportApiVersion - Whether the API version is supported. Defaults to `true`.
* @param apiVersion - The API version to append if needed. Defaults to `'v1'`. * @param apiVersion - The API version to append if needed. Defaults to `'v1'`.
* *
* @returns The formatted API host URL. If the host is empty after normalization, returns an empty string. * @returns The formatted API host URL. If the host is empty after normalization, returns an empty string.
@@ -67,13 +78,13 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
* formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#' * formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#'
* formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2' * formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2'
*/ */
export function formatApiHost(host?: string, isSupportedAPIVerion: boolean = true, apiVersion: string = 'v1'): string { export function formatApiHost(host?: string, supportApiVersion: boolean = true, apiVersion: string = 'v1'): string {
const normalizedHost = withoutTrailingSlash(trim(host)) const normalizedHost = withoutTrailingSlash(trim(host))
if (!normalizedHost) { if (!normalizedHost) {
return '' return ''
} }
if (normalizedHost.endsWith('#') || !isSupportedAPIVerion || hasAPIVersion(normalizedHost)) { if (normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost)) {
return normalizedHost return normalizedHost
} }
return `${normalizedHost}/${apiVersion}` return `${normalizedHost}/${apiVersion}`
@@ -213,3 +224,50 @@ export function splitApiKeyString(keyStr: string): string[] {
.map((k) => k.replace(/\\,/g, ',')) .map((k) => k.replace(/\\,/g, ','))
.filter((k) => k) .filter((k) => k)
} }
/**
* Extracts the trailing API version segment from a URL path.
*
* This function extracts API version patterns (e.g., `v1`, `v2beta`) from the end of a URL.
* Only versions at the end of the path are extracted, not versions in the middle.
* The returned version string does not include leading or trailing slashes.
*
* @param {string} url - The URL string to parse.
* @returns {string | undefined} The trailing API version found (e.g., 'v1', 'v2beta'), or undefined if none found.
*
* @example
* getTrailingApiVersion('https://api.example.com/v1') // 'v1'
* getTrailingApiVersion('https://api.example.com/v2beta/') // 'v2beta'
* getTrailingApiVersion('https://api.example.com/v1/chat') // undefined (version not at end)
* getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/v1beta') // 'v1beta'
* getTrailingApiVersion('https://api.example.com') // undefined
*/
export function getTrailingApiVersion(url: string): string | undefined {
const match = url.match(TRAILING_VERSION_REGEX)
if (match) {
// Extract version without leading slash and trailing slash
return match[0].replace(/^\//, '').replace(/\/$/, '')
}
return undefined
}
/**
* Removes the trailing API version segment from a URL path.
*
* This function removes API version patterns (e.g., `/v1`, `/v2beta`) from the end of a URL.
* Only versions at the end of the path are removed, not versions in the middle.
*
* @param {string} url - The URL string to process.
* @returns {string} The URL with the trailing API version removed, or the original URL if no trailing version found.
*
* @example
* withoutTrailingApiVersion('https://api.example.com/v1') // 'https://api.example.com'
* withoutTrailingApiVersion('https://api.example.com/v2beta/') // 'https://api.example.com'
* withoutTrailingApiVersion('https://api.example.com/v1/chat') // 'https://api.example.com/v1/chat' (no change)
* withoutTrailingApiVersion('https://api.example.com') // 'https://api.example.com'
*/
export function withoutTrailingApiVersion(url: string): string {
return url.replace(TRAILING_VERSION_REGEX, '')
}
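A short usage sketch for the new helpers; the relative import path and the second host value are illustrative only:

```ts
import { getTrailingApiVersion, withoutTrailingApiVersion } from './api'

// Swap whatever trailing version the user typed for the version the endpoint expects.
function withApiVersion(host: string, requiredVersion: string): string {
  return `${withoutTrailingApiVersion(host)}/${requiredVersion}`
}

getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/google-ai-studio/v1beta') // 'v1beta'
withApiVersion('https://api.example.com/v1', 'v1beta') // 'https://api.example.com/v1beta'
```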

View File

@@ -1,3 +1,15 @@
import { loggerService } from '@logger'
const logger = loggerService.withContext('utils/dom')
interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
/**
* @see https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView#container
* @see https://github.com/microsoft/TypeScript/issues/62803
*/
container?: 'all' | 'nearest'
}
/** /**
* Simple wrapper for scrollIntoView with common default options. * Simple wrapper for scrollIntoView with common default options.
* Provides a unified interface with sensible defaults. * Provides a unified interface with sensible defaults.
@@ -5,7 +17,12 @@
* @param element - The target element to scroll into view * @param element - The target element to scroll into view
* @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' } * @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' }
*/ */
export function scrollIntoView(element: HTMLElement, options?: ScrollIntoViewOptions): void { export function scrollIntoView(element: HTMLElement, options?: ChromiumScrollIntoViewOptions): void {
if (!element) {
logger.warn('[scrollIntoView] Unexpected falsy element; doing nothing as a fallback.')
return
}
const defaultOptions: ScrollIntoViewOptions = { const defaultOptions: ScrollIntoViewOptions = {
behavior: 'smooth', behavior: 'smooth',
block: 'center', block: 'center',
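A usage sketch for the widened wrapper: `container: 'nearest'` limits scrolling to the nearest scrollable ancestor, and the custom options type exists because the DOM typings don't yet know the option. The element id is hypothetical:

```ts
import { scrollIntoView } from '@renderer/utils/dom'

const heading = document.querySelector<HTMLElement>('#section-1')
if (heading) {
  // Scroll only the nearest scrollable ancestor instead of every ancestor up to the viewport.
  scrollIntoView(heading, { behavior: 'smooth', block: 'start', container: 'nearest' })
}
```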

View File

@@ -566,3 +566,54 @@ export const makeSvgSizeAdaptive = (element: Element): Element => {
return element return element
} }
/**
* Converts an image Blob to a PNG-format Blob
* @param blob The source image Blob
* @returns Promise<Blob> The converted PNG Blob
*/
export const convertImageToPng = async (blob: Blob): Promise<Blob> => {
if (blob.type === 'image/png') {
return blob
}
return new Promise((resolve, reject) => {
const img = new Image()
const url = URL.createObjectURL(blob)
img.onload = () => {
try {
const canvas = document.createElement('canvas')
canvas.width = img.width
canvas.height = img.height
const ctx = canvas.getContext('2d')
if (!ctx) {
URL.revokeObjectURL(url)
reject(new Error('Failed to get canvas context'))
return
}
ctx.drawImage(img, 0, 0)
canvas.toBlob((pngBlob) => {
URL.revokeObjectURL(url)
if (pngBlob) {
resolve(pngBlob)
} else {
reject(new Error('Failed to convert image to png'))
}
}, 'image/png')
} catch (error) {
URL.revokeObjectURL(url)
reject(error)
}
}
img.onerror = () => {
URL.revokeObjectURL(url)
reject(new Error('Failed to load image for conversion'))
}
img.src = url
})
}
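A usage sketch for the converter; the import path is hypothetical and the clipboard use case is only one plausible caller, since Chromium's async clipboard generally accepts only `image/png` for image payloads:

```ts
import { convertImageToPng } from '@renderer/utils/image' // hypothetical path

async function copyImageToClipboard(blob: Blob): Promise<void> {
  const pngBlob = await convertImageToPng(blob)
  await navigator.clipboard.write([new ClipboardItem({ 'image/png': pngBlob })])
}
```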

View File

@@ -90,7 +90,8 @@ export function openAIToolsToMcpTool(
return undefined return undefined
} }
const tools = mcpTools.filter((mcpTool) => { const tools = mcpTools.filter((mcpTool) => {
return mcpTool.id === toolName || mcpTool.name === toolName // toolName is mcpTool.id (registered with id as function name)
return mcpTool.id === toolName
}) })
if (tools.length > 1) { if (tools.length > 1) {
logger.warn(`Multiple MCP Tools found for tool call: ${toolName}`) logger.warn(`Multiple MCP Tools found for tool call: ${toolName}`)
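The lookup now matches on `mcpTool.id` only, because tools are registered with their id as the function name; matching on `name` as well could resolve the wrong tool when two servers expose tools with the same name. A minimal sketch with an assumed tool shape:

```ts
interface McpToolRef {
  id: string // used as the function name at registration time
  name: string
  serverName: string
}

// Resolve a tool call strictly by id; duplicates are only logged, as in the hunk above.
function resolveToolById(tools: McpToolRef[], functionName: string): McpToolRef | undefined {
  return tools.find((tool) => tool.id === functionName)
}
```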

View File

@@ -256,6 +256,17 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
let blockId: string | null = null let blockId: string | null = null
let thinkingBlockId: string | null = null let thinkingBlockId: string | null = null
let thinkingStartTime: number | null = null
const resolveThinkingDuration = (duration?: number) => {
if (typeof duration === 'number' && Number.isFinite(duration)) {
return duration
}
if (thinkingStartTime !== null) {
return Math.max(0, performance.now() - thinkingStartTime)
}
return 0
}
setIsLoading(true) setIsLoading(true)
setIsOutputted(false) setIsOutputted(false)
@@ -293,6 +304,7 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
case ChunkType.THINKING_START: case ChunkType.THINKING_START:
{ {
setIsOutputted(true) setIsOutputted(true)
thinkingStartTime = performance.now()
if (thinkingBlockId) { if (thinkingBlockId) {
store.dispatch( store.dispatch(
updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } }) updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -317,9 +329,13 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
{ {
setIsOutputted(true) setIsOutputted(true)
if (thinkingBlockId) { if (thinkingBlockId) {
if (thinkingStartTime === null) {
thinkingStartTime = performance.now()
}
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
throttledBlockUpdate(thinkingBlockId, { throttledBlockUpdate(thinkingBlockId, {
content: chunk.text, content: chunk.text,
thinking_millsec: chunk.thinking_millsec thinking_millsec: thinkingDuration
}) })
} }
} }
@@ -327,14 +343,17 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
case ChunkType.THINKING_COMPLETE: case ChunkType.THINKING_COMPLETE:
{ {
if (thinkingBlockId) { if (thinkingBlockId) {
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
cancelThrottledBlockUpdate(thinkingBlockId) cancelThrottledBlockUpdate(thinkingBlockId)
store.dispatch( store.dispatch(
updateOneBlock({ updateOneBlock({
id: thinkingBlockId, id: thinkingBlockId,
changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: chunk.thinking_millsec } changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: thinkingDuration }
}) })
) )
} }
thinkingStartTime = null
thinkingBlockId = null
} }
break break
case ChunkType.TEXT_START: case ChunkType.TEXT_START:
@@ -406,6 +425,8 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
if (!isAborted) { if (!isAborted) {
throw new Error(chunk.error.message) throw new Error(chunk.error.message)
} }
thinkingStartTime = null
thinkingBlockId = null
} }
//fall through //fall through
case ChunkType.BLOCK_COMPLETE: case ChunkType.BLOCK_COMPLETE:
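This hunk and the `processMessages` one below share the same fallback: prefer the provider-reported `thinking_millsec`, otherwise measure locally from THINKING_START. A standalone sketch of that timer (a hypothetical helper, not the project API):

```ts
function createThinkingTimer(now: () => number = () => performance.now()) {
  let startedAt: number | null = null
  return {
    start(): void {
      startedAt = now()
    },
    // Use the reported duration when it is a finite number; otherwise fall back to the local clock.
    resolve(reported?: number): number {
      if (typeof reported === 'number' && Number.isFinite(reported)) {
        return reported
      }
      return startedAt !== null ? Math.max(0, now() - startedAt) : 0
    },
    reset(): void {
      startedAt = null
    }
  }
}
```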

View File

@@ -41,8 +41,19 @@ export const processMessages = async (
let textBlockId: string | null = null let textBlockId: string | null = null
let thinkingBlockId: string | null = null let thinkingBlockId: string | null = null
let thinkingStartTime: number | null = null
let textBlockContent: string = '' let textBlockContent: string = ''
const resolveThinkingDuration = (duration?: number) => {
if (typeof duration === 'number' && Number.isFinite(duration)) {
return duration
}
if (thinkingStartTime !== null) {
return Math.max(0, performance.now() - thinkingStartTime)
}
return 0
}
const assistantMessage = getAssistantMessage({ const assistantMessage = getAssistantMessage({
assistant, assistant,
topic topic
@@ -79,6 +90,7 @@ export const processMessages = async (
switch (chunk.type) { switch (chunk.type) {
case ChunkType.THINKING_START: case ChunkType.THINKING_START:
{ {
thinkingStartTime = performance.now()
if (thinkingBlockId) { if (thinkingBlockId) {
store.dispatch( store.dispatch(
updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } }) updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -102,9 +114,13 @@ export const processMessages = async (
case ChunkType.THINKING_DELTA: case ChunkType.THINKING_DELTA:
{ {
if (thinkingBlockId) { if (thinkingBlockId) {
if (thinkingStartTime === null) {
thinkingStartTime = performance.now()
}
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
throttledBlockUpdate(thinkingBlockId, { throttledBlockUpdate(thinkingBlockId, {
content: chunk.text, content: chunk.text,
thinking_millsec: chunk.thinking_millsec thinking_millsec: thinkingDuration
}) })
} }
onStream() onStream()
@@ -113,6 +129,7 @@ export const processMessages = async (
case ChunkType.THINKING_COMPLETE: case ChunkType.THINKING_COMPLETE:
{ {
if (thinkingBlockId) { if (thinkingBlockId) {
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
cancelThrottledBlockUpdate(thinkingBlockId) cancelThrottledBlockUpdate(thinkingBlockId)
store.dispatch( store.dispatch(
updateOneBlock({ updateOneBlock({
@@ -120,12 +137,13 @@ export const processMessages = async (
changes: { changes: {
content: chunk.text, content: chunk.text,
status: MessageBlockStatus.SUCCESS, status: MessageBlockStatus.SUCCESS,
thinking_millsec: chunk.thinking_millsec thinking_millsec: thinkingDuration
} }
}) })
) )
thinkingBlockId = null thinkingBlockId = null
} }
thinkingStartTime = null
} }
break break
case ChunkType.TEXT_START: case ChunkType.TEXT_START:
@@ -190,6 +208,7 @@ export const processMessages = async (
case ChunkType.ERROR: case ChunkType.ERROR:
{ {
const blockId = textBlockId || thinkingBlockId const blockId = textBlockId || thinkingBlockId
thinkingStartTime = null
if (blockId) { if (blockId) {
store.dispatch( store.dispatch(
updateOneBlock({ updateOneBlock({

View File

@@ -284,6 +284,54 @@ describe('processMessages', () => {
}) })
}) })
describe('thinking timer fallback', () => {
it('should use local timer when thinking_millsec is missing', async () => {
const nowValues = [1000, 1500, 2000]
let nowIndex = 0
const performanceSpy = vi.spyOn(performance, 'now').mockImplementation(() => {
const value = nowValues[Math.min(nowIndex, nowValues.length - 1)]
nowIndex += 1
return value
})
const mockChunks = [
{ type: ChunkType.THINKING_START },
{ type: ChunkType.THINKING_DELTA, text: 'Thinking...' },
{ type: ChunkType.THINKING_COMPLETE, text: 'Done thinking' },
{ type: ChunkType.TEXT_START },
{ type: ChunkType.TEXT_COMPLETE, text: 'Final answer' },
{ type: ChunkType.BLOCK_COMPLETE }
]
vi.mocked(fetchChatCompletion).mockImplementation(async ({ onChunkReceived }: any) => {
for (const chunk of mockChunks) {
await onChunkReceived(chunk)
}
})
await processMessages(
mockAssistant,
mockTopic,
'test prompt',
mockSetAskId,
mockOnStream,
mockOnFinish,
mockOnError
)
const thinkingDeltaCall = vi.mocked(throttledBlockUpdate).mock.calls.find(([id]) => id === 'thinking-block-1')
const deltaPayload = thinkingDeltaCall?.[1] as { thinking_millsec?: number } | undefined
expect(deltaPayload?.thinking_millsec).toBe(500)
const thinkingCompleteUpdate = vi
.mocked(updateOneBlock)
.mock.calls.find(([payload]) => (payload as any)?.changes?.thinking_millsec !== undefined)
expect((thinkingCompleteUpdate?.[0] as any)?.changes?.thinking_millsec).toBe(1000)
performanceSpy.mockRestore()
})
})
describe('stream with exceptions', () => { describe('stream with exceptions', () => {
it('should handle error chunks properly', async () => { it('should handle error chunks properly', async () => {
const mockError = new Error('Stream processing error') const mockError = new Error('Stream processing error')