Compare commits

..

1 Commit

Author SHA1 Message Date
icarus
afd44780d6 feat: improve ImageViewer context menu UX
- Reorder menu items to prioritize "Copy as Image" as the first action
- Rename "Copy" to "Copy Image Source" for better clarity
- Remove unused ImageIcon import
- Add i18n support for "preview.copy.src" across all locales

This change improves the user experience by making the most common
action (copy image) the first option in the context menu, while also
clarifying what each copy action does.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 15:36:25 +08:00
54 changed files with 303 additions and 727 deletions

View File

@@ -12,15 +12,7 @@ This file provides guidance to AI coding assistants when working with code in th
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
## Pull Request Workflow (CRITICAL)
When creating a Pull Request, you MUST:
1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.
## Development Commands

View File

@@ -134,9 +134,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
A New Era of Intelligence with Cherry Studio 1.7.1
A New Era of Intelligence with Cherry Studio 1.7.0
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
@@ -187,9 +187,9 @@ releaseInfo:
The Agent Era is here. We can't wait to see what you'll create.
<!--LANG:zh-CN-->
Cherry Studio 1.7.1:开启智能新纪元
Cherry Studio 1.7.0:开启智能新纪元
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent——能够自主思考、规划和行动的 AI。
今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent——能够自主思考、规划和行动的 AI。
多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。

View File

@@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "1.7.1",
"version": "1.7.0",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
@@ -162,7 +162,7 @@
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@mistralai/mistralai": "^1.7.5",
"@modelcontextprotocol/sdk": "^1.23.0",
"@modelcontextprotocol/sdk": "^1.17.5",
"@mozilla/readability": "^0.6.0",
"@notionhq/client": "^2.2.15",
"@openrouter/ai-sdk-provider": "^1.2.8",

View File

@@ -69,7 +69,6 @@ export interface CherryInProviderSettings {
headers?: HeadersInput
/**
* Optional endpoint type to distinguish different endpoint behaviors.
* "image-generation" is also openai endpoint, but specifically for image generation.
*/
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
}

View File

@@ -42,14 +42,11 @@ import {
type MCPPrompt,
type MCPResource,
type MCPServer,
type MCPTool,
MCPToolInputSchema,
MCPToolOutputSchema
type MCPTool
} from '@types'
import { app, net } from 'electron'
import { EventEmitter } from 'events'
import { v4 as uuidv4 } from 'uuid'
import * as z from 'zod'
import { CacheService } from './CacheService'
import DxtService from './DxtService'
@@ -623,8 +620,6 @@ class McpService {
tools.map((tool: SDKTool) => {
const serverTool: MCPTool = {
...tool,
inputSchema: z.parse(MCPToolInputSchema, tool.inputSchema),
outputSchema: tool.outputSchema ? z.parse(MCPToolOutputSchema, tool.outputSchema) : undefined,
id: buildFunctionCallToolName(server.name, tool.name, server.id),
serverId: server.id,
serverName: server.name,

View File

@@ -1,7 +1,6 @@
// src/main/services/agents/services/claudecode/index.ts
import { EventEmitter } from 'node:events'
import { createRequire } from 'node:module'
import path from 'node:path'
import type {
CanUseTool,
@@ -122,11 +121,7 @@ class ClaudeCodeService implements AgentServiceInterface {
// TODO: support set small model in UI
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
ELECTRON_RUN_AS_NODE: '1',
ELECTRON_NO_ATTACH_CONSOLE: '1',
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
// This prevents the SDK from using the user's home directory which may have encoding problems
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
ELECTRON_NO_ATTACH_CONSOLE: '1'
}
const errorChunks: string[] = []

View File

@@ -27,7 +27,6 @@ import { buildAiSdkMiddlewares } from './middleware/AiSdkMiddlewareBuilder'
import { buildPlugins } from './plugins/PluginBuilder'
import { createAiSdkProvider } from './provider/factory'
import {
adaptProvider,
getActualProvider,
isModernSdkSupported,
prepareSpecialProviderConfig,
@@ -65,11 +64,12 @@ export default class ModernAiProvider {
* - URL will be automatically formatted via `formatProviderApiHost`, adding version suffixes like `/v1`
*
* 2. When called with `(model, provider)`:
* - The provided provider will be adapted via `adaptProvider`
* - URL formatting behavior depends on the adapted result
* - **Directly uses the provided provider WITHOUT going through `getActualProvider`**
* - **URL will NOT be automatically formatted, `/v1` suffix will NOT be added**
* - This is legacy behavior kept for backward compatibility
*
* 3. When called with `(provider)`:
* - The provider will be adapted via `adaptProvider`
* - Directly uses the provider without requiring a model
* - Used for operations that don't need a model (e.g., fetchModels)
*
* @example
@@ -77,7 +77,7 @@ export default class ModernAiProvider {
* // Recommended: Auto-format URL
* const ai = new ModernAiProvider(model)
*
* // Provider will be adapted
* // Not recommended: Skip URL formatting (only for special cases)
* const ai = new ModernAiProvider(model, customProvider)
*
* // For operations that don't need a model
@@ -91,12 +91,12 @@ export default class ModernAiProvider {
if (this.isModel(modelOrProvider)) {
// 传入的是 Model
this.model = modelOrProvider
this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider)
this.actualProvider = provider || getActualProvider(modelOrProvider)
// 只保存配置不预先创建executor
this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
} else {
// 传入的是 Provider
this.actualProvider = adaptProvider({ provider: modelOrProvider })
this.actualProvider = modelOrProvider
// model为可选某些操作如fetchModels不需要model
}

View File

@@ -78,13 +78,11 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
}
/**
* Format and normalize the API host URL for a provider.
* Handles provider-specific URL formatting rules (e.g., appending version paths, Azure formatting).
*
* @param provider - The provider whose API host is to be formatted.
* @returns A new provider instance with the formatted API host.
* 主要用来对齐AISdk的BaseURL格式
* @param provider
* @returns
*/
export function formatProviderApiHost(provider: Provider): Provider {
function formatProviderApiHost(provider: Provider): Provider {
const formatted = { ...provider }
if (formatted.anthropicApiHost) {
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
@@ -116,38 +114,18 @@ export function formatProviderApiHost(provider: Provider): Provider {
}
/**
* Retrieve the effective Provider configuration for the given model.
* Applies all necessary transformations (special-provider handling, URL formatting, etc.).
*
* @param model - The model whose provider is to be resolved.
* @returns A new Provider instance with all adaptations applied.
* 获取实际的Provider配置
* 简化版:将逻辑分解为小函数
*/
export function getActualProvider(model: Model): Provider {
const baseProvider = getProviderByModel(model)
return adaptProvider({ provider: baseProvider, model })
}
// 按顺序处理各种转换
let actualProvider = cloneDeep(baseProvider)
actualProvider = handleSpecialProviders(model, actualProvider)
actualProvider = formatProviderApiHost(actualProvider)
/**
* Transforms a provider configuration by applying model-specific adaptations and normalizing its API host.
* The transformations are applied in the following order:
* 1. Model-specific provider handling (e.g., New-API, system providers, Azure OpenAI)
* 2. API host formatting (provider-specific URL normalization)
*
* @param provider - The base provider configuration to transform.
* @param model - The model associated with the provider; optional but required for special-provider handling.
* @returns A new Provider instance with all transformations applied.
*/
export function adaptProvider({ provider, model }: { provider: Provider; model?: Model }): Provider {
let adaptedProvider = cloneDeep(provider)
// Apply transformations in order
if (model) {
adaptedProvider = handleSpecialProviders(model, adaptedProvider)
}
adaptedProvider = formatProviderApiHost(adaptedProvider)
return adaptedProvider
return actualProvider
}
/**

View File

@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
'gen_ai.usage.output_tokens'
]
const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
if (completionTokens !== undefined || promptTokens !== undefined) {
const usage: TokenUsage = {

View File

@@ -1,53 +0,0 @@
import type { Span } from '@opentelemetry/api'
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
import { describe, expect, it, vi } from 'vitest'
import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'
vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
debug: vi.fn(),
error: vi.fn(),
info: vi.fn(),
warn: vi.fn()
})
}
}))
describe('AiSdkSpanAdapter', () => {
const createMockSpan = (attributes: Record<string, unknown>): Span => {
const span = {
spanContext: () => ({
traceId: 'trace-id',
spanId: 'span-id'
}),
_attributes: attributes,
_events: [],
name: 'test span',
status: { code: SpanStatusCode.OK },
kind: SpanKind.CLIENT,
startTime: [0, 0] as [number, number],
endTime: [0, 1] as [number, number],
ended: true,
parentSpanId: '',
links: []
}
return span as unknown as Span
}
it('maps prompt and completion usage tokens to the correct fields', () => {
const attributes = {
'ai.usage.promptTokens': 321,
'ai.usage.completionTokens': 654
}
const span = createMockSpan(attributes)
const result = AiSdkSpanAdapter.convertToSpanEntity({ span })
expect(result.usage).toBeDefined()
expect(result.usage?.prompt_tokens).toBe(321)
expect(result.usage?.completion_tokens).toBe(654)
expect(result.usage?.total_tokens).toBe(975)
})
})

View File

@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
expect(result).toEqual({})
})
it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
const { isReasoningModel } = await import('@renderer/config/models')
vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -161,29 +161,6 @@ describe('reasoning utils', () => {
settings: {}
} as Assistant
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({})
})
it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
const { isReasoningModel } = await import('@renderer/config/models')
vi.mocked(isReasoningModel).mockReturnValue(true)
const model: Model = {
id: 'anthropic/claude-sonnet-4',
name: 'Claude Sonnet 4',
provider: SystemProviderIds.openrouter
} as Model
const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {
reasoning_effort: 'none'
}
} as Assistant
const result = getReasoningEffort(assistant, model)
expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
})
@@ -292,9 +269,7 @@ describe('reasoning utils', () => {
const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {
reasoning_effort: 'none'
}
settings: {}
} as Assistant
const result = getReasoningEffort(assistant, model)

View File

@@ -16,8 +16,10 @@ import {
isGPT5SeriesModel,
isGPT51SeriesModel,
isGrok4FastReasoningModel,
isGrokReasoningModel,
isOpenAIDeepResearchModel,
isOpenAIModel,
isOpenAIReasoningModel,
isQwenAlwaysThinkModel,
isQwenReasoningModel,
isReasoningModel,
@@ -62,22 +64,30 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
const reasoningEffort = assistant?.settings?.reasoning_effort
// reasoningEffort is not set, no extra reasoning setting
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
if (!reasoningEffort) {
return {}
}
// Handle 'none' reasoningEffort. It's explicitly off.
if (reasoningEffort === 'none') {
// Handle undefined and 'none' reasoningEffort.
// TODO: They should be separated.
if (!reasoningEffort || reasoningEffort === 'none') {
// openrouter: use reasoning
if (model.provider === SystemProviderIds.openrouter) {
// Don't disable reasoning for Gemini models that support thinking tokens
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
return {}
}
// 'none' is not an available value for effort for now.
// I think they should resolve this issue soon, so I'll just go ahead and use this value.
if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
return { reasoning: { effort: 'none' } }
}
// Don't disable reasoning for models that require it
if (
isGrokReasoningModel(model) ||
isOpenAIReasoningModel(model) ||
isQwenAlwaysThinkModel(model) ||
model.id.includes('seed-oss') ||
model.id.includes('minimax-m2')
) {
return {}
}
return { reasoning: { enabled: false, exclude: true } }
}
@@ -91,6 +101,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { enable_thinking: false }
}
// claude
if (isSupportedThinkingTokenClaudeModel(model)) {
return {}
}
// gemini
if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -103,10 +118,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
}
} else {
logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
return {}
}
return {}
}
// use thinking, doubao, zhipu, etc.
@@ -126,7 +139,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
return {}
}
@@ -281,7 +293,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
// OpenRouter models, use reasoning
// FIXME: duplicated openrouter handling. remove one
if (model.provider === SystemProviderIds.openrouter) {
if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
return {

View File

@@ -215,10 +215,6 @@
border-top: none !important;
}
.ant-collapse-header-text {
overflow-x: hidden;
}
.ant-slider .ant-slider-handle::after {
box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
}

View File

@@ -14,7 +14,7 @@ import { convertImageToPng } from '@renderer/utils/image'
import type { ImageProps as AntImageProps } from 'antd'
import { Dropdown, Image as AntImage, Space } from 'antd'
import { Base64 } from 'js-base64'
import { DownloadIcon, ImageIcon } from 'lucide-react'
import { DownloadIcon } from 'lucide-react'
import mime from 'mime'
import React from 'react'
import { useTranslation } from 'react-i18next'
@@ -73,9 +73,15 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
const getContextMenuItems = (src: string, size: number = 14) => {
return [
{
key: 'copy-url',
key: 'copy-image',
label: t('common.copy'),
icon: <CopyIcon size={size} />,
onClick: () => handleCopyImage(src)
},
{
key: 'copy-url',
label: t('preview.copy.src'),
icon: <CopyIcon size={size} />,
onClick: () => {
navigator.clipboard.writeText(src)
window.toast.success(t('message.copy.success'))
@@ -86,12 +92,6 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
label: t('common.download'),
icon: <DownloadIcon size={size} />,
onClick: () => download(src)
},
{
key: 'copy-image',
label: t('preview.copy.image'),
icon: <ImageIcon size={size} />,
onClick: () => handleCopyImage(src)
}
]
}

View File

@@ -460,19 +460,16 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
}
export const isDeepSeekHybridInferenceModel = (model: Model) => {
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
const modelId = getLowerBaseModelName(model.id)
// deepseek官方使用chat和reasoner做推理控制其他provider需要单独判断id可能会有所差别
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型这里有风险
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
// until the end of the string.
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
// TODO: move to utils and add test cases
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
})
return idResult || nameResult
const modelId = getLowerBaseModelName(model.id)
// deepseek官方使用chat和reasoner做推理控制其他provider需要单独判断id可能会有所差别
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型这里有风险
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
// until the end of the string.
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
// TODO: move to utils and add test cases
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
}
export const isLingReasoningModel = (model?: Model): boolean => {
@@ -526,6 +523,7 @@ export function isReasoningModel(model?: Model): boolean {
REASONING_REGEX.test(model.name) ||
isSupportedThinkingTokenDoubaoModel(model) ||
isDeepSeekHybridInferenceModel(model) ||
isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
false
)
}

View File

@@ -1,5 +1,5 @@
import { throttle } from 'lodash'
import { useEffect, useMemo, useRef } from 'react'
import { useEffect, useRef } from 'react'
import { useTimer } from './useTimer'
@@ -12,18 +12,13 @@ import { useTimer } from './useTimer'
*/
export default function useScrollPosition(key: string, throttleWait?: number) {
const containerRef = useRef<HTMLDivElement>(null)
const scrollKey = useMemo(() => `scroll:${key}`, [key])
const scrollKeyRef = useRef(scrollKey)
const scrollKey = `scroll:${key}`
const { setTimeoutTimer } = useTimer()
useEffect(() => {
scrollKeyRef.current = scrollKey
}, [scrollKey])
const handleScroll = throttle(() => {
const position = containerRef.current?.scrollTop ?? 0
window.requestAnimationFrame(() => {
window.keyv.set(scrollKeyRef.current, position)
window.keyv.set(scrollKey, position)
})
}, throttleWait ?? 100)
@@ -33,9 +28,5 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
setTimeoutTimer('scrollEffect', scroll, 50)
}, [scrollKey, setTimeoutTimer])
useEffect(() => {
return () => handleScroll.cancel()
}, [handleScroll])
return { containerRef, handleScroll }
}

View File

@@ -1,4 +1,4 @@
import { useCallback, useEffect, useRef } from 'react'
import { useEffect, useRef } from 'react'
/**
* 定时器管理 Hook用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
@@ -43,38 +43,10 @@ export const useTimer = () => {
const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())
/**
* 清除指定 key 的 setTimeout 定时器
* @param key - 定时器标识符
*/
const clearTimeoutTimer = useCallback((key: string) => {
clearTimeout(timeoutMapRef.current.get(key))
timeoutMapRef.current.delete(key)
}, [])
/**
* 清除指定 key 的 setInterval 定时器
* @param key - 定时器标识符
*/
const clearIntervalTimer = useCallback((key: string) => {
clearInterval(intervalMapRef.current.get(key))
intervalMapRef.current.delete(key)
}, [])
/**
* 清除所有定时器,包括 setTimeout 和 setInterval
*/
const clearAllTimers = useCallback(() => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
intervalMapRef.current.forEach((timer) => clearInterval(timer))
timeoutMapRef.current.clear()
intervalMapRef.current.clear()
}, [])
// 组件卸载时自动清理所有定时器
useEffect(() => {
return () => clearAllTimers()
}, [clearAllTimers])
}, [])
/**
* 设置一个 setTimeout 定时器
@@ -93,15 +65,12 @@ export const useTimer = () => {
* cleanup();
* ```
*/
const setTimeoutTimer = useCallback(
(key: string, ...args: Parameters<typeof setTimeout>) => {
clearTimeout(timeoutMapRef.current.get(key))
const timer = setTimeout(...args)
timeoutMapRef.current.set(key, timer)
return () => clearTimeoutTimer(key)
},
[clearTimeoutTimer]
)
const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
clearTimeout(timeoutMapRef.current.get(key))
const timer = setTimeout(...args)
timeoutMapRef.current.set(key, timer)
return () => clearTimeoutTimer(key)
}
/**
* 设置一个 setInterval 定时器
@@ -120,31 +89,56 @@ export const useTimer = () => {
* cleanup();
* ```
*/
const setIntervalTimer = useCallback(
(key: string, ...args: Parameters<typeof setInterval>) => {
clearInterval(intervalMapRef.current.get(key))
const timer = setInterval(...args)
intervalMapRef.current.set(key, timer)
return () => clearIntervalTimer(key)
},
[clearIntervalTimer]
)
const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
clearInterval(intervalMapRef.current.get(key))
const timer = setInterval(...args)
intervalMapRef.current.set(key, timer)
return () => clearIntervalTimer(key)
}
/**
* 清除指定 key 的 setTimeout 定时器
* @param key - 定时器标识符
*/
const clearTimeoutTimer = (key: string) => {
clearTimeout(timeoutMapRef.current.get(key))
timeoutMapRef.current.delete(key)
}
/**
* 清除指定 key 的 setInterval 定时器
* @param key - 定时器标识符
*/
const clearIntervalTimer = (key: string) => {
clearInterval(intervalMapRef.current.get(key))
intervalMapRef.current.delete(key)
}
/**
* 清除所有 setTimeout 定时器
*/
const clearAllTimeoutTimers = useCallback(() => {
const clearAllTimeoutTimers = () => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
timeoutMapRef.current.clear()
}, [])
}
/**
* 清除所有 setInterval 定时器
*/
const clearAllIntervalTimers = useCallback(() => {
const clearAllIntervalTimers = () => {
intervalMapRef.current.forEach((timer) => clearInterval(timer))
intervalMapRef.current.clear()
}, [])
}
/**
* 清除所有定时器,包括 setTimeout 和 setInterval
*/
const clearAllTimers = () => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
intervalMapRef.current.forEach((timer) => clearInterval(timer))
timeoutMapRef.current.clear()
intervalMapRef.current.clear()
}
return {
setTimeoutTimer,

View File

@@ -280,7 +280,6 @@
"denied": "Tool request was denied.",
"timeout": "Tool request timed out before receiving approval."
},
"toolPendingFallback": "Tool",
"waiting": "Waiting for tool permission decision..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Image Generation (OpenAI)",
"image-generation": "Image Generation",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Copy as image"
"image": "Copy as image",
"src": "Copy Image Source"
},
"dialog": "Open Dialog",
"label": "Preview",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Preview: {{url}}",
"reset": "Reset",
"tip": "Add # at the end to disable the automatically appended API version."
"tip": "ending with # forces use of input address"
}
},
"api_host": "API Host",

View File

@@ -280,7 +280,6 @@
"denied": "工具请求已被拒绝。",
"timeout": "工具请求在收到批准前超时。"
},
"toolPendingFallback": "工具",
"waiting": "等待工具权限决定..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "图生成 (OpenAI)",
"image-generation": "图生成",
"jina-rerank": "Jina 重排序",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "复制为图片"
"image": "复制为图片",
"src": "复制图片源"
},
"dialog": "打开预览窗口",
"label": "预览",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "预览: {{url}}",
"reset": "重置",
"tip": "在末尾添加 # 以禁用自动附加的API版本。"
"tip": "# 结尾强制使用输入地址"
}
},
"api_host": "API 地址",

View File

@@ -280,7 +280,6 @@
"denied": "工具請求已被拒絕。",
"timeout": "工具請求在收到核准前逾時。"
},
"toolPendingFallback": "工具",
"waiting": "等待工具權限決定..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "圖生成 (OpenAI)",
"image-generation": "圖生成",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "複製為圖片"
"image": "複製為圖片",
"src": "複製圖片來源"
},
"dialog": "開啟預覽窗口",
"label": "預覽",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "預覽:{{url}}",
"reset": "重設",
"tip": "在末尾添加 # 以停用自動附加的 API 版本。"
"tip": "# 結尾強制使用輸入位址"
}
},
"api_host": "API 主機地址",

View File

@@ -280,7 +280,6 @@
"denied": "Tool-Anfrage wurde abgelehnt.",
"timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist."
},
"toolPendingFallback": "Werkzeug",
"waiting": "Warten auf Entscheidung über Tool-Berechtigung..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Bilderzeugung (OpenAI)",
"image-generation": "Bildgenerierung",
"jina-rerank": "Jina Reranking",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Als Bild kopieren"
"image": "Als Bild kopieren",
"src": "Bildquelle kopieren"
},
"dialog": "Vorschaufenster öffnen",
"label": "Vorschau",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Vorschau: {{url}}",
"reset": "Zurücksetzen",
"tip": "Fügen Sie am Ende ein # hinzu, um die automatisch angehängte API-Version zu deaktivieren."
"tip": "# am Ende erzwingt die Verwendung der Eingabe-Adresse"
}
},
"api_host": "API-Adresse",

View File

@@ -280,7 +280,6 @@
"denied": "Το αίτημα για εργαλείο απορρίφθηκε.",
"timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση."
},
"toolPendingFallback": "Εργαλείο",
"waiting": "Αναμονή για απόφαση άδειας εργαλείου..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Δημιουργία Εικόνων (OpenAI)",
"image-generation": "Δημιουργία Εικόνας",
"jina-rerank": "Επαναταξινόμηση Jina",
"openai": "OpenAI",
"openai-response": "Απάντηση OpenAI"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Αντιγραφή ως εικόνα"
"image": "Αντιγραφή ως εικόνα",
"src": "Αντιγραφή πηγής εικόνας"
},
"dialog": "Άνοιγμα παραθύρου προεπισκόπησης",
"label": "Προεπισκόπηση",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Προεπισκόπηση: {{url}}",
"reset": "Επαναφορά",
"tip": "Προσθέστε το σύμβολο # στο τέλος για να απενεργοποιήσετε την αυτόματα προστιθέμενη έκδοση API."
"tip": "#τέλος ενδεχόμενη χρήση της εισαγωγής διευθύνσεως"
}
},
"api_host": "Διεύθυνση API",

View File

@@ -280,7 +280,6 @@
"denied": "La solicitud de herramienta fue denegada.",
"timeout": "La solicitud de herramienta expiró antes de recibir la aprobación."
},
"toolPendingFallback": "Herramienta",
"waiting": "Esperando la decisión de permiso de la herramienta..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Generación de Imágenes (OpenAI)",
"image-generation": "Generación de imágenes",
"jina-rerank": "Reordenamiento Jina",
"openai": "OpenAI",
"openai-response": "Respuesta de OpenAI"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Copiar como imagen"
"image": "Copiar como imagen",
"src": "Copia la fuente de la imagen"
},
"dialog": "Abrir la ventana de vista previa",
"label": "Vista previa",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Vista previa: {{url}}",
"reset": "Restablecer",
"tip": "Añada # al final para deshabilitar la versión de la API que se añade automáticamente."
"tip": "forzar uso de dirección de entrada con # al final"
}
},
"api_host": "Dirección API",

View File

@@ -280,7 +280,6 @@
"denied": "La demande d'outil a été refusée.",
"timeout": "La demande d'outil a expiré avant d'obtenir l'approbation."
},
"toolPendingFallback": "Outil",
"waiting": "En attente de la décision d'autorisation de l'outil..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Génération d'images (OpenAI)",
"image-generation": "Génération d'images",
"jina-rerank": "Reclassement Jina",
"openai": "OpenAI",
"openai-response": "Réponse OpenAI"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Copier en tant qu'image"
"image": "Copier en tant qu'image",
"src": "Copier la source de l'image"
},
"dialog": "Ouvrir la fenêtre d'aperçu",
"label": "Aperçu",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Aperçu : {{url}}",
"reset": "Réinitialiser",
"tip": "Ajoutez # à la fin pour désactiver la version d'API ajoutée automatiquement."
"tip": "forcer l'utilisation de l'adresse d'entrée si terminé par #"
}
},
"api_host": "Adresse API",

View File

@@ -280,7 +280,6 @@
"denied": "ツールリクエストは拒否されました。",
"timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。"
},
"toolPendingFallback": "ツール",
"waiting": "ツールの許可決定を待っています..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "画像生成 (OpenAI)",
"image-generation": "画像生成",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "画像としてコピー"
"image": "画像としてコピー",
"src": "画像ソースをコピー"
},
"dialog": "ダイアログを開く",
"label": "プレビュー",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "プレビュー: {{url}}",
"reset": "リセット",
"tip": "自動的に付加されるAPIバージョンを無効にするには、末尾に#を追加します"
"tip": "#で終わる場合、入力されたアドレスを強制的に使用します"
}
},
"api_host": "APIホスト",

View File

@@ -16,7 +16,7 @@
"error": {
"failed": "Falha ao excluir o agente"
},
"title": "Excluir Agente"
"title": "删除代理"
},
"edit": {
"title": "Agent Editor"
@@ -111,7 +111,7 @@
"label": "Modo de permissão",
"options": {
"acceptEdits": "Aceitar edições automaticamente",
"bypassPermissions": "Ignorar verificações de permissão",
"bypassPermissions": "Ignorar verificações de permissão",
"default": "Padrão (perguntar antes de continuar)",
"plan": "Modo de planejamento (plano sujeito a aprovação)"
},
@@ -150,7 +150,7 @@
},
"success": {
"install": "Plugin instalado com sucesso",
"uninstall": "Plugin desinstalado com sucesso"
"uninstall": "Plugin desinstalado com sucesso"
},
"tab": "plug-in",
"type": {
@@ -280,7 +280,6 @@
"denied": "Solicitação de ferramenta foi negada.",
"timeout": "A solicitação da ferramenta expirou antes de receber aprovação."
},
"toolPendingFallback": "Ferramenta",
"waiting": "Aguardando decisão de permissão da ferramenta..."
},
"type": {
@@ -1135,7 +1134,7 @@
"duplicate": "Duplicar",
"edit": "Editar",
"enabled": "Ativado",
"error": "Erro",
"error": "Erro",
"errors": {
"create_message": "Falha ao criar mensagem",
"validation": "Falha na verificação"
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Geração de Imagens (OpenAI)",
"image-generation": "Geração de Imagem",
"jina-rerank": "Jina Reordenar",
"openai": "OpenAI",
"openai-response": "Resposta OpenAI"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Copiar como imagem"
"image": "Copiar como imagem",
"src": "Copiar Origem da Imagem"
},
"dialog": "Abrir janela de pré-visualização",
"label": "Pré-visualização",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Pré-visualização: {{url}}",
"reset": "Redefinir",
"tip": "Adicione # no final para desativar a versão da API adicionada automaticamente."
"tip": "Forçar o uso do endereço original quando terminar com '#'"
}
},
"api_host": "Endereço API",

View File

@@ -280,7 +280,6 @@
"denied": "Запрос на инструмент был отклонён.",
"timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения."
},
"toolPendingFallback": "Инструмент",
"waiting": "Ожидание решения о разрешении на использование инструмента..."
},
"type": {
@@ -1209,7 +1208,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Генерация изображений (OpenAI)",
"image-generation": "Изображение",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"
@@ -2510,7 +2509,8 @@
},
"preview": {
"copy": {
"image": "Скопировать как изображение"
"image": "Скопировать как изображение",
"src": "Копировать источник изображения"
},
"dialog": "Открыть диалог",
"label": "Предварительный просмотр",
@@ -4372,7 +4372,7 @@
"url": {
"preview": "Предпросмотр: {{url}}",
"reset": "Сброс",
"tip": "Добавьте # в конце, чтобы отключить автоматически добавляемую версию API."
"tip": "заканчивая на # принудительно использует введенный адрес"
}
},
"api_host": "Хост API",

View File

@@ -9,7 +9,6 @@ import { getModel } from '@renderer/hooks/useModel'
import { useSettings } from '@renderer/hooks/useSettings'
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
import { useTimer } from '@renderer/hooks/useTimer'
import { CacheService } from '@renderer/services/CacheService'
import { pauseTrace } from '@renderer/services/SpanManagerService'
import { estimateUserPromptUsage } from '@renderer/services/TokenService'
import { useAppDispatch, useAppSelector } from '@renderer/store'
@@ -42,10 +41,19 @@ import { getInputbarConfig } from './registry'
import { TopicType } from './types'
const logger = loggerService.withContext('AgentSessionInputbar')
const agentSessionDraftCache = new Map<string, string>()
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
const readDraftFromCache = (key: string): string => {
return agentSessionDraftCache.get(key) ?? ''
}
const getAgentDraftCacheKey = (agentId: string) => `agent-session-draft-${agentId}`
const writeDraftToCache = (key: string, value: string) => {
if (!value) {
agentSessionDraftCache.delete(key)
} else {
agentSessionDraftCache.set(key, value)
}
}
type Props = {
agentId: string
@@ -162,15 +170,16 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
const scope = TopicType.Session
const config = getInputbarConfig(scope)
// Use shared hooks for text and textarea management with draft persistence
const draftCacheKey = getAgentDraftCacheKey(agentId)
// Use shared hooks for text and textarea management
const initialDraft = useMemo(() => readDraftFromCache(agentId), [agentId])
const persistDraft = useCallback((next: string) => writeDraftToCache(agentId, next), [agentId])
const {
text,
setText,
isEmpty: inputEmpty
} = useInputText({
initialValue: CacheService.get<string>(draftCacheKey) ?? '',
onChange: (value) => CacheService.set(draftCacheKey, value, DRAFT_CACHE_TTL)
initialValue: initialDraft,
onChange: persistDraft
})
const {
textareaRef,
@@ -422,7 +431,6 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
})
)
// Clear text after successful send (draft is cleared automatically via onChange)
setText('')
setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
} catch (error) {

View File

@@ -14,6 +14,7 @@ import { useInputText } from '@renderer/hooks/useInputText'
import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
import { useSettings } from '@renderer/hooks/useSettings'
import { useShortcut } from '@renderer/hooks/useShortcuts'
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
import { useTimer } from '@renderer/hooks/useTimer'
import {
@@ -23,7 +24,6 @@ import {
useInputbarToolsState
} from '@renderer/pages/home/Inputbar/context/InputbarToolsProvider'
import { getDefaultTopic } from '@renderer/services/AssistantService'
import { CacheService } from '@renderer/services/CacheService'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import FileManager from '@renderer/services/FileManager'
import { checkRateLimit, getUserMessage } from '@renderer/services/MessagesService'
@@ -39,7 +39,7 @@ import { getSendMessageShortcutLabel } from '@renderer/utils/input'
import { documentExts, imageExts, textExts } from '@shared/config/constant'
import { debounce } from 'lodash'
import type { FC } from 'react'
import React, { useCallback, useEffect, useEffectEvent, useMemo, useRef, useState } from 'react'
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { InputbarCore } from './components/InputbarCore'
@@ -51,17 +51,6 @@ import TokenCount from './TokenCount'
const logger = loggerService.withContext('Inputbar')
const INPUTBAR_DRAFT_CACHE_KEY = 'inputbar-draft'
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
const getMentionedModelsCacheKey = (assistantId: string) => `inputbar-mentioned-models-${assistantId}`
const getValidatedCachedModels = (assistantId: string): Model[] => {
const cached = CacheService.get<Model[]>(getMentionedModelsCacheKey(assistantId))
if (!Array.isArray(cached)) return []
return cached.filter((model) => model?.id && model?.name)
}
interface Props {
assistant: Assistant
setActiveTopic: (topic: Topic) => void
@@ -91,18 +80,16 @@ const Inputbar: FC<Props> = ({ assistant: initialAssistant, setActiveTopic, topi
toggleExpanded: () => {}
})
const [initialMentionedModels] = useState(() => getValidatedCachedModels(initialAssistant.id))
const initialState = useMemo(
() => ({
files: [] as FileType[],
mentionedModels: initialMentionedModels,
mentionedModels: [] as Model[],
selectedKnowledgeBases: initialAssistant.knowledge_bases ?? [],
isExpanded: false,
couldAddImageFile: false,
extensions: [] as string[]
}),
[initialMentionedModels, initialAssistant.knowledge_bases]
[initialAssistant.knowledge_bases]
)
return (
@@ -134,10 +121,7 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()
const { text, setText } = useInputText({
initialValue: CacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
onChange: (value) => CacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
})
const { text, setText } = useInputText()
const {
textareaRef,
resize: resizeTextArea,
@@ -149,6 +133,7 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
minHeight: 30
})
const showKnowledgeIcon = useSidebarIconShow('knowledge')
const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
const { sendMessageShortcut, showInputEstimatedTokens, enableQuickPanelTriggers } = useSettings()
const [estimateTokenCount, setEstimateTokenCount] = useState(0)
@@ -205,15 +190,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
setCouldAddImageFile(canAddImageFile)
}, [canAddImageFile, setCouldAddImageFile])
const onUnmount = useEffectEvent((id: string) => {
CacheService.set(getMentionedModelsCacheKey(id), mentionedModels, DRAFT_CACHE_TTL)
})
useEffect(() => {
return () => onUnmount(assistant.id)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [assistant.id])
const placeholderText = enableQuickPanelTriggers
? t('chat.input.placeholder', { key: getSendMessageShortcutLabel(sendMessageShortcut) })
: t('chat.input.placeholder_without_triggers', {
@@ -405,10 +381,9 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
focusTextarea
])
// TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
useEffect(() => {
setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
}, [assistant.knowledge_bases, setSelectedKnowledgeBases])
setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
}, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
useEffect(() => {
// Disable web search if model doesn't support it

View File

@@ -156,8 +156,11 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
const setText = useCallback<React.Dispatch<React.SetStateAction<string>>>(
(value) => {
const newText = typeof value === 'function' ? value(textRef.current) : value
onTextChange(newText)
if (typeof value === 'function') {
onTextChange(value(textRef.current))
} else {
onTextChange(value)
}
},
[onTextChange]
)

View File

@@ -1,4 +1,5 @@
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
import type { KnowledgeBase } from '@renderer/types'
import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
@@ -29,6 +30,7 @@ const knowledgeBaseTool = defineTool({
render: function KnowledgeBaseToolRender(context) {
const { assistant, state, actions, quickPanel } = context
const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
const { updateAssistant } = useAssistant(assistant.id)
const handleSelect = useCallback(
@@ -39,6 +41,10 @@ const knowledgeBaseTool = defineTool({
[updateAssistant, actions]
)
if (!knowledgeSidebarEnabled) {
return null
}
return (
<KnowledgeBaseButton
quickPanel={quickPanel}

View File

@@ -102,12 +102,10 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
)
}
const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)
const ThinkingTimeSeconds = memo(
({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
const { t } = useTranslation()
const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))
const [displayTime, setDisplayTime] = useState(blockThinkingTime)
const timer = useRef<NodeJS.Timeout | null>(null)
@@ -123,7 +121,7 @@ const ThinkingTimeSeconds = memo(
clearInterval(timer.current)
timer.current = null
}
setDisplayTime(normalizeThinkingTime(blockThinkingTime))
setDisplayTime(blockThinkingTime)
}
return () => {
@@ -134,10 +132,10 @@ const ThinkingTimeSeconds = memo(
}
}, [isThinking, blockThinkingTime])
const thinkingTimeSeconds = useMemo(() => {
const safeTime = normalizeThinkingTime(displayTime)
return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
}, [displayTime])
const thinkingTimeSeconds = useMemo(
() => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
[displayTime]
)
return isThinking
? t('chat.thinking', {

View File

@@ -255,20 +255,6 @@ describe('ThinkingBlock', () => {
unmount()
})
})
it('should clamp invalid thinking times to a safe default', () => {
const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]
testCases.forEach((thinking_millsec) => {
const block = createThinkingBlock({
thinking_millsec: thinking_millsec as any,
status: MessageBlockStatus.SUCCESS
})
const { unmount } = renderThinkingBlock(block)
expect(getThinkingTimeText()).toHaveTextContent('0.1s')
unmount()
})
})
})
describe('collapse behavior', () => {

View File

@@ -10,7 +10,6 @@ import { useSettings } from '@renderer/hooks/useSettings'
import { useTimer } from '@renderer/hooks/useTimer'
import type { RootState } from '@renderer/store'
// import { selectCurrentTopicId } from '@renderer/store/newMessage'
import { scrollIntoView } from '@renderer/utils/dom'
import { Button, Drawer, Tooltip } from 'antd'
import type { FC } from 'react'
import { useCallback, useEffect, useRef, useState } from 'react'
@@ -119,8 +118,7 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
}
const scrollToMessage = (element: HTMLElement) => {
// Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
}
const scrollToTop = () => {

View File

@@ -15,7 +15,6 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
import type { Assistant, Topic } from '@renderer/types'
import type { Message, MessageBlock } from '@renderer/types/newMessage'
import { classNames, cn } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
import { Divider } from 'antd'
import type { Dispatch, FC, SetStateAction } from 'react'
@@ -80,10 +79,9 @@ const MessageItem: FC<Props> = ({
useEffect(() => {
if (isEditing && messageContainerRef.current) {
scrollIntoView(messageContainerRef.current, {
messageContainerRef.current.scrollIntoView({
behavior: 'smooth',
block: 'center',
container: 'nearest'
block: 'center'
})
}
}, [isEditing])
@@ -126,7 +124,7 @@ const MessageItem: FC<Props> = ({
const messageHighlightHandler = useCallback(
(highlight: boolean = true) => {
if (messageContainerRef.current) {
scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
if (highlight) {
setTimeoutTimer(
'messageHighlightHandler',

View File

@@ -12,7 +12,6 @@ import { newMessagesActions } from '@renderer/store/newMessage'
// import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
import type { Message } from '@renderer/types/newMessage'
import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import { getMainTextContent } from '@renderer/utils/messageUtils/find'
import { Avatar } from 'antd'
import { CircleChevronDown } from 'lucide-react'
@@ -120,7 +119,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
() => {
const messageElement = document.getElementById(`message-${message.id}`)
if (messageElement) {
scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
}
},
100
@@ -142,7 +141,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
return
}
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
},
[setSelectedMessage]
)

View File

@@ -10,7 +10,6 @@ import type { MultiModelMessageStyle } from '@renderer/store/settings'
import type { Topic } from '@renderer/types'
import type { Message } from '@renderer/types/newMessage'
import { classNames } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import { Popover } from 'antd'
import type { ComponentProps } from 'react'
import { memo, useCallback, useEffect, useMemo, useState } from 'react'
@@ -74,7 +73,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
() => {
const messageElement = document.getElementById(`message-${message.id}`)
if (messageElement) {
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
}
},
200
@@ -133,7 +132,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
setSelectedMessage(message)
} else {
// 直接滚动
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
}
}
}

View File

@@ -3,7 +3,6 @@ import type { RootState } from '@renderer/store'
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
import type { Message } from '@renderer/types/newMessage'
import { MessageBlockType } from '@renderer/types/newMessage'
import { scrollIntoView } from '@renderer/utils/dom'
import type { FC } from 'react'
import React, { useMemo, useRef } from 'react'
import { useSelector } from 'react-redux'
@@ -73,10 +72,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
const parent = messageOutlineContainerRef.current?.parentElement
const messageContentContainer = parent?.querySelector('.message-content-container')
if (messageContentContainer) {
const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
const headingElement = messageContentContainer.querySelector(`#${id}`)
if (headingElement) {
const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
}
}
}

View File

@@ -5,6 +5,8 @@ import { Terminal } from 'lucide-react'
import { ToolTitle } from './GenericTools'
import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'
const MAX_TAG_LENGTH = 100
export function BashTool({
input,
output
@@ -15,10 +17,12 @@ export function BashTool({
// 如果有输出,计算输出行数
const outputLines = output ? output.split('\n').length : 0
// 处理命令字符串,添加空值检查
// 处理命令字符串的截断,添加空值检查
const command = input?.command ?? ''
const needsTruncate = command.length > MAX_TAG_LENGTH
const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command
const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>
const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
return {
key: 'tool',
@@ -30,12 +34,16 @@ export function BashTool({
params={input?.description}
stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
/>
<div className="mt-1 max-w-full">
<Popover
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
trigger="hover">
{tagContent}
</Popover>
<div className="mt-1">
{needsTruncate ? (
<Popover
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
trigger="hover">
{tagContent}
</Popover>
) : (
tagContent
)}
</div>
</>
),

View File

@@ -18,9 +18,9 @@ export function ToolTitle({
}) {
return (
<div className={`flex items-center gap-1 ${className}`}>
{icon && <span className="flex flex-shrink-0">{icon}</span>}
{label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
{params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
{icon}
{label && <span className="font-medium text-sm">{label}</span>}
{params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>}
{stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
</div>
)

View File

@@ -1,10 +1,7 @@
import { loggerService } from '@logger'
import { useAppSelector } from '@renderer/store'
import { selectPendingPermission } from '@renderer/store/toolPermissions'
import type { NormalToolResponse } from '@renderer/types'
import type { CollapseProps } from 'antd'
import { Collapse, Spin } from 'antd'
import { useTranslation } from 'react-i18next'
import { Collapse } from 'antd'
// 导出所有类型
export * from './types'
@@ -86,41 +83,17 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
// 统一的组件渲染入口
export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
const { arguments: args, response, tool, status } = toolResponse
logger.debug('Rendering agent tool response', {
logger.info('Rendering agent tool response', {
tool: tool,
arguments: args,
status,
response
})
const pendingPermission = useAppSelector((state) =>
selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
)
if (status === 'pending') {
if (pendingPermission) {
return <ToolPermissionRequestCard toolResponse={toolResponse} />
}
return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
return <ToolPermissionRequestCard toolResponse={toolResponse} />
}
return (
<ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
)
}
function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
const { t } = useTranslation()
const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
const detail = description?.trim() || t('agent.toolPermission.executing')
return (
<div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
<Spin size="small" />
<div className="flex flex-col gap-1">
<span className="font-semibold text-default-700 text-sm">{label}</span>
<span className="text-default-500 text-xs">{detail}</span>
</div>
</div>
)
}

View File

@@ -1,10 +1,8 @@
import { adaptProvider } from '@renderer/aiCore/provider/providerConfig'
import OpenAIAlert from '@renderer/components/Alert/OpenAIAlert'
import { LoadingIcon } from '@renderer/components/Icons'
import { HStack } from '@renderer/components/Layout'
import { ApiKeyListPopup } from '@renderer/components/Popups/ApiKeyListPopup'
import Selector from '@renderer/components/Selector'
import { HelpTooltip } from '@renderer/components/TooltipIcons'
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
import { PROVIDER_URLS } from '@renderer/config/providers'
import { useTheme } from '@renderer/context/ThemeProvider'
@@ -21,7 +19,14 @@ import type { SystemProviderId } from '@renderer/types'
import { isSystemProvider, isSystemProviderId, SystemProviderIds } from '@renderer/types'
import type { ApiKeyConnectivity } from '@renderer/types/healthCheck'
import { HealthStatus } from '@renderer/types/healthCheck'
import { formatApiHost, formatApiKeys, getFancyProviderName, validateApiHost } from '@renderer/utils'
import {
formatApiHost,
formatApiKeys,
formatAzureOpenAIApiHost,
formatVertexApiHost,
getFancyProviderName,
validateApiHost
} from '@renderer/utils'
import { formatErrorMessage } from '@renderer/utils/error'
import {
isAIGatewayProvider,
@@ -31,6 +36,7 @@ import {
isNewApiProvider,
isOpenAICompatibleProvider,
isOpenAIProvider,
isSupportAPIVersionProvider,
isVertexProvider
} from '@renderer/utils/provider'
import { Button, Divider, Flex, Input, Select, Space, Switch, Tooltip } from 'antd'
@@ -275,10 +281,12 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
}, [configuredApiHost, apiHost])
const hostPreview = () => {
const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost
if (apiHost.endsWith('#')) {
return apiHost.replace('#', '')
}
if (isOpenAICompatibleProvider(provider)) {
return formattedApiHost + '/chat/completions'
return formatApiHost(apiHost, isSupportAPIVersionProvider(provider)) + '/chat/completions'
}
if (isAzureOpenAIProvider(provider)) {
@@ -286,26 +294,29 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
const path = !['preview', 'v1'].includes(apiVersion)
? `/v1/chat/completion?apiVersion=v1`
: `/v1/responses?apiVersion=v1`
return formattedApiHost + path
return formatAzureOpenAIApiHost(apiHost) + path
}
if (isAnthropicProvider(provider)) {
return formattedApiHost + '/messages'
// AI SDK uses the baseURL with /v1, then appends /messages
// formatApiHost adds /v1 automatically if not present
const normalizedHost = formatApiHost(apiHost)
return normalizedHost + '/messages'
}
if (isGeminiProvider(provider)) {
return formattedApiHost + '/models'
return formatApiHost(apiHost, true, 'v1beta') + '/models'
}
if (isOpenAIProvider(provider)) {
return formattedApiHost + '/responses'
return formatApiHost(apiHost) + '/responses'
}
if (isVertexProvider(provider)) {
return formattedApiHost + '/publishers/google'
return formatVertexApiHost(provider) + '/publishers/google'
}
if (isAIGatewayProvider(provider)) {
return formattedApiHost + '/language-model'
return formatApiHost(apiHost) + '/language-model'
}
return formattedApiHost
return formatApiHost(apiHost)
}
// API key 连通性检查状态指示器,目前仅在失败时显示
@@ -483,21 +494,16 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
{!isDmxapi && (
<>
<SettingSubtitle style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between' }}>
<div className="flex items-center gap-1">
<Tooltip title={hostSelectorTooltip} mouseEnterDelay={0.3}>
<div>
<Selector
size={14}
value={activeHostField}
onChange={(value) => setActiveHostField(value as HostField)}
options={hostSelectorOptions}
style={{ paddingLeft: 1, fontWeight: 'bold' }}
placement="bottomLeft"
/>
</div>
</Tooltip>
<HelpTooltip title={t('settings.provider.api.url.tip')}></HelpTooltip>
</div>
<Tooltip title={hostSelectorTooltip} mouseEnterDelay={0.3}>
<Selector
size={14}
value={activeHostField}
onChange={(value) => setActiveHostField(value as HostField)}
options={hostSelectorOptions}
style={{ paddingLeft: 1, fontWeight: 'bold' }}
placement="bottomLeft"
/>
</Tooltip>
<div style={{ display: 'flex', alignItems: 'center', gap: 4 }}>
<Button
type="text"

View File

@@ -12,7 +12,7 @@ import type { FetchChatCompletionParams } from '@renderer/types'
import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { type Chunk, ChunkType } from '@renderer/types/chunk'
import type { Message, ResponseError } from '@renderer/types/newMessage'
import type { Message } from '@renderer/types/newMessage'
import type { SdkModel } from '@renderer/types/sdk'
import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
@@ -476,7 +476,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
} else {
const abortId = uuid()
const signal = readyToAbort(abortId)
let streamError: ResponseError | undefined
let chunkError
const params: StreamTextParams = {
system: assistant.prompt,
prompt: 'hi',
@@ -495,18 +495,19 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
callType: 'check',
onChunk: (chunk: Chunk) => {
if (chunk.type === ChunkType.ERROR) {
streamError = chunk.error
chunkError = chunk.error
} else {
abortCompletion(abortId)
}
}
}
// Try streaming check
try {
await ai.completions(model.id, params, config)
} catch (e) {
if (!isAbortError(e) && !isAbortError(streamError)) {
throw streamError ?? e
if (!isAbortError(e) && !isAbortError(chunkError)) {
throw e
}
}
}

View File

@@ -239,7 +239,6 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio
export type ModelTag = Exclude<ModelType, 'text'> | 'free'
// "image-generation" is also openai endpoint, but specifically for image generation.
export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
export type ModelPricing = {

View File

@@ -234,7 +234,6 @@ export interface Response {
error?: ResponseError
}
// FIXME: Weak type safety. It may be a specific class instance which inherits Error in runtime.
export type ResponseError = Record<string, any>
export interface MessageInputBaseParams {

View File

@@ -34,15 +34,6 @@ export const MCPToolInputSchema = z
required: z.array(z.string()).optional()
})
.loose()
.transform((schema) => {
if (!schema.properties) {
schema.properties = {}
}
if (!schema.required) {
schema.required = []
}
return schema
})
export interface BuiltinTool extends BaseTool {
inputSchema: z.infer<typeof MCPToolInputSchema>

View File

@@ -13,8 +13,7 @@ import {
routeToEndpoint,
splitApiKeyString,
validateApiHost,
withoutTrailingApiVersion,
withoutTrailingSharp
withoutTrailingApiVersion
} from '../api'
vi.mock('@renderer/store', () => {
@@ -82,27 +81,6 @@ describe('api', () => {
it('keeps host untouched when api version unsupported', () => {
expect(formatApiHost('https://api.example.com', false)).toBe('https://api.example.com')
})
it('removes trailing # and does not append api version when host ends with #', () => {
expect(formatApiHost('https://api.example.com#')).toBe('https://api.example.com')
expect(formatApiHost('http://localhost:5173/#')).toBe('http://localhost:5173/')
expect(formatApiHost(' https://api.openai.com/# ')).toBe('https://api.openai.com/')
})
it('handles trailing # with custom api version settings', () => {
expect(formatApiHost('https://api.example.com#', true, 'v2')).toBe('https://api.example.com')
expect(formatApiHost('https://api.example.com#', false, 'v2')).toBe('https://api.example.com')
})
it('handles host with both trailing # and existing api version', () => {
expect(formatApiHost('https://api.example.com/v2#')).toBe('https://api.example.com/v2')
expect(formatApiHost('https://api.example.com/v3beta#')).toBe('https://api.example.com/v3beta')
})
it('trims whitespace before processing trailing #', () => {
expect(formatApiHost(' https://api.example.com# ')).toBe('https://api.example.com')
expect(formatApiHost('\thttps://api.example.com#\n')).toBe('https://api.example.com')
})
})
describe('hasAPIVersion', () => {
@@ -426,56 +404,4 @@ describe('api', () => {
expect(withoutTrailingApiVersion('')).toBe('')
})
})
// Unit tests for withoutTrailingSharp: a pure string helper that strips at
// most ONE trailing '#' character and performs no trimming or other
// normalization of the input.
describe('withoutTrailingSharp', () => {
it('removes trailing # from URL', () => {
expect(withoutTrailingSharp('https://api.example.com#')).toBe('https://api.example.com')
expect(withoutTrailingSharp('http://localhost:3000#')).toBe('http://localhost:3000')
})
it('returns URL unchanged when no trailing #', () => {
expect(withoutTrailingSharp('https://api.example.com')).toBe('https://api.example.com')
expect(withoutTrailingSharp('http://localhost:3000')).toBe('http://localhost:3000')
})
// A '#' in the middle of the string is data, not a suffix: only the final
// character position is inspected.
it('handles URLs with multiple # characters but only removes trailing one', () => {
expect(withoutTrailingSharp('https://api.example.com#path#')).toBe('https://api.example.com#path')
})
it('handles URLs with # in the middle (not trailing)', () => {
expect(withoutTrailingSharp('https://api.example.com#section/path')).toBe('https://api.example.com#section/path')
expect(withoutTrailingSharp('https://api.example.com/v1/chat/completions#')).toBe(
'https://api.example.com/v1/chat/completions'
)
})
it('handles empty string', () => {
expect(withoutTrailingSharp('')).toBe('')
})
it('handles single character #', () => {
expect(withoutTrailingSharp('#')).toBe('')
})
// Whitespace handling is the caller's responsibility: input whitespace is
// preserved verbatim, so a '#' followed by whitespace is NOT trailing.
it('preserves whitespace around the URL (pure function)', () => {
expect(withoutTrailingSharp(' https://api.example.com# ')).toBe(' https://api.example.com# ')
expect(withoutTrailingSharp('\thttps://api.example.com#\n')).toBe('\thttps://api.example.com#\n')
})
it('only removes exact trailing # character', () => {
expect(withoutTrailingSharp('https://api.example.com# ')).toBe('https://api.example.com# ')
expect(withoutTrailingSharp(' https://api.example.com#')).toBe(' https://api.example.com')
expect(withoutTrailingSharp('https://api.example.com#\t')).toBe('https://api.example.com#\t')
})
// Exactly one '#' is stripped per call, even when several are stacked.
it('handles URLs ending with multiple # characters', () => {
expect(withoutTrailingSharp('https://api.example.com##')).toBe('https://api.example.com#')
expect(withoutTrailingSharp('https://api.example.com###')).toBe('https://api.example.com##')
})
it('preserves URL with trailing # and other content', () => {
expect(withoutTrailingSharp('https://api.example.com/v1#')).toBe('https://api.example.com/v1')
expect(withoutTrailingSharp('https://api.example.com/v2beta#')).toBe('https://api.example.com/v2beta')
})
})
})

View File

@@ -62,23 +62,6 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
return url.replace(/\/$/, '') as T
}
/**
 * Removes a single trailing '#' character from a URL string when present.
 *
 * @template T - Preserves the literal string type of the input
 * @param url - The URL string to process
 * @returns The input without its trailing '#', or the input unchanged
 *
 * @example
 * ```ts
 * withoutTrailingSharp('https://example.com#') // 'https://example.com'
 * withoutTrailingSharp('https://example.com')  // 'https://example.com'
 * ```
 */
export function withoutTrailingSharp<T extends string>(url: T): T {
  // endsWith + slice instead of a regex; the cast mirrors the original
  // contract of handing back the same string type T.
  return (url.endsWith('#') ? url.slice(0, -1) : url) as T
}
/**
* Formats an API host URL by normalizing it and optionally appending an API version.
*
@@ -87,12 +70,12 @@ export function withoutTrailingSharp<T extends string>(url: T): T {
* @param apiVersion - The API version to append if needed. Defaults to `'v1'`.
*
* @returns The formatted API host URL. If the host is empty after normalization, returns an empty string.
* If the host ends with '#', API version is not supported, or the host already contains a version, returns the normalized host with trailing '#' removed.
* If the host ends with '#', API version is not supported, or the host already contains a version, returns the normalized host as-is.
* Otherwise, returns the host with the API version appended.
*
* @example
* formatApiHost('https://api.example.com/') // Returns 'https://api.example.com/v1'
* formatApiHost('https://api.example.com#') // Returns 'https://api.example.com'
* formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#'
* formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2'
*/
export function formatApiHost(host?: string, supportApiVersion: boolean = true, apiVersion: string = 'v1'): string {
@@ -101,13 +84,10 @@ export function formatApiHost(host?: string, supportApiVersion: boolean = true,
return ''
}
const shouldAppendApiVersion = !(normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost))
if (shouldAppendApiVersion) {
return `${normalizedHost}/${apiVersion}`
} else {
return withoutTrailingSharp(normalizedHost)
if (normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost)) {
return normalizedHost
}
return `${normalizedHost}/${apiVersion}`
}
/**

View File

@@ -1,15 +1,3 @@
import { loggerService } from '@logger'
const logger = loggerService.withContext('utils/dom')
/**
 * ScrollIntoViewOptions extended with Chromium's `container` option, which is
 * not yet part of the bundled TypeScript DOM typings (see the linked
 * TypeScript issue).
 */
interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
/**
 * Restricts which scroll containers may move. NOTE(review): per the MDN page
 * below, 'nearest' scrolls only the closest scrollable ancestor while 'all'
 * scrolls every ancestor as needed — confirm against the target Chromium
 * version before relying on it.
 * @see https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView#container
 * @see https://github.com/microsoft/TypeScript/issues/62803
 */
container?: 'all' | 'nearest'
}
/**
* Simple wrapper for scrollIntoView with common default options.
* Provides a unified interface with sensible defaults.
@@ -17,12 +5,7 @@ interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
* @param element - The target element to scroll into view
* @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' }
*/
export function scrollIntoView(element: HTMLElement, options?: ChromiumScrollIntoViewOptions): void {
if (!element) {
logger.warn('[scrollIntoView] Unexpected falsy element. Do nothing as fallback.')
return
}
export function scrollIntoView(element: HTMLElement, options?: ScrollIntoViewOptions): void {
const defaultOptions: ScrollIntoViewOptions = {
behavior: 'smooth',
block: 'center',

View File

@@ -136,10 +136,7 @@ export async function callMCPTool(
topicId?: string,
modelName?: string
): Promise<MCPCallToolResponse> {
logger.info(
`Calling Tool: ${toolResponse.id} ${toolResponse.tool.serverName} ${toolResponse.tool.name}`,
toolResponse.tool
)
logger.info(`Calling Tool: ${toolResponse.tool.serverName} ${toolResponse.tool.name}`, toolResponse.tool)
try {
const server = getMcpServerByTool(toolResponse.tool)

View File

@@ -254,17 +254,6 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
let blockId: string | null = null
let thinkingBlockId: string | null = null
let thinkingStartTime: number | null = null
// Resolve how long the model spent thinking: prefer an explicit finite
// duration from the chunk, else fall back to the locally measured timer
// (clamped to non-negative), else 0 when no timer was ever started.
const resolveThinkingDuration = (reported?: number) => {
  if (typeof reported === 'number' && Number.isFinite(reported)) {
    return reported
  }
  if (thinkingStartTime === null) {
    return 0
  }
  return Math.max(0, performance.now() - thinkingStartTime)
}
setIsLoading(true)
setIsOutputted(false)
@@ -302,7 +291,6 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
case ChunkType.THINKING_START:
{
setIsOutputted(true)
thinkingStartTime = performance.now()
if (thinkingBlockId) {
store.dispatch(
updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -327,13 +315,9 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
{
setIsOutputted(true)
if (thinkingBlockId) {
if (thinkingStartTime === null) {
thinkingStartTime = performance.now()
}
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
throttledBlockUpdate(thinkingBlockId, {
content: chunk.text,
thinking_millsec: thinkingDuration
thinking_millsec: chunk.thinking_millsec
})
}
}
@@ -341,17 +325,14 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
case ChunkType.THINKING_COMPLETE:
{
if (thinkingBlockId) {
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
cancelThrottledBlockUpdate(thinkingBlockId)
store.dispatch(
updateOneBlock({
id: thinkingBlockId,
changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: thinkingDuration }
changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: chunk.thinking_millsec }
})
)
}
thinkingStartTime = null
thinkingBlockId = null
}
break
case ChunkType.TEXT_START:
@@ -423,8 +404,6 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
if (!isAborted) {
throw new Error(chunk.error.message)
}
thinkingStartTime = null
thinkingBlockId = null
}
//fall through
case ChunkType.BLOCK_COMPLETE:

View File

@@ -41,19 +41,8 @@ export const processMessages = async (
let textBlockId: string | null = null
let thinkingBlockId: string | null = null
let thinkingStartTime: number | null = null
let textBlockContent: string = ''
// Picks the thinking duration: an explicit finite value from the chunk wins;
// otherwise the elapsed local timer (never negative); otherwise 0.
const resolveThinkingDuration = (duration?: number) => {
  if (typeof duration === 'number' && Number.isFinite(duration)) return duration
  return thinkingStartTime !== null ? Math.max(0, performance.now() - thinkingStartTime) : 0
}
const assistantMessage = getAssistantMessage({
assistant,
topic
@@ -90,7 +79,6 @@ export const processMessages = async (
switch (chunk.type) {
case ChunkType.THINKING_START:
{
thinkingStartTime = performance.now()
if (thinkingBlockId) {
store.dispatch(
updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -114,13 +102,9 @@ export const processMessages = async (
case ChunkType.THINKING_DELTA:
{
if (thinkingBlockId) {
if (thinkingStartTime === null) {
thinkingStartTime = performance.now()
}
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
throttledBlockUpdate(thinkingBlockId, {
content: chunk.text,
thinking_millsec: thinkingDuration
thinking_millsec: chunk.thinking_millsec
})
}
onStream()
@@ -129,7 +113,6 @@ export const processMessages = async (
case ChunkType.THINKING_COMPLETE:
{
if (thinkingBlockId) {
const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
cancelThrottledBlockUpdate(thinkingBlockId)
store.dispatch(
updateOneBlock({
@@ -137,13 +120,12 @@ export const processMessages = async (
changes: {
content: chunk.text,
status: MessageBlockStatus.SUCCESS,
thinking_millsec: thinkingDuration
thinking_millsec: chunk.thinking_millsec
}
})
)
thinkingBlockId = null
}
thinkingStartTime = null
}
break
case ChunkType.TEXT_START:
@@ -208,7 +190,6 @@ export const processMessages = async (
case ChunkType.ERROR:
{
const blockId = textBlockId || thinkingBlockId
thinkingStartTime = null
if (blockId) {
store.dispatch(
updateOneBlock({

View File

@@ -284,54 +284,6 @@ describe('processMessages', () => {
})
})
// Verifies the fallback path: when chunks omit thinking_millsec, the thinking
// duration is derived from a locally measured performance.now() timer.
describe('thinking timer fallback', () => {
it('should use local timer when thinking_millsec is missing', async () => {
// Stub performance.now() to return 1000, 1500, 2000 on successive calls
// (then 2000 thereafter) so the elapsed times are deterministic.
const nowValues = [1000, 1500, 2000]
let nowIndex = 0
const performanceSpy = vi.spyOn(performance, 'now').mockImplementation(() => {
const value = nowValues[Math.min(nowIndex, nowValues.length - 1)]
nowIndex += 1
return value
})
// None of the thinking chunks carry thinking_millsec, forcing the fallback.
const mockChunks = [
{ type: ChunkType.THINKING_START },
{ type: ChunkType.THINKING_DELTA, text: 'Thinking...' },
{ type: ChunkType.THINKING_COMPLETE, text: 'Done thinking' },
{ type: ChunkType.TEXT_START },
{ type: ChunkType.TEXT_COMPLETE, text: 'Final answer' },
{ type: ChunkType.BLOCK_COMPLETE }
]
vi.mocked(fetchChatCompletion).mockImplementation(async ({ onChunkReceived }: any) => {
for (const chunk of mockChunks) {
await onChunkReceived(chunk)
}
})
await processMessages(
mockAssistant,
mockTopic,
'test prompt',
mockSetAskId,
mockOnStream,
mockOnFinish,
mockOnError
)
// Delta update carries the elapsed time at THINKING_DELTA: 1500 - 1000 = 500.
const thinkingDeltaCall = vi.mocked(throttledBlockUpdate).mock.calls.find(([id]) => id === 'thinking-block-1')
const deltaPayload = thinkingDeltaCall?.[1] as { thinking_millsec?: number } | undefined
expect(deltaPayload?.thinking_millsec).toBe(500)
// Completion update carries the total at THINKING_COMPLETE: 2000 - 1000 = 1000.
const thinkingCompleteUpdate = vi
.mocked(updateOneBlock)
.mock.calls.find(([payload]) => (payload as any)?.changes?.thinking_millsec !== undefined)
expect((thinkingCompleteUpdate?.[0] as any)?.changes?.thinking_millsec).toBe(1000)
// Restore the real performance.now so later tests are unaffected.
performanceSpy.mockRestore()
})
})
describe('stream with exceptions', () => {
it('should handle error chunks properly', async () => {
const mockError = new Error('Stream processing error')

View File

@@ -4747,12 +4747,11 @@ __metadata:
languageName: node
linkType: hard
"@modelcontextprotocol/sdk@npm:^1.23.0":
version: 1.23.0
resolution: "@modelcontextprotocol/sdk@npm:1.23.0"
"@modelcontextprotocol/sdk@npm:^1.17.5":
version: 1.17.5
resolution: "@modelcontextprotocol/sdk@npm:1.17.5"
dependencies:
ajv: "npm:^8.17.1"
ajv-formats: "npm:^3.0.1"
ajv: "npm:^6.12.6"
content-type: "npm:^1.0.5"
cors: "npm:^2.8.5"
cross-spawn: "npm:^7.0.5"
@@ -4762,17 +4761,9 @@ __metadata:
express-rate-limit: "npm:^7.5.0"
pkce-challenge: "npm:^5.0.0"
raw-body: "npm:^3.0.0"
zod: "npm:^3.25 || ^4.0"
zod-to-json-schema: "npm:^3.25.0"
peerDependencies:
"@cfworker/json-schema": ^4.1.1
zod: ^3.25 || ^4.0
peerDependenciesMeta:
"@cfworker/json-schema":
optional: true
zod:
optional: false
checksum: 10c0/b0291f921ad9bda06bbf1a61b1bb61ceca1173da5d74d39a411c40428d6ca50a95f0de3a1631f25a44b439220b15c30c1306600bf48bef665ab7ad118d528260
zod: "npm:^3.23.8"
zod-to-json-schema: "npm:^3.24.1"
checksum: 10c0/182b92b5e7c07da428fd23c6de22021c4f9a91f799c02a8ef15def07e4f9361d0fc22303548658fec2a700623535fd44a9dc4d010fb5d803a8f80e3c6c64a45e
languageName: node
linkType: hard
@@ -10055,7 +10046,7 @@ __metadata:
"@libsql/client": "npm:0.14.0"
"@libsql/win32-x64-msvc": "npm:^0.4.7"
"@mistralai/mistralai": "npm:^1.7.5"
"@modelcontextprotocol/sdk": "npm:^1.23.0"
"@modelcontextprotocol/sdk": "npm:^1.17.5"
"@mozilla/readability": "npm:^0.6.0"
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch"
"@notionhq/client": "npm:^2.2.15"
@@ -10412,20 +10403,6 @@ __metadata:
languageName: node
linkType: hard
"ajv-formats@npm:^3.0.1":
version: 3.0.1
resolution: "ajv-formats@npm:3.0.1"
dependencies:
ajv: "npm:^8.0.0"
peerDependencies:
ajv: ^8.0.0
peerDependenciesMeta:
ajv:
optional: true
checksum: 10c0/168d6bca1ea9f163b41c8147bae537e67bd963357a5488a1eaf3abe8baa8eec806d4e45f15b10767e6020679315c7e1e5e6803088dfb84efa2b4e9353b83dd0a
languageName: node
linkType: hard
"ajv-keywords@npm:^3.4.1":
version: 3.5.2
resolution: "ajv-keywords@npm:3.5.2"
@@ -10435,7 +10412,7 @@ __metadata:
languageName: node
linkType: hard
"ajv@npm:^6.10.0, ajv@npm:^6.12.0, ajv@npm:^6.12.4":
"ajv@npm:^6.10.0, ajv@npm:^6.12.0, ajv@npm:^6.12.4, ajv@npm:^6.12.6":
version: 6.12.6
resolution: "ajv@npm:6.12.6"
dependencies:
@@ -10447,7 +10424,7 @@ __metadata:
languageName: node
linkType: hard
"ajv@npm:^8.0.0, ajv@npm:^8.17.1, ajv@npm:^8.6.3":
"ajv@npm:^8.0.0, ajv@npm:^8.6.3":
version: 8.17.1
resolution: "ajv@npm:8.17.1"
dependencies:
@@ -26376,15 +26353,6 @@ __metadata:
languageName: node
linkType: hard
"zod-to-json-schema@npm:^3.25.0":
version: 3.25.0
resolution: "zod-to-json-schema@npm:3.25.0"
peerDependencies:
zod: ^3.25 || ^4
checksum: 10c0/2d2cf6ca49752bf3dc5fb37bc8f275eddbbc4020e7958d9c198ea88cd197a5f527459118188a0081b889da6a6474d64c4134cd60951fa70178c125138761c680
languageName: node
linkType: hard
"zod-validation-error@npm:^3.4.0":
version: 3.4.0
resolution: "zod-validation-error@npm:3.4.0"
@@ -26394,20 +26362,13 @@ __metadata:
languageName: node
linkType: hard
"zod@npm:^3.22.4, zod@npm:^3.24.1":
"zod@npm:^3.22.4, zod@npm:^3.23.8, zod@npm:^3.24.1":
version: 3.25.56
resolution: "zod@npm:3.25.56"
checksum: 10c0/3800f01d4b1df932b91354eb1e648f69cc7e5561549e6d2bf83827d930a5f33bbf92926099445f6fc1ebb64ca9c6513ef9ae5e5409cfef6325f354bcf6fc9a24
languageName: node
linkType: hard
"zod@npm:^3.25 || ^4.0":
version: 4.1.13
resolution: "zod@npm:4.1.13"
checksum: 10c0/d7e74e82dba81a91ffc3239cd85bc034abe193a28f7087a94ab258a3e48e9a7ca4141920cac979a0d781495b48fc547777394149f26be04c3dc642f58bbc3941
languageName: node
linkType: hard
"zod@npm:^3.25.0 || ^4.0.0, zod@npm:^3.25.76 || ^4":
version: 4.1.12
resolution: "zod@npm:4.1.12"