Merge remote-tracking branch 'origin/v2' into fix/v2/inputbar-cache
@@ -13,7 +13,15 @@ This file provides guidance to AI coding assistants when working with code in th
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.

## Pull Request Workflow (CRITICAL)

When creating a Pull Request, you MUST:

1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)

## Development Commands

@@ -135,9 +135,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
A New Era of Intelligence with Cherry Studio 1.7.0
A New Era of Intelligence with Cherry Studio 1.7.1

Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.

For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.

@@ -188,9 +188,9 @@ releaseInfo:
The Agent Era is here. We can't wait to see what you'll create.

<!--LANG:zh-CN-->
Cherry Studio 1.7.0:开启智能新纪元
Cherry Studio 1.7.1:开启智能新纪元

今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。

多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。

@@ -69,6 +69,7 @@ export interface CherryInProviderSettings {
headers?: HeadersInput
/**
* Optional endpoint type to distinguish different endpoint behaviors.
* "image-generation" is also openai endpoint, but specifically for image generation.
*/
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
}

@@ -1,6 +1,7 @@
// src/main/services/agents/services/claudecode/index.ts
import { EventEmitter } from 'node:events'
import { createRequire } from 'node:module'
import path from 'node:path'

import type {
CanUseTool,
@@ -121,7 +122,11 @@ class ClaudeCodeService implements AgentServiceInterface {
// TODO: support set small model in UI
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
ELECTRON_RUN_AS_NODE: '1',
ELECTRON_NO_ATTACH_CONSOLE: '1'
ELECTRON_NO_ATTACH_CONSOLE: '1',
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
// This prevents the SDK from using the user's home directory which may have encoding problems
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
}

const errorChunks: string[] = []

@@ -212,8 +212,9 @@ export class ToolCallChunkHandler {
description: toolName,
type: 'builtin'
} as BaseTool
} else if ((mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool)) {
} else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
// 如果是客户端执行的 MCP 工具,沿用现有逻辑
// toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
// mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
// if (!mcpTool) {

@@ -46,6 +46,7 @@ import type {
GeminiSdkRawOutput,
GeminiSdkToolCall
} from '@renderer/types/sdk'
import { getTrailingApiVersion, withoutTrailingApiVersion } from '@renderer/utils'
import { isToolUseModeFunction } from '@renderer/utils/assistant'
import {
geminiFunctionCallToMcpTool,
@@ -163,6 +164,10 @@ export class GeminiAPIClient extends BaseApiClient<
return models
}

override getBaseURL(): string {
return withoutTrailingApiVersion(super.getBaseURL())
}

override async getSdkInstance() {
if (this.sdkInstance) {
return this.sdkInstance
@@ -188,6 +193,13 @@ export class GeminiAPIClient extends BaseApiClient<
if (this.provider.isVertex) {
return 'v1'
}

// Extract trailing API version from the URL
const trailingVersion = getTrailingApiVersion(this.provider.apiHost || '')
if (trailingVersion) {
return trailingVersion
}

return 'v1beta'
}

@@ -7,7 +7,7 @@ import { isAwsBedrockProvider, isVertexProvider } from '@renderer/utils/provider
// https://docs.claude.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking
const INTERLEAVED_THINKING_HEADER = 'interleaved-thinking-2025-05-14'
// https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window
const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
// const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
// https://docs.cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/web-search
const WEBSEARCH_HEADER = 'web-search-2025-03-05'

@@ -25,7 +25,9 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
if (isVertexProvider(provider) && assistant.enableWebSearch) {
anthropicHeaders.push(WEBSEARCH_HEADER)
}
anthropicHeaders.push(CONTEXT_100M_HEADER)
// We may add it by user preference in assistant.settings instead of always adding it.
// See #11540, #11397
// anthropicHeaders.push(CONTEXT_100M_HEADER)
}
return anthropicHeaders
}

@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
'gen_ai.usage.output_tokens'
]

const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']

if (completionTokens !== undefined || promptTokens !== undefined) {
const usage: TokenUsage = {

@@ -0,0 +1,53 @@
import type { Span } from '@opentelemetry/api'
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
import { describe, expect, it, vi } from 'vitest'

import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'

vi.mock('@logger', () => ({
loggerService: {
withContext: () => ({
debug: vi.fn(),
error: vi.fn(),
info: vi.fn(),
warn: vi.fn()
})
}
}))

describe('AiSdkSpanAdapter', () => {
const createMockSpan = (attributes: Record<string, unknown>): Span => {
const span = {
spanContext: () => ({
traceId: 'trace-id',
spanId: 'span-id'
}),
_attributes: attributes,
_events: [],
name: 'test span',
status: { code: SpanStatusCode.OK },
kind: SpanKind.CLIENT,
startTime: [0, 0] as [number, number],
endTime: [0, 1] as [number, number],
ended: true,
parentSpanId: '',
links: []
}
return span as unknown as Span
}

it('maps prompt and completion usage tokens to the correct fields', () => {
const attributes = {
'ai.usage.promptTokens': 321,
'ai.usage.completionTokens': 654
}

const span = createMockSpan(attributes)
const result = AiSdkSpanAdapter.convertToSpanEntity({ span })

expect(result.usage).toBeDefined()
expect(result.usage?.prompt_tokens).toBe(321)
expect(result.usage?.completion_tokens).toBe(654)
expect(result.usage?.total_tokens).toBe(975)
})
})

@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
expect(result).toEqual({})
})

it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
const { isReasoningModel } = await import('@renderer/config/models')

vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -161,6 +161,29 @@ describe('reasoning utils', () => {
settings: {}
} as Assistant

const result = getReasoningEffort(assistant, model)
expect(result).toEqual({})
})

it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
const { isReasoningModel } = await import('@renderer/config/models')

vi.mocked(isReasoningModel).mockReturnValue(true)

const model: Model = {
id: 'anthropic/claude-sonnet-4',
name: 'Claude Sonnet 4',
provider: SystemProviderIds.openrouter
} as Model

const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {
reasoning_effort: 'none'
}
} as Assistant

const result = getReasoningEffort(assistant, model)
expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
})
@@ -269,7 +292,9 @@ describe('reasoning utils', () => {
const assistant: Assistant = {
id: 'test',
name: 'Test',
settings: {}
settings: {
reasoning_effort: 'none'
}
} as Assistant

const result = getReasoningEffort(assistant, model)

@@ -16,10 +16,8 @@ import {
isGPT5SeriesModel,
isGPT51SeriesModel,
isGrok4FastReasoningModel,
isGrokReasoningModel,
isOpenAIDeepResearchModel,
isOpenAIModel,
isOpenAIReasoningModel,
isQwenAlwaysThinkModel,
isQwenReasoningModel,
isReasoningModel,
@@ -64,30 +62,22 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
const reasoningEffort = assistant?.settings?.reasoning_effort

// Handle undefined and 'none' reasoningEffort.
// TODO: They should be separated.
if (!reasoningEffort || reasoningEffort === 'none') {
// openrouter: use reasoning
if (model.provider === SystemProviderIds.openrouter) {
// Don't disable reasoning for Gemini models that support thinking tokens
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
// reasoningEffort is not set, no extra reasoning setting
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
if (!reasoningEffort) {
return {}
}

// Handle 'none' reasoningEffort. It's explicitly off.
if (reasoningEffort === 'none') {
// openrouter: use reasoning
if (model.provider === SystemProviderIds.openrouter) {
// 'none' is not an available value for effort for now.
// I think they should resolve this issue soon, so I'll just go ahead and use this value.
if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
return { reasoning: { effort: 'none' } }
}
// Don't disable reasoning for models that require it
if (
isGrokReasoningModel(model) ||
isOpenAIReasoningModel(model) ||
isQwenAlwaysThinkModel(model) ||
model.id.includes('seed-oss') ||
model.id.includes('minimax-m2')
) {
return {}
}
return { reasoning: { enabled: false, exclude: true } }
}

@@ -101,11 +91,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { enable_thinking: false }
}

// claude
if (isSupportedThinkingTokenClaudeModel(model)) {
return {}
}

// gemini
if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -118,9 +103,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
}
}
} else {
logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
return {}
}
}

// use thinking, doubao, zhipu, etc.
if (isSupportedThinkingTokenDoubaoModel(model) || isSupportedThinkingTokenZhipuModel(model)) {
@@ -139,6 +126,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}

logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
return {}
}

@@ -293,6 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}

// OpenRouter models, use reasoning
// FIXME: duplicated openrouter handling. remove one
if (model.provider === SystemProviderIds.openrouter) {
if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
return {

@@ -215,6 +215,10 @@
border-top: none !important;
}

.ant-collapse-header-text {
overflow-x: hidden;
}

.ant-slider .ant-slider-handle::after {
box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
}

@@ -10,6 +10,7 @@ import {
} from '@ant-design/icons'
import { loggerService } from '@logger'
import { download } from '@renderer/utils/download'
import { convertImageToPng } from '@renderer/utils/image'
import type { ImageProps as AntImageProps } from 'antd'
import { Dropdown, Image as AntImage, Space } from 'antd'
import { Base64 } from 'js-base64'
@@ -33,39 +34,38 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
// 复制图片到剪贴板
const handleCopyImage = async (src: string) => {
try {
let blob: Blob

if (src.startsWith('data:')) {
// 处理 base64 格式的图片
const match = src.match(/^data:(image\/\w+);base64,(.+)$/)
if (!match) throw new Error('Invalid base64 image format')
const mimeType = match[1]
const byteArray = Base64.toUint8Array(match[2])
const blob = new Blob([byteArray as unknown as BlobPart], { type: mimeType })
await navigator.clipboard.write([new ClipboardItem({ [mimeType]: blob })])
blob = new Blob([byteArray as unknown as BlobPart], { type: mimeType })
} else if (src.startsWith('file://')) {
// 处理本地文件路径
const bytes = await window.api.fs.read(src)
const mimeType = mime.getType(src) || 'application/octet-stream'
const blob = new Blob([bytes], { type: mimeType })
await navigator.clipboard.write([
new ClipboardItem({
[mimeType]: blob
})
])
blob = new Blob([bytes], { type: mimeType })
} else {
// 处理 URL 格式的图片
const response = await fetch(src)
const blob = await response.blob()

await navigator.clipboard.write([
new ClipboardItem({
[blob.type]: blob
})
])
blob = await response.blob()
}

// 统一转换为 PNG 以确保兼容性(剪贴板 API 不支持 JPEG)
const pngBlob = await convertImageToPng(blob)

const item = new ClipboardItem({
'image/png': pngBlob
})
await navigator.clipboard.write([item])

window.toast.success(t('message.copy.success'))
} catch (error) {
logger.error('Failed to copy image:', error as Error)
const err = error as Error
logger.error(`Failed to copy image: ${err.message}`, { stack: err.stack })
window.toast.error(t('message.copy.failed'))
}
}

@@ -460,6 +460,7 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
}

export const isDeepSeekHybridInferenceModel = (model: Model) => {
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
const modelId = getLowerBaseModelName(model.id)
// deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
@@ -470,6 +471,8 @@ export const isDeepSeekHybridInferenceModel = (model: Model) => {
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
// TODO: move to utils and add test cases
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
})
return idResult || nameResult
}

export const isLingReasoningModel = (model?: Model): boolean => {
@@ -523,7 +526,6 @@ export function isReasoningModel(model?: Model): boolean {
REASONING_REGEX.test(model.name) ||
isSupportedThinkingTokenDoubaoModel(model) ||
isDeepSeekHybridInferenceModel(model) ||
isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
false
)
}

@@ -1,6 +1,6 @@
import { cacheService } from '@data/CacheService'
import { throttle } from 'lodash'
import { useEffect, useRef } from 'react'
import { useEffect, useMemo, useRef } from 'react'

import { useTimer } from './useTimer'

@@ -13,13 +13,18 @@ import { useTimer } from './useTimer'
*/
export default function useScrollPosition(key: string, throttleWait?: number) {
const containerRef = useRef<HTMLDivElement>(null)
const scrollKey = `scroll:${key}`
const scrollKey = useMemo(() => `scroll:${key}`, [key])
const scrollKeyRef = useRef(scrollKey)
const { setTimeoutTimer } = useTimer()

useEffect(() => {
scrollKeyRef.current = scrollKey
}, [scrollKey])

const handleScroll = throttle(() => {
const position = containerRef.current?.scrollTop ?? 0
window.requestAnimationFrame(() => {
cacheService.set(scrollKey, position)
cacheService.set(scrollKeyRef.current, position)
})
}, throttleWait ?? 100)

@@ -29,5 +34,9 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
setTimeoutTimer('scrollEffect', scroll, 50)
}, [scrollKey, setTimeoutTimer])

useEffect(() => {
return () => handleScroll.cancel()
}, [handleScroll])

return { containerRef, handleScroll }
}

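A minimal usage sketch of the updated hook, assuming a scrollable list component; the component name and cache key below are illustrative and are not part of the commit:

// Hypothetical consumer of useScrollPosition (TSX sketch, not taken from the diff)
import useScrollPosition from '@renderer/hooks/useScrollPosition'

const TopicList = ({ topicId }: { topicId: string }) => {
  // The key is memoized inside the hook, so switching topics updates scrollKeyRef without stale writes
  const { containerRef, handleScroll } = useScrollPosition(`topic:${topicId}`)
  return (
    <div ref={containerRef} onScroll={handleScroll} style={{ overflowY: 'auto' }}>
      {/* list items */}
    </div>
  )
}
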
@@ -1,4 +1,4 @@
import { useEffect, useRef } from 'react'
import { useCallback, useEffect, useRef } from 'react'

/**
* 定时器管理 Hook,用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
@@ -43,10 +43,38 @@ export const useTimer = () => {
const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())

/**
* 清除指定 key 的 setTimeout 定时器
* @param key - 定时器标识符
*/
const clearTimeoutTimer = useCallback((key: string) => {
clearTimeout(timeoutMapRef.current.get(key))
timeoutMapRef.current.delete(key)
}, [])

/**
* 清除指定 key 的 setInterval 定时器
* @param key - 定时器标识符
*/
const clearIntervalTimer = useCallback((key: string) => {
clearInterval(intervalMapRef.current.get(key))
intervalMapRef.current.delete(key)
}, [])

/**
* 清除所有定时器,包括 setTimeout 和 setInterval
*/
const clearAllTimers = useCallback(() => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
intervalMapRef.current.forEach((timer) => clearInterval(timer))
timeoutMapRef.current.clear()
intervalMapRef.current.clear()
}, [])

// 组件卸载时自动清理所有定时器
useEffect(() => {
return () => clearAllTimers()
}, [])
}, [clearAllTimers])

/**
* 设置一个 setTimeout 定时器
@@ -65,12 +93,15 @@ export const useTimer = () => {
* cleanup();
* ```
*/
const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
const setTimeoutTimer = useCallback(
(key: string, ...args: Parameters<typeof setTimeout>) => {
clearTimeout(timeoutMapRef.current.get(key))
const timer = setTimeout(...args)
timeoutMapRef.current.set(key, timer)
return () => clearTimeoutTimer(key)
}
},
[clearTimeoutTimer]
)

/**
* 设置一个 setInterval 定时器
@@ -89,56 +120,31 @@ export const useTimer = () => {
* cleanup();
* ```
*/
const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
const setIntervalTimer = useCallback(
(key: string, ...args: Parameters<typeof setInterval>) => {
clearInterval(intervalMapRef.current.get(key))
const timer = setInterval(...args)
intervalMapRef.current.set(key, timer)
return () => clearIntervalTimer(key)
}

/**
* 清除指定 key 的 setTimeout 定时器
* @param key - 定时器标识符
*/
const clearTimeoutTimer = (key: string) => {
clearTimeout(timeoutMapRef.current.get(key))
timeoutMapRef.current.delete(key)
}

/**
* 清除指定 key 的 setInterval 定时器
* @param key - 定时器标识符
*/
const clearIntervalTimer = (key: string) => {
clearInterval(intervalMapRef.current.get(key))
intervalMapRef.current.delete(key)
}
},
[clearIntervalTimer]
)

/**
* 清除所有 setTimeout 定时器
*/
const clearAllTimeoutTimers = () => {
const clearAllTimeoutTimers = useCallback(() => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
timeoutMapRef.current.clear()
}
}, [])

/**
* 清除所有 setInterval 定时器
*/
const clearAllIntervalTimers = () => {
const clearAllIntervalTimers = useCallback(() => {
intervalMapRef.current.forEach((timer) => clearInterval(timer))
intervalMapRef.current.clear()
}

/**
* 清除所有定时器,包括 setTimeout 和 setInterval
*/
const clearAllTimers = () => {
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
intervalMapRef.current.forEach((timer) => clearInterval(timer))
timeoutMapRef.current.clear()
intervalMapRef.current.clear()
}
}, [])

return {
setTimeoutTimer,

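A short sketch of how the memoized timer API can be consumed, relying only on the setTimeoutTimer shape shown above (the rest of the hook's return object is truncated in this hunk); the component and key names are illustrative:

// Hypothetical consumer of useTimer (sketch, not taken from the diff)
import { useEffect } from 'react'
import { useTimer } from '@renderer/hooks/useTimer'

const AutoHideToast = ({ onHide }: { onHide: () => void }) => {
  const { setTimeoutTimer } = useTimer()
  useEffect(() => {
    // setTimeoutTimer replaces any pending timer under the same key and returns a cleanup function;
    // because it is wrapped in useCallback, listing it in the deps array no longer re-runs the effect every render
    const cleanup = setTimeoutTimer('autoHideToast', onHide, 3000)
    return cleanup
  }, [onHide, setTimeoutTimer])
  return null
}
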
@@ -280,6 +280,7 @@
"denied": "Tool request was denied.",
"timeout": "Tool request timed out before receiving approval."
},
"toolPendingFallback": "Tool",
"waiting": "Waiting for tool permission decision..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Image Generation",
"image-generation": "Image Generation (OpenAI)",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -280,6 +280,7 @@
"denied": "工具请求已被拒绝。",
"timeout": "工具请求在收到批准前超时。"
},
"toolPendingFallback": "工具",
"waiting": "等待工具权限决定..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "图片生成",
"image-generation": "图像生成 (OpenAI)",
"jina-rerank": "Jina 重排序",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -280,6 +280,7 @@
"denied": "工具請求已被拒絕。",
"timeout": "工具請求在收到核准前逾時。"
},
"toolPendingFallback": "工具",
"waiting": "等待工具權限決定..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "圖片生成",
"image-generation": "圖像生成 (OpenAI)",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -280,6 +280,7 @@
"denied": "Tool-Anfrage wurde abgelehnt.",
"timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist."
},
"toolPendingFallback": "Werkzeug",
"waiting": "Warten auf Entscheidung über Tool-Berechtigung..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Bildgenerierung",
"image-generation": "Bilderzeugung (OpenAI)",
"jina-rerank": "Jina Reranking",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -280,6 +280,7 @@
"denied": "Το αίτημα για εργαλείο απορρίφθηκε.",
"timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση."
},
"toolPendingFallback": "Εργαλείο",
"waiting": "Αναμονή για απόφαση άδειας εργαλείου..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Δημιουργία Εικόνας",
"image-generation": "Δημιουργία Εικόνων (OpenAI)",
"jina-rerank": "Επαναταξινόμηση Jina",
"openai": "OpenAI",
"openai-response": "Απάντηση OpenAI"

@@ -280,6 +280,7 @@
"denied": "La solicitud de herramienta fue denegada.",
"timeout": "La solicitud de herramienta expiró antes de recibir la aprobación."
},
"toolPendingFallback": "Herramienta",
"waiting": "Esperando la decisión de permiso de la herramienta..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Generación de imágenes",
"image-generation": "Generación de Imágenes (OpenAI)",
"jina-rerank": "Reordenamiento Jina",
"openai": "OpenAI",
"openai-response": "Respuesta de OpenAI"

@@ -280,6 +280,7 @@
"denied": "La demande d'outil a été refusée.",
"timeout": "La demande d'outil a expiré avant d'obtenir l'approbation."
},
"toolPendingFallback": "Outil",
"waiting": "En attente de la décision d'autorisation de l'outil..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Génération d'images",
"image-generation": "Génération d'images (OpenAI)",
"jina-rerank": "Reclassement Jina",
"openai": "OpenAI",
"openai-response": "Réponse OpenAI"

@@ -280,6 +280,7 @@
"denied": "ツールリクエストは拒否されました。",
"timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。"
},
"toolPendingFallback": "ツール",
"waiting": "ツールの許可決定を待っています..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "画像生成",
"image-generation": "画像生成 (OpenAI)",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -16,7 +16,7 @@
"error": {
"failed": "Falha ao excluir o agente"
},
"title": "删除代理"
"title": "Excluir Agente"
},
"edit": {
"title": "Agent Editor"
@@ -111,7 +111,7 @@
"label": "Modo de permissão",
"options": {
"acceptEdits": "Aceitar edições automaticamente",
"bypassPermissions": "忽略检查 de permissão",
"bypassPermissions": "Ignorar verificações de permissão",
"default": "Padrão (perguntar antes de continuar)",
"plan": "Modo de planejamento (plano sujeito a aprovação)"
},
@@ -150,7 +150,7 @@
},
"success": {
"install": "Plugin instalado com sucesso",
"uninstall": "插件 desinstalado com sucesso"
"uninstall": "Plugin desinstalado com sucesso"
},
"tab": "plug-in",
"type": {
@@ -280,6 +280,7 @@
"denied": "Solicitação de ferramenta foi negada.",
"timeout": "A solicitação da ferramenta expirou antes de receber aprovação."
},
"toolPendingFallback": "Ferramenta",
"waiting": "Aguardando decisão de permissão da ferramenta..."
},
"type": {
@@ -1134,7 +1135,7 @@
"duplicate": "Duplicar",
"edit": "Editar",
"enabled": "Ativado",
"error": "错误",
"error": "Erro",
"errors": {
"create_message": "Falha ao criar mensagem",
"validation": "Falha na verificação"
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Geração de Imagem",
"image-generation": "Geração de Imagens (OpenAI)",
"jina-rerank": "Jina Reordenar",
"openai": "OpenAI",
"openai-response": "Resposta OpenAI"

@@ -280,6 +280,7 @@
"denied": "Запрос на инструмент был отклонён.",
"timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения."
},
"toolPendingFallback": "Инструмент",
"waiting": "Ожидание решения о разрешении на использование инструмента..."
},
"type": {
@@ -1208,7 +1209,7 @@
"endpoint_type": {
"anthropic": "Anthropic",
"gemini": "Gemini",
"image-generation": "Изображение",
"image-generation": "Генерация изображений (OpenAI)",
"jina-rerank": "Jina Rerank",
"openai": "OpenAI",
"openai-response": "OpenAI-Response"

@@ -1,8 +1,8 @@
import { cacheService } from '@data/CacheService'
import { loggerService } from '@logger'
import type { QuickPanelTriggerInfo } from '@renderer/components/QuickPanel'
import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
import { isGenerateImageModel, isVisionModel } from '@renderer/config/models'
import { cacheService } from '@renderer/data/CacheService'
import { useSession } from '@renderer/hooks/agents/useSession'
import { useInputText } from '@renderer/hooks/useInputText'
import { selectNewTopicLoading } from '@renderer/hooks/useMessageOperations'
@@ -422,6 +422,7 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
})
)

// Clear text after successful send (draft is cleared automatically via onChange)
setText('')
setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
} catch (error) {

@@ -1,3 +1,4 @@
import { cacheService } from '@data/CacheService'
import { usePreference } from '@data/hooks/usePreference'
import { loggerService } from '@logger'
import {
@@ -9,13 +10,11 @@ import {
isVisionModels,
isWebSearchModel
} from '@renderer/config/models'
import { cacheService } from '@renderer/data/CacheService'
import db from '@renderer/databases'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useInputText } from '@renderer/hooks/useInputText'
import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
import { useShortcut } from '@renderer/hooks/useShortcuts'
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
import { useTimer } from '@renderer/hooks/useTimer'
import {
@@ -135,6 +134,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()

const { text, setText } = useInputText({
initialValue: cacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
onChange: (value) => cacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
})
const { text, setText } = useInputText({
initialValue: cacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
onChange: (value) => cacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
@@ -150,7 +153,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
minHeight: 30
})

const showKnowledgeIcon = useSidebarIconShow('knowledge')
const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
const [showInputEstimatedTokens] = usePreference('chat.input.show_estimated_tokens')
const [sendMessageShortcut] = usePreference('chat.input.send_message_shortcut')
@@ -408,9 +410,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
focusTextarea
])

// TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
useEffect(() => {
setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
}, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
}, [assistant.knowledge_bases, setSelectedKnowledgeBases])

useEffect(() => {
// Disable web search if model doesn't support it

@@ -1,5 +1,4 @@
import { useAssistant } from '@renderer/hooks/useAssistant'
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
import type { KnowledgeBase } from '@renderer/types'
import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
@@ -30,7 +29,6 @@ const knowledgeBaseTool = defineTool({
render: function KnowledgeBaseToolRender(context) {
const { assistant, state, actions, quickPanel } = context

const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
const { updateAssistant } = useAssistant(assistant.id)

const handleSelect = useCallback(
@@ -41,10 +39,6 @@ const knowledgeBaseTool = defineTool({
[updateAssistant, actions]
)

if (!knowledgeSidebarEnabled) {
return null
}

return (
<KnowledgeBaseButton
quickPanel={quickPanel}

@@ -105,10 +105,12 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
)
}

const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)

const ThinkingTimeSeconds = memo(
({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
const { t } = useTranslation()
const [displayTime, setDisplayTime] = useState(blockThinkingTime)
const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))

const timer = useRef<NodeJS.Timeout | null>(null)

@@ -124,7 +126,7 @@ const ThinkingTimeSeconds = memo(
clearInterval(timer.current)
timer.current = null
}
setDisplayTime(blockThinkingTime)
setDisplayTime(normalizeThinkingTime(blockThinkingTime))
}

return () => {
@@ -135,10 +137,10 @@ const ThinkingTimeSeconds = memo(
}
}, [isThinking, blockThinkingTime])

const thinkingTimeSeconds = useMemo(
() => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
[displayTime]
)
const thinkingTimeSeconds = useMemo(() => {
const safeTime = normalizeThinkingTime(displayTime)
return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
}, [displayTime])

return isThinking
? t('chat.thinking', {

@@ -287,6 +287,20 @@ describe('ThinkingBlock', () => {
unmount()
})
})

it('should clamp invalid thinking times to a safe default', () => {
const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]

testCases.forEach((thinking_millsec) => {
const block = createThinkingBlock({
thinking_millsec: thinking_millsec as any,
status: MessageBlockStatus.SUCCESS
})
const { unmount } = renderThinkingBlock(block)
expect(getThinkingTimeText()).toHaveTextContent('0.1s')
unmount()
})
})
})

describe('collapse behavior', () => {

@@ -11,6 +11,7 @@ import { usePreference } from '@data/hooks/usePreference'
import { useTimer } from '@renderer/hooks/useTimer'
import type { RootState } from '@renderer/store'
// import { selectCurrentTopicId } from '@renderer/store/newMessage'
import { scrollIntoView } from '@renderer/utils/dom'
import { Drawer } from 'antd'
import type { FC } from 'react'
import { useCallback, useEffect, useRef, useState } from 'react'
@@ -120,7 +121,8 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
}

const scrollToMessage = (element: HTMLElement) => {
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
// Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
}

const scrollToTop = () => {

@@ -15,6 +15,7 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
import type { Assistant, Topic } from '@renderer/types'
import type { Message, MessageBlock } from '@renderer/types/newMessage'
import { classNames, cn } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
import { Divider } from 'antd'
import type { Dispatch, FC, SetStateAction } from 'react'
@@ -84,9 +85,10 @@ const MessageItem: FC<Props> = ({

useEffect(() => {
if (isEditing && messageContainerRef.current) {
messageContainerRef.current.scrollIntoView({
scrollIntoView(messageContainerRef.current, {
behavior: 'smooth',
block: 'center'
block: 'center',
container: 'nearest'
})
}
}, [isEditing])
@@ -129,7 +131,7 @@ const MessageItem: FC<Props> = ({
const messageHighlightHandler = useCallback(
(highlight: boolean = true) => {
if (messageContainerRef.current) {
messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
if (highlight) {
setTimeoutTimer(
'messageHighlightHandler',

@@ -12,6 +12,7 @@ import { newMessagesActions } from '@renderer/store/newMessage'
// import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
import type { Message } from '@renderer/types/newMessage'
import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import { getMainTextContent } from '@renderer/utils/messageUtils/find'
import { CircleChevronDown } from 'lucide-react'
import { type FC, useCallback, useEffect, useRef, useState } from 'react'
@@ -118,7 +119,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
() => {
const messageElement = document.getElementById(`message-${message.id}`)
if (messageElement) {
messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
}
},
100
@@ -140,7 +141,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
return
}

messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
},
[setSelectedMessage]
)

@@ -9,6 +9,7 @@ import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import type { Topic } from '@renderer/types'
import type { Message } from '@renderer/types/newMessage'
import { classNames } from '@renderer/utils'
import { scrollIntoView } from '@renderer/utils/dom'
import type { MultiModelMessageStyle } from '@shared/data/preference/preferenceTypes'
import { Popover } from 'antd'
import type { ComponentProps } from 'react'
@@ -75,7 +76,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
() => {
const messageElement = document.getElementById(`message-${message.id}`)
if (messageElement) {
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
}
},
200
@@ -134,7 +135,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
setSelectedMessage(message)
} else {
// 直接滚动
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
}
}
}

@@ -3,6 +3,7 @@ import type { RootState } from '@renderer/store'
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
import type { Message } from '@renderer/types/newMessage'
import { MessageBlockType } from '@renderer/types/newMessage'
import { scrollIntoView } from '@renderer/utils/dom'
import type { FC } from 'react'
import React, { useMemo, useRef } from 'react'
import { useSelector } from 'react-redux'
@@ -72,10 +73,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
const parent = messageOutlineContainerRef.current?.parentElement
const messageContentContainer = parent?.querySelector('.message-content-container')
if (messageContentContainer) {
const headingElement = messageContentContainer.querySelector(`#${id}`)
const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
if (headingElement) {
const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
}
}
}

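The scrollIntoView helper imported from @renderer/utils/dom is not shown in this diff; the following is only a plausible sketch of its shape, treating the non-standard `container` option as a Chromium-only extension of ScrollIntoViewOptions (both the signature and the option typing are assumptions, not the commit's code):

// Hypothetical shape of scrollIntoView in @renderer/utils/dom (sketch only)
type ScrollIntoViewOpts = ScrollIntoViewOptions & { container?: 'all' | 'nearest' }

export function scrollIntoView(element: Element, options: ScrollIntoViewOpts): void {
  // Chromium honors the `container: 'nearest'` member and keeps scrolling inside the nearest
  // scrollable ancestor; engines that don't know the member ignore it, so this degrades to a
  // plain native scrollIntoView call elsewhere.
  element.scrollIntoView(options as ScrollIntoViewOptions)
}
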
@@ -5,8 +5,6 @@ import { Terminal } from 'lucide-react'
import { ToolTitle } from './GenericTools'
import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'

const MAX_TAG_LENGTH = 100

export function BashTool({
input,
output
@@ -17,12 +15,10 @@ export function BashTool({
// 如果有输出,计算输出行数
const outputLines = output ? output.split('\n').length : 0

// 处理命令字符串的截断,添加空值检查
// 处理命令字符串,添加空值检查
const command = input?.command ?? ''
const needsTruncate = command.length > MAX_TAG_LENGTH
const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command

const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>

return {
key: 'tool',
@@ -34,16 +30,12 @@ export function BashTool({
params={input?.description}
stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
/>
<div className="mt-1">
{needsTruncate ? (
<div className="mt-1 max-w-full">
<Popover
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
trigger="hover">
{tagContent}
</Popover>
) : (
tagContent
)}
</div>
</>
),

@@ -18,9 +18,9 @@ export function ToolTitle({
}) {
return (
<div className={`flex items-center gap-1 ${className}`}>
{icon}
{label && <span className="font-medium text-sm">{label}</span>}
{params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>}
{icon && <span className="flex flex-shrink-0">{icon}</span>}
{label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
{params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
{stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
</div>
)

@@ -1,7 +1,10 @@
import { loggerService } from '@logger'
import { useAppSelector } from '@renderer/store'
import { selectPendingPermission } from '@renderer/store/toolPermissions'
import type { NormalToolResponse } from '@renderer/types'
import type { CollapseProps } from 'antd'
import { Collapse } from 'antd'
import { Collapse, Spin } from 'antd'
import { useTranslation } from 'react-i18next'

// 导出所有类型
export * from './types'
@@ -83,17 +86,41 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
// 统一的组件渲染入口
export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
const { arguments: args, response, tool, status } = toolResponse
logger.info('Rendering agent tool response', {
logger.debug('Rendering agent tool response', {
tool: tool,
arguments: args,
status,
response
})

const pendingPermission = useAppSelector((state) =>
selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
)

if (status === 'pending') {
if (pendingPermission) {
return <ToolPermissionRequestCard toolResponse={toolResponse} />
}
return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
}

return (
<ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
)
}

function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
const { t } = useTranslation()
const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
const detail = description?.trim() || t('agent.toolPermission.executing')

return (
<div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
<Spin size="small" />
<div className="flex flex-col gap-1">
<span className="font-semibold text-default-700 text-sm">{label}</span>
<span className="text-default-500 text-xs">{detail}</span>
</div>
</div>
)
}

@@ -12,7 +12,7 @@ import type { FetchChatCompletionParams } from '@renderer/types'
import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { type Chunk, ChunkType } from '@renderer/types/chunk'
import type { Message } from '@renderer/types/newMessage'
import type { Message, ResponseError } from '@renderer/types/newMessage'
import type { SdkModel } from '@renderer/types/sdk'
import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
@@ -476,7 +476,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
} else {
const abortId = uuid()
const signal = readyToAbort(abortId)
let chunkError
let streamError: ResponseError | undefined
const params: StreamTextParams = {
system: assistant.prompt,
prompt: 'hi',
@@ -495,19 +495,18 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
callType: 'check',
onChunk: (chunk: Chunk) => {
if (chunk.type === ChunkType.ERROR) {
chunkError = chunk.error
streamError = chunk.error
} else {
abortCompletion(abortId)
}
}
}

// Try streaming check
try {
await ai.completions(model.id, params, config)
} catch (e) {
if (!isAbortError(e) && !isAbortError(chunkError)) {
throw e
if (!isAbortError(e) && !isAbortError(streamError)) {
throw streamError ?? e
}
}
}

@@ -239,6 +239,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio

export type ModelTag = Exclude<ModelType, 'text'> | 'free'

// "image-generation" is also openai endpoint, but specifically for image generation.
export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'

export type ModelPricing = {

@@ -234,6 +234,7 @@ export interface Response {
error?: ResponseError
}

// FIXME: Weak type safety. It may be a specific class instance which inherits Error in runtime.
export type ResponseError = Record<string, any>

export interface MessageInputBaseParams {

@@ -7,11 +7,13 @@ import {
formatApiKeys,
formatAzureOpenAIApiHost,
formatVertexApiHost,
getTrailingApiVersion,
hasAPIVersion,
maskApiKey,
routeToEndpoint,
splitApiKeyString,
validateApiHost
validateApiHost,
withoutTrailingApiVersion
} from '../api'

vi.mock('@renderer/store', () => {
@@ -316,4 +318,90 @@ describe('api', () => {
)
})
})

describe('getTrailingApiVersion', () => {
it('extracts trailing API version from URL', () => {
expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1')
expect(getTrailingApiVersion('https://api.example.com/v2')).toBe('v2')
})

it('extracts trailing API version with alpha/beta suffix', () => {
expect(getTrailingApiVersion('https://api.example.com/v2alpha')).toBe('v2alpha')
expect(getTrailingApiVersion('https://api.example.com/v3beta')).toBe('v3beta')
})

it('extracts trailing API version with trailing slash', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/')).toBe('v1')
expect(getTrailingApiVersion('https://api.example.com/v2beta/')).toBe('v2beta')
})

it('returns undefined when API version is in the middle of path', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/chat')).toBeUndefined()
expect(getTrailingApiVersion('https://api.example.com/v1/completions')).toBeUndefined()
})

it('returns undefined when no trailing version exists', () => {
expect(getTrailingApiVersion('https://api.example.com')).toBeUndefined()
expect(getTrailingApiVersion('https://api.example.com/api')).toBeUndefined()
})

it('extracts trailing version from complex URLs', () => {
expect(getTrailingApiVersion('https://api.example.com/service/v1')).toBe('v1')
expect(getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/google-ai-studio/v1beta')).toBe('v1beta')
})

it('only extracts the trailing version when multiple versions exist', () => {
expect(getTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe('v2')
expect(
getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxxxxx/google-ai-studio/google-ai-studio/v1beta')
).toBe('v1beta')
})

it('returns undefined for empty string', () => {
expect(getTrailingApiVersion('')).toBeUndefined()
})
})

describe('withoutTrailingApiVersion', () => {
it('removes trailing API version from URL', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v2')).toBe('https://api.example.com')
})

it('removes trailing API version with alpha/beta suffix', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v2alpha')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v3beta')).toBe('https://api.example.com')
})

it('removes trailing API version with trailing slash', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/v2beta/')).toBe('https://api.example.com')
})

it('does not remove API version in the middle of path', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/chat')).toBe('https://api.example.com/v1/chat')
expect(withoutTrailingApiVersion('https://api.example.com/v1/completions')).toBe(
'https://api.example.com/v1/completions'
)
})

it('returns URL unchanged when no trailing version exists', () => {
expect(withoutTrailingApiVersion('https://api.example.com')).toBe('https://api.example.com')
expect(withoutTrailingApiVersion('https://api.example.com/api')).toBe('https://api.example.com/api')
})

it('handles complex URLs with version at the end', () => {
expect(withoutTrailingApiVersion('https://api.example.com/service/v1')).toBe('https://api.example.com/service')
})

it('handles URLs with multiple versions but only removes the trailing one', () => {
expect(withoutTrailingApiVersion('https://api.example.com/v1/service/v2')).toBe(
'https://api.example.com/v1/service'
)
})

it('returns empty string unchanged', () => {
expect(withoutTrailingApiVersion('')).toBe('')
})
})
})

@@ -12,6 +12,19 @@ export function formatApiKeys(value: string): string {
  return value.replaceAll('，', ',').replaceAll('\n', ',')
}

/**
 * Matches a version segment in a path that starts with `/v<number>` and optionally
 * continues with `alpha` or `beta`. The segment may be followed by `/` or the end
 * of the string (useful for cases like `/v3alpha/resources`).
 */
const VERSION_REGEX_PATTERN = '\\/v\\d+(?:alpha|beta)?(?=\\/|$)'

/**
 * Matches an API version at the end of a URL (with optional trailing slash).
 * Used to detect and extract versions only from the trailing position.
 */
const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i
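
// Illustrative sketch, not part of the diff: how the two patterns above differ on the same input.
//
//   new RegExp(VERSION_REGEX_PATTERN, 'i').test('/v1/chat')  // true (version anywhere in the path)
//   TRAILING_VERSION_REGEX.test('/v1/chat')                  // false (version must end the path)
//   TRAILING_VERSION_REGEX.test('/service/v2beta/')          // true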

/**
 * Determines whether the host's path contains a version-like segment (e.g. /v1, /v2beta),
 *
@@ -21,16 +34,14 @@ export function formatApiKeys(value: string): string {
export function hasAPIVersion(host?: string): boolean {
  if (!host) return false

  // Match a version segment in the path that starts with `/v<number>`, optionally followed by
  // `alpha` or `beta`, and then by `/` or the end of the string (e.g. `/v3alpha/resources`).
  const versionRegex = /\/v\d+(?:alpha|beta)?(?=\/|$)/i
  const regex = new RegExp(VERSION_REGEX_PATTERN, 'i')

  try {
    const url = new URL(host)
    return versionRegex.test(url.pathname)
    return regex.test(url.pathname)
  } catch {
    // If the value cannot be parsed as a full URL, test it directly as a path
    return versionRegex.test(host)
    return regex.test(host)
  }
}
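
// Illustrative sketch, not part of the diff: expected results of hasAPIVersion given the
// pattern above; full URLs and bare paths are both accepted.
//
//   hasAPIVersion('https://api.example.com/v1')       // true
//   hasAPIVersion('https://api.example.com/v1/chat')  // true (a mid-path version also counts)
//   hasAPIVersion('https://api.example.com/api')      // false
//   hasAPIVersion('/v3alpha/resources')               // true (not a full URL, tested as a path)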

@@ -55,7 +66,7 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
 * Formats an API host URL by normalizing it and optionally appending an API version.
 *
 * @param host - The API host URL to format. Leading/trailing whitespace will be trimmed and trailing slashes removed.
 * @param isSupportedAPIVerion - Whether the API version is supported. Defaults to `true`.
 * @param supportApiVersion - Whether the API version is supported. Defaults to `true`.
 * @param apiVersion - The API version to append if needed. Defaults to `'v1'`.
 *
 * @returns The formatted API host URL. If the host is empty after normalization, returns an empty string.
@@ -67,13 +78,13 @@ export function withoutTrailingSlash<T extends string>(url: T): T {
 * formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#'
 * formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2'
 */
export function formatApiHost(host?: string, isSupportedAPIVerion: boolean = true, apiVersion: string = 'v1'): string {
export function formatApiHost(host?: string, supportApiVersion: boolean = true, apiVersion: string = 'v1'): string {
  const normalizedHost = withoutTrailingSlash(trim(host))
  if (!normalizedHost) {
    return ''
  }

  if (normalizedHost.endsWith('#') || !isSupportedAPIVerion || hasAPIVersion(normalizedHost)) {
  if (normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost)) {
    return normalizedHost
  }
  return `${normalizedHost}/${apiVersion}`
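
// Illustrative sketch, not part of the diff: behavior of formatApiHost with the renamed
// supportApiVersion flag, following the JSDoc examples above.
//
//   formatApiHost('https://api.example.com/')               // 'https://api.example.com/v1'
//   formatApiHost('https://api.example.com', false)         // 'https://api.example.com' (no version appended)
//   formatApiHost('https://api.example.com/v2', true, 'v1') // 'https://api.example.com/v2' (already versioned)
//   formatApiHost('https://api.example.com#')               // 'https://api.example.com#' (trailing '#' opts out)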

@@ -213,3 +224,50 @@ export function splitApiKeyString(keyStr: string): string[] {
    .map((k) => k.replace(/\\,/g, ','))
    .filter((k) => k)
}

/**
 * Extracts the trailing API version segment from a URL path.
 *
 * This function extracts API version patterns (e.g., `v1`, `v2beta`) from the end of a URL.
 * Only versions at the end of the path are extracted, not versions in the middle.
 * The returned version string does not include leading or trailing slashes.
 *
 * @param {string} url - The URL string to parse.
 * @returns {string | undefined} The trailing API version found (e.g., 'v1', 'v2beta'), or undefined if none found.
 *
 * @example
 * getTrailingApiVersion('https://api.example.com/v1') // 'v1'
 * getTrailingApiVersion('https://api.example.com/v2beta/') // 'v2beta'
 * getTrailingApiVersion('https://api.example.com/v1/chat') // undefined (version not at end)
 * getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/v1beta') // 'v1beta'
 * getTrailingApiVersion('https://api.example.com') // undefined
 */
export function getTrailingApiVersion(url: string): string | undefined {
  const match = url.match(TRAILING_VERSION_REGEX)

  if (match) {
    // Extract version without leading slash and trailing slash
    return match[0].replace(/^\//, '').replace(/\/$/, '')
  }

  return undefined
}

/**
 * Removes the trailing API version segment from a URL path.
 *
 * This function removes API version patterns (e.g., `/v1`, `/v2beta`) from the end of a URL.
 * Only versions at the end of the path are removed, not versions in the middle.
 *
 * @param {string} url - The URL string to process.
 * @returns {string} The URL with the trailing API version removed, or the original URL if no trailing version found.
 *
 * @example
 * withoutTrailingApiVersion('https://api.example.com/v1') // 'https://api.example.com'
 * withoutTrailingApiVersion('https://api.example.com/v2beta/') // 'https://api.example.com'
 * withoutTrailingApiVersion('https://api.example.com/v1/chat') // 'https://api.example.com/v1/chat' (no change)
 * withoutTrailingApiVersion('https://api.example.com') // 'https://api.example.com'
 */
export function withoutTrailingApiVersion(url: string): string {
  return url.replace(TRAILING_VERSION_REGEX, '')
}
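
// Illustrative sketch, not part of the diff: the two helpers are complementary, so a URL
// with a trailing version can be split and reassembled.
//
//   const url = 'https://api.example.com/service/v2beta'
//   const version = getTrailingApiVersion(url)   // 'v2beta'
//   const base = withoutTrailingApiVersion(url)  // 'https://api.example.com/service'
//   `${base}/${version}` === url                 // true for this input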

@@ -1,3 +1,15 @@
import { loggerService } from '@logger'

const logger = loggerService.withContext('utils/dom')

interface ChromiumScrollIntoViewOptions extends ScrollIntoViewOptions {
  /**
   * @see https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView#container
   * @see https://github.com/microsoft/TypeScript/issues/62803
   */
  container?: 'all' | 'nearest'
}
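
// Illustrative sketch, not part of the diff: the container option is a Chromium extension
// that the bundled TypeScript DOM typings do not yet include (see the links above), so the
// widened interface lets call sites pass it without casting. The element variable below is
// hypothetical.
//
//   scrollIntoView(messageElement, { block: 'nearest', container: 'nearest' })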

/**
 * Simple wrapper for scrollIntoView with common default options.
 * Provides a unified interface with sensible defaults.
@@ -5,7 +17,12 @@
 * @param element - The target element to scroll into view
 * @param options - Scroll options. If not provided, uses { behavior: 'smooth', block: 'center', inline: 'nearest' }
 */
export function scrollIntoView(element: HTMLElement, options?: ScrollIntoViewOptions): void {
export function scrollIntoView(element: HTMLElement, options?: ChromiumScrollIntoViewOptions): void {
  if (!element) {
    logger.warn('[scrollIntoView] Unexpected falsy element. Do nothing as fallback.')
    return
  }

  const defaultOptions: ScrollIntoViewOptions = {
    behavior: 'smooth',
    block: 'center',

@@ -566,3 +566,54 @@ export const makeSvgSizeAdaptive = (element: Element): Element => {

  return element
}

/**
 * Converts an image Blob to a PNG Blob
 * @param blob The source image Blob
 * @returns Promise<Blob> The converted PNG Blob
 */
export const convertImageToPng = async (blob: Blob): Promise<Blob> => {
  if (blob.type === 'image/png') {
    return blob
  }

  return new Promise((resolve, reject) => {
    const img = new Image()
    const url = URL.createObjectURL(blob)

    img.onload = () => {
      try {
        const canvas = document.createElement('canvas')
        canvas.width = img.width
        canvas.height = img.height
        const ctx = canvas.getContext('2d')

        if (!ctx) {
          URL.revokeObjectURL(url)
          reject(new Error('Failed to get canvas context'))
          return
        }

        ctx.drawImage(img, 0, 0)
        canvas.toBlob((pngBlob) => {
          URL.revokeObjectURL(url)
          if (pngBlob) {
            resolve(pngBlob)
          } else {
            reject(new Error('Failed to convert image to png'))
          }
        }, 'image/png')
      } catch (error) {
        URL.revokeObjectURL(url)
        reject(error)
      }
    }

    img.onerror = () => {
      URL.revokeObjectURL(url)
      reject(new Error('Failed to load image for conversion'))
    }

    img.src = url
  })
}
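
// Illustrative sketch, not part of the diff: a typical call site, e.g. normalizing a pasted
// image before writing it to the clipboard, where browsers commonly only accept PNG image
// blobs. The sourceBlob variable is hypothetical.
//
//   const pngBlob = await convertImageToPng(sourceBlob)
//   await navigator.clipboard.write([new ClipboardItem({ 'image/png': pngBlob })])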

@@ -90,7 +90,8 @@ export function openAIToolsToMcpTool(
    return undefined
  }
  const tools = mcpTools.filter((mcpTool) => {
    return mcpTool.id === toolName || mcpTool.name === toolName
    // toolName is mcpTool.id (registered with id as function name)
    return mcpTool.id === toolName
  })
  if (tools.length > 1) {
    logger.warn(`Multiple MCP Tools found for tool call: ${toolName}`)

@@ -256,6 +256,17 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {

    let blockId: string | null = null
    let thinkingBlockId: string | null = null
    let thinkingStartTime: number | null = null

    const resolveThinkingDuration = (duration?: number) => {
      if (typeof duration === 'number' && Number.isFinite(duration)) {
        return duration
      }
      if (thinkingStartTime !== null) {
        return Math.max(0, performance.now() - thinkingStartTime)
      }
      return 0
    }
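
// Illustrative sketch, not part of the diff: how the fallback resolves, assuming
// thinkingStartTime was set to performance.now() when THINKING_START arrived.
//
//   resolveThinkingDuration(1234)       // 1234 (a finite provider-reported value wins)
//   resolveThinkingDuration(undefined)  // elapsed milliseconds since THINKING_START
//   resolveThinkingDuration(NaN)        // also falls back to the local timer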

    setIsLoading(true)
    setIsOutputted(false)
@@ -293,6 +304,7 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
        case ChunkType.THINKING_START:
          {
            setIsOutputted(true)
            thinkingStartTime = performance.now()
            if (thinkingBlockId) {
              store.dispatch(
                updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -317,9 +329,13 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
          {
            setIsOutputted(true)
            if (thinkingBlockId) {
              if (thinkingStartTime === null) {
                thinkingStartTime = performance.now()
              }
              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
              throttledBlockUpdate(thinkingBlockId, {
                content: chunk.text,
                thinking_millsec: chunk.thinking_millsec
                thinking_millsec: thinkingDuration
              })
            }
          }
@@ -327,14 +343,17 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
        case ChunkType.THINKING_COMPLETE:
          {
            if (thinkingBlockId) {
              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
              cancelThrottledBlockUpdate(thinkingBlockId)
              store.dispatch(
                updateOneBlock({
                  id: thinkingBlockId,
                  changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: chunk.thinking_millsec }
                  changes: { status: MessageBlockStatus.SUCCESS, thinking_millsec: thinkingDuration }
                })
              )
            }
            thinkingStartTime = null
            thinkingBlockId = null
          }
          break
        case ChunkType.TEXT_START:
@@ -406,6 +425,8 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
            if (!isAborted) {
              throw new Error(chunk.error.message)
            }
            thinkingStartTime = null
            thinkingBlockId = null
          }
          //fall through
        case ChunkType.BLOCK_COMPLETE:

@@ -41,8 +41,19 @@ export const processMessages = async (

  let textBlockId: string | null = null
  let thinkingBlockId: string | null = null
  let thinkingStartTime: number | null = null
  let textBlockContent: string = ''

  const resolveThinkingDuration = (duration?: number) => {
    if (typeof duration === 'number' && Number.isFinite(duration)) {
      return duration
    }
    if (thinkingStartTime !== null) {
      return Math.max(0, performance.now() - thinkingStartTime)
    }
    return 0
  }

  const assistantMessage = getAssistantMessage({
    assistant,
    topic
@@ -79,6 +90,7 @@ export const processMessages = async (
      switch (chunk.type) {
        case ChunkType.THINKING_START:
          {
            thinkingStartTime = performance.now()
            if (thinkingBlockId) {
              store.dispatch(
                updateOneBlock({ id: thinkingBlockId, changes: { status: MessageBlockStatus.STREAMING } })
@@ -102,9 +114,13 @@ export const processMessages = async (
        case ChunkType.THINKING_DELTA:
          {
            if (thinkingBlockId) {
              if (thinkingStartTime === null) {
                thinkingStartTime = performance.now()
              }
              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
              throttledBlockUpdate(thinkingBlockId, {
                content: chunk.text,
                thinking_millsec: chunk.thinking_millsec
                thinking_millsec: thinkingDuration
              })
            }
            onStream()
@@ -113,6 +129,7 @@ export const processMessages = async (
        case ChunkType.THINKING_COMPLETE:
          {
            if (thinkingBlockId) {
              const thinkingDuration = resolveThinkingDuration(chunk.thinking_millsec)
              cancelThrottledBlockUpdate(thinkingBlockId)
              store.dispatch(
                updateOneBlock({
@@ -120,12 +137,13 @@ export const processMessages = async (
                  changes: {
                    content: chunk.text,
                    status: MessageBlockStatus.SUCCESS,
                    thinking_millsec: chunk.thinking_millsec
                    thinking_millsec: thinkingDuration
                  }
                })
              )
              thinkingBlockId = null
            }
            thinkingStartTime = null
          }
          break
        case ChunkType.TEXT_START:
@@ -190,6 +208,7 @@ export const processMessages = async (
        case ChunkType.ERROR:
          {
            const blockId = textBlockId || thinkingBlockId
            thinkingStartTime = null
            if (blockId) {
              store.dispatch(
                updateOneBlock({

@@ -284,6 +284,54 @@ describe('processMessages', () => {
    })
  })

  describe('thinking timer fallback', () => {
    it('should use local timer when thinking_millsec is missing', async () => {
      const nowValues = [1000, 1500, 2000]
      let nowIndex = 0
      const performanceSpy = vi.spyOn(performance, 'now').mockImplementation(() => {
        const value = nowValues[Math.min(nowIndex, nowValues.length - 1)]
        nowIndex += 1
        return value
      })

      const mockChunks = [
        { type: ChunkType.THINKING_START },
        { type: ChunkType.THINKING_DELTA, text: 'Thinking...' },
        { type: ChunkType.THINKING_COMPLETE, text: 'Done thinking' },
        { type: ChunkType.TEXT_START },
        { type: ChunkType.TEXT_COMPLETE, text: 'Final answer' },
        { type: ChunkType.BLOCK_COMPLETE }
      ]

      vi.mocked(fetchChatCompletion).mockImplementation(async ({ onChunkReceived }: any) => {
        for (const chunk of mockChunks) {
          await onChunkReceived(chunk)
        }
      })

      await processMessages(
        mockAssistant,
        mockTopic,
        'test prompt',
        mockSetAskId,
        mockOnStream,
        mockOnFinish,
        mockOnError
      )

      const thinkingDeltaCall = vi.mocked(throttledBlockUpdate).mock.calls.find(([id]) => id === 'thinking-block-1')
      const deltaPayload = thinkingDeltaCall?.[1] as { thinking_millsec?: number } | undefined
      expect(deltaPayload?.thinking_millsec).toBe(500)

      const thinkingCompleteUpdate = vi
        .mocked(updateOneBlock)
        .mock.calls.find(([payload]) => (payload as any)?.changes?.thinking_millsec !== undefined)
      expect((thinkingCompleteUpdate?.[0] as any)?.changes?.thinking_millsec).toBe(1000)

      performanceSpy.mockRestore()
    })
  })

  describe('stream with exceptions', () => {
    it('should handle error chunks properly', async () => {
      const mockError = new Error('Stream processing error')