Compare commits
53 Commits
v1.7.0
...
feat/proxy
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4d77202afd | ||
|
|
8c9d79a7d4 | ||
|
|
fb9a8e7e2c | ||
|
|
14cc38a626 | ||
|
|
a231952969 | ||
|
|
874d69291f | ||
|
|
4a913fcef7 | ||
|
|
b3a58ec321 | ||
|
|
0097ca80e2 | ||
|
|
4c1466cd27 | ||
|
|
d968df4612 | ||
|
|
2bd680361a | ||
|
|
cc676d4bef | ||
|
|
3b1155b538 | ||
|
|
03ff6e1ca6 | ||
|
|
350519ac0a | ||
|
|
706fac898a | ||
|
|
f5c144404d | ||
|
|
50a217a638 | ||
|
|
444c13e1e3 | ||
|
|
255b19d6ee | ||
|
|
3989229f61 | ||
|
|
c6c7c240a3 | ||
|
|
f1f4831157 | ||
|
|
35cfc7c517 | ||
|
|
e255a992cc | ||
|
|
876f59d650 | ||
|
|
c23e88ecd1 | ||
|
|
284d0f99e1 | ||
|
|
13ac5d564a | ||
|
|
e8dccf51fe | ||
|
|
ed769ac4f7 | ||
|
|
95c18d192a | ||
|
|
534d27f37e | ||
|
|
313be4427b | ||
|
|
9d34098a53 | ||
|
|
d367040fd4 | ||
|
|
356e828422 | ||
|
|
ce25001590 | ||
|
|
77c1b77113 | ||
|
|
f163c4d3ee | ||
|
|
0f6ec3e061 | ||
|
|
5d1d2b7a9b | ||
|
|
15c0a3881c | ||
|
|
dad9cc95ad | ||
|
|
f02c0fe962 | ||
|
|
4c4102da20 | ||
|
|
2a1adfe322 | ||
|
|
36ed062b84 | ||
|
|
f225fbe3e3 | ||
|
|
ccfb9423e0 | ||
|
|
192357a32e | ||
|
|
a5e7aa1342 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -73,3 +73,5 @@ test-results
|
||||
YOUR_MEMORY_FILE_PATH
|
||||
|
||||
.sessions/
|
||||
.next/
|
||||
*.tsbuildinfo
|
||||
|
||||
10
CLAUDE.md
10
CLAUDE.md
@@ -12,7 +12,15 @@ This file provides guidance to AI coding assistants when working with code in th
|
||||
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
|
||||
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
|
||||
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).
|
||||
- **Follow PR template**: When submitting pull requests, follow the template in `.github/pull_request_template.md` to ensure complete context and documentation.
|
||||
|
||||
## Pull Request Workflow (CRITICAL)
|
||||
|
||||
When creating a Pull Request, you MUST:
|
||||
|
||||
1. **Read the PR template first**: Always read `.github/pull_request_template.md` before creating the PR
|
||||
2. **Follow ALL template sections**: Structure the `--body` parameter to include every section from the template
|
||||
3. **Never skip sections**: Include all sections even if marking them as N/A or "None"
|
||||
4. **Use proper formatting**: Match the template's markdown structure exactly (headings, checkboxes, code blocks)
|
||||
|
||||
## Development Commands
|
||||
|
||||
|
||||
@@ -134,9 +134,9 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
|
||||
releaseInfo:
|
||||
releaseNotes: |
|
||||
<!--LANG:en-->
|
||||
A New Era of Intelligence with Cherry Studio 1.7.0
|
||||
A New Era of Intelligence with Cherry Studio 1.7.1
|
||||
|
||||
Today we're releasing Cherry Studio 1.7.0 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
|
||||
Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
|
||||
|
||||
For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
|
||||
|
||||
@@ -187,9 +187,9 @@ releaseInfo:
|
||||
The Agent Era is here. We can't wait to see what you'll create.
|
||||
|
||||
<!--LANG:zh-CN-->
|
||||
Cherry Studio 1.7.0:开启智能新纪元
|
||||
Cherry Studio 1.7.1:开启智能新纪元
|
||||
|
||||
今天,我们正式发布 Cherry Studio 1.7.0 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
|
||||
今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
|
||||
|
||||
多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
|
||||
|
||||
|
||||
@@ -25,7 +25,10 @@ export default defineConfig({
|
||||
'@shared': resolve('packages/shared'),
|
||||
'@logger': resolve('src/main/services/LoggerService'),
|
||||
'@mcp-trace/trace-core': resolve('packages/mcp-trace/trace-core'),
|
||||
'@mcp-trace/trace-node': resolve('packages/mcp-trace/trace-node')
|
||||
'@mcp-trace/trace-node': resolve('packages/mcp-trace/trace-node'),
|
||||
'@cherrystudio/ai-core/provider': resolve('packages/aiCore/src/core/providers'),
|
||||
'@cherrystudio/ai-core': resolve('packages/aiCore/src'),
|
||||
'@cherrystudio/ai-sdk-provider': resolve('packages/ai-sdk-provider/src')
|
||||
}
|
||||
},
|
||||
build: {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "CherryStudio",
|
||||
"version": "1.7.0",
|
||||
"version": "1.7.1",
|
||||
"private": true,
|
||||
"description": "A powerful AI assistant for producer.",
|
||||
"main": "./out/main/index.js",
|
||||
|
||||
@@ -69,6 +69,7 @@ export interface CherryInProviderSettings {
|
||||
headers?: HeadersInput
|
||||
/**
|
||||
* Optional endpoint type to distinguish different endpoint behaviors.
|
||||
* "image-generation" is also openai endpoint, but specifically for image generation.
|
||||
*/
|
||||
endpointType?: 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
|
||||
}
|
||||
|
||||
@@ -9,13 +9,27 @@
|
||||
*/
|
||||
|
||||
import Anthropic from '@anthropic-ai/sdk'
|
||||
import type { TextBlockParam } from '@anthropic-ai/sdk/resources'
|
||||
import type { MessageCreateParams, TextBlockParam, Tool as AnthropicTool } from '@anthropic-ai/sdk/resources'
|
||||
import { loggerService } from '@logger'
|
||||
import type { Provider } from '@types'
|
||||
import { type Provider, SystemProviderIds } from '@types'
|
||||
import type { ModelMessage } from 'ai'
|
||||
|
||||
const logger = loggerService.withContext('anthropic-sdk')
|
||||
|
||||
/**
|
||||
* Context for Anthropic SDK client creation.
|
||||
* This allows the shared module to be used in different environments
|
||||
* by providing environment-specific implementations.
|
||||
*/
|
||||
export interface AnthropicSdkContext {
|
||||
/**
|
||||
* Custom fetch function to use for HTTP requests.
|
||||
* In Electron main process, this should be `net.fetch`.
|
||||
* In other environments, can use the default fetch or a custom implementation.
|
||||
*/
|
||||
fetch?: typeof globalThis.fetch
|
||||
}
|
||||
|
||||
const defaultClaudeCodeSystemPrompt = `You are Claude Code, Anthropic's official CLI for Claude.`
|
||||
|
||||
const defaultClaudeCodeSystem: Array<TextBlockParam> = [
|
||||
@@ -58,8 +72,11 @@ const defaultClaudeCodeSystem: Array<TextBlockParam> = [
|
||||
export function getSdkClient(
|
||||
provider: Provider,
|
||||
oauthToken?: string | null,
|
||||
extraHeaders?: Record<string, string | string[]>
|
||||
extraHeaders?: Record<string, string | string[]>,
|
||||
context?: AnthropicSdkContext
|
||||
): Anthropic {
|
||||
const customFetch = context?.fetch
|
||||
|
||||
if (provider.authType === 'oauth') {
|
||||
if (!oauthToken) {
|
||||
throw new Error('OAuth token is not available')
|
||||
@@ -85,7 +102,8 @@ export function getSdkClient(
|
||||
'x-stainless-runtime': 'node',
|
||||
'x-stainless-runtime-version': 'v22.18.0',
|
||||
...extraHeaders
|
||||
}
|
||||
},
|
||||
fetch: customFetch
|
||||
})
|
||||
}
|
||||
let baseURL =
|
||||
@@ -106,11 +124,12 @@ export function getSdkClient(
|
||||
baseURL,
|
||||
dangerouslyAllowBrowser: true,
|
||||
defaultHeaders: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19',
|
||||
'anthropic-beta': 'interleaved-thinking-2025-05-14',
|
||||
'APP-Code': 'MLTG2087',
|
||||
...provider.extra_headers,
|
||||
...extraHeaders
|
||||
}
|
||||
},
|
||||
fetch: customFetch
|
||||
})
|
||||
}
|
||||
|
||||
@@ -120,9 +139,11 @@ export function getSdkClient(
|
||||
baseURL,
|
||||
dangerouslyAllowBrowser: true,
|
||||
defaultHeaders: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19',
|
||||
'anthropic-beta': 'interleaved-thinking-2025-05-14',
|
||||
Authorization: provider.id === SystemProviderIds.longcat ? `Bearer ${provider.apiKey}` : undefined,
|
||||
...provider.extra_headers
|
||||
}
|
||||
},
|
||||
fetch: customFetch
|
||||
})
|
||||
}
|
||||
|
||||
@@ -173,3 +194,31 @@ export function buildClaudeCodeSystemModelMessage(system?: string | Array<TextBl
|
||||
content: block.text
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize tool definitions for Anthropic API.
|
||||
*
|
||||
* Removes non-standard fields like `input_examples` from tool definitions
|
||||
* that Anthropic's API doesn't support. This prevents validation errors when
|
||||
* tools with extended fields are passed to the Anthropic SDK.
|
||||
*
|
||||
* @param tools - Array of tool definitions from MessageCreateParams
|
||||
* @returns Sanitized tools array with non-standard fields removed
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* const sanitizedTools = sanitizeToolsForAnthropic(request.tools)
|
||||
* ```
|
||||
*/
|
||||
export function sanitizeToolsForAnthropic(tools?: MessageCreateParams['tools']): MessageCreateParams['tools'] {
|
||||
if (!tools || tools.length === 0) return tools
|
||||
|
||||
return tools.map((tool) => {
|
||||
if ('type' in tool && tool.type !== 'custom') return tool
|
||||
|
||||
// oxlint-disable-next-line no-unused-vars
|
||||
const { input_examples, ...sanitizedTool } = tool as AnthropicTool & { input_examples?: unknown }
|
||||
|
||||
return sanitizedTool as typeof tool
|
||||
})
|
||||
}
|
||||
|
||||
245
packages/shared/api/index.ts
Normal file
245
packages/shared/api/index.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
/**
|
||||
* Shared API Utilities
|
||||
*
|
||||
* Common utilities for API URL formatting and validation.
|
||||
* Used by both main process (API Server) and renderer.
|
||||
*/
|
||||
|
||||
import type { MinimalProvider } from '@shared/provider'
|
||||
import { trim } from 'lodash'
|
||||
|
||||
// Supported endpoints for routing
|
||||
export const SUPPORTED_IMAGE_ENDPOINT_LIST = ['images/generations', 'images/edits', 'predict'] as const
|
||||
export const SUPPORTED_ENDPOINT_LIST = [
|
||||
'chat/completions',
|
||||
'responses',
|
||||
'messages',
|
||||
'generateContent',
|
||||
'streamGenerateContent',
|
||||
...SUPPORTED_IMAGE_ENDPOINT_LIST
|
||||
] as const
|
||||
|
||||
/**
|
||||
* Removes the trailing slash from a URL string if it exists.
|
||||
*/
|
||||
export function withoutTrailingSlash<T extends string>(url: T): T {
|
||||
return url.replace(/\/$/, '') as T
|
||||
}
|
||||
|
||||
/**
|
||||
* Matches a version segment in a path that starts with `/v<number>` and optionally
|
||||
* continues with `alpha` or `beta`. The segment may be followed by `/` or the end
|
||||
* of the string (useful for cases like `/v3alpha/resources`).
|
||||
*/
|
||||
const VERSION_REGEX_PATTERN = '\\/v\\d+(?:alpha|beta)?(?=\\/|$)'
|
||||
|
||||
/**
|
||||
* Matches an API version at the end of a URL (with optional trailing slash).
|
||||
* Used to detect and extract versions only from the trailing position.
|
||||
*/
|
||||
const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i
|
||||
|
||||
/**
|
||||
* 判断 host 的 path 中是否包含形如版本的字符串(例如 /v1、/v2beta 等),
|
||||
*
|
||||
* @param host - 要检查的 host 或 path 字符串
|
||||
* @returns 如果 path 中包含版本字符串则返回 true,否则 false
|
||||
*/
|
||||
export function hasAPIVersion(host?: string): boolean {
|
||||
if (!host) return false
|
||||
|
||||
const regex = new RegExp(VERSION_REGEX_PATTERN, 'i')
|
||||
|
||||
try {
|
||||
const url = new URL(host)
|
||||
return regex.test(url.pathname)
|
||||
} catch {
|
||||
// 若无法作为完整 URL 解析,则当作路径直接检测
|
||||
return regex.test(host)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 格式化 Azure OpenAI 的 API 主机地址。
|
||||
*/
|
||||
export function formatAzureOpenAIApiHost(host: string): string {
|
||||
const normalizedHost = withoutTrailingSlash(host)
|
||||
?.replace(/\/v1$/, '')
|
||||
.replace(/\/openai$/, '')
|
||||
// NOTE: AISDK会添加上`v1`
|
||||
return formatApiHost(normalizedHost + '/openai', false)
|
||||
}
|
||||
|
||||
export function formatVertexApiHost(
|
||||
provider: MinimalProvider,
|
||||
project: string = 'test-project',
|
||||
location: string = 'us-central1'
|
||||
): string {
|
||||
const { apiHost } = provider
|
||||
const trimmedHost = withoutTrailingSlash(trim(apiHost))
|
||||
if (!trimmedHost || trimmedHost.endsWith('aiplatform.googleapis.com')) {
|
||||
const host =
|
||||
location === 'global' ? 'https://aiplatform.googleapis.com' : `https://${location}-aiplatform.googleapis.com`
|
||||
return `${formatApiHost(host)}/projects/${project}/locations/${location}`
|
||||
}
|
||||
return formatApiHost(trimmedHost)
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats an API host URL by normalizing it and optionally appending an API version.
|
||||
*
|
||||
* @param host - The API host URL to format. Leading/trailing whitespace will be trimmed and trailing slashes removed.
|
||||
* @param supportApiVersion - Whether the API version is supported. Defaults to `true`.
|
||||
* @param apiVersion - The API version to append if needed. Defaults to `'v1'`.
|
||||
*
|
||||
* @returns The formatted API host URL. If the host is empty after normalization, returns an empty string.
|
||||
* If the host ends with '#', API version is not supported, or the host already contains a version, returns the normalized host as-is.
|
||||
* Otherwise, returns the host with the API version appended.
|
||||
*
|
||||
* @example
|
||||
* formatApiHost('https://api.example.com/') // Returns 'https://api.example.com/v1'
|
||||
* formatApiHost('https://api.example.com#') // Returns 'https://api.example.com#'
|
||||
* formatApiHost('https://api.example.com/v2', true, 'v1') // Returns 'https://api.example.com/v2'
|
||||
*/
|
||||
export function formatApiHost(host?: string, supportApiVersion: boolean = true, apiVersion: string = 'v1'): string {
|
||||
const normalizedHost = withoutTrailingSlash(trim(host))
|
||||
if (!normalizedHost) {
|
||||
return ''
|
||||
}
|
||||
|
||||
if (normalizedHost.endsWith('#') || !supportApiVersion || hasAPIVersion(normalizedHost)) {
|
||||
return normalizedHost
|
||||
}
|
||||
return `${normalizedHost}/${apiVersion}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an API host URL into separate base URL and endpoint components.
|
||||
*
|
||||
* This function extracts endpoint information from a composite API host string.
|
||||
* If the host ends with '#', it attempts to match the preceding part against the supported endpoint list.
|
||||
*
|
||||
* @param apiHost - The API host string to parse
|
||||
* @returns An object containing:
|
||||
* - `baseURL`: The base URL without the endpoint suffix
|
||||
* - `endpoint`: The matched endpoint identifier, or empty string if no match found
|
||||
*
|
||||
* @example
|
||||
* routeToEndpoint('https://api.example.com/openai/chat/completions#')
|
||||
* // Returns: { baseURL: 'https://api.example.com/v1', endpoint: 'chat/completions' }
|
||||
*
|
||||
* @example
|
||||
* routeToEndpoint('https://api.example.com/v1')
|
||||
* // Returns: { baseURL: 'https://api.example.com/v1', endpoint: '' }
|
||||
*/
|
||||
export function routeToEndpoint(apiHost: string): { baseURL: string; endpoint: string } {
|
||||
const trimmedHost = (apiHost || '').trim()
|
||||
if (!trimmedHost.endsWith('#')) {
|
||||
return { baseURL: trimmedHost, endpoint: '' }
|
||||
}
|
||||
// Remove trailing #
|
||||
const host = trimmedHost.slice(0, -1)
|
||||
const endpointMatch = SUPPORTED_ENDPOINT_LIST.find((endpoint) => host.endsWith(endpoint))
|
||||
if (!endpointMatch) {
|
||||
const baseURL = withoutTrailingSlash(host)
|
||||
return { baseURL, endpoint: '' }
|
||||
}
|
||||
const baseSegment = host.slice(0, host.length - endpointMatch.length)
|
||||
const baseURL = withoutTrailingSlash(baseSegment).replace(/:$/, '') // Remove trailing colon (gemini special case)
|
||||
return { baseURL, endpoint: endpointMatch }
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the AI SDK compatible base URL from a provider's apiHost.
|
||||
*
|
||||
* AI SDK expects baseURL WITH version suffix (e.g., /v1).
|
||||
* This function:
|
||||
* 1. Handles '#' endpoint routing format
|
||||
* 2. Ensures the URL has a version suffix (adds /v1 if missing)
|
||||
*
|
||||
* @param apiHost - The provider's apiHost value (may or may not have /v1)
|
||||
* @param apiVersion - The API version to use if missing. Defaults to 'v1'.
|
||||
* @returns The baseURL suitable for AI SDK (with version suffix)
|
||||
*
|
||||
* @example
|
||||
* getAiSdkBaseUrl('https://api.openai.com') // 'https://api.openai.com/v1'
|
||||
* getAiSdkBaseUrl('https://api.openai.com/v1') // 'https://api.openai.com/v1'
|
||||
* getAiSdkBaseUrl('https://api.example.com/chat/completions#') // 'https://api.example.com'
|
||||
*/
|
||||
export function getAiSdkBaseUrl(apiHost: string, apiVersion: string = 'v1'): string {
|
||||
// First handle '#' endpoint routing format
|
||||
const { baseURL } = routeToEndpoint(apiHost)
|
||||
|
||||
// If already has version, return as-is
|
||||
if (hasAPIVersion(baseURL)) {
|
||||
return withoutTrailingSlash(baseURL)
|
||||
}
|
||||
|
||||
// Add version suffix
|
||||
return `${withoutTrailingSlash(baseURL)}/${apiVersion}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates an API host address.
|
||||
*
|
||||
* @param apiHost - The API host address to validate
|
||||
* @returns true if valid URL with http/https protocol, false otherwise
|
||||
*/
|
||||
export function validateApiHost(apiHost: string): boolean {
|
||||
if (!apiHost || !apiHost.trim()) {
|
||||
return true // Allow empty
|
||||
}
|
||||
try {
|
||||
const url = new URL(apiHost.trim())
|
||||
return url.protocol === 'http:' || url.protocol === 'https:'
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the trailing API version segment from a URL path.
|
||||
*
|
||||
* This function extracts API version patterns (e.g., `v1`, `v2beta`) from the end of a URL.
|
||||
* Only versions at the end of the path are extracted, not versions in the middle.
|
||||
* The returned version string does not include leading or trailing slashes.
|
||||
*
|
||||
* @param {string} url - The URL string to parse.
|
||||
* @returns {string | undefined} The trailing API version found (e.g., 'v1', 'v2beta'), or undefined if none found.
|
||||
*
|
||||
* @example
|
||||
* getTrailingApiVersion('https://api.example.com/v1') // 'v1'
|
||||
* getTrailingApiVersion('https://api.example.com/v2beta/') // 'v2beta'
|
||||
* getTrailingApiVersion('https://api.example.com/v1/chat') // undefined (version not at end)
|
||||
* getTrailingApiVersion('https://gateway.ai.cloudflare.com/v1/xxx/v1beta') // 'v1beta'
|
||||
* getTrailingApiVersion('https://api.example.com') // undefined
|
||||
*/
|
||||
export function getTrailingApiVersion(url: string): string | undefined {
|
||||
const match = url.match(TRAILING_VERSION_REGEX)
|
||||
|
||||
if (match) {
|
||||
// Extract version without leading slash and trailing slash
|
||||
return match[0].replace(/^\//, '').replace(/\/$/, '')
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the trailing API version segment from a URL path.
|
||||
*
|
||||
* This function removes API version patterns (e.g., `/v1`, `/v2beta`) from the end of a URL.
|
||||
* Only versions at the end of the path are removed, not versions in the middle.
|
||||
*
|
||||
* @param {string} url - The URL string to process.
|
||||
* @returns {string} The URL with the trailing API version removed, or the original URL if no trailing version found.
|
||||
*
|
||||
* @example
|
||||
* withoutTrailingApiVersion('https://api.example.com/v1') // 'https://api.example.com'
|
||||
* withoutTrailingApiVersion('https://api.example.com/v2beta/') // 'https://api.example.com'
|
||||
* withoutTrailingApiVersion('https://api.example.com/v1/chat') // 'https://api.example.com/v1/chat' (no change)
|
||||
* withoutTrailingApiVersion('https://api.example.com') // 'https://api.example.com'
|
||||
*/
|
||||
export function withoutTrailingApiVersion(url: string): string {
|
||||
return url.replace(TRAILING_VERSION_REGEX, '')
|
||||
}
|
||||
@@ -43,6 +43,35 @@ export function isSiliconAnthropicCompatibleModel(modelId: string): boolean {
|
||||
}
|
||||
|
||||
/**
|
||||
* Silicon provider's Anthropic API host URL.
|
||||
* PPIO provider models that support Anthropic API endpoint.
|
||||
* These models can be used with Claude Code via the Anthropic-compatible API.
|
||||
*
|
||||
* @see https://ppio.com/docs/model/llm-anthropic-compatibility
|
||||
*/
|
||||
export const SILICON_ANTHROPIC_API_HOST = 'https://api.siliconflow.cn'
|
||||
export const PPIO_ANTHROPIC_COMPATIBLE_MODELS: readonly string[] = [
|
||||
'moonshotai/kimi-k2-thinking',
|
||||
'minimax/minimax-m2',
|
||||
'deepseek/deepseek-v3.2-exp',
|
||||
'deepseek/deepseek-v3.1-terminus',
|
||||
'zai-org/glm-4.6',
|
||||
'moonshotai/kimi-k2-0905',
|
||||
'deepseek/deepseek-v3.1',
|
||||
'moonshotai/kimi-k2-instruct',
|
||||
'qwen/qwen3-next-80b-a3b-instruct',
|
||||
'qwen/qwen3-next-80b-a3b-thinking'
|
||||
]
|
||||
|
||||
/**
|
||||
* Creates a Set for efficient lookup of PPIO Anthropic-compatible model IDs.
|
||||
*/
|
||||
const PPIO_ANTHROPIC_COMPATIBLE_MODEL_SET = new Set(PPIO_ANTHROPIC_COMPATIBLE_MODELS)
|
||||
|
||||
/**
|
||||
* Checks if a model ID is compatible with Anthropic API on PPIO provider.
|
||||
*
|
||||
* @param modelId - The model ID to check
|
||||
* @returns true if the model supports Anthropic API endpoint
|
||||
*/
|
||||
export function isPpioAnthropicCompatibleModel(modelId: string): boolean {
|
||||
return PPIO_ANTHROPIC_COMPATIBLE_MODEL_SET.has(modelId)
|
||||
}
|
||||
|
||||
15
packages/shared/middleware/index.ts
Normal file
15
packages/shared/middleware/index.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
/**
|
||||
* Shared AI SDK Middlewares
|
||||
*
|
||||
* Environment-agnostic middlewares that can be used in both
|
||||
* renderer process and main process (API server).
|
||||
*/
|
||||
|
||||
export {
|
||||
buildSharedMiddlewares,
|
||||
getReasoningTagName,
|
||||
isGemini3ModelId,
|
||||
openrouterReasoningMiddleware,
|
||||
type SharedMiddlewareConfig,
|
||||
skipGeminiThoughtSignatureMiddleware
|
||||
} from './middlewares'
|
||||
205
packages/shared/middleware/middlewares.ts
Normal file
205
packages/shared/middleware/middlewares.ts
Normal file
@@ -0,0 +1,205 @@
|
||||
/**
|
||||
* Shared AI SDK Middlewares
|
||||
*
|
||||
* These middlewares are environment-agnostic and can be used in both
|
||||
* renderer process and main process (API server).
|
||||
*/
|
||||
import type { LanguageModelV2Middleware, LanguageModelV2StreamPart } from '@ai-sdk/provider'
|
||||
import { extractReasoningMiddleware } from 'ai'
|
||||
|
||||
/**
|
||||
* Configuration for building shared middlewares
|
||||
*/
|
||||
export interface SharedMiddlewareConfig {
|
||||
/**
|
||||
* Whether to enable reasoning extraction
|
||||
*/
|
||||
enableReasoning?: boolean
|
||||
|
||||
/**
|
||||
* Tag name for reasoning extraction
|
||||
* Defaults based on model ID
|
||||
*/
|
||||
reasoningTagName?: string
|
||||
|
||||
/**
|
||||
* Model ID - used to determine default reasoning tag and model detection
|
||||
*/
|
||||
modelId?: string
|
||||
|
||||
/**
|
||||
* Provider ID (Cherry Studio provider ID)
|
||||
* Used for provider-specific middlewares like OpenRouter
|
||||
*/
|
||||
providerId?: string
|
||||
|
||||
/**
|
||||
* AI SDK Provider ID
|
||||
* Used for Gemini thought signature middleware
|
||||
* e.g., 'google', 'google-vertex'
|
||||
*/
|
||||
aiSdkProviderId?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if model ID represents a Gemini 3 (2.5) model
|
||||
* that requires thought signature handling
|
||||
*
|
||||
* @param modelId - The model ID string (not Model object)
|
||||
*/
|
||||
export function isGemini3ModelId(modelId?: string): boolean {
|
||||
if (!modelId) return false
|
||||
const lowerModelId = modelId.toLowerCase()
|
||||
return lowerModelId.includes('gemini-3')
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default reasoning tag name based on model ID
|
||||
*
|
||||
* Different models use different tags for reasoning content:
|
||||
* - Most models: 'think'
|
||||
* - GPT-OSS models: 'reasoning'
|
||||
* - Gemini models: 'thought'
|
||||
* - Seed models: 'seed:think'
|
||||
*/
|
||||
export function getReasoningTagName(modelId?: string): string {
|
||||
if (!modelId) return 'think'
|
||||
const lowerModelId = modelId.toLowerCase()
|
||||
if (lowerModelId.includes('gpt-oss')) return 'reasoning'
|
||||
if (lowerModelId.includes('gemini')) return 'thought'
|
||||
if (lowerModelId.includes('seed-oss-36b')) return 'seed:think'
|
||||
return 'think'
|
||||
}
|
||||
|
||||
/**
|
||||
* Skip Gemini Thought Signature Middleware
|
||||
*
|
||||
* Due to the complexity of multi-model client requests (which can switch
|
||||
* to other models mid-process), this middleware skips all Gemini 3
|
||||
* thinking signatures validation.
|
||||
*
|
||||
* @param aiSdkId - AI SDK Provider ID (e.g., 'google', 'google-vertex')
|
||||
* @returns LanguageModelV2Middleware
|
||||
*/
|
||||
export function skipGeminiThoughtSignatureMiddleware(aiSdkId: string): LanguageModelV2Middleware {
|
||||
const MAGIC_STRING = 'skip_thought_signature_validator'
|
||||
return {
|
||||
middlewareVersion: 'v2',
|
||||
|
||||
transformParams: async ({ params }) => {
|
||||
const transformedParams = { ...params }
|
||||
// Process messages in prompt
|
||||
if (transformedParams.prompt && Array.isArray(transformedParams.prompt)) {
|
||||
transformedParams.prompt = transformedParams.prompt.map((message) => {
|
||||
if (typeof message.content !== 'string') {
|
||||
for (const part of message.content) {
|
||||
const googleOptions = part?.providerOptions?.[aiSdkId]
|
||||
if (googleOptions?.thoughtSignature) {
|
||||
googleOptions.thoughtSignature = MAGIC_STRING
|
||||
}
|
||||
}
|
||||
}
|
||||
return message
|
||||
})
|
||||
}
|
||||
|
||||
return transformedParams
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* OpenRouter Reasoning Middleware
|
||||
*
|
||||
* Filters out [REDACTED] blocks from OpenRouter reasoning responses.
|
||||
* OpenRouter may include [REDACTED] markers in reasoning content that
|
||||
* should be removed for cleaner output.
|
||||
*
|
||||
* @see https://openrouter.ai/docs/docs/best-practices/reasoning-tokens
|
||||
* @returns LanguageModelV2Middleware
|
||||
*/
|
||||
export function openrouterReasoningMiddleware(): LanguageModelV2Middleware {
|
||||
const REDACTED_BLOCK = '[REDACTED]'
|
||||
return {
|
||||
middlewareVersion: 'v2',
|
||||
wrapGenerate: async ({ doGenerate }) => {
|
||||
const { content, ...rest } = await doGenerate()
|
||||
const modifiedContent = content.map((part) => {
|
||||
if (part.type === 'reasoning' && part.text.includes(REDACTED_BLOCK)) {
|
||||
return {
|
||||
...part,
|
||||
text: part.text.replace(REDACTED_BLOCK, '')
|
||||
}
|
||||
}
|
||||
return part
|
||||
})
|
||||
return { content: modifiedContent, ...rest }
|
||||
},
|
||||
wrapStream: async ({ doStream }) => {
|
||||
const { stream, ...rest } = await doStream()
|
||||
return {
|
||||
stream: stream.pipeThrough(
|
||||
new TransformStream<LanguageModelV2StreamPart, LanguageModelV2StreamPart>({
|
||||
transform(
|
||||
chunk: LanguageModelV2StreamPart,
|
||||
controller: TransformStreamDefaultController<LanguageModelV2StreamPart>
|
||||
) {
|
||||
if (chunk.type === 'reasoning-delta' && chunk.delta.includes(REDACTED_BLOCK)) {
|
||||
controller.enqueue({
|
||||
...chunk,
|
||||
delta: chunk.delta.replace(REDACTED_BLOCK, '')
|
||||
})
|
||||
} else {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
}
|
||||
})
|
||||
),
|
||||
...rest
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build shared middlewares based on configuration
|
||||
*
|
||||
* This function builds a set of middlewares that are commonly needed
|
||||
* across different environments (renderer, API server).
|
||||
*
|
||||
* @param config - Configuration for middleware building
|
||||
* @returns Array of AI SDK middlewares
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* import { buildSharedMiddlewares } from '@shared/middleware'
|
||||
*
|
||||
* const middlewares = buildSharedMiddlewares({
|
||||
* enableReasoning: true,
|
||||
* modelId: 'gemini-2.5-pro',
|
||||
* providerId: 'openrouter',
|
||||
* aiSdkProviderId: 'google'
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
export function buildSharedMiddlewares(config: SharedMiddlewareConfig): LanguageModelV2Middleware[] {
|
||||
const middlewares: LanguageModelV2Middleware[] = []
|
||||
|
||||
// 1. Reasoning extraction middleware
|
||||
if (config.enableReasoning) {
|
||||
const tagName = config.reasoningTagName || getReasoningTagName(config.modelId)
|
||||
middlewares.push(extractReasoningMiddleware({ tagName }))
|
||||
}
|
||||
|
||||
// 2. OpenRouter-specific: filter [REDACTED] blocks
|
||||
if (config.providerId === 'openrouter' && config.enableReasoning) {
|
||||
middlewares.push(openrouterReasoningMiddleware())
|
||||
}
|
||||
|
||||
// 3. Gemini 3 (2.5) specific: skip thought signature validation
|
||||
if (isGemini3ModelId(config.modelId) && config.aiSdkProviderId) {
|
||||
middlewares.push(skipGeminiThoughtSignatureMiddleware(config.aiSdkProviderId))
|
||||
}
|
||||
|
||||
return middlewares
|
||||
}
|
||||
@@ -1,13 +1,13 @@
|
||||
/**
|
||||
* AiHubMix规则集
|
||||
*/
|
||||
import { isOpenAILLMModel } from '@renderer/config/models'
|
||||
import type { Provider } from '@renderer/types'
|
||||
import { getLowerBaseModelName } from '@shared/utils/naming'
|
||||
|
||||
import type { MinimalModel, MinimalProvider } from '../types'
|
||||
import { provider2Provider, startsWith } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
const extraProviderConfig = (provider: Provider) => {
|
||||
const extraProviderConfig = <P extends MinimalProvider>(provider: P) => {
|
||||
return {
|
||||
...provider,
|
||||
extra_headers: {
|
||||
@@ -17,11 +17,23 @@ const extraProviderConfig = (provider: Provider) => {
|
||||
}
|
||||
}
|
||||
|
||||
function isOpenAILLMModel<M extends MinimalModel>(model: M): boolean {
|
||||
const modelId = getLowerBaseModelName(model.id)
|
||||
const reasonings = ['o1', 'o3', 'o4', 'gpt-oss']
|
||||
if (reasonings.some((r) => modelId.includes(r))) {
|
||||
return true
|
||||
}
|
||||
if (modelId.includes('gpt')) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const AIHUBMIX_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: startsWith('claude'),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return extraProviderConfig({
|
||||
...provider,
|
||||
type: 'anthropic'
|
||||
@@ -34,7 +46,7 @@ const AIHUBMIX_RULES: RuleSet = {
|
||||
!model.id.endsWith('-nothink') &&
|
||||
!model.id.endsWith('-search') &&
|
||||
!model.id.includes('embedding'),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return extraProviderConfig({
|
||||
...provider,
|
||||
type: 'gemini',
|
||||
@@ -44,7 +56,7 @@ const AIHUBMIX_RULES: RuleSet = {
|
||||
},
|
||||
{
|
||||
match: isOpenAILLMModel,
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return extraProviderConfig({
|
||||
...provider,
|
||||
type: 'openai-response'
|
||||
@@ -52,7 +64,8 @@ const AIHUBMIX_RULES: RuleSet = {
|
||||
}
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: Provider) => extraProviderConfig(provider)
|
||||
fallbackRule: (provider) => extraProviderConfig(provider)
|
||||
}
|
||||
|
||||
export const aihubmixProviderCreator = provider2Provider.bind(null, AIHUBMIX_RULES)
|
||||
export const aihubmixProviderCreator = <P extends MinimalProvider>(model: MinimalModel, provider: P): P =>
|
||||
provider2Provider<MinimalModel, MinimalProvider, P>(AIHUBMIX_RULES, model, provider)
|
||||
22
packages/shared/provider/config/azure-anthropic.ts
Normal file
22
packages/shared/provider/config/azure-anthropic.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import type { MinimalModel, MinimalProvider, ProviderType } from '../types'
|
||||
import { provider2Provider, startsWith } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
// https://platform.claude.com/docs/en/build-with-claude/claude-in-microsoft-foundry
|
||||
const AZURE_ANTHROPIC_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: startsWith('claude'),
|
||||
provider: (provider: MinimalProvider) => ({
|
||||
...provider,
|
||||
type: 'anthropic' as ProviderType,
|
||||
apiHost: provider.apiHost + 'anthropic/v1',
|
||||
id: 'azure-anthropic'
|
||||
})
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: MinimalProvider) => provider
|
||||
}
|
||||
|
||||
export const azureAnthropicProviderCreator = <P extends MinimalProvider>(model: MinimalModel, provider: P): P =>
|
||||
provider2Provider<MinimalModel, MinimalProvider, P>(AZURE_ANTHROPIC_RULES, model, provider)
|
||||
32
packages/shared/provider/config/helper.ts
Normal file
32
packages/shared/provider/config/helper.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
import type { MinimalModel, MinimalProvider } from '../types'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
export const startsWith =
|
||||
(prefix: string) =>
|
||||
<M extends MinimalModel>(model: M) =>
|
||||
model.id.toLowerCase().startsWith(prefix.toLowerCase())
|
||||
|
||||
export const endpointIs =
|
||||
(type: string) =>
|
||||
<M extends MinimalModel>(model: M) =>
|
||||
model.endpoint_type === type
|
||||
|
||||
/**
|
||||
* 解析模型对应的Provider
|
||||
* @param ruleSet 规则集对象
|
||||
* @param model 模型对象
|
||||
* @param provider 原始provider对象
|
||||
* @returns 解析出的provider对象
|
||||
*/
|
||||
export function provider2Provider<M extends MinimalModel, R extends MinimalProvider, P extends R = R>(
|
||||
ruleSet: RuleSet<M, R>,
|
||||
model: M,
|
||||
provider: P
|
||||
): P {
|
||||
for (const rule of ruleSet.rules) {
|
||||
if (rule.match(model)) {
|
||||
return rule.provider(provider) as P
|
||||
}
|
||||
}
|
||||
return ruleSet.fallbackRule(provider) as P
|
||||
}
|
||||
6
packages/shared/provider/config/index.ts
Normal file
6
packages/shared/provider/config/index.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
export { aihubmixProviderCreator } from './aihubmix'
|
||||
export { azureAnthropicProviderCreator } from './azure-anthropic'
|
||||
export { endpointIs, provider2Provider, startsWith } from './helper'
|
||||
export { newApiResolverCreator } from './newApi'
|
||||
export type { RuleSet } from './types'
|
||||
export { vertexAnthropicProviderCreator } from './vertex-anthropic'
|
||||
@@ -1,8 +1,7 @@
|
||||
/**
|
||||
* NewAPI规则集
|
||||
*/
|
||||
import type { Provider } from '@renderer/types'
|
||||
|
||||
import type { MinimalModel, MinimalProvider, ProviderType } from '../types'
|
||||
import { endpointIs, provider2Provider } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
@@ -10,42 +9,43 @@ const NEWAPI_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: endpointIs('anthropic'),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return {
|
||||
...provider,
|
||||
type: 'anthropic'
|
||||
type: 'anthropic' as ProviderType
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
match: endpointIs('gemini'),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return {
|
||||
...provider,
|
||||
type: 'gemini'
|
||||
type: 'gemini' as ProviderType
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
match: endpointIs('openai-response'),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return {
|
||||
...provider,
|
||||
type: 'openai-response'
|
||||
type: 'openai-response' as ProviderType
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
match: (model) => endpointIs('openai')(model) || endpointIs('image-generation')(model),
|
||||
provider: (provider: Provider) => {
|
||||
provider: (provider) => {
|
||||
return {
|
||||
...provider,
|
||||
type: 'openai'
|
||||
type: 'openai' as ProviderType
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: Provider) => provider
|
||||
fallbackRule: (provider) => provider
|
||||
}
|
||||
|
||||
export const newApiResolverCreator = provider2Provider.bind(null, NEWAPI_RULES)
|
||||
export const newApiResolverCreator = <P extends MinimalProvider>(model: MinimalModel, provider: P): P =>
|
||||
provider2Provider<MinimalModel, MinimalProvider, P>(NEWAPI_RULES, model, provider)
|
||||
9
packages/shared/provider/config/types.ts
Normal file
9
packages/shared/provider/config/types.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
import type { MinimalModel, MinimalProvider } from '../types'
|
||||
|
||||
export interface RuleSet<M extends MinimalModel = MinimalModel, P extends MinimalProvider = MinimalProvider> {
|
||||
rules: Array<{
|
||||
match: (model: M) => boolean
|
||||
provider: (provider: P) => P
|
||||
}>
|
||||
fallbackRule: (provider: P) => P
|
||||
}
|
||||
19
packages/shared/provider/config/vertex-anthropic.ts
Normal file
19
packages/shared/provider/config/vertex-anthropic.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { MinimalModel, MinimalProvider } from '../types'
|
||||
import { provider2Provider, startsWith } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
const VERTEX_ANTHROPIC_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: startsWith('claude'),
|
||||
provider: (provider: MinimalProvider) => ({
|
||||
...provider,
|
||||
id: 'google-vertex-anthropic'
|
||||
})
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: MinimalProvider) => provider
|
||||
}
|
||||
|
||||
export const vertexAnthropicProviderCreator = <P extends MinimalProvider>(model: MinimalModel, provider: P): P =>
|
||||
provider2Provider<MinimalModel, MinimalProvider, P>(VERTEX_ANTHROPIC_RULES, model, provider)
|
||||
26
packages/shared/provider/constant.ts
Normal file
26
packages/shared/provider/constant.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import { getLowerBaseModelName } from '@shared/utils/naming'
|
||||
|
||||
import type { MinimalModel } from './types'
|
||||
|
||||
export const COPILOT_EDITOR_VERSION = 'vscode/1.104.1'
|
||||
export const COPILOT_PLUGIN_VERSION = 'copilot-chat/0.26.7'
|
||||
export const COPILOT_INTEGRATION_ID = 'vscode-chat'
|
||||
export const COPILOT_USER_AGENT = 'GitHubCopilotChat/0.26.7'
|
||||
|
||||
export const COPILOT_DEFAULT_HEADERS = {
|
||||
'Copilot-Integration-Id': COPILOT_INTEGRATION_ID,
|
||||
'User-Agent': COPILOT_USER_AGENT,
|
||||
'Editor-Version': COPILOT_EDITOR_VERSION,
|
||||
'Editor-Plugin-Version': COPILOT_PLUGIN_VERSION,
|
||||
'editor-version': COPILOT_EDITOR_VERSION,
|
||||
'editor-plugin-version': COPILOT_PLUGIN_VERSION,
|
||||
'copilot-vision-request': 'true'
|
||||
} as const
|
||||
|
||||
// Models that require the OpenAI Responses endpoint when routed through GitHub Copilot (#10560)
|
||||
const COPILOT_RESPONSES_MODEL_IDS = ['gpt-5-codex', 'gpt-5.1-codex', 'gpt-5.1-codex-mini']
|
||||
|
||||
export function isCopilotResponsesModel<M extends MinimalModel>(model: M): boolean {
|
||||
const normalizedId = getLowerBaseModelName(model.id)
|
||||
return COPILOT_RESPONSES_MODEL_IDS.some((target) => normalizedId === target)
|
||||
}
|
||||
100
packages/shared/provider/detection.ts
Normal file
100
packages/shared/provider/detection.ts
Normal file
@@ -0,0 +1,100 @@
|
||||
/**
|
||||
* Provider Type Detection Utilities
|
||||
*
|
||||
* Functions to detect provider types based on provider configuration.
|
||||
* These are pure functions that only depend on provider.type and provider.id.
|
||||
*
|
||||
* NOTE: These functions should match the logic in @renderer/utils/provider.ts
|
||||
*/
|
||||
|
||||
import type { MinimalProvider } from './types'
|
||||
|
||||
/**
|
||||
* Check if provider is Anthropic type
|
||||
*/
|
||||
export function isAnthropicProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'anthropic'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is OpenAI Response type (openai-response)
|
||||
* NOTE: This matches isOpenAIProvider in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isOpenAIProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'openai-response'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is Gemini type
|
||||
*/
|
||||
export function isGeminiProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'gemini'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is Azure OpenAI type
|
||||
*/
|
||||
export function isAzureOpenAIProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'azure-openai'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is Vertex AI type
|
||||
*/
|
||||
export function isVertexProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'vertexai'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is AWS Bedrock type
|
||||
*/
|
||||
export function isAwsBedrockProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'aws-bedrock'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is AI Gateway type
|
||||
*/
|
||||
export function isAIGatewayProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.type === 'ai-gateway'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Azure OpenAI provider uses responses endpoint
|
||||
* Matches isAzureResponsesEndpoint in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isAzureResponsesEndpoint<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.apiVersion === 'preview' || provider.apiVersion === 'v1'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is Cherry AI type
|
||||
* Matches isCherryAIProvider in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isCherryAIProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.id === 'cherryai'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is Perplexity type
|
||||
* Matches isPerplexityProvider in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isPerplexityProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return provider.id === 'perplexity'
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is new-api type (supports multiple backends)
|
||||
* Matches isNewApiProvider in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isNewApiProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return ['new-api', 'cherryin'].includes(provider.id) || provider.type === ('new-api' as string)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider is OpenAI compatible
|
||||
* Matches isOpenAICompatibleProvider in renderer/utils/provider.ts
|
||||
*/
|
||||
export function isOpenAICompatibleProvider<P extends MinimalProvider>(provider: P): boolean {
|
||||
return ['openai', 'new-api', 'mistral'].includes(provider.type)
|
||||
}
|
||||
136
packages/shared/provider/format.ts
Normal file
136
packages/shared/provider/format.ts
Normal file
@@ -0,0 +1,136 @@
|
||||
/**
|
||||
* Provider API Host Formatting
|
||||
*
|
||||
* Utilities for formatting provider API hosts to work with AI SDK.
|
||||
* These handle the differences between how Cherry Studio stores API hosts
|
||||
* and how AI SDK expects them.
|
||||
*/
|
||||
|
||||
import {
|
||||
formatApiHost,
|
||||
formatAzureOpenAIApiHost,
|
||||
formatVertexApiHost,
|
||||
routeToEndpoint,
|
||||
withoutTrailingSlash
|
||||
} from '../api'
|
||||
import {
|
||||
isAnthropicProvider,
|
||||
isAzureOpenAIProvider,
|
||||
isCherryAIProvider,
|
||||
isGeminiProvider,
|
||||
isPerplexityProvider,
|
||||
isVertexProvider
|
||||
} from './detection'
|
||||
import type { MinimalProvider } from './types'
|
||||
import { SystemProviderIds } from './types'
|
||||
|
||||
/**
|
||||
* Interface for environment-specific implementations
|
||||
* Renderer and Main process can provide their own implementations
|
||||
*/
|
||||
export interface ProviderFormatContext {
|
||||
vertex: {
|
||||
project: string
|
||||
location: string
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Default Azure OpenAI API host formatter
|
||||
*/
|
||||
export function defaultFormatAzureOpenAIApiHost(host: string): string {
|
||||
const normalizedHost = withoutTrailingSlash(host)
|
||||
?.replace(/\/v1$/, '')
|
||||
.replace(/\/openai$/, '')
|
||||
// AI SDK will add /v1
|
||||
return formatApiHost(normalizedHost + '/openai', false)
|
||||
}
|
||||
|
||||
/**
|
||||
* Format provider API host for AI SDK
|
||||
*
|
||||
* This function normalizes the apiHost to work with AI SDK.
|
||||
* Different providers have different requirements:
|
||||
* - Most providers: add /v1 suffix
|
||||
* - Gemini: add /v1beta suffix
|
||||
* - Some providers: no suffix needed
|
||||
*
|
||||
* @param provider - The provider to format
|
||||
* @param context - Optional context with environment-specific implementations
|
||||
* @returns Provider with formatted apiHost (and anthropicApiHost if applicable)
|
||||
*/
|
||||
export function formatProviderApiHost<T extends MinimalProvider>(provider: T, context: ProviderFormatContext): T {
|
||||
const formatted = { ...provider }
|
||||
|
||||
// Format anthropicApiHost if present
|
||||
if (formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
|
||||
}
|
||||
|
||||
// Format based on provider type
|
||||
if (isAnthropicProvider(provider)) {
|
||||
const baseHost = formatted.anthropicApiHost || formatted.apiHost
|
||||
// AI SDK needs /v1 in baseURL
|
||||
formatted.apiHost = formatApiHost(baseHost)
|
||||
if (!formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatted.apiHost
|
||||
}
|
||||
} else if (formatted.id === SystemProviderIds.copilot || formatted.id === SystemProviderIds.github) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else if (isGeminiProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
|
||||
} else if (isAzureOpenAIProvider(formatted)) {
|
||||
formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost)
|
||||
} else if (isVertexProvider(formatted)) {
|
||||
formatted.apiHost = formatVertexApiHost(formatted, context.vertex.project, context.vertex.location)
|
||||
} else if (isCherryAIProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else if (isPerplexityProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost)
|
||||
}
|
||||
|
||||
return formatted
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the base URL for AI SDK from a formatted provider
|
||||
*
|
||||
* This extracts the baseURL that AI SDK expects, handling
|
||||
* the '#' endpoint routing format if present.
|
||||
*
|
||||
* @param formattedApiHost - The formatted apiHost (after formatProviderApiHost)
|
||||
* @returns The baseURL for AI SDK
|
||||
*/
|
||||
export function getBaseUrlForAiSdk(formattedApiHost: string): string {
|
||||
const { baseURL } = routeToEndpoint(formattedApiHost)
|
||||
return baseURL
|
||||
}
|
||||
|
||||
/**
|
||||
* Get rotated API key from comma-separated keys
|
||||
*
|
||||
* This is the interface for API key rotation. The actual implementation
|
||||
* depends on the environment (renderer uses window.keyv, main uses its own storage).
|
||||
*/
|
||||
export interface ApiKeyRotator {
|
||||
/**
|
||||
* Get the next API key in rotation
|
||||
* @param providerId - The provider ID for tracking rotation
|
||||
* @param keys - Comma-separated API keys
|
||||
* @returns The next API key to use
|
||||
*/
|
||||
getRotatedKey(providerId: string, keys: string): string
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple API key rotator that always returns the first key
|
||||
* Use this when rotation is not needed
|
||||
*/
|
||||
export const simpleKeyRotator: ApiKeyRotator = {
|
||||
getRotatedKey(_providerId: string, keys: string): string {
|
||||
const keyList = keys.split(',').map((k) => k.trim())
|
||||
return keyList[0] || keys
|
||||
}
|
||||
}
|
||||
48
packages/shared/provider/index.ts
Normal file
48
packages/shared/provider/index.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
/**
|
||||
* Shared Provider Utilities
|
||||
*
|
||||
* This module exports utilities for working with AI providers
|
||||
* that can be shared between main process and renderer process.
|
||||
*/
|
||||
|
||||
// Type definitions
|
||||
export type { MinimalProvider, ProviderType, SystemProviderId } from './types'
|
||||
export { SystemProviderIds } from './types'
|
||||
|
||||
// Provider type detection
|
||||
export {
|
||||
isAIGatewayProvider,
|
||||
isAnthropicProvider,
|
||||
isAwsBedrockProvider,
|
||||
isAzureOpenAIProvider,
|
||||
isAzureResponsesEndpoint,
|
||||
isCherryAIProvider,
|
||||
isGeminiProvider,
|
||||
isNewApiProvider,
|
||||
isOpenAICompatibleProvider,
|
||||
isOpenAIProvider,
|
||||
isPerplexityProvider,
|
||||
isVertexProvider
|
||||
} from './detection'
|
||||
|
||||
// API host formatting
|
||||
export type { ApiKeyRotator, ProviderFormatContext } from './format'
|
||||
export {
|
||||
defaultFormatAzureOpenAIApiHost,
|
||||
formatProviderApiHost,
|
||||
getBaseUrlForAiSdk,
|
||||
simpleKeyRotator
|
||||
} from './format'
|
||||
|
||||
// Provider ID mapping
|
||||
export { getAiSdkProviderId, STATIC_PROVIDER_MAPPING, tryResolveProviderId } from './mapping'
|
||||
|
||||
// AI SDK configuration
|
||||
export type { AiSdkConfig, AiSdkConfigContext } from './sdk-config'
|
||||
export { providerToAiSdkConfig } from './sdk-config'
|
||||
|
||||
// Provider resolution
|
||||
export { resolveActualProvider } from './resolve'
|
||||
|
||||
// Provider initialization
|
||||
export { initializeSharedProviders, SHARED_PROVIDER_CONFIGS } from './initialization'
|
||||
107
packages/shared/provider/initialization.ts
Normal file
107
packages/shared/provider/initialization.ts
Normal file
@@ -0,0 +1,107 @@
|
||||
import { type ProviderConfig, registerMultipleProviderConfigs } from '@cherrystudio/ai-core/provider'
|
||||
|
||||
type ProviderInitializationLogger = {
|
||||
warn?: (message: string) => void
|
||||
error?: (message: string, error: Error) => void
|
||||
}
|
||||
|
||||
export const SHARED_PROVIDER_CONFIGS: ProviderConfig[] = [
|
||||
{
|
||||
id: 'openrouter',
|
||||
name: 'OpenRouter',
|
||||
import: () => import('@openrouter/ai-sdk-provider'),
|
||||
creatorFunctionName: 'createOpenRouter',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['openrouter']
|
||||
},
|
||||
{
|
||||
id: 'google-vertex',
|
||||
name: 'Google Vertex AI',
|
||||
import: () => import('@ai-sdk/google-vertex/edge'),
|
||||
creatorFunctionName: 'createVertex',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['vertexai']
|
||||
},
|
||||
{
|
||||
id: 'google-vertex-anthropic',
|
||||
name: 'Google Vertex AI Anthropic',
|
||||
import: () => import('@ai-sdk/google-vertex/anthropic/edge'),
|
||||
creatorFunctionName: 'createVertexAnthropic',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['vertexai-anthropic']
|
||||
},
|
||||
{
|
||||
id: 'azure-anthropic',
|
||||
name: 'Azure AI Anthropic',
|
||||
import: () => import('@ai-sdk/anthropic'),
|
||||
creatorFunctionName: 'createAnthropic',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['azure-anthropic']
|
||||
},
|
||||
{
|
||||
id: 'github-copilot-openai-compatible',
|
||||
name: 'GitHub Copilot OpenAI Compatible',
|
||||
import: () => import('@opeoginni/github-copilot-openai-compatible'),
|
||||
creatorFunctionName: 'createGitHubCopilotOpenAICompatible',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['copilot', 'github-copilot']
|
||||
},
|
||||
{
|
||||
id: 'bedrock',
|
||||
name: 'Amazon Bedrock',
|
||||
import: () => import('@ai-sdk/amazon-bedrock'),
|
||||
creatorFunctionName: 'createAmazonBedrock',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['aws-bedrock']
|
||||
},
|
||||
{
|
||||
id: 'perplexity',
|
||||
name: 'Perplexity',
|
||||
import: () => import('@ai-sdk/perplexity'),
|
||||
creatorFunctionName: 'createPerplexity',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['perplexity']
|
||||
},
|
||||
{
|
||||
id: 'mistral',
|
||||
name: 'Mistral',
|
||||
import: () => import('@ai-sdk/mistral'),
|
||||
creatorFunctionName: 'createMistral',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['mistral']
|
||||
},
|
||||
{
|
||||
id: 'huggingface',
|
||||
name: 'HuggingFace',
|
||||
import: () => import('@ai-sdk/huggingface'),
|
||||
creatorFunctionName: 'createHuggingFace',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['hf', 'hugging-face']
|
||||
},
|
||||
{
|
||||
id: 'ai-gateway',
|
||||
name: 'AI Gateway',
|
||||
import: () => import('@ai-sdk/gateway'),
|
||||
creatorFunctionName: 'createGateway',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['gateway']
|
||||
},
|
||||
{
|
||||
id: 'cerebras',
|
||||
name: 'Cerebras',
|
||||
import: () => import('@ai-sdk/cerebras'),
|
||||
creatorFunctionName: 'createCerebras',
|
||||
supportsImageGeneration: false
|
||||
}
|
||||
] as const
|
||||
|
||||
export function initializeSharedProviders(logger?: ProviderInitializationLogger): void {
|
||||
try {
|
||||
const successCount = registerMultipleProviderConfigs(SHARED_PROVIDER_CONFIGS)
|
||||
if (successCount < SHARED_PROVIDER_CONFIGS.length) {
|
||||
logger?.warn?.('Some providers failed to register. Check previous error logs.')
|
||||
}
|
||||
} catch (error) {
|
||||
logger?.error?.('Failed to initialize shared providers', error as Error)
|
||||
}
|
||||
}
|
||||
95
packages/shared/provider/mapping.ts
Normal file
95
packages/shared/provider/mapping.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Provider ID Mapping
|
||||
*
|
||||
* Maps Cherry Studio provider IDs/types to AI SDK provider IDs.
|
||||
* This logic should match @renderer/aiCore/provider/factory.ts
|
||||
*/
|
||||
|
||||
import { hasProviderConfigByAlias, type ProviderId, resolveProviderConfigId } from '@cherrystudio/ai-core/provider'
|
||||
|
||||
import { isAzureOpenAIProvider, isAzureResponsesEndpoint } from './detection'
|
||||
import type { MinimalProvider } from './types'
|
||||
|
||||
/**
|
||||
* Static mapping from Cherry Studio provider ID/type to AI SDK provider ID
|
||||
* Matches STATIC_PROVIDER_MAPPING in @renderer/aiCore/provider/factory.ts
|
||||
*/
|
||||
export const STATIC_PROVIDER_MAPPING: Record<string, ProviderId> = {
|
||||
gemini: 'google', // Google Gemini -> google
|
||||
'azure-openai': 'azure', // Azure OpenAI -> azure
|
||||
'openai-response': 'openai', // OpenAI Responses -> openai
|
||||
grok: 'xai', // Grok -> xai
|
||||
copilot: 'github-copilot-openai-compatible'
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to resolve a provider identifier to an AI SDK provider ID
|
||||
* Matches tryResolveProviderId in @renderer/aiCore/provider/factory.ts
|
||||
*
|
||||
* @param identifier - The provider ID or type to resolve
|
||||
* @param checker - Provider config checker (defaults to static mapping only)
|
||||
* @returns The resolved AI SDK provider ID, or null if not found
|
||||
*/
|
||||
export function tryResolveProviderId(identifier: string): ProviderId | null {
|
||||
// 1. 检查静态映射
|
||||
const staticMapping = STATIC_PROVIDER_MAPPING[identifier]
|
||||
if (staticMapping) {
|
||||
return staticMapping
|
||||
}
|
||||
|
||||
// 2. 检查AiCore是否支持(包括别名支持)
|
||||
if (hasProviderConfigByAlias(identifier)) {
|
||||
// 解析为真实的Provider ID
|
||||
return resolveProviderConfigId(identifier) as ProviderId
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the AI SDK Provider ID for a Cherry Studio provider
|
||||
* Matches getAiSdkProviderId in @renderer/aiCore/provider/factory.ts
|
||||
*
|
||||
* Logic:
|
||||
* 1. Handle Azure OpenAI specially (check responses endpoint)
|
||||
* 2. Try to resolve from provider.id
|
||||
* 3. Try to resolve from provider.type (but not for generic 'openai' type)
|
||||
* 4. Check for OpenAI API host pattern
|
||||
* 5. Fallback to provider's own ID
|
||||
*
|
||||
* @param provider - The Cherry Studio provider
|
||||
* @param checker - Provider config checker (defaults to static mapping only)
|
||||
* @returns The AI SDK provider ID to use
|
||||
*/
|
||||
export function getAiSdkProviderId(provider: MinimalProvider): ProviderId {
|
||||
// 1. Handle Azure OpenAI specially - check this FIRST before other resolution
|
||||
if (isAzureOpenAIProvider(provider)) {
|
||||
if (isAzureResponsesEndpoint(provider)) {
|
||||
return 'azure-responses'
|
||||
}
|
||||
return 'azure'
|
||||
}
|
||||
|
||||
// 2. 尝试解析provider.id
|
||||
const resolvedFromId = tryResolveProviderId(provider.id)
|
||||
if (resolvedFromId) {
|
||||
return resolvedFromId
|
||||
}
|
||||
|
||||
// 3. 尝试解析provider.type
|
||||
// 会把所有类型为openai的自定义provider解析到aisdk的openaiProvider上
|
||||
if (provider.type !== 'openai') {
|
||||
const resolvedFromType = tryResolveProviderId(provider.type)
|
||||
if (resolvedFromType) {
|
||||
return resolvedFromType
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Check for OpenAI API host pattern
|
||||
if (provider.apiHost.includes('api.openai.com')) {
|
||||
return 'openai-chat'
|
||||
}
|
||||
|
||||
// 5. 最后的fallback(使用provider本身的id)
|
||||
return provider.id
|
||||
}
|
||||
43
packages/shared/provider/resolve.ts
Normal file
43
packages/shared/provider/resolve.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
import { aihubmixProviderCreator, newApiResolverCreator, vertexAnthropicProviderCreator } from './config'
|
||||
import { azureAnthropicProviderCreator } from './config/azure-anthropic'
|
||||
import { isAzureOpenAIProvider, isNewApiProvider } from './detection'
|
||||
import type { MinimalModel, MinimalProvider } from './types'
|
||||
|
||||
export interface ResolveActualProviderOptions<P extends MinimalProvider> {
|
||||
isSystemProvider?: (provider: P) => boolean
|
||||
}
|
||||
|
||||
const defaultIsSystemProvider = <P extends MinimalProvider>(provider: P): boolean => {
|
||||
if ('isSystem' in provider) {
|
||||
return Boolean((provider as unknown as { isSystem?: boolean }).isSystem)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
export function resolveActualProvider<M extends MinimalModel, P extends MinimalProvider>(
|
||||
provider: P,
|
||||
model: M,
|
||||
options: ResolveActualProviderOptions<P> = {}
|
||||
): P {
|
||||
let resolvedProvider = provider
|
||||
|
||||
if (isNewApiProvider(resolvedProvider)) {
|
||||
resolvedProvider = newApiResolverCreator(model, resolvedProvider)
|
||||
}
|
||||
|
||||
const isSystemProvider = options.isSystemProvider?.(resolvedProvider) ?? defaultIsSystemProvider(resolvedProvider)
|
||||
|
||||
if (isSystemProvider && resolvedProvider.id === 'aihubmix') {
|
||||
resolvedProvider = aihubmixProviderCreator(model, resolvedProvider)
|
||||
}
|
||||
|
||||
if (isSystemProvider && resolvedProvider.id === 'vertexai') {
|
||||
resolvedProvider = vertexAnthropicProviderCreator(model, resolvedProvider)
|
||||
}
|
||||
|
||||
if (isAzureOpenAIProvider(resolvedProvider)) {
|
||||
resolvedProvider = azureAnthropicProviderCreator(model, resolvedProvider)
|
||||
}
|
||||
|
||||
return resolvedProvider
|
||||
}
|
||||
259
packages/shared/provider/sdk-config.ts
Normal file
259
packages/shared/provider/sdk-config.ts
Normal file
@@ -0,0 +1,259 @@
|
||||
/**
|
||||
* AI SDK Configuration
|
||||
*
|
||||
* Shared utilities for converting Cherry Studio Provider to AI SDK configuration.
|
||||
* Environment-specific logic (renderer/main) is injected via context interfaces.
|
||||
*/
|
||||
|
||||
import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
|
||||
|
||||
import { routeToEndpoint } from '../api'
|
||||
import { getAiSdkProviderId } from './mapping'
|
||||
import type { MinimalProvider } from './types'
|
||||
import { SystemProviderIds } from './types'
|
||||
|
||||
/**
|
||||
* AI SDK configuration result
|
||||
*/
|
||||
export interface AiSdkConfig {
|
||||
providerId: string
|
||||
options: Record<string, unknown>
|
||||
}
|
||||
|
||||
/**
|
||||
* Context for environment-specific implementations
|
||||
*/
|
||||
export interface AiSdkConfigContext {
|
||||
/**
|
||||
* Get the rotated API key (for multi-key support)
|
||||
* Default: returns first key
|
||||
*/
|
||||
getRotatedApiKey?: (provider: MinimalProvider) => string
|
||||
|
||||
/**
|
||||
* Check if a model uses chat completion only (for OpenAI response mode)
|
||||
* Default: returns false
|
||||
*/
|
||||
isOpenAIChatCompletionOnlyModel?: (modelId: string) => boolean
|
||||
|
||||
/**
|
||||
* Get Copilot default headers (constants)
|
||||
* Default: returns empty object
|
||||
*/
|
||||
getCopilotDefaultHeaders?: () => Record<string, string>
|
||||
|
||||
/**
|
||||
* Get Copilot stored headers from state
|
||||
* Default: returns empty object
|
||||
*/
|
||||
getCopilotStoredHeaders?: () => Record<string, string>
|
||||
|
||||
/**
|
||||
* Get AWS Bedrock configuration
|
||||
* Default: returns undefined (not configured)
|
||||
*/
|
||||
getAwsBedrockConfig?: () =>
|
||||
| {
|
||||
authType: 'apiKey' | 'iam'
|
||||
region: string
|
||||
apiKey?: string
|
||||
accessKeyId?: string
|
||||
secretAccessKey?: string
|
||||
}
|
||||
| undefined
|
||||
|
||||
/**
|
||||
* Get Vertex AI configuration
|
||||
* Default: returns undefined (not configured)
|
||||
*/
|
||||
getVertexConfig?: (provider: MinimalProvider) =>
|
||||
| {
|
||||
project: string
|
||||
location: string
|
||||
googleCredentials: {
|
||||
privateKey: string
|
||||
clientEmail: string
|
||||
}
|
||||
}
|
||||
| undefined
|
||||
|
||||
/**
|
||||
* Get endpoint type for cherryin provider
|
||||
*/
|
||||
getEndpointType?: (modelId: string) => string | undefined
|
||||
|
||||
/**
|
||||
* Custom fetch implementation
|
||||
* Main process: use Electron net.fetch
|
||||
* Renderer process: use browser fetch (default)
|
||||
*/
|
||||
fetch?: typeof globalThis.fetch
|
||||
|
||||
/**
|
||||
* Get CherryAI signed fetch wrapper
|
||||
* Returns a fetch function that adds signature headers to requests
|
||||
*/
|
||||
getCherryAISignedFetch?: () => typeof globalThis.fetch
|
||||
}
|
||||
|
||||
/**
|
||||
* Default simple key rotator - returns first key
|
||||
*/
|
||||
function defaultGetRotatedApiKey(provider: MinimalProvider): string {
|
||||
const keys = provider.apiKey.split(',').map((k) => k.trim())
|
||||
return keys[0] || provider.apiKey
|
||||
}
|
||||
|
||||
/**
 * Convert Cherry Studio Provider to AI SDK configuration
 *
 * Resolution order:
 *   1. GitHub Copilot gets a dedicated config and returns early.
 *   2. Provider-specific extras are layered into `extraOptions`
 *      (mode, headers, Bedrock/Vertex credentials, cherryin endpoint, fetch).
 *   3. If the AI SDK knows the provider natively, use that id;
 *      otherwise fall back to the generic 'openai-compatible' provider.
 *
 * @param provider - The formatted provider (after formatProviderApiHost)
 * @param modelId - The model ID to use
 * @param context - Environment-specific implementations (main vs renderer)
 * @returns AI SDK configuration ({ providerId, options })
 */
export function providerToAiSdkConfig(
  provider: MinimalProvider,
  modelId: string,
  context: AiSdkConfigContext = {}
): AiSdkConfig {
  // Environment hooks default to safe no-op behaviour.
  const getRotatedApiKey = context.getRotatedApiKey || defaultGetRotatedApiKey
  const isOpenAIChatCompletionOnlyModel = context.isOpenAIChatCompletionOnlyModel || (() => false)

  const aiSdkProviderId = getAiSdkProviderId(provider)

  // Build base config: URL routing plus the (possibly rotated) API key.
  const { baseURL, endpoint } = routeToEndpoint(provider.apiHost)
  const baseConfig = {
    baseURL,
    apiKey: getRotatedApiKey(provider)
  }

  // Handle Copilot specially: it needs token headers from the context and
  // always uses the dedicated 'github-copilot-openai-compatible' provider.
  if (provider.id === SystemProviderIds.copilot) {
    const defaultHeaders = context.getCopilotDefaultHeaders?.() ?? {}
    const storedHeaders = context.getCopilotStoredHeaders?.() ?? {}
    const copilotExtraOptions: Record<string, unknown> = {
      headers: {
        // Later spreads win: stored headers override defaults,
        // provider-level extra_headers override both.
        ...defaultHeaders,
        ...storedHeaders,
        ...provider.extra_headers
      },
      name: provider.id,
      includeUsage: true
    }
    if (context.fetch) {
      copilotExtraOptions.fetch = context.fetch
    }
    const options = ProviderConfigFactory.fromProvider(
      'github-copilot-openai-compatible',
      baseConfig,
      copilotExtraOptions
    )

    return {
      providerId: 'github-copilot-openai-compatible',
      options
    }
  }

  // Build extra options shared by all non-Copilot providers.
  const extraOptions: Record<string, unknown> = {}
  if (endpoint) {
    extraOptions.endpoint = endpoint
  }

  // Handle OpenAI mode: prefer the Responses API unless the model is
  // chat-completion-only; plain openai (and cherryin-as-openai) use chat.
  if (provider.type === 'openai-response' && !isOpenAIChatCompletionOnlyModel(modelId)) {
    extraOptions.mode = 'responses'
  } else if (aiSdkProviderId === 'openai' || (aiSdkProviderId === 'cherryin' && provider.type === 'openai')) {
    extraOptions.mode = 'chat'
  }

  // Add extra headers
  // NOTE(review): the branded openai headers below are only added when
  // provider.extra_headers is set — confirm that is intentional.
  if (provider.extra_headers) {
    extraOptions.headers = provider.extra_headers
    if (aiSdkProviderId === 'openai') {
      extraOptions.headers = {
        ...(extraOptions.headers as Record<string, string>),
        'HTTP-Referer': 'https://cherry-ai.com',
        'X-Title': 'Cherry Studio',
        'X-Api-Key': baseConfig.apiKey
      }
    }
  }

  // Handle Azure modes (responses vs chat are distinct provider ids).
  if (aiSdkProviderId === 'azure-responses') {
    extraOptions.mode = 'responses'
  } else if (aiSdkProviderId === 'azure') {
    extraOptions.mode = 'chat'
  }

  // Handle AWS Bedrock: credentials come from the environment context,
  // either an API key or an access-key/secret pair.
  if (aiSdkProviderId === 'bedrock') {
    const bedrockConfig = context.getAwsBedrockConfig?.()
    if (bedrockConfig) {
      extraOptions.region = bedrockConfig.region
      if (bedrockConfig.authType === 'apiKey') {
        extraOptions.apiKey = bedrockConfig.apiKey
      } else {
        extraOptions.accessKeyId = bedrockConfig.accessKeyId
        extraOptions.secretAccessKey = bedrockConfig.secretAccessKey
      }
    }
  }

  // Handle Vertex AI: inject project/location/credentials and extend the
  // base URL with the publisher path expected by each variant.
  if (aiSdkProviderId === 'google-vertex' || aiSdkProviderId === 'google-vertex-anthropic') {
    const vertexConfig = context.getVertexConfig?.(provider)
    if (vertexConfig) {
      extraOptions.project = vertexConfig.project
      extraOptions.location = vertexConfig.location
      extraOptions.googleCredentials = {
        ...vertexConfig.googleCredentials,
        // Private keys from settings may have escaped newlines; normalize.
        privateKey: formatPrivateKey(vertexConfig.googleCredentials.privateKey)
      }
      baseConfig.baseURL += aiSdkProviderId === 'google-vertex' ? '/publishers/google' : '/publishers/anthropic/models'
    }
  }

  // Handle cherryin endpoint type (per-model endpoint selection).
  if (aiSdkProviderId === 'cherryin') {
    const endpointType = context.getEndpointType?.(modelId)
    if (endpointType) {
      extraOptions.endpointType = endpointType
    }
  }

  // Handle cherryai signed fetch: cherryai requests must carry signature
  // headers; all other providers use the context-supplied fetch if any.
  if (provider.id === 'cherryai') {
    const signedFetch = context.getCherryAISignedFetch?.()
    if (signedFetch) {
      extraOptions.fetch = signedFetch
    }
  } else if (context.fetch) {
    extraOptions.fetch = context.fetch
  }

  // Check if AI SDK supports this provider natively
  if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
    const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
    return {
      providerId: aiSdkProviderId,
      options
    }
  }

  // Fallback to openai-compatible
  const options = ProviderConfigFactory.createOpenAICompatible(baseConfig.baseURL, baseConfig.apiKey)
  return {
    providerId: 'openai-compatible',
    options: {
      ...options,
      name: provider.id,
      ...extraOptions,
      includeUsage: true
    }
  }
}
|
||||
174
packages/shared/provider/types.ts
Normal file
174
packages/shared/provider/types.ts
Normal file
@@ -0,0 +1,174 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
/**
 * Wire-protocol families a provider can speak.
 * Used to select the matching AI SDK adapter for a provider.
 */
export const ProviderTypeSchema = z.enum([
  'openai',
  'openai-response',
  'anthropic',
  'gemini',
  'azure-openai',
  'vertexai',
  'mistral',
  'aws-bedrock',
  'vertex-anthropic',
  'new-api',
  'ai-gateway'
])

// Union of the literal values accepted by ProviderTypeSchema.
export type ProviderType = z.infer<typeof ProviderTypeSchema>
|
||||
|
||||
/**
 * Minimal provider interface for shared utilities.
 * This is the subset of Provider that shared code needs, so packages/shared
 * does not depend on the full renderer-side Provider type.
 */
export type MinimalProvider = {
  id: string
  type: ProviderType
  // May contain several comma-separated keys; callers rotate between them.
  apiKey: string
  apiHost: string
  // Optional dedicated host for Anthropic-compatible endpoints.
  anthropicApiHost?: string
  apiVersion?: string
  // Extra HTTP headers merged into every request to this provider.
  extra_headers?: Record<string, string>
}
|
||||
|
||||
/**
 * Minimal model interface for shared utilities.
 * This is the subset of Model that shared code needs.
 */
export type MinimalModel = {
  id: string
  // Optional per-model endpoint family override (e.g. for cherryin).
  endpoint_type?: string
}
|
||||
|
||||
/**
 * Every provider id that ships built-in with Cherry Studio.
 * Keep in sync with the `SystemProviderIds` constant map below.
 */
export const SystemProviderIdSchema = z.enum([
  'cherryin',
  'silicon',
  'aihubmix',
  'ocoolai',
  'deepseek',
  'ppio',
  'alayanew',
  'qiniu',
  'dmxapi',
  'burncloud',
  'tokenflux',
  '302ai',
  'cephalon',
  'lanyun',
  'ph8',
  'openrouter',
  'ollama',
  'ovms',
  'new-api',
  'lmstudio',
  'anthropic',
  'openai',
  'azure-openai',
  'gemini',
  'vertexai',
  'github',
  'copilot',
  'zhipu',
  'yi',
  'moonshot',
  'baichuan',
  'dashscope',
  'stepfun',
  'doubao',
  'infini',
  'minimax',
  'groq',
  'together',
  'fireworks',
  'nvidia',
  'grok',
  'hyperbolic',
  'mistral',
  'jina',
  'perplexity',
  'modelscope',
  'xirang',
  'hunyuan',
  'tencent-cloud-ti',
  'baidu-cloud',
  'gpustack',
  'voyageai',
  'aws-bedrock',
  'poe',
  'aionly',
  'longcat',
  'huggingface',
  'sophnet',
  'ai-gateway',
  'cerebras'
])

// Union of all built-in provider id literals.
export type SystemProviderId = z.infer<typeof SystemProviderIdSchema>
|
||||
|
||||
export const isSystemProviderId = (id: string): id is SystemProviderId => {
|
||||
return SystemProviderIdSchema.safeParse(id).success
|
||||
}
|
||||
|
||||
/**
 * Identity map of built-in provider ids, for ergonomic dotted access
 * (e.g. `SystemProviderIds.copilot`) with compile-time checking.
 * `satisfies` guarantees the map covers exactly the SystemProviderId union.
 */
export const SystemProviderIds = {
  cherryin: 'cherryin',
  silicon: 'silicon',
  aihubmix: 'aihubmix',
  ocoolai: 'ocoolai',
  deepseek: 'deepseek',
  ppio: 'ppio',
  alayanew: 'alayanew',
  qiniu: 'qiniu',
  dmxapi: 'dmxapi',
  burncloud: 'burncloud',
  tokenflux: 'tokenflux',
  '302ai': '302ai',
  cephalon: 'cephalon',
  lanyun: 'lanyun',
  ph8: 'ph8',
  sophnet: 'sophnet',
  openrouter: 'openrouter',
  ollama: 'ollama',
  ovms: 'ovms',
  'new-api': 'new-api',
  lmstudio: 'lmstudio',
  anthropic: 'anthropic',
  openai: 'openai',
  'azure-openai': 'azure-openai',
  gemini: 'gemini',
  vertexai: 'vertexai',
  github: 'github',
  copilot: 'copilot',
  zhipu: 'zhipu',
  yi: 'yi',
  moonshot: 'moonshot',
  baichuan: 'baichuan',
  dashscope: 'dashscope',
  stepfun: 'stepfun',
  doubao: 'doubao',
  infini: 'infini',
  minimax: 'minimax',
  groq: 'groq',
  together: 'together',
  fireworks: 'fireworks',
  nvidia: 'nvidia',
  grok: 'grok',
  hyperbolic: 'hyperbolic',
  mistral: 'mistral',
  jina: 'jina',
  perplexity: 'perplexity',
  modelscope: 'modelscope',
  xirang: 'xirang',
  hunyuan: 'hunyuan',
  'tencent-cloud-ti': 'tencent-cloud-ti',
  'baidu-cloud': 'baidu-cloud',
  gpustack: 'gpustack',
  voyageai: 'voyageai',
  'aws-bedrock': 'aws-bedrock',
  poe: 'poe',
  aionly: 'aionly',
  longcat: 'longcat',
  huggingface: 'huggingface',
  'ai-gateway': 'ai-gateway',
  cerebras: 'cerebras'
} as const satisfies Record<SystemProviderId, SystemProviderId>

// Convenience alias for the map's exact (literal) type.
export type SystemProviderIdTypeMap = typeof SystemProviderIds
|
||||
1
packages/shared/utils/index.ts
Normal file
1
packages/shared/utils/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
// Barrel re-export of the shared model-name helpers.
export { getBaseModelName, getLowerBaseModelName } from './naming'
|
||||
31
packages/shared/utils/naming.ts
Normal file
31
packages/shared/utils/naming.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
/**
|
||||
* 从模型 ID 中提取基础名称。
|
||||
* 例如:
|
||||
* - 'deepseek/deepseek-r1' => 'deepseek-r1'
|
||||
* - 'deepseek-ai/deepseek/deepseek-r1' => 'deepseek-r1'
|
||||
* @param {string} id 模型 ID
|
||||
* @param {string} [delimiter='/'] 分隔符,默认为 '/'
|
||||
* @returns {string} 基础名称
|
||||
*/
|
||||
export const getBaseModelName = (id: string, delimiter: string = '/'): string => {
|
||||
const parts = id.split(delimiter)
|
||||
return parts[parts.length - 1]
|
||||
}
|
||||
|
||||
/**
|
||||
* 从模型 ID 中提取基础名称并转换为小写。
|
||||
* 例如:
|
||||
* - 'deepseek/DeepSeek-R1' => 'deepseek-r1'
|
||||
* - 'deepseek-ai/deepseek/DeepSeek-R1' => 'deepseek-r1'
|
||||
* @param {string} id 模型 ID
|
||||
* @param {string} [delimiter='/'] 分隔符,默认为 '/'
|
||||
* @returns {string} 小写的基础名称
|
||||
*/
|
||||
export const getLowerBaseModelName = (id: string, delimiter: string = '/'): string => {
|
||||
const baseModelName = getBaseModelName(id, delimiter).toLowerCase()
|
||||
// for openrouter
|
||||
if (baseModelName.endsWith(':free')) {
|
||||
return baseModelName.replace(':free', '')
|
||||
}
|
||||
return baseModelName
|
||||
}
|
||||
638
src/main/apiServer/adapters/AiSdkToAnthropicSSE.ts
Normal file
638
src/main/apiServer/adapters/AiSdkToAnthropicSSE.ts
Normal file
@@ -0,0 +1,638 @@
|
||||
/**
|
||||
* AI SDK to Anthropic SSE Adapter
|
||||
*
|
||||
* Converts AI SDK's fullStream (TextStreamPart) events to Anthropic Messages API SSE format.
|
||||
* This enables any AI provider supported by AI SDK to be exposed via Anthropic-compatible API.
|
||||
*
|
||||
* Anthropic SSE Event Flow:
|
||||
* 1. message_start - Initial message with metadata
|
||||
* 2. content_block_start - Begin a content block (text, tool_use, thinking)
|
||||
* 3. content_block_delta - Incremental content updates
|
||||
* 4. content_block_stop - End a content block
|
||||
* 5. message_delta - Updates to overall message (stop_reason, usage)
|
||||
* 6. message_stop - Stream complete
|
||||
*
|
||||
* @see https://docs.anthropic.com/en/api/messages-streaming
|
||||
*/
|
||||
|
||||
import type {
|
||||
ContentBlock,
|
||||
InputJSONDelta,
|
||||
Message,
|
||||
MessageDeltaUsage,
|
||||
RawContentBlockDeltaEvent,
|
||||
RawContentBlockStartEvent,
|
||||
RawContentBlockStopEvent,
|
||||
RawMessageDeltaEvent,
|
||||
RawMessageStartEvent,
|
||||
RawMessageStopEvent,
|
||||
RawMessageStreamEvent,
|
||||
StopReason,
|
||||
TextBlock,
|
||||
TextDelta,
|
||||
ThinkingBlock,
|
||||
ThinkingDelta,
|
||||
ToolUseBlock,
|
||||
Usage
|
||||
} from '@anthropic-ai/sdk/resources/messages'
|
||||
import { loggerService } from '@logger'
|
||||
import { type FinishReason, type LanguageModelUsage, type TextStreamPart, type ToolSet } from 'ai'
|
||||
|
||||
import { googleReasoningCache, openRouterReasoningCache } from '../../services/CacheService'
|
||||
|
||||
const logger = loggerService.withContext('AiSdkToAnthropicSSE')

// Bookkeeping for a single Anthropic content block within the message.
interface ContentBlockState {
  type: 'text' | 'tool_use' | 'thinking'
  index: number
  started: boolean
  content: string
  // For tool_use blocks
  toolId?: string
  toolName?: string
  toolInput?: string
}

// Mutable state accumulated while translating one AI SDK stream.
interface AdapterState {
  messageId: string
  model: string
  inputTokens: number
  outputTokens: number
  cacheInputTokens: number
  // Next content-block index to assign (Anthropic blocks are ordered).
  currentBlockIndex: number
  blocks: Map<number, ContentBlockState>
  // Index of the currently open text block, or null when none is open.
  textBlockIndex: number | null
  // Track multiple thinking blocks by their reasoning ID
  thinkingBlocks: Map<string, number> // reasoningId -> blockIndex
  currentThinkingId: string | null // Currently active thinking block ID
  toolBlocks: Map<string, number> // toolCallId -> blockIndex
  stopReason: StopReason | null
  hasEmittedMessageStart: boolean
}

// Consumer callback invoked once per generated Anthropic SSE event.
export type SSEEventCallback = (event: RawMessageStreamEvent) => void

export interface AiSdkToAnthropicSSEOptions {
  model: string
  // Optional externally supplied message id; generated when omitted.
  messageId?: string
  // Prompt token count, if known up front (can be updated via setInputTokens).
  inputTokens?: number
  onEvent: SSEEventCallback
}

/**
 * Adapter that converts AI SDK fullStream events to Anthropic SSE events.
 * Feed it a fullStream via processStream(); it invokes onEvent with each
 * translated Anthropic event in protocol order (message_start ...
 * content blocks ... message_delta, message_stop).
 */
export class AiSdkToAnthropicSSE {
  private state: AdapterState
  private onEvent: SSEEventCallback

  constructor(options: AiSdkToAnthropicSSEOptions) {
    this.onEvent = options.onEvent
    this.state = {
      messageId: options.messageId || `msg_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`,
      model: options.model,
      inputTokens: options.inputTokens || 0,
      outputTokens: 0,
      cacheInputTokens: 0,
      currentBlockIndex: 0,
      blocks: new Map(),
      textBlockIndex: null,
      thinkingBlocks: new Map(),
      currentThinkingId: null,
      toolBlocks: new Map(),
      stopReason: null,
      hasEmittedMessageStart: false
    }
  }

  /**
   * Process the AI SDK stream and emit Anthropic SSE events.
   * Reads the stream to completion; on error the reader is cancelled and
   * the error is rethrown to the caller.
   */
  async processStream(fullStream: ReadableStream<TextStreamPart<ToolSet>>): Promise<void> {
    const reader = fullStream.getReader()

    try {
      // Emit message_start at the beginning
      this.emitMessageStart()

      while (true) {
        const { done, value } = await reader.read()

        if (done) {
          break
        }

        this.processChunk(value)
      }

      // Ensure all blocks are closed and emit final events
      this.finalize()
    } catch (error) {
      await reader.cancel()
      throw error
    } finally {
      reader.releaseLock()
    }
  }

  /**
   * Process a single AI SDK chunk and emit corresponding Anthropic events.
   */
  private processChunk(chunk: TextStreamPart<ToolSet>): void {
    logger.silly('AiSdkToAnthropicSSE - Processing chunk:', { chunk: JSON.stringify(chunk) })
    switch (chunk.type) {
      // === Text Events ===
      case 'text-start':
        this.startTextBlock()
        break

      case 'text-delta':
        this.emitTextDelta(chunk.text || '')
        break

      case 'text-end':
        this.stopTextBlock()
        break

      // === Reasoning/Thinking Events ===
      case 'reasoning-start': {
        const reasoningId = chunk.id
        this.startThinkingBlock(reasoningId)
        break
      }

      case 'reasoning-delta': {
        const reasoningId = chunk.id
        this.emitThinkingDelta(chunk.text || '', reasoningId)
        break
      }

      case 'reasoning-end': {
        const reasoningId = chunk.id
        this.stopThinkingBlock(reasoningId)
        break
      }

      // === Tool Events ===
      case 'tool-call':
        // Cache provider-specific reasoning metadata so it can be replayed
        // on subsequent requests.
        if (googleReasoningCache && chunk.providerMetadata?.google?.thoughtSignature) {
          googleReasoningCache.set(
            `google-${chunk.toolName}`,
            chunk.providerMetadata?.google?.thoughtSignature as string
          )
        }
        // FIXME: bind cached reasoning details to the tool call id instead
        // of the fixed 'openrouter' key
        if (
          openRouterReasoningCache &&
          chunk.providerMetadata?.openrouter?.reasoning_details &&
          Array.isArray(chunk.providerMetadata.openrouter.reasoning_details)
        ) {
          openRouterReasoningCache.set(
            'openrouter',
            JSON.parse(JSON.stringify(chunk.providerMetadata.openrouter.reasoning_details))
          )
        }
        this.handleToolCall({
          type: 'tool-call',
          toolCallId: chunk.toolCallId,
          toolName: chunk.toolName,
          args: chunk.input
        })
        break

      case 'tool-result':
        // Tool results are currently not forwarded to the client.
        // this.handleToolResult({
        //   type: 'tool-result',
        //   toolCallId: chunk.toolCallId,
        //   toolName: chunk.toolName,
        //   args: chunk.input,
        //   result: chunk.output
        // })
        break

      case 'finish-step':
        if (chunk.finishReason === 'tool-calls') {
          this.state.stopReason = 'tool_use'
        }
        break

      case 'finish':
        this.handleFinish(chunk)
        break

      case 'error':
        throw chunk.error

      // Ignore other event types
      default:
        break
    }
  }

  // Emit the message_start envelope exactly once per stream.
  private emitMessageStart(): void {
    if (this.state.hasEmittedMessageStart) return

    this.state.hasEmittedMessageStart = true

    const usage: Usage = {
      input_tokens: this.state.inputTokens,
      output_tokens: 0,
      cache_creation_input_tokens: 0,
      cache_read_input_tokens: 0,
      server_tool_use: null
    }

    const message: Message = {
      id: this.state.messageId,
      type: 'message',
      role: 'assistant',
      content: [],
      model: this.state.model,
      stop_reason: null,
      stop_sequence: null,
      usage
    }

    const event: RawMessageStartEvent = {
      type: 'message_start',
      message
    }

    this.onEvent(event)
  }

  // Open a new text content block and emit content_block_start.
  private startTextBlock(): void {
    // If we already have a text block, don't create another
    if (this.state.textBlockIndex !== null) return

    const index = this.state.currentBlockIndex++
    this.state.textBlockIndex = index
    this.state.blocks.set(index, {
      type: 'text',
      index,
      started: true,
      content: ''
    })

    const contentBlock: TextBlock = {
      type: 'text',
      text: '',
      citations: null
    }

    const event: RawContentBlockStartEvent = {
      type: 'content_block_start',
      index,
      content_block: contentBlock
    }

    this.onEvent(event)
  }

  // Emit a text_delta for the open text block (auto-opens one if needed).
  private emitTextDelta(text: string): void {
    if (!text) return

    // Auto-start text block if not started
    if (this.state.textBlockIndex === null) {
      this.startTextBlock()
    }

    const index = this.state.textBlockIndex!
    const block = this.state.blocks.get(index)
    if (block) {
      // Accumulate for buildNonStreamingResponse().
      block.content += text
    }

    const delta: TextDelta = {
      type: 'text_delta',
      text
    }

    const event: RawContentBlockDeltaEvent = {
      type: 'content_block_delta',
      index,
      delta
    }

    this.onEvent(event)
  }

  // Close the open text block (no-op when none is open).
  private stopTextBlock(): void {
    if (this.state.textBlockIndex === null) return

    const index = this.state.textBlockIndex

    const event: RawContentBlockStopEvent = {
      type: 'content_block_stop',
      index
    }

    this.onEvent(event)
    this.state.textBlockIndex = null
  }

  // Open a thinking block keyed by the AI SDK reasoning id.
  private startThinkingBlock(reasoningId: string): void {
    // Check if this thinking block already exists
    if (this.state.thinkingBlocks.has(reasoningId)) return

    const index = this.state.currentBlockIndex++
    this.state.thinkingBlocks.set(reasoningId, index)
    this.state.currentThinkingId = reasoningId
    this.state.blocks.set(index, {
      type: 'thinking',
      index,
      started: true,
      content: ''
    })

    const contentBlock: ThinkingBlock = {
      type: 'thinking',
      thinking: '',
      signature: ''
    }

    const event: RawContentBlockStartEvent = {
      type: 'content_block_start',
      index,
      content_block: contentBlock
    }

    this.onEvent(event)
  }

  // Emit a thinking_delta; creates the target block on demand so deltas
  // arriving without a prior reasoning-start are not dropped.
  private emitThinkingDelta(text: string, reasoningId?: string): void {
    if (!text) return

    // Determine which thinking block to use
    const targetId = reasoningId || this.state.currentThinkingId
    if (!targetId) {
      // Auto-start thinking block if not started
      const newId = `reasoning_${Date.now()}`
      this.startThinkingBlock(newId)
      return this.emitThinkingDelta(text, newId)
    }

    const index = this.state.thinkingBlocks.get(targetId)
    if (index === undefined) {
      // If the block doesn't exist, create it
      this.startThinkingBlock(targetId)
      return this.emitThinkingDelta(text, targetId)
    }

    const block = this.state.blocks.get(index)
    if (block) {
      block.content += text
    }

    const delta: ThinkingDelta = {
      type: 'thinking_delta',
      thinking: text
    }

    const event: RawContentBlockDeltaEvent = {
      type: 'content_block_delta',
      index,
      delta
    }

    this.onEvent(event)
  }

  // Close a thinking block and update which block is "current".
  private stopThinkingBlock(reasoningId?: string): void {
    const targetId = reasoningId || this.state.currentThinkingId
    if (!targetId) return

    const index = this.state.thinkingBlocks.get(targetId)
    if (index === undefined) return

    const event: RawContentBlockStopEvent = {
      type: 'content_block_stop',
      index
    }

    this.onEvent(event)
    this.state.thinkingBlocks.delete(targetId)

    // Update currentThinkingId if we just closed the current one
    if (this.state.currentThinkingId === targetId) {
      // Set to the most recent remaining thinking block, or null if none
      const remaining = Array.from(this.state.thinkingBlocks.keys())
      this.state.currentThinkingId = remaining.length > 0 ? remaining[remaining.length - 1] : null
    }
  }

  // Emit a complete tool_use block (start, full JSON delta, stop) for one
  // tool call; duplicate toolCallIds are ignored.
  private handleToolCall(chunk: { type: 'tool-call'; toolCallId: string; toolName: string; args: unknown }): void {
    const { toolCallId, toolName, args } = chunk

    // Check if we already have this tool call
    if (this.state.toolBlocks.has(toolCallId)) {
      return
    }

    const index = this.state.currentBlockIndex++
    this.state.toolBlocks.set(toolCallId, index)

    const inputJson = JSON.stringify(args)

    this.state.blocks.set(index, {
      type: 'tool_use',
      index,
      started: true,
      content: inputJson,
      toolId: toolCallId,
      toolName,
      toolInput: inputJson
    })

    // Emit content_block_start for tool_use
    const contentBlock: ToolUseBlock = {
      type: 'tool_use',
      id: toolCallId,
      name: toolName,
      input: {}
    }

    const startEvent: RawContentBlockStartEvent = {
      type: 'content_block_start',
      index,
      content_block: contentBlock
    }

    this.onEvent(startEvent)

    // Emit the full input as a delta (Anthropic streams JSON incrementally)
    const delta: InputJSONDelta = {
      type: 'input_json_delta',
      partial_json: inputJson
    }

    const deltaEvent: RawContentBlockDeltaEvent = {
      type: 'content_block_delta',
      index,
      delta
    }

    this.onEvent(deltaEvent)

    // Emit content_block_stop
    const stopEvent: RawContentBlockStopEvent = {
      type: 'content_block_stop',
      index
    }

    this.onEvent(stopEvent)

    // Mark that we have tool use
    this.state.stopReason = 'tool_use'
  }

  // Record final usage and map the AI SDK finish reason onto Anthropic's
  // stop_reason vocabulary (unless a tool call already fixed it).
  private handleFinish(chunk: { type: 'finish'; finishReason?: FinishReason; totalUsage?: LanguageModelUsage }): void {
    // Update usage
    if (chunk.totalUsage) {
      this.state.inputTokens = chunk.totalUsage.inputTokens || 0
      this.state.outputTokens = chunk.totalUsage.outputTokens || 0
      this.state.cacheInputTokens = chunk.totalUsage.cachedInputTokens || 0
    }

    // Determine finish reason
    if (!this.state.stopReason) {
      switch (chunk.finishReason) {
        case 'stop':
          this.state.stopReason = 'end_turn'
          break
        case 'length':
          this.state.stopReason = 'max_tokens'
          break
        case 'tool-calls':
          this.state.stopReason = 'tool_use'
          break
        case 'content-filter':
          this.state.stopReason = 'refusal'
          break
        default:
          this.state.stopReason = 'end_turn'
      }
    }
  }

  // Close any open blocks, then emit message_delta (final stop reason and
  // usage) followed by message_stop.
  private finalize(): void {
    // Close any open blocks
    if (this.state.textBlockIndex !== null) {
      this.stopTextBlock()
    }
    // Close all open thinking blocks
    // (deleting the current key while iterating a Map is safe in JS)
    for (const reasoningId of this.state.thinkingBlocks.keys()) {
      this.stopThinkingBlock(reasoningId)
    }

    // Emit message_delta with final stop reason and usage
    const usage: MessageDeltaUsage = {
      output_tokens: this.state.outputTokens,
      input_tokens: this.state.inputTokens,
      cache_creation_input_tokens: this.state.cacheInputTokens,
      cache_read_input_tokens: null,
      server_tool_use: null
    }

    const messageDeltaEvent: RawMessageDeltaEvent = {
      type: 'message_delta',
      delta: {
        stop_reason: this.state.stopReason || 'end_turn',
        stop_sequence: null
      },
      usage
    }

    this.onEvent(messageDeltaEvent)

    // Emit message_stop
    const messageStopEvent: RawMessageStopEvent = {
      type: 'message_stop'
    }

    this.onEvent(messageStopEvent)
  }

  /**
   * Set input token count (typically from prompt)
   */
  setInputTokens(count: number): void {
    this.state.inputTokens = count
  }

  /**
   * Get the current message ID
   */
  getMessageId(): string {
    return this.state.messageId
  }

  /**
   * Build a complete Message object for non-streaming responses.
   * Replays the accumulated block state into an Anthropic Message.
   */
  buildNonStreamingResponse(): Message {
    const content: ContentBlock[] = []

    // Collect all content blocks in order
    const sortedBlocks = Array.from(this.state.blocks.values()).sort((a, b) => a.index - b.index)

    for (const block of sortedBlocks) {
      switch (block.type) {
        case 'text':
          content.push({
            type: 'text',
            text: block.content,
            citations: null
          } as TextBlock)
          break
        case 'thinking':
          // NOTE(review): the cast hides that ThinkingBlock's required
          // `signature` field is omitted here — confirm downstream consumers
          // tolerate its absence.
          content.push({
            type: 'thinking',
            thinking: block.content
          } as ThinkingBlock)
          break
        case 'tool_use':
          content.push({
            type: 'tool_use',
            id: block.toolId!,
            name: block.toolName!,
            input: JSON.parse(block.toolInput || '{}')
          } as ToolUseBlock)
          break
      }
    }

    return {
      id: this.state.messageId,
      type: 'message',
      role: 'assistant',
      content,
      model: this.state.model,
      stop_reason: this.state.stopReason || 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: this.state.inputTokens,
        output_tokens: this.state.outputTokens,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
        server_tool_use: null
      }
    }
  }
}
|
||||
|
||||
/**
|
||||
* Format an Anthropic SSE event for HTTP streaming
|
||||
*/
|
||||
export function formatSSEEvent(event: RawMessageStreamEvent): string {
|
||||
return `event: ${event.type}\ndata: ${JSON.stringify(event)}\n\n`
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a done marker for SSE stream
|
||||
*/
|
||||
export function formatSSEDone(): string {
|
||||
return 'data: [DONE]\n\n'
|
||||
}
|
||||
|
||||
// Default export mirrors the named class export for import convenience.
export default AiSdkToAnthropicSSE
|
||||
13
src/main/apiServer/adapters/index.ts
Normal file
13
src/main/apiServer/adapters/index.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
/**
 * Shared Adapters
 *
 * This module exports adapters for converting between different AI API formats.
 */

// Re-export the AI SDK -> Anthropic SSE adapter and its helpers.
export {
  AiSdkToAnthropicSSE,
  type AiSdkToAnthropicSSEOptions,
  formatSSEDone,
  formatSSEEvent,
  type SSEEventCallback
} from './AiSdkToAnthropicSSE'
|
||||
95
src/main/apiServer/adapters/openrouter.ts
Normal file
95
src/main/apiServer/adapters/openrouter.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
import * as z from 'zod/v4'
|
||||
|
||||
// Known serialization formats for OpenRouter `reasoning_details` entries.
enum ReasoningFormat {
  Unknown = 'unknown',
  OpenAIResponsesV1 = 'openai-responses-v1',
  XAIResponsesV1 = 'xai-responses-v1',
  AnthropicClaudeV1 = 'anthropic-claude-v1',
  GoogleGeminiV1 = 'google-gemini-v1'
}

// Anthropic Claude was the first reasoning format that we're
// passing back and forth, so it remains the default.
export const DEFAULT_REASONING_FORMAT = ReasoningFormat.AnthropicClaudeV1
|
||||
|
||||
function isDefinedOrNotNull<T>(value: T | null | undefined): value is T {
|
||||
return value !== null && value !== undefined
|
||||
}
|
||||
|
||||
// Discriminator values for the reasoning-detail variants below.
export enum ReasoningDetailType {
  Summary = 'reasoning.summary',
  Encrypted = 'reasoning.encrypted',
  Text = 'reasoning.text'
}

// Fields shared by every reasoning-detail variant. `.loose()` keeps any
// extra provider-specific keys instead of stripping them.
export const CommonReasoningDetailSchema = z
  .object({
    id: z.string().nullish(),
    format: z.enum(ReasoningFormat).nullish(),
    index: z.number().optional()
  })
  .loose()

// A human-readable summary of the model's reasoning.
export const ReasoningDetailSummarySchema = z
  .object({
    type: z.literal(ReasoningDetailType.Summary),
    summary: z.string()
  })
  .extend(CommonReasoningDetailSchema.shape)
export type ReasoningDetailSummary = z.infer<typeof ReasoningDetailSummarySchema>

// Opaque encrypted reasoning payload passed back to the provider verbatim.
export const ReasoningDetailEncryptedSchema = z
  .object({
    type: z.literal(ReasoningDetailType.Encrypted),
    data: z.string()
  })
  .extend(CommonReasoningDetailSchema.shape)

export type ReasoningDetailEncrypted = z.infer<typeof ReasoningDetailEncryptedSchema>

// Plain-text reasoning, optionally accompanied by a signature.
export const ReasoningDetailTextSchema = z
  .object({
    type: z.literal(ReasoningDetailType.Text),
    text: z.string().nullish(),
    signature: z.string().nullish()
  })
  .extend(CommonReasoningDetailSchema.shape)

export type ReasoningDetailText = z.infer<typeof ReasoningDetailTextSchema>

// Any one of the known reasoning-detail variants.
export const ReasoningDetailUnionSchema = z.union([
  ReasoningDetailSummarySchema,
  ReasoningDetailEncryptedSchema,
  ReasoningDetailTextSchema
])

export type ReasoningDetailUnion = z.infer<typeof ReasoningDetailUnionSchema>

// Unknown variants parse to null rather than failing the whole array.
const ReasoningDetailsWithUnknownSchema = z.union([ReasoningDetailUnionSchema, z.unknown().transform(() => null)])

// Array of reasoning details with unrecognized entries filtered out.
export const ReasoningDetailArraySchema = z
  .array(ReasoningDetailsWithUnknownSchema)
  .transform((d) => d.filter((d): d is ReasoningDetailUnion => !!d))

// Extracts `reasoning_details` from the three response shapes OpenRouter
// uses: streaming deltas, full messages, and text completions.
export const OutputUnionToReasoningDetailsSchema = z.union([
  z
    .object({
      delta: z.object({
        reasoning_details: z.array(ReasoningDetailsWithUnknownSchema)
      })
    })
    .transform((data) => data.delta.reasoning_details.filter(isDefinedOrNotNull)),
  z
    .object({
      message: z.object({
        reasoning_details: z.array(ReasoningDetailsWithUnknownSchema)
      })
    })
    .transform((data) => data.message.reasoning_details.filter(isDefinedOrNotNull)),
  z
    .object({
      text: z.string(),
      reasoning_details: z.array(ReasoningDetailsWithUnknownSchema)
    })
    .transform((data) => data.reasoning_details.filter(isDefinedOrNotNull))
])
|
||||
@@ -1,17 +1,93 @@
|
||||
import type { MessageCreateParams } from '@anthropic-ai/sdk/resources'
|
||||
import { loggerService } from '@logger'
|
||||
import { buildSharedMiddlewares, type SharedMiddlewareConfig } from '@shared/middleware'
|
||||
import { getAiSdkProviderId } from '@shared/provider'
|
||||
import type { Provider } from '@types'
|
||||
import type { Request, Response } from 'express'
|
||||
import express from 'express'
|
||||
|
||||
import { messagesService } from '../services/messages'
|
||||
import { getProviderById, validateModelId } from '../utils'
|
||||
import { generateUnifiedMessage, streamUnifiedMessages } from '../services/unified-messages'
|
||||
import { getProviderById, isModelAnthropicCompatible, validateModelId } from '../utils'
|
||||
|
||||
/**
|
||||
* Check if a specific model on a provider should use direct Anthropic SDK
|
||||
*
|
||||
* A provider+model combination is considered "Anthropic-compatible" if:
|
||||
* 1. It's a native Anthropic provider (type === 'anthropic'), OR
|
||||
* 2. It has anthropicApiHost configured AND the specific model supports Anthropic API
|
||||
* (for aggregated providers like Silicon, only certain models support Anthropic endpoint)
|
||||
*
|
||||
* @param provider - The provider to check
|
||||
* @param modelId - The model ID to check (without provider prefix)
|
||||
* @returns true if should use direct Anthropic SDK, false for unified SDK
|
||||
*/
|
||||
function shouldUseDirectAnthropic(provider: Provider, modelId: string): boolean {
|
||||
// Native Anthropic provider - always use direct SDK
|
||||
if (provider.type === 'anthropic') {
|
||||
return true
|
||||
}
|
||||
|
||||
// No anthropicApiHost configured - use unified SDK
|
||||
if (!provider.anthropicApiHost?.trim()) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Has anthropicApiHost - check model-level compatibility
|
||||
// For aggregated providers, only specific models support Anthropic API
|
||||
return isModelAnthropicCompatible(provider, modelId)
|
||||
}
|
||||
|
||||
const logger = loggerService.withContext('ApiServerMessagesRoutes')
|
||||
|
||||
const router = express.Router()
|
||||
const providerRouter = express.Router({ mergeParams: true })
|
||||
|
||||
/**
|
||||
* Estimate token count from messages
|
||||
* Simple approximation: ~4 characters per token for English text
|
||||
*/
|
||||
interface CountTokensInput {
|
||||
messages: Array<{ role: string; content: string | Array<{ type: string; text?: string }> }>
|
||||
system?: string | Array<{ type: string; text?: string }>
|
||||
}
|
||||
|
||||
function estimateTokenCount(input: CountTokensInput): number {
|
||||
const { messages, system } = input
|
||||
let totalChars = 0
|
||||
|
||||
// Count system message tokens
|
||||
if (system) {
|
||||
if (typeof system === 'string') {
|
||||
totalChars += system.length
|
||||
} else if (Array.isArray(system)) {
|
||||
for (const block of system) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
totalChars += block.text.length
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count message tokens
|
||||
for (const msg of messages) {
|
||||
if (typeof msg.content === 'string') {
|
||||
totalChars += msg.content.length
|
||||
} else if (Array.isArray(msg.content)) {
|
||||
for (const block of msg.content) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
totalChars += block.text.length
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add overhead for role
|
||||
totalChars += 10
|
||||
}
|
||||
|
||||
// Estimate tokens (~4 chars per token, with some overhead)
|
||||
return Math.ceil(totalChars / 4) + messages.length * 3
|
||||
}
|
||||
|
||||
// Helper function for basic request validation
|
||||
async function validateRequestBody(req: Request): Promise<{ valid: boolean; error?: any }> {
|
||||
const request: MessageCreateParams = req.body
|
||||
@@ -33,21 +109,36 @@ async function validateRequestBody(req: Request): Promise<{ valid: boolean; erro
|
||||
}
|
||||
|
||||
interface HandleMessageProcessingOptions {
|
||||
req: Request
|
||||
res: Response
|
||||
provider: Provider
|
||||
request: MessageCreateParams
|
||||
modelId?: string
|
||||
}
|
||||
|
||||
async function handleMessageProcessing({
|
||||
req,
|
||||
/**
|
||||
* Handle message processing using direct Anthropic SDK
|
||||
* Used for providers with anthropicApiHost or native Anthropic providers
|
||||
* This bypasses AI SDK conversion and uses native Anthropic protocol
|
||||
*/
|
||||
async function handleDirectAnthropicProcessing({
|
||||
res,
|
||||
provider,
|
||||
request,
|
||||
modelId
|
||||
}: HandleMessageProcessingOptions): Promise<void> {
|
||||
modelId,
|
||||
extraHeaders
|
||||
}: HandleMessageProcessingOptions & { extraHeaders?: Record<string, string | string[]> }): Promise<void> {
|
||||
const actualModelId = modelId || request.model
|
||||
|
||||
logger.info('Processing message via direct Anthropic SDK', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId: actualModelId,
|
||||
stream: !!request.stream,
|
||||
anthropicApiHost: provider.anthropicApiHost
|
||||
})
|
||||
|
||||
try {
|
||||
// Validate request
|
||||
const validation = messagesService.validateRequest(request)
|
||||
if (!validation.isValid) {
|
||||
res.status(400).json({
|
||||
@@ -60,28 +151,126 @@ async function handleMessageProcessing({
|
||||
return
|
||||
}
|
||||
|
||||
const extraHeaders = messagesService.prepareHeaders(req.headers)
|
||||
// Process message using messagesService (native Anthropic SDK)
|
||||
const { client, anthropicRequest } = await messagesService.processMessage({
|
||||
provider,
|
||||
request,
|
||||
extraHeaders,
|
||||
modelId
|
||||
modelId: actualModelId
|
||||
})
|
||||
|
||||
if (request.stream) {
|
||||
// Use native Anthropic streaming
|
||||
await messagesService.handleStreaming(client, anthropicRequest, { response: res }, provider)
|
||||
return
|
||||
} else {
|
||||
// Use native Anthropic non-streaming
|
||||
const response = await client.messages.create(anthropicRequest)
|
||||
res.json(response)
|
||||
}
|
||||
|
||||
const response = await client.messages.create(anthropicRequest)
|
||||
res.json(response)
|
||||
} catch (error: any) {
|
||||
logger.error('Message processing error', { error })
|
||||
logger.error('Direct Anthropic processing error', { error })
|
||||
const { statusCode, errorResponse } = messagesService.transformError(error)
|
||||
res.status(statusCode).json(errorResponse)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle message processing using unified AI SDK
|
||||
* Used for non-Anthropic providers that need format conversion
|
||||
* - Uses AI SDK adapters with output converted to Anthropic SSE format
|
||||
*/
|
||||
async function handleUnifiedProcessing({
|
||||
res,
|
||||
provider,
|
||||
request,
|
||||
modelId
|
||||
}: HandleMessageProcessingOptions): Promise<void> {
|
||||
const actualModelId = modelId || request.model
|
||||
|
||||
logger.info('Processing message via unified AI SDK', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId: actualModelId,
|
||||
stream: !!request.stream
|
||||
})
|
||||
|
||||
try {
|
||||
// Validate request
|
||||
const validation = messagesService.validateRequest(request)
|
||||
if (!validation.isValid) {
|
||||
res.status(400).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'invalid_request_error',
|
||||
message: validation.errors.join('; ')
|
||||
}
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const middlewareConfig: SharedMiddlewareConfig = {
|
||||
modelId: actualModelId,
|
||||
providerId: provider.id,
|
||||
aiSdkProviderId: getAiSdkProviderId(provider)
|
||||
}
|
||||
const middlewares = buildSharedMiddlewares(middlewareConfig)
|
||||
|
||||
logger.debug('Built middlewares for unified processing', {
|
||||
middlewareCount: middlewares.length,
|
||||
modelId: actualModelId,
|
||||
providerId: provider.id
|
||||
})
|
||||
|
||||
if (request.stream) {
|
||||
await streamUnifiedMessages({
|
||||
response: res,
|
||||
provider,
|
||||
modelId: actualModelId,
|
||||
params: request,
|
||||
middlewares,
|
||||
onError: (error) => {
|
||||
logger.error('Stream error', error as Error)
|
||||
},
|
||||
onComplete: () => {
|
||||
logger.debug('Stream completed')
|
||||
}
|
||||
})
|
||||
} else {
|
||||
const response = await generateUnifiedMessage({
|
||||
provider,
|
||||
modelId: actualModelId,
|
||||
params: request,
|
||||
middlewares
|
||||
})
|
||||
res.json(response)
|
||||
}
|
||||
} catch (error: any) {
|
||||
const { statusCode, errorResponse } = messagesService.transformError(error)
|
||||
res.status(statusCode).json(errorResponse)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle message processing - routes to appropriate handler based on provider and model
|
||||
*
|
||||
* Routing logic:
|
||||
* - Native Anthropic providers (type === 'anthropic'): Direct Anthropic SDK
|
||||
* - Providers with anthropicApiHost AND model supports Anthropic API: Direct Anthropic SDK
|
||||
* - Other providers/models: Unified AI SDK with Anthropic SSE conversion
|
||||
*/
|
||||
async function handleMessageProcessing({
|
||||
res,
|
||||
provider,
|
||||
request,
|
||||
modelId
|
||||
}: HandleMessageProcessingOptions): Promise<void> {
|
||||
const actualModelId = modelId || request.model
|
||||
if (shouldUseDirectAnthropic(provider, actualModelId)) {
|
||||
return handleDirectAnthropicProcessing({ res, provider, request, modelId })
|
||||
}
|
||||
return handleUnifiedProcessing({ res, provider, request, modelId })
|
||||
}
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/messages:
|
||||
@@ -235,7 +424,7 @@ router.post('/', async (req: Request, res: Response) => {
|
||||
const provider = modelValidation.provider!
|
||||
const modelId = modelValidation.modelId!
|
||||
|
||||
return handleMessageProcessing({ req, res, provider, request, modelId })
|
||||
return handleMessageProcessing({ res, provider, request, modelId })
|
||||
} catch (error: any) {
|
||||
logger.error('Message processing error', { error })
|
||||
const { statusCode, errorResponse } = messagesService.transformError(error)
|
||||
@@ -393,7 +582,7 @@ providerRouter.post('/', async (req: Request, res: Response) => {
|
||||
|
||||
const request: MessageCreateParams = req.body
|
||||
|
||||
return handleMessageProcessing({ req, res, provider, request })
|
||||
return handleMessageProcessing({ res, provider, request })
|
||||
} catch (error: any) {
|
||||
logger.error('Message processing error', { error })
|
||||
const { statusCode, errorResponse } = messagesService.transformError(error)
|
||||
@@ -401,4 +590,132 @@ providerRouter.post('/', async (req: Request, res: Response) => {
|
||||
}
|
||||
})
|
||||
|
||||
/**
|
||||
* @swagger
|
||||
* /v1/messages/count_tokens:
|
||||
* post:
|
||||
* summary: Count tokens for messages
|
||||
* description: Count tokens for Anthropic Messages API format (required by Claude Code SDK)
|
||||
* tags: [Messages]
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - model
|
||||
* - messages
|
||||
* properties:
|
||||
* model:
|
||||
* type: string
|
||||
* description: Model ID
|
||||
* messages:
|
||||
* type: array
|
||||
* items:
|
||||
* type: object
|
||||
* system:
|
||||
* type: string
|
||||
* description: System message
|
||||
* responses:
|
||||
* 200:
|
||||
* description: Token count response
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* input_tokens:
|
||||
* type: integer
|
||||
* 400:
|
||||
* description: Bad request
|
||||
*/
|
||||
router.post('/count_tokens', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { model, messages, system } = req.body
|
||||
|
||||
if (!model) {
|
||||
return res.status(400).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'invalid_request_error',
|
||||
message: 'model parameter is required'
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (!messages || !Array.isArray(messages)) {
|
||||
return res.status(400).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'invalid_request_error',
|
||||
message: 'messages parameter is required'
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const estimatedTokens = estimateTokenCount({ messages, system })
|
||||
|
||||
logger.debug('Token count estimated', {
|
||||
model,
|
||||
messageCount: messages.length,
|
||||
estimatedTokens
|
||||
})
|
||||
|
||||
return res.json({
|
||||
input_tokens: estimatedTokens
|
||||
})
|
||||
} catch (error: any) {
|
||||
logger.error('Token counting error', { error })
|
||||
return res.status(500).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'api_error',
|
||||
message: error.message || 'Internal server error'
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
/**
|
||||
* Provider-specific count_tokens endpoint
|
||||
*/
|
||||
providerRouter.post('/count_tokens', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { model, messages, system } = req.body
|
||||
|
||||
if (!messages || !Array.isArray(messages)) {
|
||||
return res.status(400).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'invalid_request_error',
|
||||
message: 'messages parameter is required'
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const estimatedTokens = estimateTokenCount({ messages, system })
|
||||
|
||||
logger.debug('Token count estimated (provider route)', {
|
||||
providerId: req.params.provider,
|
||||
model,
|
||||
messageCount: messages.length,
|
||||
estimatedTokens
|
||||
})
|
||||
|
||||
return res.json({
|
||||
input_tokens: estimatedTokens
|
||||
})
|
||||
} catch (error: any) {
|
||||
logger.error('Token counting error', { error })
|
||||
return res.status(500).json({
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'api_error',
|
||||
message: error.message || 'Internal server error'
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
export { providerRouter as messagesProviderRoutes, router as messagesRoutes }
|
||||
|
||||
@@ -2,8 +2,10 @@ import type Anthropic from '@anthropic-ai/sdk'
|
||||
import type { MessageCreateParams, MessageStreamEvent } from '@anthropic-ai/sdk/resources'
|
||||
import { loggerService } from '@logger'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import { buildClaudeCodeSystemMessage, getSdkClient } from '@shared/anthropic'
|
||||
import { buildClaudeCodeSystemMessage, getSdkClient, sanitizeToolsForAnthropic } from '@shared/anthropic'
|
||||
import type { Provider } from '@types'
|
||||
import { APICallError, RetryError } from 'ai'
|
||||
import { net } from 'electron'
|
||||
import type { Response } from 'express'
|
||||
|
||||
const logger = loggerService.withContext('MessagesService')
|
||||
@@ -98,11 +100,30 @@ export class MessagesService {
|
||||
|
||||
async getClient(provider: Provider, extraHeaders?: Record<string, string | string[]>): Promise<Anthropic> {
|
||||
// Create Anthropic client for the provider
|
||||
// Wrap net.fetch to handle compatibility issues:
|
||||
// 1. net.fetch expects string URLs, not Request objects
|
||||
// 2. net.fetch doesn't support 'agent' option from Node.js http module
|
||||
const electronFetch: typeof globalThis.fetch = async (input: URL | RequestInfo, init?: RequestInit) => {
|
||||
const url = typeof input === 'string' ? input : input instanceof URL ? input.toString() : input.url
|
||||
// Remove unsupported options for Electron's net.fetch
|
||||
if (init) {
|
||||
const initWithAgent = init as RequestInit & { agent?: unknown }
|
||||
delete initWithAgent.agent
|
||||
const headers = new Headers(initWithAgent.headers)
|
||||
if (headers.has('content-length')) {
|
||||
headers.delete('content-length')
|
||||
}
|
||||
initWithAgent.headers = headers
|
||||
return net.fetch(url, initWithAgent)
|
||||
}
|
||||
return net.fetch(url)
|
||||
}
|
||||
const context = { fetch: electronFetch }
|
||||
if (provider.authType === 'oauth') {
|
||||
const oauthToken = await anthropicService.getValidAccessToken()
|
||||
return getSdkClient(provider, oauthToken, extraHeaders)
|
||||
return getSdkClient(provider, oauthToken, extraHeaders, context)
|
||||
}
|
||||
return getSdkClient(provider, null, extraHeaders)
|
||||
return getSdkClient(provider, null, extraHeaders, context)
|
||||
}
|
||||
|
||||
prepareHeaders(headers: Record<string, string | string[] | undefined>): Record<string, string | string[]> {
|
||||
@@ -127,7 +148,8 @@ export class MessagesService {
|
||||
createAnthropicRequest(request: MessageCreateParams, provider: Provider, modelId?: string): MessageCreateParams {
|
||||
const anthropicRequest: MessageCreateParams = {
|
||||
...request,
|
||||
stream: !!request.stream
|
||||
stream: !!request.stream,
|
||||
tools: sanitizeToolsForAnthropic(request.tools)
|
||||
}
|
||||
|
||||
// Override model if provided
|
||||
@@ -233,9 +255,71 @@ export class MessagesService {
|
||||
}
|
||||
|
||||
transformError(error: any): { statusCode: number; errorResponse: ErrorResponse } {
|
||||
let statusCode = 500
|
||||
let errorType = 'api_error'
|
||||
let errorMessage = 'Internal server error'
|
||||
let statusCode: number | undefined = undefined
|
||||
let errorType: string | undefined = undefined
|
||||
let errorMessage: string | undefined = undefined
|
||||
|
||||
const errorMap: Record<number, string> = {
|
||||
400: 'invalid_request_error',
|
||||
401: 'authentication_error',
|
||||
403: 'forbidden_error',
|
||||
404: 'not_found_error',
|
||||
429: 'rate_limit_error',
|
||||
500: 'internal_server_error'
|
||||
}
|
||||
|
||||
// Handle AI SDK RetryError - extract the last error for better error messages
|
||||
if (RetryError.isInstance(error)) {
|
||||
const lastError = error.lastError
|
||||
// If the last error is an APICallError, extract its details
|
||||
if (APICallError.isInstance(lastError)) {
|
||||
statusCode = lastError.statusCode || 502
|
||||
errorMessage = lastError.message
|
||||
return {
|
||||
statusCode,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: errorMap[statusCode] || 'api_error',
|
||||
message: `${error.reason}: ${errorMessage}`,
|
||||
requestId: lastError.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Fallback for other retry errors
|
||||
errorMessage = error.message
|
||||
statusCode = 502
|
||||
return {
|
||||
statusCode,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'api_error',
|
||||
message: errorMessage,
|
||||
requestId: error.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (APICallError.isInstance(error)) {
|
||||
statusCode = error.statusCode
|
||||
errorMessage = error.message
|
||||
if (statusCode) {
|
||||
return {
|
||||
statusCode,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: errorMap[statusCode] || 'api_error',
|
||||
message: errorMessage,
|
||||
requestId: error.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const anthropicStatus = typeof error?.status === 'number' ? error.status : undefined
|
||||
const anthropicError = error?.error
|
||||
@@ -277,11 +361,11 @@ export class MessagesService {
|
||||
typeof errorMessage === 'string' && errorMessage.length > 0 ? errorMessage : 'Internal server error'
|
||||
|
||||
return {
|
||||
statusCode,
|
||||
statusCode: statusCode ?? 500,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: errorType,
|
||||
type: errorType || 'api_error',
|
||||
message: safeErrorMessage,
|
||||
requestId: error?.request_id
|
||||
}
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
import { isEmpty } from 'lodash'
|
||||
|
||||
import type { ApiModel, ApiModelsFilter, ApiModelsResponse } from '../../../renderer/src/types/apiModels'
|
||||
import { loggerService } from '../../services/LoggerService'
|
||||
import {
|
||||
getAvailableProviders,
|
||||
getProviderAnthropicModelChecker,
|
||||
listAllAvailableModels,
|
||||
transformModelToOpenAI
|
||||
} from '../utils'
|
||||
import { getAvailableProviders, listAllAvailableModels, transformModelToOpenAI } from '../utils'
|
||||
|
||||
const logger = loggerService.withContext('ModelsService')
|
||||
|
||||
@@ -20,11 +13,12 @@ export class ModelsService {
|
||||
try {
|
||||
logger.debug('Getting available models from providers', { filter })
|
||||
|
||||
let providers = await getAvailableProviders()
|
||||
const providers = await getAvailableProviders()
|
||||
|
||||
if (filter.providerType === 'anthropic') {
|
||||
providers = providers.filter((p) => p.type === 'anthropic' || !isEmpty(p.anthropicApiHost?.trim()))
|
||||
}
|
||||
// Note: When providerType === 'anthropic', we now return ALL available models
|
||||
// because the API Server's unified adapter (AiSdkToAnthropicSSE) can convert
|
||||
// any provider's response to Anthropic SSE format. This enables Claude Code Agent
|
||||
// to work with OpenAI, Gemini, and other providers transparently.
|
||||
|
||||
const models = await listAllAvailableModels(providers)
|
||||
// Use Map to deduplicate models by their full ID (provider:model_id)
|
||||
@@ -32,20 +26,11 @@ export class ModelsService {
|
||||
|
||||
for (const model of models) {
|
||||
const provider = providers.find((p) => p.id === model.provider)
|
||||
// logger.debug(`Processing model ${model.id}`)
|
||||
if (!provider) {
|
||||
logger.debug(`Skipping model ${model.id} . Reason: Provider not found.`)
|
||||
continue
|
||||
}
|
||||
|
||||
if (filter.providerType === 'anthropic') {
|
||||
const checker = getProviderAnthropicModelChecker(provider.id)
|
||||
if (!checker(model)) {
|
||||
logger.debug(`Skipping model ${model.id} from ${model.provider}. Reason: Not an Anthropic model.`)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
const openAIModel = transformModelToOpenAI(model, provider)
|
||||
const fullModelId = openAIModel.id // This is already in format "provider:model_id"
|
||||
|
||||
|
||||
718
src/main/apiServer/services/unified-messages.ts
Normal file
718
src/main/apiServer/services/unified-messages.ts
Normal file
@@ -0,0 +1,718 @@
|
||||
import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
|
||||
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
|
||||
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
|
||||
import type { LanguageModelV2Middleware, LanguageModelV2ToolResultOutput } from '@ai-sdk/provider'
|
||||
import type { ProviderOptions, ReasoningPart, ToolCallPart, ToolResultPart } from '@ai-sdk/provider-utils'
|
||||
import type {
|
||||
ImageBlockParam,
|
||||
MessageCreateParams,
|
||||
TextBlockParam,
|
||||
Tool as AnthropicTool
|
||||
} from '@anthropic-ai/sdk/resources/messages'
|
||||
import { type AiPlugin, createExecutor } from '@cherrystudio/ai-core'
|
||||
import { createProvider as createProviderCore } from '@cherrystudio/ai-core/provider'
|
||||
import { loggerService } from '@logger'
|
||||
import { AiSdkToAnthropicSSE, formatSSEDone, formatSSEEvent } from '@main/apiServer/adapters'
|
||||
import { generateSignature as cherryaiGenerateSignature } from '@main/integration/cherryai'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import copilotService from '@main/services/CopilotService'
|
||||
import { reduxService } from '@main/services/ReduxService'
|
||||
import { isGemini3ModelId } from '@shared/middleware'
|
||||
import {
|
||||
type AiSdkConfig,
|
||||
type AiSdkConfigContext,
|
||||
formatProviderApiHost,
|
||||
initializeSharedProviders,
|
||||
isAnthropicProvider,
|
||||
isGeminiProvider,
|
||||
isOpenAIProvider,
|
||||
type ProviderFormatContext,
|
||||
providerToAiSdkConfig as sharedProviderToAiSdkConfig,
|
||||
resolveActualProvider
|
||||
} from '@shared/provider'
|
||||
import { COPILOT_DEFAULT_HEADERS } from '@shared/provider/constant'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import type { Provider } from '@types'
|
||||
import type { ImagePart, JSONValue, ModelMessage, Provider as AiSdkProvider, TextPart, Tool as AiSdkTool } from 'ai'
|
||||
import { simulateStreamingMiddleware, stepCountIs, tool, wrapLanguageModel, zodSchema } from 'ai'
|
||||
import { net } from 'electron'
|
||||
import type { Response } from 'express'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { googleReasoningCache, openRouterReasoningCache } from '../../services/CacheService'
|
||||
|
||||
const logger = loggerService.withContext('UnifiedMessagesService')
|
||||
|
||||
const MAGIC_STRING = 'skip_thought_signature_validator'
|
||||
|
||||
function sanitizeJson(value: unknown): JSONValue {
|
||||
return JSON.parse(JSON.stringify(value))
|
||||
}
|
||||
|
||||
initializeSharedProviders({
|
||||
warn: (message) => logger.warn(message),
|
||||
error: (message, error) => logger.error(message, error)
|
||||
})
|
||||
|
||||
/**
|
||||
* Configuration for unified message streaming
|
||||
*/
|
||||
export interface UnifiedStreamConfig {
|
||||
response: Response
|
||||
provider: Provider
|
||||
modelId: string
|
||||
params: MessageCreateParams
|
||||
onError?: (error: unknown) => void
|
||||
onComplete?: () => void
|
||||
/**
|
||||
* Optional AI SDK middlewares to apply
|
||||
*/
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
/**
|
||||
* Optional AI Core plugins to use with the executor
|
||||
*/
|
||||
plugins?: AiPlugin[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration for non-streaming message generation
|
||||
*/
|
||||
export interface GenerateUnifiedMessageConfig {
|
||||
provider: Provider
|
||||
modelId: string
|
||||
params: MessageCreateParams
|
||||
middlewares?: LanguageModelV2Middleware[]
|
||||
plugins?: AiPlugin[]
|
||||
}
|
||||
|
||||
function getMainProcessFormatContext(): ProviderFormatContext {
|
||||
const vertexSettings = reduxService.selectSync<{ projectId: string; location: string }>('state.llm.settings.vertexai')
|
||||
return {
|
||||
vertex: {
|
||||
project: vertexSettings?.projectId || 'default-project',
|
||||
location: vertexSettings?.location || 'us-central1'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const mainProcessSdkContext: AiSdkConfigContext = {
|
||||
getRotatedApiKey: (provider) => {
|
||||
const keys = provider.apiKey.split(',').map((k) => k.trim())
|
||||
return keys[0] || provider.apiKey
|
||||
},
|
||||
fetch: net.fetch as typeof globalThis.fetch
|
||||
}
|
||||
|
||||
function getActualProvider(provider: Provider, modelId: string): Provider {
|
||||
const model = provider.models?.find((m) => m.id === modelId)
|
||||
if (!model) return provider
|
||||
return resolveActualProvider(provider, model)
|
||||
}
|
||||
|
||||
function providerToAiSdkConfig(provider: Provider, modelId: string): AiSdkConfig {
|
||||
const actualProvider = getActualProvider(provider, modelId)
|
||||
const formattedProvider = formatProviderApiHost(actualProvider, getMainProcessFormatContext())
|
||||
return sharedProviderToAiSdkConfig(formattedProvider, modelId, mainProcessSdkContext)
|
||||
}
|
||||
|
||||
function convertAnthropicToolResultToAiSdk(
|
||||
content: string | Array<TextBlockParam | ImageBlockParam>
|
||||
): LanguageModelV2ToolResultOutput {
|
||||
if (typeof content === 'string') {
|
||||
return { type: 'text', value: content }
|
||||
}
|
||||
const values: Array<{ type: 'text'; text: string } | { type: 'media'; data: string; mediaType: string }> = []
|
||||
for (const block of content) {
|
||||
if (block.type === 'text') {
|
||||
values.push({ type: 'text', text: block.text })
|
||||
} else if (block.type === 'image') {
|
||||
values.push({
|
||||
type: 'media',
|
||||
data: block.source.type === 'base64' ? block.source.data : block.source.url,
|
||||
mediaType: block.source.type === 'base64' ? block.source.media_type : 'image/png'
|
||||
})
|
||||
}
|
||||
}
|
||||
return { type: 'content', value: values }
|
||||
}
|
||||
|
||||
// Type alias for JSON Schema (compatible with recursive calls)
|
||||
type JsonSchemaLike = AnthropicTool.InputSchema | Record<string, unknown>
|
||||
|
||||
/**
|
||||
* Convert JSON Schema to Zod schema
|
||||
* This avoids non-standard fields like input_examples that Anthropic doesn't support
|
||||
*/
|
||||
function jsonSchemaToZod(schema: JsonSchemaLike): z.ZodTypeAny {
|
||||
const s = schema as Record<string, unknown>
|
||||
const schemaType = s.type as string | string[] | undefined
|
||||
const enumValues = s.enum as unknown[] | undefined
|
||||
const description = s.description as string | undefined
|
||||
|
||||
// Handle enum first
|
||||
if (enumValues && Array.isArray(enumValues) && enumValues.length > 0) {
|
||||
if (enumValues.every((v) => typeof v === 'string')) {
|
||||
const zodEnum = z.enum(enumValues as [string, ...string[]])
|
||||
return description ? zodEnum.describe(description) : zodEnum
|
||||
}
|
||||
// For non-string enums, use union of literals
|
||||
const literals = enumValues.map((v) => z.literal(v as string | number | boolean))
|
||||
if (literals.length === 1) {
|
||||
return description ? literals[0].describe(description) : literals[0]
|
||||
}
|
||||
const zodUnion = z.union(literals as unknown as [z.ZodTypeAny, z.ZodTypeAny, ...z.ZodTypeAny[]])
|
||||
return description ? zodUnion.describe(description) : zodUnion
|
||||
}
|
||||
|
||||
// Handle union types (type: ["string", "null"])
|
||||
if (Array.isArray(schemaType)) {
|
||||
const schemas = schemaType.map((t) => jsonSchemaToZod({ ...s, type: t, enum: undefined }))
|
||||
if (schemas.length === 1) {
|
||||
return schemas[0]
|
||||
}
|
||||
return z.union(schemas as [z.ZodTypeAny, z.ZodTypeAny, ...z.ZodTypeAny[]])
|
||||
}
|
||||
|
||||
// Handle by type
|
||||
switch (schemaType) {
|
||||
case 'string': {
|
||||
let zodString = z.string()
|
||||
if (typeof s.minLength === 'number') zodString = zodString.min(s.minLength)
|
||||
if (typeof s.maxLength === 'number') zodString = zodString.max(s.maxLength)
|
||||
if (typeof s.pattern === 'string') zodString = zodString.regex(new RegExp(s.pattern))
|
||||
return description ? zodString.describe(description) : zodString
|
||||
}
|
||||
|
||||
case 'number':
|
||||
case 'integer': {
|
||||
let zodNumber = schemaType === 'integer' ? z.number().int() : z.number()
|
||||
if (typeof s.minimum === 'number') zodNumber = zodNumber.min(s.minimum)
|
||||
if (typeof s.maximum === 'number') zodNumber = zodNumber.max(s.maximum)
|
||||
return description ? zodNumber.describe(description) : zodNumber
|
||||
}
|
||||
|
||||
case 'boolean': {
|
||||
const zodBoolean = z.boolean()
|
||||
return description ? zodBoolean.describe(description) : zodBoolean
|
||||
}
|
||||
|
||||
case 'null':
|
||||
return z.null()
|
||||
|
||||
case 'array': {
|
||||
const items = s.items as Record<string, unknown> | undefined
|
||||
let zodArray = items ? z.array(jsonSchemaToZod(items)) : z.array(z.unknown())
|
||||
if (typeof s.minItems === 'number') zodArray = zodArray.min(s.minItems)
|
||||
if (typeof s.maxItems === 'number') zodArray = zodArray.max(s.maxItems)
|
||||
return description ? zodArray.describe(description) : zodArray
|
||||
}
|
||||
|
||||
case 'object': {
|
||||
const properties = s.properties as Record<string, Record<string, unknown>> | undefined
|
||||
const required = (s.required as string[]) || []
|
||||
|
||||
// Always use z.object() to ensure "properties" field is present in output schema
|
||||
// OpenAI requires explicit properties field even for empty objects
|
||||
const shape: Record<string, z.ZodTypeAny> = {}
|
||||
if (properties) {
|
||||
for (const [key, propSchema] of Object.entries(properties)) {
|
||||
const zodProp = jsonSchemaToZod(propSchema)
|
||||
shape[key] = required.includes(key) ? zodProp : zodProp.optional()
|
||||
}
|
||||
}
|
||||
|
||||
const zodObject = z.object(shape)
|
||||
return description ? zodObject.describe(description) : zodObject
|
||||
}
|
||||
|
||||
default:
|
||||
// Unknown type, use z.unknown()
|
||||
return z.unknown()
|
||||
}
|
||||
}
|
||||
|
||||
function convertAnthropicToolsToAiSdk(tools: MessageCreateParams['tools']): Record<string, AiSdkTool> | undefined {
|
||||
if (!tools || tools.length === 0) return undefined
|
||||
|
||||
const aiSdkTools: Record<string, AiSdkTool> = {}
|
||||
for (const anthropicTool of tools) {
|
||||
if (anthropicTool.type === 'bash_20250124') continue
|
||||
const toolDef = anthropicTool as AnthropicTool
|
||||
const rawSchema = toolDef.input_schema
|
||||
const schema = jsonSchemaToZod(rawSchema)
|
||||
|
||||
// Use tool() with inputSchema (AI SDK v5 API)
|
||||
const aiTool = tool({
|
||||
description: toolDef.description || '',
|
||||
inputSchema: zodSchema(schema)
|
||||
})
|
||||
|
||||
aiSdkTools[toolDef.name] = aiTool
|
||||
}
|
||||
return Object.keys(aiSdkTools).length > 0 ? aiSdkTools : undefined
|
||||
}
|
||||
|
||||
/**
 * Convert an Anthropic Messages API request into AI SDK `ModelMessage`s.
 *
 * Conversion rules visible in this function:
 * - `params.system` (string or text-block array) becomes a single system message.
 * - A first pass builds a tool_use id -> name map so tool_result blocks in later
 *   messages can recover the tool name (cross-message references).
 * - Each Anthropic message is split into text / image / reasoning / tool-call /
 *   tool-result parts; tool results are emitted as a separate `role: 'tool'`
 *   message BEFORE the user/assistant message for the same source message.
 * - Assistant messages may get provider-specific options (OpenRouter reasoning
 *   details or a Gemini-3 thought signature) attached from module-level caches.
 *
 * @param params - The incoming Anthropic-format request.
 * @returns Ordered AI SDK messages ready for `streamText`.
 */
function convertAnthropicToAiMessages(params: MessageCreateParams): ModelMessage[] {
  const messages: ModelMessage[] = []

  // System message: plain string, or an array of text blocks joined by newlines.
  if (params.system) {
    if (typeof params.system === 'string') {
      messages.push({ role: 'system', content: params.system })
    } else if (Array.isArray(params.system)) {
      const systemText = params.system
        .filter((block) => block.type === 'text')
        .map((block) => block.text)
        .join('\n')
      if (systemText) {
        messages.push({ role: 'system', content: systemText })
      }
    }
  }

  // Pre-pass: record every tool_use id -> tool name so tool_result blocks
  // (which only carry tool_use_id) can be labelled even across messages.
  const toolCallIdToName = new Map<string, string>()
  for (const msg of params.messages) {
    if (Array.isArray(msg.content)) {
      for (const block of msg.content) {
        if (block.type === 'tool_use') {
          toolCallIdToName.set(block.id, block.name)
        }
      }
    }
  }

  // User/assistant messages
  for (const msg of params.messages) {
    if (typeof msg.content === 'string') {
      // Simple string content maps 1:1.
      messages.push({
        role: msg.role === 'user' ? 'user' : 'assistant',
        content: msg.content
      })
    } else if (Array.isArray(msg.content)) {
      // Bucket the content blocks by AI SDK part kind.
      const textParts: TextPart[] = []
      const imageParts: ImagePart[] = []
      const reasoningParts: ReasoningPart[] = []
      const toolCallParts: ToolCallPart[] = []
      const toolResultParts: ToolResultPart[] = []

      for (const block of msg.content) {
        if (block.type === 'text') {
          textParts.push({ type: 'text', text: block.text })
        } else if (block.type === 'thinking') {
          reasoningParts.push({ type: 'reasoning', text: block.thinking })
        } else if (block.type === 'redacted_thinking') {
          // Redacted thinking carries opaque data; forwarded verbatim as reasoning text.
          reasoningParts.push({ type: 'reasoning', text: block.data })
        } else if (block.type === 'image') {
          const source = block.source
          if (source.type === 'base64') {
            imageParts.push({ type: 'image', image: `data:${source.media_type};base64,${source.data}` })
          } else if (source.type === 'url') {
            imageParts.push({ type: 'image', image: source.url })
          }
        } else if (block.type === 'tool_use') {
          const options: ProviderOptions = {}

          // NOTE(review): the openrouter branch below is nested inside the
          // Gemini-3 model check, so OpenRouter reasoning details are only
          // attached for Gemini-3 model ids — confirm this nesting is intended.
          if (isGemini3ModelId(params.model)) {
            if (googleReasoningCache.get(`google-${block.name}`)) {
              options.google = {
                thoughtSignature: MAGIC_STRING
              }
            } else if (openRouterReasoningCache.get('openrouter')) {
              options.openrouter = {
                reasoning_details: (sanitizeJson(openRouterReasoningCache.get('openrouter')) as JSONValue[]) || []
              }
            }
          }
          toolCallParts.push({
            type: 'tool-call',
            toolName: block.name,
            toolCallId: block.id,
            input: block.input,
            providerOptions: options
          })
        } else if (block.type === 'tool_result') {
          // Look up toolName from the pre-built map (covers cross-message references)
          const toolName = toolCallIdToName.get(block.tool_use_id) || 'unknown'
          toolResultParts.push({
            type: 'tool-result',
            toolCallId: block.tool_use_id,
            toolName,
            // Empty content still needs an explicit empty text output.
            output: block.content ? convertAnthropicToolResultToAiSdk(block.content) : { type: 'text', value: '' }
          })
        }
      }

      // Tool results become their own 'tool' role message, emitted first.
      if (toolResultParts.length > 0) {
        messages.push({ role: 'tool', content: [...toolResultParts] })
      }

      if (msg.role === 'user') {
        const userContent = [...textParts, ...imageParts]
        if (userContent.length > 0) {
          messages.push({ role: 'user', content: userContent })
        }
      } else {
        // Assistant order: reasoning first, then text, then tool calls.
        const assistantContent = [...reasoningParts, ...textParts, ...toolCallParts]
        if (assistantContent.length > 0) {
          // Attach provider-specific reasoning continuation options; OpenRouter
          // cache takes precedence over the Gemini-3 thought signature.
          let providerOptions: ProviderOptions | undefined = undefined
          if (openRouterReasoningCache.get('openrouter')) {
            providerOptions = {
              openrouter: {
                reasoning_details: (sanitizeJson(openRouterReasoningCache.get('openrouter')) as JSONValue[]) || []
              }
            }
          } else if (isGemini3ModelId(params.model)) {
            providerOptions = {
              google: {
                thoughtSignature: MAGIC_STRING
              }
            }
          }
          messages.push({ role: 'assistant', content: assistantContent, providerOptions })
        }
      }
    }
  }

  return messages
}
|
||||
|
||||
/**
 * Inputs for executeStream(): provider/model selection, the original
 * Anthropic-format request, optional AI SDK middlewares/plugins, and an
 * optional callback invoked for every SSE event the adapter produces.
 */
interface ExecuteStreamConfig {
  // Resolved provider configuration (credentials, host, type).
  provider: Provider
  // Model identifier without the provider prefix.
  modelId: string
  // Anthropic Messages API request payload.
  params: MessageCreateParams
  // Optional language-model middlewares, applied via wrapLanguageModel().
  middlewares?: LanguageModelV2Middleware[]
  // Optional executor plugins passed to createExecutor().
  plugins?: AiPlugin[]
  // Receives each Anthropic-format SSE event emitted by the adapter.
  onEvent?: (event: Parameters<typeof formatSSEEvent>[0]) => void
}
|
||||
|
||||
/**
|
||||
* Create AI SDK provider instance from config
|
||||
* Similar to renderer's createAiSdkProvider
|
||||
*/
|
||||
async function createAiSdkProvider(config: AiSdkConfig): Promise<AiSdkProvider> {
|
||||
let providerId = config.providerId
|
||||
|
||||
// Handle special provider modes (same as renderer)
|
||||
if (providerId === 'openai' && config.options?.mode === 'chat') {
|
||||
providerId = 'openai-chat'
|
||||
} else if (providerId === 'azure' && config.options?.mode === 'responses') {
|
||||
providerId = 'azure-responses'
|
||||
} else if (providerId === 'cherryin' && config.options?.mode === 'chat') {
|
||||
providerId = 'cherryin-chat'
|
||||
}
|
||||
|
||||
const provider = await createProviderCore(providerId, config.options)
|
||||
|
||||
return provider
|
||||
}
|
||||
|
||||
/**
 * Prepare special provider configuration for providers that need dynamic tokens.
 * Similar to renderer's prepareSpecialProviderConfig.
 *
 * Handles three special cases, keyed by provider id:
 * - copilot: fetches a short-lived token and merges default + stored headers.
 * - anthropic (OAuth auth type): injects OAuth bearer headers and pins the base URL.
 * - cherryai: wraps fetch so each POST body is signed before sending.
 *
 * NOTE(review): this function mutates `config.options` in place (copilot and
 * cherryai cases) and also returns `config`; callers should not assume the
 * input object is left untouched.
 *
 * @param provider - The provider whose id/authType selects the special handling.
 * @param config - The AI SDK config to adjust (mutated and returned).
 * @returns The same config object, possibly with updated options.
 * @throws When the Copilot or Anthropic OAuth token cannot be obtained.
 */
async function prepareSpecialProviderConfig(provider: Provider, config: AiSdkConfig): Promise<AiSdkConfig> {
  switch (provider.id) {
    case 'copilot': {
      // Stored headers from redux override the compiled-in defaults.
      const storedHeaders =
        ((await reduxService.select('state.copilot.defaultHeaders')) as Record<string, string> | null) ?? {}
      const headers: Record<string, string> = {
        ...COPILOT_DEFAULT_HEADERS,
        ...storedHeaders
      }

      try {
        // NOTE(review): first argument is typed away with `null as any` —
        // presumably an unused event/context parameter; confirm against copilotService.
        const { token } = await copilotService.getToken(null as any, headers)
        config.options.apiKey = token
        // Headers already present on the config win over the copilot defaults.
        const existingHeaders = (config.options.headers as Record<string, string> | undefined) ?? {}
        config.options.headers = {
          ...headers,
          ...existingHeaders
        }
      } catch (error) {
        logger.error('Failed to get Copilot token', error as Error)
        throw new Error('Failed to get Copilot token. Please re-authorize Copilot.')
      }
      break
    }
    case 'anthropic': {
      // Only OAuth-authenticated Anthropic needs special handling;
      // API-key auth flows through unchanged.
      if (provider.authType === 'oauth') {
        try {
          const oauthToken = await anthropicService.getValidAccessToken()
          if (!oauthToken) {
            throw new Error('Anthropic OAuth token not available. Please re-authorize.')
          }
          // Replace options wholesale: bearer auth via headers, empty apiKey,
          // and a pinned official base URL.
          config.options = {
            ...config.options,
            headers: {
              ...(config.options.headers ? config.options.headers : {}),
              'Content-Type': 'application/json',
              'anthropic-version': '2023-06-01',
              'anthropic-beta': 'oauth-2025-04-20',
              Authorization: `Bearer ${oauthToken}`
            },
            baseURL: 'https://api.anthropic.com/v1',
            apiKey: ''
          }
        } catch (error) {
          logger.error('Failed to get Anthropic OAuth token', error as Error)
          throw new Error('Failed to get Anthropic OAuth token. Please re-authorize.')
        }
      }
      break
    }
    case 'cherryai': {
      // Create a signed fetch wrapper for cherryai
      const baseFetch = net.fetch as typeof globalThis.fetch
      config.options.fetch = async (url: RequestInfo | URL, options?: RequestInit) => {
        // Requests without a body (e.g. GET) need no signature.
        if (!options?.body) {
          return baseFetch(url, options)
        }
        // NOTE(review): path is hard-coded to /chat/completions — assumes all
        // signed cherryai calls hit that endpoint; confirm if others are added.
        const signature = cherryaiGenerateSignature({
          method: 'POST',
          path: '/chat/completions',
          query: '',
          body: JSON.parse(options.body as string)
        })
        return baseFetch(url, {
          ...options,
          headers: {
            ...(options.headers as Record<string, string>),
            ...signature
          }
        })
      }
      break
    }
  }
  return config
}
|
||||
|
||||
function mapAnthropicThinkToAISdkProviderOptions(
|
||||
provider: Provider,
|
||||
config: MessageCreateParams['thinking']
|
||||
): ProviderOptions | undefined {
|
||||
if (!config) return undefined
|
||||
if (isAnthropicProvider(provider)) {
|
||||
return {
|
||||
anthropic: {
|
||||
...mapToAnthropicProviderOptions(config)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (isGeminiProvider(provider)) {
|
||||
return {
|
||||
google: {
|
||||
...mapToGeminiProviderOptions(config)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (isOpenAIProvider(provider)) {
|
||||
return {
|
||||
openai: {
|
||||
...mapToOpenAIProviderOptions(config)
|
||||
}
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
function mapToAnthropicProviderOptions(config: NonNullable<MessageCreateParams['thinking']>): AnthropicProviderOptions {
|
||||
return {
|
||||
thinking: {
|
||||
type: config.type,
|
||||
budgetTokens: config.type === 'enabled' ? config.budget_tokens : undefined
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function mapToGeminiProviderOptions(
|
||||
config: NonNullable<MessageCreateParams['thinking']>
|
||||
): GoogleGenerativeAIProviderOptions {
|
||||
return {
|
||||
thinkingConfig: {
|
||||
thinkingBudget: config.type === 'enabled' ? config.budget_tokens : -1,
|
||||
includeThoughts: config.type === 'enabled'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function mapToOpenAIProviderOptions(
|
||||
config: NonNullable<MessageCreateParams['thinking']>
|
||||
): OpenAIResponsesProviderOptions {
|
||||
return {
|
||||
reasoningEffort: config.type === 'enabled' ? 'high' : 'none'
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Core stream execution function - single source of truth for AI SDK calls.
 *
 * Pipeline: provider config -> special-provider preparation (tokens, signed
 * fetch) -> provider/model instantiation -> optional middleware wrapping ->
 * executor creation -> message/tool conversion -> streamText -> adapter that
 * re-emits the AI SDK stream as Anthropic-format SSE events.
 *
 * @param config - See ExecuteStreamConfig.
 * @returns The adapter after the full stream has been consumed; callers can
 *          read its accumulated state (e.g. buildNonStreamingResponse()).
 */
async function executeStream(config: ExecuteStreamConfig): Promise<AiSdkToAnthropicSSE> {
  const { provider, modelId, params, middlewares = [], plugins = [], onEvent } = config

  // Convert provider config to AI SDK config
  let sdkConfig = providerToAiSdkConfig(provider, modelId)

  // Prepare special provider config (Copilot, Anthropic OAuth, etc.)
  sdkConfig = await prepareSpecialProviderConfig(provider, sdkConfig)

  // Create provider instance and get language model
  const aiSdkProvider = await createAiSdkProvider(sdkConfig)
  const baseModel = aiSdkProvider.languageModel(modelId)

  // Apply middlewares if present (only valid for model objects, not string ids)
  const model =
    middlewares.length > 0 && typeof baseModel === 'object'
      ? (wrapLanguageModel({ model: baseModel, middleware: middlewares }) as typeof baseModel)
      : baseModel

  // Create executor with plugins
  const executor = createExecutor(sdkConfig.providerId, sdkConfig.options, plugins)

  // Convert messages and tools from Anthropic format to AI SDK format
  const coreMessages = convertAnthropicToAiMessages(params)
  const tools = convertAnthropicToolsToAiSdk(params.tools)

  // Create the adapter; a no-op onEvent keeps the adapter unconditional.
  const adapter = new AiSdkToAnthropicSSE({
    model: `${provider.id}:${modelId}`,
    onEvent: onEvent || (() => {})
  })

  // Execute stream - pass model object instead of string
  const result = await executor.streamText({
    model, // Now passing LanguageModel object, not string
    // FIXME: max_tokens forwarded from Claude Code can exceed some models'
    // limits (known: Doubao); needs special handling — likely easier to fix
    // in v2, maintenance cost is high here.
    maxOutputTokens: params.max_tokens,
    messages: coreMessages,
    temperature: params.temperature,
    topP: params.top_p,
    topK: params.top_k,
    stopSequences: params.stop_sequences,
    // Hard cap on agentic tool-call steps.
    stopWhen: stepCountIs(100),
    headers: defaultAppHeaders(),
    tools,
    providerOptions: mapAnthropicThinkToAISdkProviderOptions(provider, params.thinking)
  })

  // Drain the full stream through the adapter (emits SSE events via onEvent).
  await adapter.processStream(result.fullStream)

  return adapter
}
|
||||
|
||||
/**
|
||||
* Stream a message request using AI SDK executor and convert to Anthropic SSE format
|
||||
*/
|
||||
export async function streamUnifiedMessages(config: UnifiedStreamConfig): Promise<void> {
|
||||
const { response, provider, modelId, params, onError, onComplete, middlewares = [], plugins = [] } = config
|
||||
|
||||
logger.info('Starting unified message stream', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId,
|
||||
stream: params.stream,
|
||||
middlewareCount: middlewares.length,
|
||||
pluginCount: plugins.length
|
||||
})
|
||||
|
||||
try {
|
||||
response.setHeader('Content-Type', 'text/event-stream')
|
||||
response.setHeader('Cache-Control', 'no-cache')
|
||||
response.setHeader('Connection', 'keep-alive')
|
||||
response.setHeader('X-Accel-Buffering', 'no')
|
||||
|
||||
await executeStream({
|
||||
provider,
|
||||
modelId,
|
||||
params,
|
||||
middlewares,
|
||||
plugins,
|
||||
onEvent: (event) => {
|
||||
logger.silly('Streaming event', { eventType: event.type })
|
||||
const sseData = formatSSEEvent(event)
|
||||
response.write(sseData)
|
||||
}
|
||||
})
|
||||
|
||||
// Send done marker
|
||||
response.write(formatSSEDone())
|
||||
response.end()
|
||||
|
||||
logger.info('Unified message stream completed', { providerId: provider.id, modelId })
|
||||
onComplete?.()
|
||||
} catch (error) {
|
||||
logger.error('Error in unified message stream', error as Error, { providerId: provider.id, modelId })
|
||||
onError?.(error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a non-streaming message response
|
||||
*
|
||||
* Uses simulateStreamingMiddleware to reuse the same streaming logic,
|
||||
* similar to renderer's ModernAiProvider pattern.
|
||||
*/
|
||||
export async function generateUnifiedMessage(
|
||||
providerOrConfig: Provider | GenerateUnifiedMessageConfig,
|
||||
modelId?: string,
|
||||
params?: MessageCreateParams
|
||||
): Promise<ReturnType<typeof AiSdkToAnthropicSSE.prototype.buildNonStreamingResponse>> {
|
||||
// Support both old signature and new config-based signature
|
||||
let config: GenerateUnifiedMessageConfig
|
||||
if ('provider' in providerOrConfig && 'modelId' in providerOrConfig && 'params' in providerOrConfig) {
|
||||
config = providerOrConfig
|
||||
} else {
|
||||
config = {
|
||||
provider: providerOrConfig as Provider,
|
||||
modelId: modelId!,
|
||||
params: params!
|
||||
}
|
||||
}
|
||||
|
||||
const { provider, middlewares = [], plugins = [] } = config
|
||||
|
||||
logger.info('Starting unified message generation', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId: config.modelId,
|
||||
middlewareCount: middlewares.length,
|
||||
pluginCount: plugins.length
|
||||
})
|
||||
|
||||
try {
|
||||
// Add simulateStreamingMiddleware to reuse streaming logic for non-streaming
|
||||
const allMiddlewares = [simulateStreamingMiddleware(), ...middlewares]
|
||||
|
||||
const adapter = await executeStream({
|
||||
provider,
|
||||
modelId: config.modelId,
|
||||
params: config.params,
|
||||
middlewares: allMiddlewares,
|
||||
plugins
|
||||
})
|
||||
|
||||
const finalResponse = adapter.buildNonStreamingResponse()
|
||||
|
||||
logger.info('Unified message generation completed', {
|
||||
providerId: provider.id,
|
||||
modelId: config.modelId
|
||||
})
|
||||
|
||||
return finalResponse
|
||||
} catch (error) {
|
||||
logger.error('Error in unified message generation', error as Error, {
|
||||
providerId: provider.id,
|
||||
modelId: config.modelId
|
||||
})
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
// Default export mirrors the named exports for consumers that import the
// module as a single object.
export default {
  streamUnifiedMessages,
  generateUnifiedMessage
}
|
||||
@@ -1,7 +1,7 @@
|
||||
import { CacheService } from '@main/services/CacheService'
|
||||
import { loggerService } from '@main/services/LoggerService'
|
||||
import { reduxService } from '@main/services/ReduxService'
|
||||
import { isSiliconAnthropicCompatibleModel } from '@shared/config/providers'
|
||||
import { isPpioAnthropicCompatibleModel, isSiliconAnthropicCompatibleModel } from '@shared/config/providers'
|
||||
import type { ApiModel, Model, Provider } from '@types'
|
||||
|
||||
const logger = loggerService.withContext('ApiServerUtils')
|
||||
@@ -28,10 +28,9 @@ export async function getAvailableProviders(): Promise<Provider[]> {
|
||||
return []
|
||||
}
|
||||
|
||||
// Support OpenAI and Anthropic type providers for API server
|
||||
const supportedProviders = providers.filter(
|
||||
(p: Provider) => p.enabled && (p.type === 'openai' || p.type === 'anthropic')
|
||||
)
|
||||
// Support all provider types that AI SDK can handle
|
||||
// The unified-messages service uses AI SDK which supports many providers
|
||||
const supportedProviders = providers.filter((p: Provider) => p.enabled)
|
||||
|
||||
// Cache the filtered results
|
||||
CacheService.set(PROVIDERS_CACHE_KEY, supportedProviders, PROVIDERS_CACHE_TTL)
|
||||
@@ -160,7 +159,7 @@ export async function validateModelId(model: string): Promise<{
|
||||
valid: false,
|
||||
error: {
|
||||
type: 'provider_not_found',
|
||||
message: `Provider '${providerId}' not found, not enabled, or not supported. Only OpenAI providers are currently supported.`,
|
||||
message: `Provider '${providerId}' not found or not enabled.`,
|
||||
code: 'provider_not_found'
|
||||
}
|
||||
}
|
||||
@@ -262,14 +261,8 @@ export function validateProvider(provider: Provider): boolean {
|
||||
return false
|
||||
}
|
||||
|
||||
// Support OpenAI and Anthropic type providers
|
||||
if (provider.type !== 'openai' && provider.type !== 'anthropic') {
|
||||
logger.debug('Provider type not supported', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type
|
||||
})
|
||||
return false
|
||||
}
|
||||
// AI SDK supports many provider types, no longer need to filter by type
|
||||
// The unified-messages service handles all supported types
|
||||
|
||||
return true
|
||||
} catch (error: any) {
|
||||
@@ -290,8 +283,39 @@ export const getProviderAnthropicModelChecker = (providerId: string): ((m: Model
|
||||
return (m: Model) => m.id.includes('claude')
|
||||
case 'silicon':
|
||||
return (m: Model) => isSiliconAnthropicCompatibleModel(m.id)
|
||||
case 'ppio':
|
||||
return (m: Model) => isPpioAnthropicCompatibleModel(m.id)
|
||||
default:
|
||||
// allow all models when checker not configured
|
||||
return () => true
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a specific model is compatible with Anthropic API for a given provider.
|
||||
*
|
||||
* This is used for fine-grained routing decisions at the model level.
|
||||
* For aggregated providers (like Silicon), only certain models support the Anthropic API endpoint.
|
||||
*
|
||||
* @param provider - The provider to check
|
||||
* @param modelId - The model ID to check (without provider prefix)
|
||||
* @returns true if the model supports Anthropic API endpoint
|
||||
*/
|
||||
export function isModelAnthropicCompatible(provider: Provider, modelId: string): boolean {
|
||||
const checker = getProviderAnthropicModelChecker(provider.id)
|
||||
|
||||
const model = provider.models?.find((m) => m.id === modelId)
|
||||
|
||||
if (model) {
|
||||
return checker(model)
|
||||
}
|
||||
|
||||
const minimalModel: Model = {
|
||||
id: modelId,
|
||||
name: modelId,
|
||||
provider: provider.id,
|
||||
group: ''
|
||||
}
|
||||
|
||||
return checker(minimalModel)
|
||||
}
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
import type { ReasoningDetailUnion } from '@main/apiServer/adapters/openrouter'
|
||||
|
||||
interface CacheItem<T> {
|
||||
data: T
|
||||
timestamp: number
|
||||
duration: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Interface for reasoning cache
|
||||
*/
|
||||
export interface IReasoningCache<T> {
|
||||
set(key: string, value: T): void
|
||||
get(key: string): T | undefined
|
||||
}
|
||||
|
||||
export class CacheService {
|
||||
private static cache: Map<string, CacheItem<any>> = new Map()
|
||||
|
||||
@@ -72,3 +82,14 @@ export class CacheService {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Singleton cache instances using CacheService
|
||||
export const googleReasoningCache: IReasoningCache<string> = {
|
||||
set: (key, value) => CacheService.set(`google-reasoning:${key}`, value, 30 * 60 * 1000),
|
||||
get: (key) => CacheService.get(`google-reasoning:${key}`) || undefined
|
||||
}
|
||||
|
||||
export const openRouterReasoningCache: IReasoningCache<ReasoningDetailUnion[]> = {
|
||||
set: (key, value) => CacheService.set(`openrouter-reasoning:${key}`, value, 30 * 60 * 1000),
|
||||
get: (key) => CacheService.get(`openrouter-reasoning:${key}`) || undefined
|
||||
}
|
||||
|
||||
@@ -87,6 +87,7 @@ export class ClaudeStreamState {
|
||||
private pendingUsage: PendingUsageState = {}
|
||||
private pendingToolCalls = new Map<string, PendingToolCall>()
|
||||
private stepActive = false
|
||||
private _streamFinished = false
|
||||
|
||||
constructor(options: ClaudeStreamStateOptions) {
|
||||
this.logger = loggerService.withContext('ClaudeStreamState')
|
||||
@@ -289,6 +290,16 @@ export class ClaudeStreamState {
|
||||
getNamespacedToolCallId(rawToolCallId: string): string {
|
||||
return buildNamespacedToolCallId(this.agentSessionId, rawToolCallId)
|
||||
}
|
||||
|
||||
/** Marks the stream as finished (either completed or errored). */
|
||||
markFinished(): void {
|
||||
this._streamFinished = true
|
||||
}
|
||||
|
||||
/** Returns true if the stream has already emitted a terminal event. */
|
||||
isFinished(): boolean {
|
||||
return this._streamFinished
|
||||
}
|
||||
}
|
||||
|
||||
export type { PendingToolCall }
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// src/main/services/agents/services/claudecode/index.ts
|
||||
import { EventEmitter } from 'node:events'
|
||||
import { createRequire } from 'node:module'
|
||||
import path from 'node:path'
|
||||
|
||||
import type {
|
||||
CanUseTool,
|
||||
@@ -84,18 +85,14 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
})
|
||||
return aiStream
|
||||
}
|
||||
if (
|
||||
(modelInfo.provider?.type !== 'anthropic' &&
|
||||
(modelInfo.provider?.anthropicApiHost === undefined || modelInfo.provider.anthropicApiHost.trim() === '')) ||
|
||||
modelInfo.provider.apiKey === ''
|
||||
) {
|
||||
logger.error('Anthropic provider configuration is missing', {
|
||||
modelInfo
|
||||
})
|
||||
|
||||
// Validate provider has required configuration
|
||||
// Note: We no longer restrict to anthropic type only - the API Server's unified adapter
|
||||
// handles format conversion for any provider type (OpenAI, Gemini, etc.)
|
||||
if (!modelInfo.provider?.apiKey) {
|
||||
logger.error('Provider API key is missing', { modelInfo })
|
||||
aiStream.emit('data', {
|
||||
type: 'error',
|
||||
error: new Error(`Invalid provider type '${modelInfo.provider?.type}'. Expected 'anthropic' provider type.`)
|
||||
error: new Error(`Provider '${modelInfo.provider?.id}' is missing API key configuration.`)
|
||||
})
|
||||
return aiStream
|
||||
}
|
||||
@@ -106,22 +103,25 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
|
||||
) as Record<string, string>
|
||||
|
||||
// Route through local API Server which handles format conversion via unified adapter
|
||||
// This enables Claude Code Agent to work with any provider (OpenAI, Gemini, etc.)
|
||||
// The API Server converts AI SDK responses to Anthropic SSE format transparently
|
||||
const env = {
|
||||
...loginShellEnvWithoutProxies,
|
||||
// TODO: fix the proxy api server
|
||||
// ANTHROPIC_API_KEY: apiConfig.apiKey,
|
||||
// ANTHROPIC_AUTH_TOKEN: apiConfig.apiKey,
|
||||
// ANTHROPIC_BASE_URL: `http://${apiConfig.host}:${apiConfig.port}/${modelInfo.provider.id}`,
|
||||
ANTHROPIC_API_KEY: modelInfo.provider.apiKey,
|
||||
ANTHROPIC_AUTH_TOKEN: modelInfo.provider.apiKey,
|
||||
ANTHROPIC_BASE_URL: modelInfo.provider.anthropicApiHost?.trim() || modelInfo.provider.apiHost,
|
||||
ANTHROPIC_API_KEY: apiConfig.apiKey,
|
||||
ANTHROPIC_AUTH_TOKEN: apiConfig.apiKey,
|
||||
ANTHROPIC_BASE_URL: `http://${apiConfig.host}:${apiConfig.port}/${modelInfo.provider.id}`,
|
||||
ANTHROPIC_MODEL: modelInfo.modelId,
|
||||
ANTHROPIC_DEFAULT_OPUS_MODEL: modelInfo.modelId,
|
||||
ANTHROPIC_DEFAULT_SONNET_MODEL: modelInfo.modelId,
|
||||
// TODO: support set small model in UI
|
||||
ANTHROPIC_DEFAULT_HAIKU_MODEL: modelInfo.modelId,
|
||||
ELECTRON_RUN_AS_NODE: '1',
|
||||
ELECTRON_NO_ATTACH_CONSOLE: '1'
|
||||
ELECTRON_NO_ATTACH_CONSOLE: '1',
|
||||
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
|
||||
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
|
||||
// This prevents the SDK from using the user's home directory which may have encoding problems
|
||||
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
|
||||
}
|
||||
|
||||
const errorChunks: string[] = []
|
||||
@@ -534,6 +534,19 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip emitting error if stream already finished (error was handled via result message)
|
||||
if (streamState.isFinished()) {
|
||||
logger.debug('SDK process exited after stream finished, skipping duplicate error event', {
|
||||
duration,
|
||||
error: errorObj instanceof Error ? { name: errorObj.name, message: errorObj.message } : String(errorObj)
|
||||
})
|
||||
// Still emit complete to signal stream end
|
||||
stream.emit('data', {
|
||||
type: 'complete'
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
errorChunks.push(errorObj instanceof Error ? errorObj.message : String(errorObj))
|
||||
const errorMessage = errorChunks.join('\n\n')
|
||||
logger.error('SDK query failed', {
|
||||
|
||||
@@ -121,7 +121,7 @@ export function transformSDKMessageToStreamParts(sdkMessage: SDKMessage, state:
|
||||
case 'system':
|
||||
return handleSystemMessage(sdkMessage)
|
||||
case 'result':
|
||||
return handleResultMessage(sdkMessage)
|
||||
return handleResultMessage(sdkMessage, state)
|
||||
default:
|
||||
logger.warn('Unknown SDKMessage type', { type: (sdkMessage as any).type })
|
||||
return []
|
||||
@@ -193,6 +193,30 @@ function handleAssistantMessage(
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'thinking':
|
||||
case 'redacted_thinking': {
|
||||
const thinkingText = block.type === 'thinking' ? block.thinking : block.data
|
||||
if (thinkingText) {
|
||||
const id = generateMessageId()
|
||||
chunks.push({
|
||||
type: 'reasoning-start',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'reasoning-delta',
|
||||
id,
|
||||
text: thinkingText,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'reasoning-end',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'tool_use':
|
||||
handleAssistantToolUse(block as ToolUseContent, providerMetadata, state, chunks)
|
||||
break
|
||||
@@ -445,7 +469,11 @@ function handleStreamEvent(
|
||||
case 'content_block_stop': {
|
||||
const block = state.closeBlock(event.index)
|
||||
if (!block) {
|
||||
logger.warn('Received content_block_stop for unknown index', { index: event.index })
|
||||
// Some providers (e.g., Gemini) send content via assistant message before stream events,
|
||||
// so the block may not exist in state. This is expected behavior, not an error.
|
||||
logger.debug('Received content_block_stop for unknown index (may be from non-streaming content)', {
|
||||
index: event.index
|
||||
})
|
||||
break
|
||||
}
|
||||
|
||||
@@ -679,7 +707,13 @@ function handleSystemMessage(message: Extract<SDKMessage, { type: 'system' }>):
|
||||
* Successful runs yield a `finish` frame with aggregated usage metrics, while
|
||||
* failures are surfaced as `error` frames.
|
||||
*/
|
||||
function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>): AgentStreamPart[] {
|
||||
function handleResultMessage(
|
||||
message: Extract<SDKMessage, { type: 'result' }>,
|
||||
state: ClaudeStreamState
|
||||
): AgentStreamPart[] {
|
||||
// Mark stream as finished to prevent duplicate error events when SDK process exits
|
||||
state.markFinished()
|
||||
|
||||
const chunks: AgentStreamPart[] = []
|
||||
|
||||
let usage: LanguageModelUsage | undefined
|
||||
@@ -691,26 +725,33 @@ function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>):
|
||||
}
|
||||
}
|
||||
|
||||
if (message.subtype === 'success') {
|
||||
chunks.push({
|
||||
type: 'finish',
|
||||
totalUsage: usage ?? emptyUsage,
|
||||
finishReason: mapClaudeCodeFinishReason(message.subtype),
|
||||
providerMetadata: {
|
||||
...sdkMessageToProviderMetadata(message),
|
||||
usage: message.usage,
|
||||
durationMs: message.duration_ms,
|
||||
costUsd: message.total_cost_usd,
|
||||
raw: message
|
||||
}
|
||||
} as AgentStreamPart)
|
||||
} else {
|
||||
chunks.push({
|
||||
type: 'finish',
|
||||
totalUsage: usage ?? emptyUsage,
|
||||
finishReason: mapClaudeCodeFinishReason(message.subtype),
|
||||
providerMetadata: {
|
||||
...sdkMessageToProviderMetadata(message),
|
||||
usage: message.usage,
|
||||
durationMs: message.duration_ms,
|
||||
costUsd: message.total_cost_usd,
|
||||
raw: message
|
||||
}
|
||||
} as AgentStreamPart)
|
||||
if (message.subtype !== 'success') {
|
||||
chunks.push({
|
||||
type: 'error',
|
||||
error: {
|
||||
message: `${message.subtype}: Process failed after ${message.num_turns} turns`
|
||||
}
|
||||
} as AgentStreamPart)
|
||||
} else {
|
||||
if (message.is_error) {
|
||||
const errorMatch = message.result.match(/\{.*\}/)
|
||||
if (errorMatch) {
|
||||
const errorDetail = JSON.parse(errorMatch[0])
|
||||
chunks.push(errorDetail)
|
||||
}
|
||||
}
|
||||
}
|
||||
return chunks
|
||||
}
|
||||
|
||||
@@ -212,8 +212,9 @@ export class ToolCallChunkHandler {
|
||||
description: toolName,
|
||||
type: 'builtin'
|
||||
} as BaseTool
|
||||
} else if ((mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool)) {
|
||||
} else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
|
||||
// 如果是客户端执行的 MCP 工具,沿用现有逻辑
|
||||
// toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
|
||||
logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
|
||||
// mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
|
||||
// if (!mcpTool) {
|
||||
|
||||
@@ -46,6 +46,7 @@ import type {
|
||||
GeminiSdkRawOutput,
|
||||
GeminiSdkToolCall
|
||||
} from '@renderer/types/sdk'
|
||||
import { getTrailingApiVersion, withoutTrailingApiVersion } from '@renderer/utils'
|
||||
import { isToolUseModeFunction } from '@renderer/utils/assistant'
|
||||
import {
|
||||
geminiFunctionCallToMcpTool,
|
||||
@@ -163,6 +164,10 @@ export class GeminiAPIClient extends BaseApiClient<
|
||||
return models
|
||||
}
|
||||
|
||||
override getBaseURL(): string {
|
||||
return withoutTrailingApiVersion(super.getBaseURL())
|
||||
}
|
||||
|
||||
override async getSdkInstance() {
|
||||
if (this.sdkInstance) {
|
||||
return this.sdkInstance
|
||||
@@ -188,6 +193,13 @@ export class GeminiAPIClient extends BaseApiClient<
|
||||
if (this.provider.isVertex) {
|
||||
return 'v1'
|
||||
}
|
||||
|
||||
// Extract trailing API version from the URL
|
||||
const trailingVersion = getTrailingApiVersion(this.provider.apiHost || '')
|
||||
if (trailingVersion) {
|
||||
return trailingVersion
|
||||
}
|
||||
|
||||
return 'v1beta'
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ export class VertexAPIClient extends GeminiAPIClient {
|
||||
this.anthropicVertexClient = new AnthropicVertexClient(provider)
|
||||
// 如果传入的是普通 Provider,转换为 VertexProvider
|
||||
if (isVertexProvider(provider)) {
|
||||
this.vertexProvider = provider
|
||||
this.vertexProvider = provider as VertexProvider
|
||||
} else {
|
||||
this.vertexProvider = createVertexProvider(provider)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import type { MCPTool } from '@renderer/types'
|
||||
import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
|
||||
import type { Chunk } from '@renderer/types/chunk'
|
||||
import { isSupportEnableThinkingProvider } from '@renderer/utils/provider'
|
||||
import { openrouterReasoningMiddleware, skipGeminiThoughtSignatureMiddleware } from '@shared/middleware'
|
||||
import type { LanguageModelMiddleware } from 'ai'
|
||||
import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
|
||||
import { isEmpty } from 'lodash'
|
||||
@@ -13,9 +14,7 @@ import { getAiSdkProviderId } from '../provider/factory'
|
||||
import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
|
||||
import { noThinkMiddleware } from './noThinkMiddleware'
|
||||
import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
|
||||
import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
|
||||
import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
|
||||
import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
|
||||
import { toolChoiceMiddleware } from './toolChoiceMiddleware'
|
||||
|
||||
const logger = loggerService.withContext('AiSdkMiddlewareBuilder')
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'
|
||||
import type { LanguageModelMiddleware } from 'ai'
|
||||
|
||||
/**
|
||||
* https://openrouter.ai/docs/docs/best-practices/reasoning-tokens#example-preserving-reasoning-blocks-with-openrouter-and-claude
|
||||
*
|
||||
* @returns LanguageModelMiddleware - a middleware filter redacted block
|
||||
*/
|
||||
export function openrouterReasoningMiddleware(): LanguageModelMiddleware {
|
||||
const REDACTED_BLOCK = '[REDACTED]'
|
||||
return {
|
||||
middlewareVersion: 'v2',
|
||||
wrapGenerate: async ({ doGenerate }) => {
|
||||
const { content, ...rest } = await doGenerate()
|
||||
const modifiedContent = content.map((part) => {
|
||||
if (part.type === 'reasoning' && part.text.includes(REDACTED_BLOCK)) {
|
||||
return {
|
||||
...part,
|
||||
text: part.text.replace(REDACTED_BLOCK, '')
|
||||
}
|
||||
}
|
||||
return part
|
||||
})
|
||||
return { content: modifiedContent, ...rest }
|
||||
},
|
||||
wrapStream: async ({ doStream }) => {
|
||||
const { stream, ...rest } = await doStream()
|
||||
return {
|
||||
stream: stream.pipeThrough(
|
||||
new TransformStream<LanguageModelV2StreamPart, LanguageModelV2StreamPart>({
|
||||
transform(
|
||||
chunk: LanguageModelV2StreamPart,
|
||||
controller: TransformStreamDefaultController<LanguageModelV2StreamPart>
|
||||
) {
|
||||
if (chunk.type === 'reasoning-delta' && chunk.delta.includes(REDACTED_BLOCK)) {
|
||||
controller.enqueue({
|
||||
...chunk,
|
||||
delta: chunk.delta.replace(REDACTED_BLOCK, '')
|
||||
})
|
||||
} else {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
}
|
||||
})
|
||||
),
|
||||
...rest
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
import type { LanguageModelMiddleware } from 'ai'
|
||||
|
||||
/**
|
||||
* skip Gemini Thought Signature Middleware
|
||||
* 由于多模型客户端请求的复杂性(可以中途切换其他模型),这里选择通过中间件方式添加跳过所有 Gemini3 思考签名
|
||||
* Due to the complexity of multi-model client requests (which can switch to other models mid-process),
|
||||
* it was decided to add a skip for all Gemini3 thinking signatures via middleware.
|
||||
* @param aiSdkId AI SDK Provider ID
|
||||
* @returns LanguageModelMiddleware
|
||||
*/
|
||||
export function skipGeminiThoughtSignatureMiddleware(aiSdkId: string): LanguageModelMiddleware {
|
||||
const MAGIC_STRING = 'skip_thought_signature_validator'
|
||||
return {
|
||||
middlewareVersion: 'v2',
|
||||
|
||||
transformParams: async ({ params }) => {
|
||||
const transformedParams = { ...params }
|
||||
// Process messages in prompt
|
||||
if (transformedParams.prompt && Array.isArray(transformedParams.prompt)) {
|
||||
transformedParams.prompt = transformedParams.prompt.map((message) => {
|
||||
if (typeof message.content !== 'string') {
|
||||
for (const part of message.content) {
|
||||
const googleOptions = part?.providerOptions?.[aiSdkId]
|
||||
if (googleOptions?.thoughtSignature) {
|
||||
googleOptions.thoughtSignature = MAGIC_STRING
|
||||
}
|
||||
}
|
||||
}
|
||||
return message
|
||||
})
|
||||
}
|
||||
|
||||
return transformedParams
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,7 @@ import { isAwsBedrockProvider, isVertexProvider } from '@renderer/utils/provider
|
||||
// https://docs.claude.com/en/docs/build-with-claude/extended-thinking#interleaved-thinking
|
||||
const INTERLEAVED_THINKING_HEADER = 'interleaved-thinking-2025-05-14'
|
||||
// https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window
|
||||
const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
|
||||
// const CONTEXT_100M_HEADER = 'context-1m-2025-08-07'
|
||||
// https://docs.cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/web-search
|
||||
const WEBSEARCH_HEADER = 'web-search-2025-03-05'
|
||||
|
||||
@@ -25,7 +25,9 @@ export function addAnthropicHeaders(assistant: Assistant, model: Model): string[
|
||||
if (isVertexProvider(provider) && assistant.enableWebSearch) {
|
||||
anthropicHeaders.push(WEBSEARCH_HEADER)
|
||||
}
|
||||
anthropicHeaders.push(CONTEXT_100M_HEADER)
|
||||
// We may add it by user preference in assistant.settings instead of always adding it.
|
||||
// See #11540, #11397
|
||||
// anthropicHeaders.push(CONTEXT_100M_HEADER)
|
||||
}
|
||||
return anthropicHeaders
|
||||
}
|
||||
|
||||
@@ -24,7 +24,17 @@ vi.mock('@renderer/services/AssistantService', () => ({
|
||||
|
||||
vi.mock('@renderer/store', () => ({
|
||||
default: {
|
||||
getState: () => ({ copilot: { defaultHeaders: {} } })
|
||||
getState: () => ({
|
||||
copilot: { defaultHeaders: {} },
|
||||
llm: {
|
||||
settings: {
|
||||
vertexai: {
|
||||
projectId: 'test-project',
|
||||
location: 'us-central1'
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}))
|
||||
|
||||
@@ -33,7 +43,7 @@ vi.mock('@renderer/utils/api', () => ({
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host // Return host as-is when isSupportedAPIVersion is false
|
||||
}
|
||||
return `${host}/v1` // Default behavior when isSupportedAPIVersion is true
|
||||
return host ? `${host}/v1` : '' // Default behavior when isSupportedAPIVersion is true
|
||||
}),
|
||||
routeToEndpoint: vi.fn((host) => ({
|
||||
baseURL: host,
|
||||
@@ -41,6 +51,20 @@ vi.mock('@renderer/utils/api', () => ({
|
||||
}))
|
||||
}))
|
||||
|
||||
// Also mock @shared/api since formatProviderApiHost uses it directly
|
||||
vi.mock('@shared/api', async (importOriginal) => {
|
||||
const actual = (await importOriginal()) as any
|
||||
return {
|
||||
...actual,
|
||||
formatApiHost: vi.fn((host, isSupportedAPIVersion = true) => {
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host || '' // Return host as-is when isSupportedAPIVersion is false
|
||||
}
|
||||
return host ? `${host}/v1` : '' // Default behavior when isSupportedAPIVersion is true
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
vi.mock('@renderer/utils/provider', async (importOriginal) => {
|
||||
const actual = (await importOriginal()) as any
|
||||
return {
|
||||
@@ -73,8 +97,8 @@ vi.mock('@renderer/services/AssistantService', () => ({
|
||||
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import type { Model, Provider } from '@renderer/types'
|
||||
import { formatApiHost } from '@renderer/utils/api'
|
||||
import { isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'
|
||||
import { formatApiHost } from '@shared/api'
|
||||
|
||||
import { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '../constants'
|
||||
import { getActualProvider, providerToAiSdkConfig } from '../providerConfig'
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
import type { Provider } from '@renderer/types'
|
||||
|
||||
import { provider2Provider, startsWith } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
// https://platform.claude.com/docs/en/build-with-claude/claude-in-microsoft-foundry
|
||||
const AZURE_ANTHROPIC_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: startsWith('claude'),
|
||||
provider: (provider: Provider) => ({
|
||||
...provider,
|
||||
type: 'anthropic',
|
||||
apiHost: provider.apiHost + 'anthropic/v1',
|
||||
id: 'azure-anthropic'
|
||||
})
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: Provider) => provider
|
||||
}
|
||||
|
||||
export const azureAnthropicProviderCreator = provider2Provider.bind(null, AZURE_ANTHROPIC_RULES)
|
||||
@@ -1,22 +0,0 @@
|
||||
import type { Model, Provider } from '@renderer/types'
|
||||
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
export const startsWith = (prefix: string) => (model: Model) => model.id.toLowerCase().startsWith(prefix.toLowerCase())
|
||||
export const endpointIs = (type: string) => (model: Model) => model.endpoint_type === type
|
||||
|
||||
/**
|
||||
* 解析模型对应的Provider
|
||||
* @param ruleSet 规则集对象
|
||||
* @param model 模型对象
|
||||
* @param provider 原始provider对象
|
||||
* @returns 解析出的provider对象
|
||||
*/
|
||||
export function provider2Provider(ruleSet: RuleSet, model: Model, provider: Provider): Provider {
|
||||
for (const rule of ruleSet.rules) {
|
||||
if (rule.match(model)) {
|
||||
return rule.provider(provider)
|
||||
}
|
||||
}
|
||||
return ruleSet.fallbackRule(provider)
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
export { aihubmixProviderCreator } from './aihubmix'
|
||||
export { newApiResolverCreator } from './newApi'
|
||||
export { vertexAnthropicProviderCreator } from './vertext-anthropic'
|
||||
// Re-export from shared config
|
||||
export {
|
||||
aihubmixProviderCreator,
|
||||
azureAnthropicProviderCreator,
|
||||
newApiResolverCreator,
|
||||
vertexAnthropicProviderCreator
|
||||
} from '@shared/provider/config'
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
import type { Model, Provider } from '@renderer/types'
|
||||
|
||||
export interface RuleSet {
|
||||
rules: Array<{
|
||||
match: (model: Model) => boolean
|
||||
provider: (provider: Provider) => Provider
|
||||
}>
|
||||
fallbackRule: (provider: Provider) => Provider
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
import type { Provider } from '@renderer/types'
|
||||
|
||||
import { provider2Provider, startsWith } from './helper'
|
||||
import type { RuleSet } from './types'
|
||||
|
||||
const VERTEX_ANTHROPIC_RULES: RuleSet = {
|
||||
rules: [
|
||||
{
|
||||
match: startsWith('claude'),
|
||||
provider: (provider: Provider) => ({
|
||||
...provider,
|
||||
id: 'google-vertex-anthropic'
|
||||
})
|
||||
}
|
||||
],
|
||||
fallbackRule: (provider: Provider) => provider
|
||||
}
|
||||
|
||||
export const vertexAnthropicProviderCreator = provider2Provider.bind(null, VERTEX_ANTHROPIC_RULES)
|
||||
@@ -1,25 +1 @@
|
||||
import type { Model } from '@renderer/types'
|
||||
|
||||
export const COPILOT_EDITOR_VERSION = 'vscode/1.104.1'
|
||||
export const COPILOT_PLUGIN_VERSION = 'copilot-chat/0.26.7'
|
||||
export const COPILOT_INTEGRATION_ID = 'vscode-chat'
|
||||
export const COPILOT_USER_AGENT = 'GitHubCopilotChat/0.26.7'
|
||||
|
||||
export const COPILOT_DEFAULT_HEADERS = {
|
||||
'Copilot-Integration-Id': COPILOT_INTEGRATION_ID,
|
||||
'User-Agent': COPILOT_USER_AGENT,
|
||||
'Editor-Version': COPILOT_EDITOR_VERSION,
|
||||
'Editor-Plugin-Version': COPILOT_PLUGIN_VERSION,
|
||||
'editor-version': COPILOT_EDITOR_VERSION,
|
||||
'editor-plugin-version': COPILOT_PLUGIN_VERSION,
|
||||
'copilot-vision-request': 'true'
|
||||
} as const
|
||||
|
||||
// Models that require the OpenAI Responses endpoint when routed through GitHub Copilot (#10560)
|
||||
const COPILOT_RESPONSES_MODEL_IDS = ['gpt-5-codex']
|
||||
|
||||
export function isCopilotResponsesModel(model: Model): boolean {
|
||||
const normalizedId = model.id?.trim().toLowerCase()
|
||||
const normalizedName = model.name?.trim().toLowerCase()
|
||||
return COPILOT_RESPONSES_MODEL_IDS.some((target) => normalizedId === target || normalizedName === target)
|
||||
}
|
||||
export { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '@shared/provider/constant'
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import { hasProviderConfigByAlias, type ProviderId, resolveProviderConfigId } from '@cherrystudio/ai-core/provider'
|
||||
import { createProvider as createProviderCore } from '@cherrystudio/ai-core/provider'
|
||||
import { loggerService } from '@logger'
|
||||
import type { Provider } from '@renderer/types'
|
||||
import { isAzureOpenAIProvider, isAzureResponsesEndpoint } from '@renderer/utils/provider'
|
||||
import { getAiSdkProviderId as sharedGetAiSdkProviderId } from '@shared/provider'
|
||||
import type { Provider as AiSdkProvider } from 'ai'
|
||||
|
||||
import type { AiSdkConfig } from '../types'
|
||||
@@ -22,68 +21,12 @@ const logger = loggerService.withContext('ProviderFactory')
|
||||
}
|
||||
})()
|
||||
|
||||
/**
|
||||
* 静态Provider映射表
|
||||
* 处理Cherry Studio特有的provider ID到AI SDK标准ID的映射
|
||||
*/
|
||||
const STATIC_PROVIDER_MAPPING: Record<string, ProviderId> = {
|
||||
gemini: 'google', // Google Gemini -> google
|
||||
'azure-openai': 'azure', // Azure OpenAI -> azure
|
||||
'openai-response': 'openai', // OpenAI Responses -> openai
|
||||
grok: 'xai', // Grok -> xai
|
||||
copilot: 'github-copilot-openai-compatible'
|
||||
}
|
||||
|
||||
/**
|
||||
* 尝试解析provider标识符(支持静态映射和别名)
|
||||
*/
|
||||
function tryResolveProviderId(identifier: string): ProviderId | null {
|
||||
// 1. 检查静态映射
|
||||
const staticMapping = STATIC_PROVIDER_MAPPING[identifier]
|
||||
if (staticMapping) {
|
||||
return staticMapping
|
||||
}
|
||||
|
||||
// 2. 检查AiCore是否支持(包括别名支持)
|
||||
if (hasProviderConfigByAlias(identifier)) {
|
||||
// 解析为真实的Provider ID
|
||||
return resolveProviderConfigId(identifier) as ProviderId
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取AI SDK Provider ID
|
||||
* 简化版:减少重复逻辑,利用通用解析函数
|
||||
* Uses shared implementation with renderer-specific config checker
|
||||
*/
|
||||
export function getAiSdkProviderId(provider: Provider): string {
|
||||
// 1. 尝试解析provider.id
|
||||
const resolvedFromId = tryResolveProviderId(provider.id)
|
||||
if (isAzureOpenAIProvider(provider)) {
|
||||
if (isAzureResponsesEndpoint(provider)) {
|
||||
return 'azure-responses'
|
||||
} else {
|
||||
return 'azure'
|
||||
}
|
||||
}
|
||||
if (resolvedFromId) {
|
||||
return resolvedFromId
|
||||
}
|
||||
|
||||
// 2. 尝试解析provider.type
|
||||
// 会把所有类型为openai的自定义provider解析到aisdk的openaiProvider上
|
||||
if (provider.type !== 'openai') {
|
||||
const resolvedFromType = tryResolveProviderId(provider.type)
|
||||
if (resolvedFromType) {
|
||||
return resolvedFromType
|
||||
}
|
||||
}
|
||||
if (provider.apiHost.includes('api.openai.com')) {
|
||||
return 'openai-chat'
|
||||
}
|
||||
// 3. 最后的fallback(使用provider本身的id)
|
||||
return provider.id
|
||||
return sharedGetAiSdkProviderId(provider)
|
||||
}
|
||||
|
||||
export async function createAiSdkProvider(config: AiSdkConfig): Promise<AiSdkProvider | null> {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
|
||||
import { hasProviderConfig } from '@cherrystudio/ai-core/provider'
|
||||
import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
|
||||
import {
|
||||
getAwsBedrockAccessKeyId,
|
||||
@@ -10,22 +10,17 @@ import {
|
||||
import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useVertexAI'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import store from '@renderer/store'
|
||||
import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types'
|
||||
import { formatApiHost, formatAzureOpenAIApiHost, formatVertexApiHost, routeToEndpoint } from '@renderer/utils/api'
|
||||
import { isSystemProvider, type Model, type Provider } from '@renderer/types'
|
||||
import {
|
||||
isAnthropicProvider,
|
||||
isAzureOpenAIProvider,
|
||||
isCherryAIProvider,
|
||||
isGeminiProvider,
|
||||
isNewApiProvider,
|
||||
isPerplexityProvider,
|
||||
isVertexProvider
|
||||
} from '@renderer/utils/provider'
|
||||
type AiSdkConfigContext,
|
||||
formatProviderApiHost as sharedFormatProviderApiHost,
|
||||
type ProviderFormatContext,
|
||||
providerToAiSdkConfig as sharedProviderToAiSdkConfig,
|
||||
resolveActualProvider
|
||||
} from '@shared/provider'
|
||||
import { cloneDeep } from 'lodash'
|
||||
|
||||
import type { AiSdkConfig } from '../types'
|
||||
import { aihubmixProviderCreator, newApiResolverCreator, vertexAnthropicProviderCreator } from './config'
|
||||
import { azureAnthropicProviderCreator } from './config/azure-anthropic'
|
||||
import { COPILOT_DEFAULT_HEADERS } from './constants'
|
||||
import { getAiSdkProviderId } from './factory'
|
||||
|
||||
@@ -56,61 +51,51 @@ function getRotatedApiKey(provider: Provider): string {
|
||||
}
|
||||
|
||||
/**
|
||||
* 处理特殊provider的转换逻辑
|
||||
* Renderer-specific context for providerToAiSdkConfig
|
||||
* Provides implementations using browser APIs, store, and hooks
|
||||
*/
|
||||
function handleSpecialProviders(model: Model, provider: Provider): Provider {
|
||||
if (isNewApiProvider(provider)) {
|
||||
return newApiResolverCreator(model, provider)
|
||||
function createRendererSdkContext(model: Model): AiSdkConfigContext {
|
||||
return {
|
||||
getRotatedApiKey: (provider) => getRotatedApiKey(provider as Provider),
|
||||
isOpenAIChatCompletionOnlyModel: () => isOpenAIChatCompletionOnlyModel(model),
|
||||
getCopilotDefaultHeaders: () => COPILOT_DEFAULT_HEADERS,
|
||||
getCopilotStoredHeaders: () => store.getState().copilot.defaultHeaders ?? {},
|
||||
getAwsBedrockConfig: () => {
|
||||
const authType = getAwsBedrockAuthType()
|
||||
return {
|
||||
authType,
|
||||
region: getAwsBedrockRegion(),
|
||||
apiKey: authType === 'apiKey' ? getAwsBedrockApiKey() : undefined,
|
||||
accessKeyId: authType === 'iam' ? getAwsBedrockAccessKeyId() : undefined,
|
||||
secretAccessKey: authType === 'iam' ? getAwsBedrockSecretAccessKey() : undefined
|
||||
}
|
||||
},
|
||||
getVertexConfig: (provider) => {
|
||||
if (!isVertexAIConfigured()) {
|
||||
return undefined
|
||||
}
|
||||
return createVertexProvider(provider as Provider)
|
||||
},
|
||||
getEndpointType: () => model.endpoint_type
|
||||
}
|
||||
|
||||
if (isSystemProvider(provider)) {
|
||||
if (provider.id === 'aihubmix') {
|
||||
return aihubmixProviderCreator(model, provider)
|
||||
}
|
||||
if (provider.id === 'vertexai') {
|
||||
return vertexAnthropicProviderCreator(model, provider)
|
||||
}
|
||||
}
|
||||
if (isAzureOpenAIProvider(provider)) {
|
||||
return azureAnthropicProviderCreator(model, provider)
|
||||
}
|
||||
return provider
|
||||
}
|
||||
|
||||
/**
|
||||
* 主要用来对齐AISdk的BaseURL格式
|
||||
* @param provider
|
||||
* @returns
|
||||
* Uses shared implementation with renderer-specific context
|
||||
*/
|
||||
function formatProviderApiHost(provider: Provider): Provider {
|
||||
const formatted = { ...provider }
|
||||
if (formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
|
||||
}
|
||||
|
||||
if (isAnthropicProvider(provider)) {
|
||||
const baseHost = formatted.anthropicApiHost || formatted.apiHost
|
||||
// AI SDK needs /v1 in baseURL, Anthropic SDK will strip it in getSdkClient
|
||||
formatted.apiHost = formatApiHost(baseHost)
|
||||
if (!formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatted.apiHost
|
||||
function getRendererFormatContext(): ProviderFormatContext {
|
||||
const vertexSettings = store.getState().llm.settings.vertexai
|
||||
return {
|
||||
vertex: {
|
||||
project: vertexSettings.projectId || 'default-project',
|
||||
location: vertexSettings.location || 'us-central1'
|
||||
}
|
||||
} else if (formatted.id === SystemProviderIds.copilot || formatted.id === SystemProviderIds.github) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else if (isGeminiProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
|
||||
} else if (isAzureOpenAIProvider(formatted)) {
|
||||
formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost)
|
||||
} else if (isVertexProvider(formatted)) {
|
||||
formatted.apiHost = formatVertexApiHost(formatted)
|
||||
} else if (isCherryAIProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else if (isPerplexityProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost)
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
|
||||
function formatProviderApiHost(provider: Provider): Provider {
|
||||
return sharedFormatProviderApiHost(provider, getRendererFormatContext())
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -122,7 +107,9 @@ export function getActualProvider(model: Model): Provider {
|
||||
|
||||
// 按顺序处理各种转换
|
||||
let actualProvider = cloneDeep(baseProvider)
|
||||
actualProvider = handleSpecialProviders(model, actualProvider)
|
||||
actualProvider = resolveActualProvider(actualProvider, model, {
|
||||
isSystemProvider
|
||||
}) as Provider
|
||||
actualProvider = formatProviderApiHost(actualProvider)
|
||||
|
||||
return actualProvider
|
||||
@@ -130,121 +117,11 @@ export function getActualProvider(model: Model): Provider {
|
||||
|
||||
/**
|
||||
* 将 Provider 配置转换为新 AI SDK 格式
|
||||
* 简化版:利用新的别名映射系统
|
||||
* Uses shared implementation with renderer-specific context
|
||||
*/
|
||||
export function providerToAiSdkConfig(actualProvider: Provider, model: Model): AiSdkConfig {
|
||||
const aiSdkProviderId = getAiSdkProviderId(actualProvider)
|
||||
|
||||
// 构建基础配置
|
||||
const { baseURL, endpoint } = routeToEndpoint(actualProvider.apiHost)
|
||||
const baseConfig = {
|
||||
baseURL: baseURL,
|
||||
apiKey: getRotatedApiKey(actualProvider)
|
||||
}
|
||||
|
||||
const isCopilotProvider = actualProvider.id === SystemProviderIds.copilot
|
||||
if (isCopilotProvider) {
|
||||
const storedHeaders = store.getState().copilot.defaultHeaders ?? {}
|
||||
const options = ProviderConfigFactory.fromProvider('github-copilot-openai-compatible', baseConfig, {
|
||||
headers: {
|
||||
...COPILOT_DEFAULT_HEADERS,
|
||||
...storedHeaders,
|
||||
...actualProvider.extra_headers
|
||||
},
|
||||
name: actualProvider.id,
|
||||
includeUsage: true
|
||||
})
|
||||
|
||||
return {
|
||||
providerId: 'github-copilot-openai-compatible',
|
||||
options
|
||||
}
|
||||
}
|
||||
|
||||
// 处理OpenAI模式
|
||||
const extraOptions: any = {}
|
||||
extraOptions.endpoint = endpoint
|
||||
if (actualProvider.type === 'openai-response' && !isOpenAIChatCompletionOnlyModel(model)) {
|
||||
extraOptions.mode = 'responses'
|
||||
} else if (aiSdkProviderId === 'openai' || (aiSdkProviderId === 'cherryin' && actualProvider.type === 'openai')) {
|
||||
extraOptions.mode = 'chat'
|
||||
}
|
||||
|
||||
// 添加额外headers
|
||||
if (actualProvider.extra_headers) {
|
||||
extraOptions.headers = actualProvider.extra_headers
|
||||
// copy from openaiBaseClient/openaiResponseApiClient
|
||||
if (aiSdkProviderId === 'openai') {
|
||||
extraOptions.headers = {
|
||||
...extraOptions.headers,
|
||||
'HTTP-Referer': 'https://cherry-ai.com',
|
||||
'X-Title': 'Cherry Studio',
|
||||
'X-Api-Key': baseConfig.apiKey
|
||||
}
|
||||
}
|
||||
}
|
||||
// azure
|
||||
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
|
||||
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#responses-api
|
||||
if (aiSdkProviderId === 'azure-responses') {
|
||||
extraOptions.mode = 'responses'
|
||||
} else if (aiSdkProviderId === 'azure') {
|
||||
extraOptions.mode = 'chat'
|
||||
}
|
||||
|
||||
// bedrock
|
||||
if (aiSdkProviderId === 'bedrock') {
|
||||
const authType = getAwsBedrockAuthType()
|
||||
extraOptions.region = getAwsBedrockRegion()
|
||||
|
||||
if (authType === 'apiKey') {
|
||||
extraOptions.apiKey = getAwsBedrockApiKey()
|
||||
} else {
|
||||
extraOptions.accessKeyId = getAwsBedrockAccessKeyId()
|
||||
extraOptions.secretAccessKey = getAwsBedrockSecretAccessKey()
|
||||
}
|
||||
}
|
||||
// google-vertex
|
||||
if (aiSdkProviderId === 'google-vertex' || aiSdkProviderId === 'google-vertex-anthropic') {
|
||||
if (!isVertexAIConfigured()) {
|
||||
throw new Error('VertexAI is not configured. Please configure project, location and service account credentials.')
|
||||
}
|
||||
const { project, location, googleCredentials } = createVertexProvider(actualProvider)
|
||||
extraOptions.project = project
|
||||
extraOptions.location = location
|
||||
extraOptions.googleCredentials = {
|
||||
...googleCredentials,
|
||||
privateKey: formatPrivateKey(googleCredentials.privateKey)
|
||||
}
|
||||
baseConfig.baseURL += aiSdkProviderId === 'google-vertex' ? '/publishers/google' : '/publishers/anthropic/models'
|
||||
}
|
||||
|
||||
// cherryin
|
||||
if (aiSdkProviderId === 'cherryin') {
|
||||
if (model.endpoint_type) {
|
||||
extraOptions.endpointType = model.endpoint_type
|
||||
}
|
||||
}
|
||||
|
||||
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
|
||||
const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
|
||||
return {
|
||||
providerId: aiSdkProviderId,
|
||||
options
|
||||
}
|
||||
}
|
||||
|
||||
// 否则fallback到openai-compatible
|
||||
const options = ProviderConfigFactory.createOpenAICompatible(baseConfig.baseURL, baseConfig.apiKey)
|
||||
return {
|
||||
providerId: 'openai-compatible',
|
||||
options: {
|
||||
...options,
|
||||
name: actualProvider.id,
|
||||
...extraOptions,
|
||||
includeUsage: true
|
||||
}
|
||||
}
|
||||
const context = createRendererSdkContext(model)
|
||||
return sharedProviderToAiSdkConfig(actualProvider, model.id, context) as AiSdkConfig
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -287,13 +164,13 @@ export async function prepareSpecialProviderConfig(
|
||||
break
|
||||
}
|
||||
case 'cherryai': {
|
||||
config.options.fetch = async (url, options) => {
|
||||
config.options.fetch = async (url: RequestInfo | URL, options: RequestInit) => {
|
||||
// 在这里对最终参数进行签名
|
||||
const signature = await window.api.cherryai.generateSignature({
|
||||
method: 'POST',
|
||||
path: '/chat/completions',
|
||||
query: '',
|
||||
body: JSON.parse(options.body)
|
||||
body: JSON.parse(options.body as string)
|
||||
})
|
||||
return fetch(url, {
|
||||
...options,
|
||||
|
||||
@@ -1,113 +1,13 @@
|
||||
import { type ProviderConfig, registerMultipleProviderConfigs } from '@cherrystudio/ai-core/provider'
|
||||
import { loggerService } from '@logger'
|
||||
import { initializeSharedProviders, SHARED_PROVIDER_CONFIGS } from '@shared/provider'
|
||||
|
||||
const logger = loggerService.withContext('ProviderConfigs')
|
||||
|
||||
/**
|
||||
* 新Provider配置定义
|
||||
* 定义了需要动态注册的AI Providers
|
||||
*/
|
||||
export const NEW_PROVIDER_CONFIGS: ProviderConfig[] = [
|
||||
{
|
||||
id: 'openrouter',
|
||||
name: 'OpenRouter',
|
||||
import: () => import('@openrouter/ai-sdk-provider'),
|
||||
creatorFunctionName: 'createOpenRouter',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['openrouter']
|
||||
},
|
||||
{
|
||||
id: 'google-vertex',
|
||||
name: 'Google Vertex AI',
|
||||
import: () => import('@ai-sdk/google-vertex/edge'),
|
||||
creatorFunctionName: 'createVertex',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['vertexai']
|
||||
},
|
||||
{
|
||||
id: 'google-vertex-anthropic',
|
||||
name: 'Google Vertex AI Anthropic',
|
||||
import: () => import('@ai-sdk/google-vertex/anthropic/edge'),
|
||||
creatorFunctionName: 'createVertexAnthropic',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['vertexai-anthropic']
|
||||
},
|
||||
{
|
||||
id: 'azure-anthropic',
|
||||
name: 'Azure AI Anthropic',
|
||||
import: () => import('@ai-sdk/anthropic'),
|
||||
creatorFunctionName: 'createAnthropic',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['azure-anthropic']
|
||||
},
|
||||
{
|
||||
id: 'github-copilot-openai-compatible',
|
||||
name: 'GitHub Copilot OpenAI Compatible',
|
||||
import: () => import('@opeoginni/github-copilot-openai-compatible'),
|
||||
creatorFunctionName: 'createGitHubCopilotOpenAICompatible',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['copilot', 'github-copilot']
|
||||
},
|
||||
{
|
||||
id: 'bedrock',
|
||||
name: 'Amazon Bedrock',
|
||||
import: () => import('@ai-sdk/amazon-bedrock'),
|
||||
creatorFunctionName: 'createAmazonBedrock',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['aws-bedrock']
|
||||
},
|
||||
{
|
||||
id: 'perplexity',
|
||||
name: 'Perplexity',
|
||||
import: () => import('@ai-sdk/perplexity'),
|
||||
creatorFunctionName: 'createPerplexity',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['perplexity']
|
||||
},
|
||||
{
|
||||
id: 'mistral',
|
||||
name: 'Mistral',
|
||||
import: () => import('@ai-sdk/mistral'),
|
||||
creatorFunctionName: 'createMistral',
|
||||
supportsImageGeneration: false,
|
||||
aliases: ['mistral']
|
||||
},
|
||||
{
|
||||
id: 'huggingface',
|
||||
name: 'HuggingFace',
|
||||
import: () => import('@ai-sdk/huggingface'),
|
||||
creatorFunctionName: 'createHuggingFace',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['hf', 'hugging-face']
|
||||
},
|
||||
{
|
||||
id: 'ai-gateway',
|
||||
name: 'AI Gateway',
|
||||
import: () => import('@ai-sdk/gateway'),
|
||||
creatorFunctionName: 'createGateway',
|
||||
supportsImageGeneration: true,
|
||||
aliases: ['gateway']
|
||||
},
|
||||
{
|
||||
id: 'cerebras',
|
||||
name: 'Cerebras',
|
||||
import: () => import('@ai-sdk/cerebras'),
|
||||
creatorFunctionName: 'createCerebras',
|
||||
supportsImageGeneration: false
|
||||
}
|
||||
] as const
|
||||
export const NEW_PROVIDER_CONFIGS = SHARED_PROVIDER_CONFIGS
|
||||
|
||||
/**
|
||||
* 初始化新的Providers
|
||||
* 使用aiCore的动态注册功能
|
||||
*/
|
||||
export async function initializeNewProviders(): Promise<void> {
|
||||
try {
|
||||
const successCount = registerMultipleProviderConfigs(NEW_PROVIDER_CONFIGS)
|
||||
if (successCount < NEW_PROVIDER_CONFIGS.length) {
|
||||
logger.warn('Some providers failed to register. Check previous error logs.')
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to initialize new providers:', error as Error)
|
||||
}
|
||||
initializeSharedProviders({
|
||||
warn: (message) => logger.warn(message),
|
||||
error: (message, error) => logger.error(message, error)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -245,8 +245,8 @@ export class AiSdkSpanAdapter {
|
||||
'gen_ai.usage.output_tokens'
|
||||
]
|
||||
|
||||
const completionTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
|
||||
const promptTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
|
||||
const promptTokens = attributes[inputsTokenKeys.find((key) => attributes[key]) || '']
|
||||
const completionTokens = attributes[outputTokenKeys.find((key) => attributes[key]) || '']
|
||||
|
||||
if (completionTokens !== undefined || promptTokens !== undefined) {
|
||||
const usage: TokenUsage = {
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
import type { Span } from '@opentelemetry/api'
|
||||
import { SpanKind, SpanStatusCode } from '@opentelemetry/api'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { AiSdkSpanAdapter } from '../AiSdkSpanAdapter'
|
||||
|
||||
vi.mock('@logger', () => ({
|
||||
loggerService: {
|
||||
withContext: () => ({
|
||||
debug: vi.fn(),
|
||||
error: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn()
|
||||
})
|
||||
}
|
||||
}))
|
||||
|
||||
describe('AiSdkSpanAdapter', () => {
|
||||
const createMockSpan = (attributes: Record<string, unknown>): Span => {
|
||||
const span = {
|
||||
spanContext: () => ({
|
||||
traceId: 'trace-id',
|
||||
spanId: 'span-id'
|
||||
}),
|
||||
_attributes: attributes,
|
||||
_events: [],
|
||||
name: 'test span',
|
||||
status: { code: SpanStatusCode.OK },
|
||||
kind: SpanKind.CLIENT,
|
||||
startTime: [0, 0] as [number, number],
|
||||
endTime: [0, 1] as [number, number],
|
||||
ended: true,
|
||||
parentSpanId: '',
|
||||
links: []
|
||||
}
|
||||
return span as unknown as Span
|
||||
}
|
||||
|
||||
it('maps prompt and completion usage tokens to the correct fields', () => {
|
||||
const attributes = {
|
||||
'ai.usage.promptTokens': 321,
|
||||
'ai.usage.completionTokens': 654
|
||||
}
|
||||
|
||||
const span = createMockSpan(attributes)
|
||||
const result = AiSdkSpanAdapter.convertToSpanEntity({ span })
|
||||
|
||||
expect(result.usage).toBeDefined()
|
||||
expect(result.usage?.prompt_tokens).toBe(321)
|
||||
expect(result.usage?.completion_tokens).toBe(654)
|
||||
expect(result.usage?.total_tokens).toBe(975)
|
||||
})
|
||||
})
|
||||
@@ -144,7 +144,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable reasoning for OpenRouter when no reasoning effort set', async () => {
|
||||
it('should not override reasoning for OpenRouter when reasoning effort undefined', async () => {
|
||||
const { isReasoningModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@@ -161,6 +161,29 @@ describe('reasoning utils', () => {
|
||||
settings: {}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable reasoning for OpenRouter when reasoning effort explicitly none', async () => {
|
||||
const { isReasoningModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
|
||||
const model: Model = {
|
||||
id: 'anthropic/claude-sonnet-4',
|
||||
name: 'Claude Sonnet 4',
|
||||
provider: SystemProviderIds.openrouter
|
||||
} as Model
|
||||
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
expect(result).toEqual({ reasoning: { enabled: false, exclude: true } })
|
||||
})
|
||||
@@ -269,7 +292,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getReasoningEffort(assistant, model)
|
||||
|
||||
@@ -16,10 +16,8 @@ import {
|
||||
isGPT5SeriesModel,
|
||||
isGPT51SeriesModel,
|
||||
isGrok4FastReasoningModel,
|
||||
isGrokReasoningModel,
|
||||
isOpenAIDeepResearchModel,
|
||||
isOpenAIModel,
|
||||
isOpenAIReasoningModel,
|
||||
isQwenAlwaysThinkModel,
|
||||
isQwenReasoningModel,
|
||||
isReasoningModel,
|
||||
@@ -64,30 +62,22 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
// Handle undefined and 'none' reasoningEffort.
|
||||
// TODO: They should be separated.
|
||||
if (!reasoningEffort || reasoningEffort === 'none') {
|
||||
// reasoningEffort is not set, no extra reasoning setting
|
||||
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
|
||||
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
|
||||
if (!reasoningEffort) {
|
||||
return {}
|
||||
}
|
||||
|
||||
// Handle 'none' reasoningEffort. It's explicitly off.
|
||||
if (reasoningEffort === 'none') {
|
||||
// openrouter: use reasoning
|
||||
if (model.provider === SystemProviderIds.openrouter) {
|
||||
// Don't disable reasoning for Gemini models that support thinking tokens
|
||||
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
||||
return {}
|
||||
}
|
||||
// 'none' is not an available value for effort for now.
|
||||
// I think they should resolve this issue soon, so I'll just go ahead and use this value.
|
||||
if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
|
||||
return { reasoning: { effort: 'none' } }
|
||||
}
|
||||
// Don't disable reasoning for models that require it
|
||||
if (
|
||||
isGrokReasoningModel(model) ||
|
||||
isOpenAIReasoningModel(model) ||
|
||||
isQwenAlwaysThinkModel(model) ||
|
||||
model.id.includes('seed-oss') ||
|
||||
model.id.includes('minimax-m2')
|
||||
) {
|
||||
return {}
|
||||
}
|
||||
return { reasoning: { enabled: false, exclude: true } }
|
||||
}
|
||||
|
||||
@@ -101,11 +91,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
return { enable_thinking: false }
|
||||
}
|
||||
|
||||
// claude
|
||||
if (isSupportedThinkingTokenClaudeModel(model)) {
|
||||
return {}
|
||||
}
|
||||
|
||||
// gemini
|
||||
if (isSupportedThinkingTokenGeminiModel(model)) {
|
||||
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
||||
@@ -118,8 +103,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.warn(`Model ${model.id} cannot disable reasoning. Fallback to empty reasoning param.`)
|
||||
return {}
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
// use thinking, doubao, zhipu, etc.
|
||||
@@ -139,6 +126,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
}
|
||||
|
||||
logger.warn(`Model ${model.id} doesn't match any disable reasoning behavior. Fallback to empty reasoning param.`)
|
||||
return {}
|
||||
}
|
||||
|
||||
@@ -293,6 +281,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
}
|
||||
|
||||
// OpenRouter models, use reasoning
|
||||
// FIXME: duplicated openrouter handling. remove one
|
||||
if (model.provider === SystemProviderIds.openrouter) {
|
||||
if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) {
|
||||
return {
|
||||
|
||||
@@ -215,6 +215,10 @@
|
||||
border-top: none !important;
|
||||
}
|
||||
|
||||
.ant-collapse-header-text {
|
||||
overflow-x: hidden;
|
||||
}
|
||||
|
||||
.ant-slider .ant-slider-handle::after {
|
||||
box-shadow: 0 1px 4px 0px rgb(128 128 128 / 50%) !important;
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
} from '@ant-design/icons'
|
||||
import { loggerService } from '@logger'
|
||||
import { download } from '@renderer/utils/download'
|
||||
import { convertImageToPng } from '@renderer/utils/image'
|
||||
import type { ImageProps as AntImageProps } from 'antd'
|
||||
import { Dropdown, Image as AntImage, Space } from 'antd'
|
||||
import { Base64 } from 'js-base64'
|
||||
@@ -33,39 +34,38 @@ const ImageViewer: React.FC<ImageViewerProps> = ({ src, style, ...props }) => {
|
||||
// 复制图片到剪贴板
|
||||
const handleCopyImage = async (src: string) => {
|
||||
try {
|
||||
let blob: Blob
|
||||
|
||||
if (src.startsWith('data:')) {
|
||||
// 处理 base64 格式的图片
|
||||
const match = src.match(/^data:(image\/\w+);base64,(.+)$/)
|
||||
if (!match) throw new Error('Invalid base64 image format')
|
||||
const mimeType = match[1]
|
||||
const byteArray = Base64.toUint8Array(match[2])
|
||||
const blob = new Blob([byteArray], { type: mimeType })
|
||||
await navigator.clipboard.write([new ClipboardItem({ [mimeType]: blob })])
|
||||
blob = new Blob([byteArray], { type: mimeType })
|
||||
} else if (src.startsWith('file://')) {
|
||||
// 处理本地文件路径
|
||||
const bytes = await window.api.fs.read(src)
|
||||
const mimeType = mime.getType(src) || 'application/octet-stream'
|
||||
const blob = new Blob([bytes], { type: mimeType })
|
||||
await navigator.clipboard.write([
|
||||
new ClipboardItem({
|
||||
[mimeType]: blob
|
||||
})
|
||||
])
|
||||
blob = new Blob([bytes], { type: mimeType })
|
||||
} else {
|
||||
// 处理 URL 格式的图片
|
||||
const response = await fetch(src)
|
||||
const blob = await response.blob()
|
||||
|
||||
await navigator.clipboard.write([
|
||||
new ClipboardItem({
|
||||
[blob.type]: blob
|
||||
})
|
||||
])
|
||||
blob = await response.blob()
|
||||
}
|
||||
|
||||
// 统一转换为 PNG 以确保兼容性(剪贴板 API 不支持 JPEG)
|
||||
const pngBlob = await convertImageToPng(blob)
|
||||
|
||||
const item = new ClipboardItem({
|
||||
'image/png': pngBlob
|
||||
})
|
||||
await navigator.clipboard.write([item])
|
||||
|
||||
window.toast.success(t('message.copy.success'))
|
||||
} catch (error) {
|
||||
logger.error('Failed to copy image:', error as Error)
|
||||
const err = error as Error
|
||||
logger.error(`Failed to copy image: ${err.message}`, { stack: err.stack })
|
||||
window.toast.error(t('message.copy.failed'))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import {
|
||||
isSupportVerbosityModel
|
||||
} from '../openai'
|
||||
import { isQwenMTModel } from '../qwen'
|
||||
import { isFunctionCallingModel } from '../tooluse'
|
||||
import {
|
||||
agentModelFilter,
|
||||
getModelSupportedVerbosity,
|
||||
@@ -112,6 +113,7 @@ const textToImageMock = vi.mocked(isTextToImageModel)
|
||||
const generateImageMock = vi.mocked(isGenerateImageModel)
|
||||
const reasoningMock = vi.mocked(isOpenAIReasoningModel)
|
||||
const openAIWebSearchOnlyMock = vi.mocked(isOpenAIWebSearchChatCompletionOnlyModel)
|
||||
const isFunctionCallingModelMock = vi.mocked(isFunctionCallingModel)
|
||||
|
||||
describe('model utils', () => {
|
||||
beforeEach(() => {
|
||||
@@ -120,7 +122,7 @@ describe('model utils', () => {
|
||||
rerankMock.mockReturnValue(false)
|
||||
visionMock.mockReturnValue(true)
|
||||
textToImageMock.mockReturnValue(false)
|
||||
generateImageMock.mockReturnValue(true)
|
||||
generateImageMock.mockReturnValue(false)
|
||||
reasoningMock.mockReturnValue(false)
|
||||
openAIWebSearchOnlyMock.mockReturnValue(false)
|
||||
})
|
||||
@@ -418,6 +420,7 @@ describe('model utils', () => {
|
||||
describe('isGenerateImageModels', () => {
|
||||
it('returns true when all models support image generation', () => {
|
||||
const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
|
||||
generateImageMock.mockReturnValue(true)
|
||||
expect(isGenerateImageModels(models)).toBe(true)
|
||||
})
|
||||
|
||||
@@ -456,12 +459,22 @@ describe('model utils', () => {
|
||||
expect(agentModelFilter(createModel({ id: 'rerank' }))).toBe(false)
|
||||
})
|
||||
|
||||
it('filters out non-function-call models', () => {
|
||||
rerankMock.mockReturnValue(false)
|
||||
isFunctionCallingModelMock.mockReturnValueOnce(false)
|
||||
expect(agentModelFilter(createModel({ id: 'DeepSeek R1' }))).toBe(false)
|
||||
})
|
||||
|
||||
it('filters out text-to-image models', () => {
|
||||
rerankMock.mockReturnValue(false)
|
||||
textToImageMock.mockReturnValueOnce(true)
|
||||
expect(agentModelFilter(createModel({ id: 'gpt-image-1' }))).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
textToImageMock.mockReturnValue(false)
|
||||
generateImageMock.mockReturnValueOnce(true)
|
||||
expect(agentModelFilter(createModel({ id: 'dall-e-3' }))).toBe(false)
|
||||
})
|
||||
|
||||
describe('Temperature limits', () => {
|
||||
|
||||
@@ -460,16 +460,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
|
||||
}
|
||||
|
||||
export const isDeepSeekHybridInferenceModel = (model: Model) => {
|
||||
const modelId = getLowerBaseModelName(model.id)
|
||||
// deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
|
||||
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
|
||||
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
|
||||
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
|
||||
// until the end of the string.
|
||||
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
|
||||
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
|
||||
// TODO: move to utils and add test cases
|
||||
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
|
||||
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
|
||||
const modelId = getLowerBaseModelName(model.id)
|
||||
// deepseek官方使用chat和reasoner做推理控制,其他provider需要单独判断,id可能会有所差别
|
||||
// openrouter: deepseek/deepseek-chat-v3.1 不知道会不会有其他provider仿照ds官方分出一个同id的作为非思考模式的模型,这里有风险
|
||||
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
|
||||
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
|
||||
// until the end of the string.
|
||||
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
|
||||
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
|
||||
// TODO: move to utils and add test cases
|
||||
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
|
||||
})
|
||||
return idResult || nameResult
|
||||
}
|
||||
|
||||
export const isLingReasoningModel = (model?: Model): boolean => {
|
||||
@@ -523,7 +526,6 @@ export function isReasoningModel(model?: Model): boolean {
|
||||
REASONING_REGEX.test(model.name) ||
|
||||
isSupportedThinkingTokenDoubaoModel(model) ||
|
||||
isDeepSeekHybridInferenceModel(model) ||
|
||||
isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
|
||||
false
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import type { Model } from '@renderer/types'
|
||||
import { isSystemProviderId } from '@renderer/types'
|
||||
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
|
||||
import { isAzureOpenAIProvider } from '@shared/provider'
|
||||
|
||||
import { isEmbeddingModel, isRerankModel } from './embedding'
|
||||
import { isDeepSeekHybridInferenceModel } from './reasoning'
|
||||
@@ -52,6 +54,13 @@ export const FUNCTION_CALLING_REGEX = new RegExp(
|
||||
'i'
|
||||
)
|
||||
|
||||
const AZURE_FUNCTION_CALLING_EXCLUDED_MODELS = [
|
||||
'(?:Meta-)?Llama-3(?:\\.\\d+)?-[\\w-]+',
|
||||
'Phi-[34](?:\\.[\\w-]+)?(?:-[\\w-]+)?',
|
||||
'DeepSeek-(?:R1|V3)',
|
||||
'Codestral-2501'
|
||||
]
|
||||
|
||||
export function isFunctionCallingModel(model?: Model): boolean {
|
||||
if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
|
||||
return false
|
||||
@@ -67,6 +76,15 @@ export function isFunctionCallingModel(model?: Model): boolean {
|
||||
return FUNCTION_CALLING_REGEX.test(modelId) || FUNCTION_CALLING_REGEX.test(model.name)
|
||||
}
|
||||
|
||||
const provider = getProviderByModel(model)
|
||||
|
||||
if (isAzureOpenAIProvider(provider)) {
|
||||
const azureExcludedRegex = new RegExp(`\\b(?:${AZURE_FUNCTION_CALLING_EXCLUDED_MODELS.join('|')})\\b`, 'i')
|
||||
if (azureExcludedRegex.test(modelId)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (['deepseek', 'anthropic', 'kimi', 'moonshot'].includes(model.provider)) {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type OpenAI from '@cherrystudio/openai'
|
||||
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import { type Model, SystemProviderIds } from '@renderer/types'
|
||||
import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
|
||||
import { getLowerBaseModelName } from '@renderer/utils'
|
||||
@@ -13,6 +14,7 @@ import {
|
||||
isOpenAIReasoningModel
|
||||
} from './openai'
|
||||
import { isQwenMTModel } from './qwen'
|
||||
import { isFunctionCallingModel } from './tooluse'
|
||||
import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
|
||||
export const NOT_SUPPORTED_REGEX = /(?:^tts|whisper|speech)/i
|
||||
export const GEMINI_FLASH_MODEL_REGEX = new RegExp('gemini.*-flash.*$', 'i')
|
||||
@@ -181,8 +183,21 @@ export const isGeminiModel = (model: Model) => {
|
||||
// zhipu 视觉推理模型用这组 special token 标记推理结果
|
||||
export const ZHIPU_RESULT_TOKENS = ['<|begin_of_box|>', '<|end_of_box|>'] as const
|
||||
|
||||
// TODO: 支持提示词模式的工具调用
|
||||
export const agentModelFilter = (model: Model): boolean => {
|
||||
return !isEmbeddingModel(model) && !isRerankModel(model) && !isTextToImageModel(model)
|
||||
const provider = getProviderByModel(model)
|
||||
|
||||
// 需要适配,且容易超出限额
|
||||
if (provider.id === SystemProviderIds.copilot) {
|
||||
return false
|
||||
}
|
||||
return (
|
||||
!isEmbeddingModel(model) &&
|
||||
!isRerankModel(model) &&
|
||||
!isTextToImageModel(model) &&
|
||||
!isGenerateImageModel(model) &&
|
||||
isFunctionCallingModel(model)
|
||||
)
|
||||
}
|
||||
|
||||
export const isMaxTemperatureOneModel = (model: Model): boolean => {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { throttle } from 'lodash'
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useEffect, useMemo, useRef } from 'react'
|
||||
|
||||
import { useTimer } from './useTimer'
|
||||
|
||||
@@ -12,13 +12,18 @@ import { useTimer } from './useTimer'
|
||||
*/
|
||||
export default function useScrollPosition(key: string, throttleWait?: number) {
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
const scrollKey = `scroll:${key}`
|
||||
const scrollKey = useMemo(() => `scroll:${key}`, [key])
|
||||
const scrollKeyRef = useRef(scrollKey)
|
||||
const { setTimeoutTimer } = useTimer()
|
||||
|
||||
useEffect(() => {
|
||||
scrollKeyRef.current = scrollKey
|
||||
}, [scrollKey])
|
||||
|
||||
const handleScroll = throttle(() => {
|
||||
const position = containerRef.current?.scrollTop ?? 0
|
||||
window.requestAnimationFrame(() => {
|
||||
window.keyv.set(scrollKey, position)
|
||||
window.keyv.set(scrollKeyRef.current, position)
|
||||
})
|
||||
}, throttleWait ?? 100)
|
||||
|
||||
@@ -28,5 +33,9 @@ export default function useScrollPosition(key: string, throttleWait?: number) {
|
||||
setTimeoutTimer('scrollEffect', scroll, 50)
|
||||
}, [scrollKey, setTimeoutTimer])
|
||||
|
||||
useEffect(() => {
|
||||
return () => handleScroll.cancel()
|
||||
}, [handleScroll])
|
||||
|
||||
return { containerRef, handleScroll }
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useEffect, useRef } from 'react'
|
||||
import { useCallback, useEffect, useRef } from 'react'
|
||||
|
||||
/**
|
||||
* 定时器管理 Hook,用于管理 setTimeout 和 setInterval 定时器,支持通过 key 来标识不同的定时器
|
||||
@@ -43,10 +43,38 @@ export const useTimer = () => {
|
||||
const timeoutMapRef = useRef(new Map<string, NodeJS.Timeout>())
|
||||
const intervalMapRef = useRef(new Map<string, NodeJS.Timeout>())
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setTimeout 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearTimeoutTimer = useCallback((key: string) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
timeoutMapRef.current.delete(key)
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setInterval 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearIntervalTimer = useCallback((key: string) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
intervalMapRef.current.delete(key)
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除所有定时器,包括 setTimeout 和 setInterval
|
||||
*/
|
||||
const clearAllTimers = useCallback(() => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
intervalMapRef.current.clear()
|
||||
}, [])
|
||||
|
||||
// 组件卸载时自动清理所有定时器
|
||||
useEffect(() => {
|
||||
return () => clearAllTimers()
|
||||
}, [])
|
||||
}, [clearAllTimers])
|
||||
|
||||
/**
|
||||
* 设置一个 setTimeout 定时器
|
||||
@@ -65,12 +93,15 @@ export const useTimer = () => {
|
||||
* cleanup();
|
||||
* ```
|
||||
*/
|
||||
const setTimeoutTimer = (key: string, ...args: Parameters<typeof setTimeout>) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
const timer = setTimeout(...args)
|
||||
timeoutMapRef.current.set(key, timer)
|
||||
return () => clearTimeoutTimer(key)
|
||||
}
|
||||
const setTimeoutTimer = useCallback(
|
||||
(key: string, ...args: Parameters<typeof setTimeout>) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
const timer = setTimeout(...args)
|
||||
timeoutMapRef.current.set(key, timer)
|
||||
return () => clearTimeoutTimer(key)
|
||||
},
|
||||
[clearTimeoutTimer]
|
||||
)
|
||||
|
||||
/**
|
||||
* 设置一个 setInterval 定时器
|
||||
@@ -89,56 +120,31 @@ export const useTimer = () => {
|
||||
* cleanup();
|
||||
* ```
|
||||
*/
|
||||
const setIntervalTimer = (key: string, ...args: Parameters<typeof setInterval>) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
const timer = setInterval(...args)
|
||||
intervalMapRef.current.set(key, timer)
|
||||
return () => clearIntervalTimer(key)
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setTimeout 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearTimeoutTimer = (key: string) => {
|
||||
clearTimeout(timeoutMapRef.current.get(key))
|
||||
timeoutMapRef.current.delete(key)
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除指定 key 的 setInterval 定时器
|
||||
* @param key - 定时器标识符
|
||||
*/
|
||||
const clearIntervalTimer = (key: string) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
intervalMapRef.current.delete(key)
|
||||
}
|
||||
const setIntervalTimer = useCallback(
|
||||
(key: string, ...args: Parameters<typeof setInterval>) => {
|
||||
clearInterval(intervalMapRef.current.get(key))
|
||||
const timer = setInterval(...args)
|
||||
intervalMapRef.current.set(key, timer)
|
||||
return () => clearIntervalTimer(key)
|
||||
},
|
||||
[clearIntervalTimer]
|
||||
)
|
||||
|
||||
/**
|
||||
* 清除所有 setTimeout 定时器
|
||||
*/
|
||||
const clearAllTimeoutTimers = () => {
|
||||
const clearAllTimeoutTimers = useCallback(() => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
}
|
||||
}, [])
|
||||
|
||||
/**
|
||||
* 清除所有 setInterval 定时器
|
||||
*/
|
||||
const clearAllIntervalTimers = () => {
|
||||
const clearAllIntervalTimers = useCallback(() => {
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
intervalMapRef.current.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除所有定时器,包括 setTimeout 和 setInterval
|
||||
*/
|
||||
const clearAllTimers = () => {
|
||||
timeoutMapRef.current.forEach((timer) => clearTimeout(timer))
|
||||
intervalMapRef.current.forEach((timer) => clearInterval(timer))
|
||||
timeoutMapRef.current.clear()
|
||||
intervalMapRef.current.clear()
|
||||
}
|
||||
}, [])
|
||||
|
||||
return {
|
||||
setTimeoutTimer,
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Tool request was denied.",
|
||||
"timeout": "Tool request timed out before receiving approval."
|
||||
},
|
||||
"toolPendingFallback": "Tool",
|
||||
"waiting": "Waiting for tool permission decision..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Image Generation",
|
||||
"image-generation": "Image Generation (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "工具请求已被拒绝。",
|
||||
"timeout": "工具请求在收到批准前超时。"
|
||||
},
|
||||
"toolPendingFallback": "工具",
|
||||
"waiting": "等待工具权限决定..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "图片生成",
|
||||
"image-generation": "图像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina 重排序",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "工具請求已被拒絕。",
|
||||
"timeout": "工具請求在收到核准前逾時。"
|
||||
},
|
||||
"toolPendingFallback": "工具",
|
||||
"waiting": "等待工具權限決定..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "圖片生成",
|
||||
"image-generation": "圖像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Tool-Anfrage wurde abgelehnt.",
|
||||
"timeout": "Tool-Anfrage ist abgelaufen, bevor eine Genehmigung eingegangen ist."
|
||||
},
|
||||
"toolPendingFallback": "Werkzeug",
|
||||
"waiting": "Warten auf Entscheidung über Tool-Berechtigung..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Bildgenerierung",
|
||||
"image-generation": "Bilderzeugung (OpenAI)",
|
||||
"jina-rerank": "Jina Reranking",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Το αίτημα για εργαλείο απορρίφθηκε.",
|
||||
"timeout": "Το αίτημα για το εργαλείο έληξε πριν λάβει έγκριση."
|
||||
},
|
||||
"toolPendingFallback": "Εργαλείο",
|
||||
"waiting": "Αναμονή για απόφαση άδειας εργαλείου..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Δημιουργία Εικόνας",
|
||||
"image-generation": "Δημιουργία Εικόνων (OpenAI)",
|
||||
"jina-rerank": "Επαναταξινόμηση Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Απάντηση OpenAI"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "La solicitud de herramienta fue denegada.",
|
||||
"timeout": "La solicitud de herramienta expiró antes de recibir la aprobación."
|
||||
},
|
||||
"toolPendingFallback": "Herramienta",
|
||||
"waiting": "Esperando la decisión de permiso de la herramienta..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Generación de imágenes",
|
||||
"image-generation": "Generación de Imágenes (OpenAI)",
|
||||
"jina-rerank": "Reordenamiento Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Respuesta de OpenAI"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "La demande d'outil a été refusée.",
|
||||
"timeout": "La demande d'outil a expiré avant d'obtenir l'approbation."
|
||||
},
|
||||
"toolPendingFallback": "Outil",
|
||||
"waiting": "En attente de la décision d'autorisation de l'outil..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Génération d'images",
|
||||
"image-generation": "Génération d'images (OpenAI)",
|
||||
"jina-rerank": "Reclassement Jina",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Réponse OpenAI"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "ツールリクエストは拒否されました。",
|
||||
"timeout": "ツールリクエストは承認を受ける前にタイムアウトしました。"
|
||||
},
|
||||
"toolPendingFallback": "ツール",
|
||||
"waiting": "ツールの許可決定を待っています..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "画像生成",
|
||||
"image-generation": "画像生成 (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"error": {
|
||||
"failed": "Falha ao excluir o agente"
|
||||
},
|
||||
"title": "删除代理"
|
||||
"title": "Excluir Agente"
|
||||
},
|
||||
"edit": {
|
||||
"title": "Agent Editor"
|
||||
@@ -111,7 +111,7 @@
|
||||
"label": "Modo de permissão",
|
||||
"options": {
|
||||
"acceptEdits": "Aceitar edições automaticamente",
|
||||
"bypassPermissions": "忽略检查 de permissão",
|
||||
"bypassPermissions": "Ignorar verificações de permissão",
|
||||
"default": "Padrão (perguntar antes de continuar)",
|
||||
"plan": "Modo de planejamento (plano sujeito a aprovação)"
|
||||
},
|
||||
@@ -150,7 +150,7 @@
|
||||
},
|
||||
"success": {
|
||||
"install": "Plugin instalado com sucesso",
|
||||
"uninstall": "插件 desinstalado com sucesso"
|
||||
"uninstall": "Plugin desinstalado com sucesso"
|
||||
},
|
||||
"tab": "plug-in",
|
||||
"type": {
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Solicitação de ferramenta foi negada.",
|
||||
"timeout": "A solicitação da ferramenta expirou antes de receber aprovação."
|
||||
},
|
||||
"toolPendingFallback": "Ferramenta",
|
||||
"waiting": "Aguardando decisão de permissão da ferramenta..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1134,7 +1135,7 @@
|
||||
"duplicate": "Duplicar",
|
||||
"edit": "Editar",
|
||||
"enabled": "Ativado",
|
||||
"error": "错误",
|
||||
"error": "Erro",
|
||||
"errors": {
|
||||
"create_message": "Falha ao criar mensagem",
|
||||
"validation": "Falha na verificação"
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Geração de Imagem",
|
||||
"image-generation": "Geração de Imagens (OpenAI)",
|
||||
"jina-rerank": "Jina Reordenar",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "Resposta OpenAI"
|
||||
|
||||
@@ -280,6 +280,7 @@
|
||||
"denied": "Запрос на инструмент был отклонён.",
|
||||
"timeout": "Запрос на инструмент превысил время ожидания до получения подтверждения."
|
||||
},
|
||||
"toolPendingFallback": "Инструмент",
|
||||
"waiting": "Ожидание решения о разрешении на использование инструмента..."
|
||||
},
|
||||
"type": {
|
||||
@@ -1208,7 +1209,7 @@
|
||||
"endpoint_type": {
|
||||
"anthropic": "Anthropic",
|
||||
"gemini": "Gemini",
|
||||
"image-generation": "Изображение",
|
||||
"image-generation": "Генерация изображений (OpenAI)",
|
||||
"jina-rerank": "Jina Rerank",
|
||||
"openai": "OpenAI",
|
||||
"openai-response": "OpenAI-Response"
|
||||
|
||||
@@ -17,7 +17,7 @@ import type { EndpointType, Model } from '@renderer/types'
|
||||
import { getClaudeSupportedProviders } from '@renderer/utils/provider'
|
||||
import type { TerminalConfig } from '@shared/config/constant'
|
||||
import { codeTools, terminalApps } from '@shared/config/constant'
|
||||
import { isSiliconAnthropicCompatibleModel } from '@shared/config/providers'
|
||||
import { isPpioAnthropicCompatibleModel, isSiliconAnthropicCompatibleModel } from '@shared/config/providers'
|
||||
import { Alert, Avatar, Button, Checkbox, Input, Popover, Select, Space, Tooltip } from 'antd'
|
||||
import { ArrowUpRight, Download, FolderOpen, HelpCircle, Terminal, X } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
@@ -82,10 +82,12 @@ const CodeToolsPage: FC = () => {
|
||||
if (m.supported_endpoint_types) {
|
||||
return m.supported_endpoint_types.includes('anthropic')
|
||||
}
|
||||
// Special handling for silicon provider: only specific models support Anthropic API
|
||||
if (m.provider === 'silicon') {
|
||||
return isSiliconAnthropicCompatibleModel(m.id)
|
||||
}
|
||||
if (m.provider === 'ppio') {
|
||||
return isPpioAnthropicCompatibleModel(m.id)
|
||||
}
|
||||
return m.id.includes('claude') || CLAUDE_OFFICIAL_SUPPORTED_PROVIDERS.includes(m.provider)
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import { getModel } from '@renderer/hooks/useModel'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import { CacheService } from '@renderer/services/CacheService'
|
||||
import { pauseTrace } from '@renderer/services/SpanManagerService'
|
||||
import { estimateUserPromptUsage } from '@renderer/services/TokenService'
|
||||
import { useAppDispatch, useAppSelector } from '@renderer/store'
|
||||
@@ -41,19 +42,10 @@ import { getInputbarConfig } from './registry'
|
||||
import { TopicType } from './types'
|
||||
|
||||
const logger = loggerService.withContext('AgentSessionInputbar')
|
||||
const agentSessionDraftCache = new Map<string, string>()
|
||||
|
||||
const readDraftFromCache = (key: string): string => {
|
||||
return agentSessionDraftCache.get(key) ?? ''
|
||||
}
|
||||
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
|
||||
|
||||
const writeDraftToCache = (key: string, value: string) => {
|
||||
if (!value) {
|
||||
agentSessionDraftCache.delete(key)
|
||||
} else {
|
||||
agentSessionDraftCache.set(key, value)
|
||||
}
|
||||
}
|
||||
const getAgentDraftCacheKey = (agentId: string) => `agent-session-draft-${agentId}`
|
||||
|
||||
type Props = {
|
||||
agentId: string
|
||||
@@ -170,16 +162,15 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
|
||||
const scope = TopicType.Session
|
||||
const config = getInputbarConfig(scope)
|
||||
|
||||
// Use shared hooks for text and textarea management
|
||||
const initialDraft = useMemo(() => readDraftFromCache(agentId), [agentId])
|
||||
const persistDraft = useCallback((next: string) => writeDraftToCache(agentId, next), [agentId])
|
||||
// Use shared hooks for text and textarea management with draft persistence
|
||||
const draftCacheKey = getAgentDraftCacheKey(agentId)
|
||||
const {
|
||||
text,
|
||||
setText,
|
||||
isEmpty: inputEmpty
|
||||
} = useInputText({
|
||||
initialValue: initialDraft,
|
||||
onChange: persistDraft
|
||||
initialValue: CacheService.get<string>(draftCacheKey) ?? '',
|
||||
onChange: (value) => CacheService.set(draftCacheKey, value, DRAFT_CACHE_TTL)
|
||||
})
|
||||
const {
|
||||
textareaRef,
|
||||
@@ -431,6 +422,7 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
|
||||
})
|
||||
)
|
||||
|
||||
// Clear text after successful send (draft is cleared automatically via onChange)
|
||||
setText('')
|
||||
setTimeoutTimer('agentSession_sendMessage', () => setText(''), 500)
|
||||
} catch (error) {
|
||||
|
||||
@@ -14,7 +14,6 @@ import { useInputText } from '@renderer/hooks/useInputText'
|
||||
import { useMessageOperations, useTopicLoading } from '@renderer/hooks/useMessageOperations'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useShortcut } from '@renderer/hooks/useShortcuts'
|
||||
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
|
||||
import { useTextareaResize } from '@renderer/hooks/useTextareaResize'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import {
|
||||
@@ -24,6 +23,7 @@ import {
|
||||
useInputbarToolsState
|
||||
} from '@renderer/pages/home/Inputbar/context/InputbarToolsProvider'
|
||||
import { getDefaultTopic } from '@renderer/services/AssistantService'
|
||||
import { CacheService } from '@renderer/services/CacheService'
|
||||
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
|
||||
import FileManager from '@renderer/services/FileManager'
|
||||
import { checkRateLimit, getUserMessage } from '@renderer/services/MessagesService'
|
||||
@@ -39,7 +39,7 @@ import { getSendMessageShortcutLabel } from '@renderer/utils/input'
|
||||
import { documentExts, imageExts, textExts } from '@shared/config/constant'
|
||||
import { debounce } from 'lodash'
|
||||
import type { FC } from 'react'
|
||||
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import React, { useCallback, useEffect, useEffectEvent, useMemo, useRef, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
import { InputbarCore } from './components/InputbarCore'
|
||||
@@ -51,6 +51,17 @@ import TokenCount from './TokenCount'
|
||||
|
||||
const logger = loggerService.withContext('Inputbar')
|
||||
|
||||
const INPUTBAR_DRAFT_CACHE_KEY = 'inputbar-draft'
|
||||
const DRAFT_CACHE_TTL = 24 * 60 * 60 * 1000 // 24 hours
|
||||
|
||||
const getMentionedModelsCacheKey = (assistantId: string) => `inputbar-mentioned-models-${assistantId}`
|
||||
|
||||
const getValidatedCachedModels = (assistantId: string): Model[] => {
|
||||
const cached = CacheService.get<Model[]>(getMentionedModelsCacheKey(assistantId))
|
||||
if (!Array.isArray(cached)) return []
|
||||
return cached.filter((model) => model?.id && model?.name)
|
||||
}
|
||||
|
||||
interface Props {
|
||||
assistant: Assistant
|
||||
setActiveTopic: (topic: Topic) => void
|
||||
@@ -80,16 +91,18 @@ const Inputbar: FC<Props> = ({ assistant: initialAssistant, setActiveTopic, topi
|
||||
toggleExpanded: () => {}
|
||||
})
|
||||
|
||||
const [initialMentionedModels] = useState(() => getValidatedCachedModels(initialAssistant.id))
|
||||
|
||||
const initialState = useMemo(
|
||||
() => ({
|
||||
files: [] as FileType[],
|
||||
mentionedModels: [] as Model[],
|
||||
mentionedModels: initialMentionedModels,
|
||||
selectedKnowledgeBases: initialAssistant.knowledge_bases ?? [],
|
||||
isExpanded: false,
|
||||
couldAddImageFile: false,
|
||||
extensions: [] as string[]
|
||||
}),
|
||||
[initialAssistant.knowledge_bases]
|
||||
[initialMentionedModels, initialAssistant.knowledge_bases]
|
||||
)
|
||||
|
||||
return (
|
||||
@@ -121,7 +134,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
const { setFiles, setMentionedModels, setSelectedKnowledgeBases } = useInputbarToolsDispatch()
|
||||
const { setCouldAddImageFile } = useInputbarToolsInternalDispatch()
|
||||
|
||||
const { text, setText } = useInputText()
|
||||
const { text, setText } = useInputText({
|
||||
initialValue: CacheService.get<string>(INPUTBAR_DRAFT_CACHE_KEY) ?? '',
|
||||
onChange: (value) => CacheService.set(INPUTBAR_DRAFT_CACHE_KEY, value, DRAFT_CACHE_TTL)
|
||||
})
|
||||
const {
|
||||
textareaRef,
|
||||
resize: resizeTextArea,
|
||||
@@ -133,7 +149,6 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
minHeight: 30
|
||||
})
|
||||
|
||||
const showKnowledgeIcon = useSidebarIconShow('knowledge')
|
||||
const { assistant, addTopic, model, setModel, updateAssistant } = useAssistant(initialAssistant.id)
|
||||
const { sendMessageShortcut, showInputEstimatedTokens, enableQuickPanelTriggers } = useSettings()
|
||||
const [estimateTokenCount, setEstimateTokenCount] = useState(0)
|
||||
@@ -190,6 +205,15 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
setCouldAddImageFile(canAddImageFile)
|
||||
}, [canAddImageFile, setCouldAddImageFile])
|
||||
|
||||
const onUnmount = useEffectEvent((id: string) => {
|
||||
CacheService.set(getMentionedModelsCacheKey(id), mentionedModels, DRAFT_CACHE_TTL)
|
||||
})
|
||||
|
||||
useEffect(() => {
|
||||
return () => onUnmount(assistant.id)
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [assistant.id])
|
||||
|
||||
const placeholderText = enableQuickPanelTriggers
|
||||
? t('chat.input.placeholder', { key: getSendMessageShortcutLabel(sendMessageShortcut) })
|
||||
: t('chat.input.placeholder_without_triggers', {
|
||||
@@ -381,9 +405,10 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
|
||||
focusTextarea
|
||||
])
|
||||
|
||||
// TODO: Just use assistant.knowledge_bases as selectedKnowledgeBases. context state is overdesigned.
|
||||
useEffect(() => {
|
||||
setSelectedKnowledgeBases(showKnowledgeIcon ? (assistant.knowledge_bases ?? []) : [])
|
||||
}, [assistant.knowledge_bases, setSelectedKnowledgeBases, showKnowledgeIcon])
|
||||
setSelectedKnowledgeBases(assistant.knowledge_bases ?? [])
|
||||
}, [assistant.knowledge_bases, setSelectedKnowledgeBases])
|
||||
|
||||
useEffect(() => {
|
||||
// Disable web search if model doesn't support it
|
||||
|
||||
@@ -156,11 +156,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
|
||||
|
||||
const setText = useCallback<React.Dispatch<React.SetStateAction<string>>>(
|
||||
(value) => {
|
||||
if (typeof value === 'function') {
|
||||
onTextChange(value(textRef.current))
|
||||
} else {
|
||||
onTextChange(value)
|
||||
}
|
||||
const newText = typeof value === 'function' ? value(textRef.current) : value
|
||||
onTextChange(newText)
|
||||
},
|
||||
[onTextChange]
|
||||
)
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { useAssistant } from '@renderer/hooks/useAssistant'
|
||||
import { useSidebarIconShow } from '@renderer/hooks/useSidebarIcon'
|
||||
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
|
||||
import type { KnowledgeBase } from '@renderer/types'
|
||||
import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
|
||||
@@ -30,7 +29,6 @@ const knowledgeBaseTool = defineTool({
|
||||
render: function KnowledgeBaseToolRender(context) {
|
||||
const { assistant, state, actions, quickPanel } = context
|
||||
|
||||
const knowledgeSidebarEnabled = useSidebarIconShow('knowledge')
|
||||
const { updateAssistant } = useAssistant(assistant.id)
|
||||
|
||||
const handleSelect = useCallback(
|
||||
@@ -41,10 +39,6 @@ const knowledgeBaseTool = defineTool({
|
||||
[updateAssistant, actions]
|
||||
)
|
||||
|
||||
if (!knowledgeSidebarEnabled) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<KnowledgeBaseButton
|
||||
quickPanel={quickPanel}
|
||||
|
||||
@@ -102,10 +102,12 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
|
||||
)
|
||||
}
|
||||
|
||||
const normalizeThinkingTime = (value?: number) => (typeof value === 'number' && Number.isFinite(value) ? value : 0)
|
||||
|
||||
const ThinkingTimeSeconds = memo(
|
||||
({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
|
||||
const { t } = useTranslation()
|
||||
const [displayTime, setDisplayTime] = useState(blockThinkingTime)
|
||||
const [displayTime, setDisplayTime] = useState(normalizeThinkingTime(blockThinkingTime))
|
||||
|
||||
const timer = useRef<NodeJS.Timeout | null>(null)
|
||||
|
||||
@@ -121,7 +123,7 @@ const ThinkingTimeSeconds = memo(
|
||||
clearInterval(timer.current)
|
||||
timer.current = null
|
||||
}
|
||||
setDisplayTime(blockThinkingTime)
|
||||
setDisplayTime(normalizeThinkingTime(blockThinkingTime))
|
||||
}
|
||||
|
||||
return () => {
|
||||
@@ -132,10 +134,10 @@ const ThinkingTimeSeconds = memo(
|
||||
}
|
||||
}, [isThinking, blockThinkingTime])
|
||||
|
||||
const thinkingTimeSeconds = useMemo(
|
||||
() => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
|
||||
[displayTime]
|
||||
)
|
||||
const thinkingTimeSeconds = useMemo(() => {
|
||||
const safeTime = normalizeThinkingTime(displayTime)
|
||||
return ((safeTime < 1000 ? 100 : safeTime) / 1000).toFixed(1)
|
||||
}, [displayTime])
|
||||
|
||||
return isThinking
|
||||
? t('chat.thinking', {
|
||||
|
||||
@@ -255,6 +255,20 @@ describe('ThinkingBlock', () => {
|
||||
unmount()
|
||||
})
|
||||
})
|
||||
|
||||
it('should clamp invalid thinking times to a safe default', () => {
|
||||
const testCases = [undefined, Number.NaN, Number.POSITIVE_INFINITY]
|
||||
|
||||
testCases.forEach((thinking_millsec) => {
|
||||
const block = createThinkingBlock({
|
||||
thinking_millsec: thinking_millsec as any,
|
||||
status: MessageBlockStatus.SUCCESS
|
||||
})
|
||||
const { unmount } = renderThinkingBlock(block)
|
||||
expect(getThinkingTimeText()).toHaveTextContent('0.1s')
|
||||
unmount()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('collapse behavior', () => {
|
||||
|
||||
@@ -10,6 +10,7 @@ import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import type { RootState } from '@renderer/store'
|
||||
// import { selectCurrentTopicId } from '@renderer/store/newMessage'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { Button, Drawer, Tooltip } from 'antd'
|
||||
import type { FC } from 'react'
|
||||
import { useCallback, useEffect, useRef, useState } from 'react'
|
||||
@@ -118,7 +119,8 @@ const ChatNavigation: FC<ChatNavigationProps> = ({ containerId }) => {
|
||||
}
|
||||
|
||||
const scrollToMessage = (element: HTMLElement) => {
|
||||
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
// Use container: 'nearest' to keep scroll within the chat pane (Chromium-only, see #11565, #11567)
|
||||
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
}
|
||||
|
||||
const scrollToTop = () => {
|
||||
|
||||
@@ -15,6 +15,7 @@ import { estimateMessageUsage } from '@renderer/services/TokenService'
|
||||
import type { Assistant, Topic } from '@renderer/types'
|
||||
import type { Message, MessageBlock } from '@renderer/types/newMessage'
|
||||
import { classNames, cn } from '@renderer/utils'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
|
||||
import { Divider } from 'antd'
|
||||
import type { Dispatch, FC, SetStateAction } from 'react'
|
||||
@@ -79,9 +80,10 @@ const MessageItem: FC<Props> = ({
|
||||
|
||||
useEffect(() => {
|
||||
if (isEditing && messageContainerRef.current) {
|
||||
messageContainerRef.current.scrollIntoView({
|
||||
scrollIntoView(messageContainerRef.current, {
|
||||
behavior: 'smooth',
|
||||
block: 'center'
|
||||
block: 'center',
|
||||
container: 'nearest'
|
||||
})
|
||||
}
|
||||
}, [isEditing])
|
||||
@@ -124,7 +126,7 @@ const MessageItem: FC<Props> = ({
|
||||
const messageHighlightHandler = useCallback(
|
||||
(highlight: boolean = true) => {
|
||||
if (messageContainerRef.current) {
|
||||
messageContainerRef.current.scrollIntoView({ behavior: 'smooth' })
|
||||
scrollIntoView(messageContainerRef.current, { behavior: 'smooth', block: 'center', container: 'nearest' })
|
||||
if (highlight) {
|
||||
setTimeoutTimer(
|
||||
'messageHighlightHandler',
|
||||
|
||||
@@ -12,6 +12,7 @@ import { newMessagesActions } from '@renderer/store/newMessage'
|
||||
// import { updateMessageThunk } from '@renderer/store/thunk/messageThunk'
|
||||
import type { Message } from '@renderer/types/newMessage'
|
||||
import { isEmoji, removeLeadingEmoji } from '@renderer/utils'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { getMainTextContent } from '@renderer/utils/messageUtils/find'
|
||||
import { Avatar } from 'antd'
|
||||
import { CircleChevronDown } from 'lucide-react'
|
||||
@@ -119,7 +120,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
|
||||
() => {
|
||||
const messageElement = document.getElementById(`message-${message.id}`)
|
||||
if (messageElement) {
|
||||
messageElement.scrollIntoView({ behavior: 'auto', block: 'start' })
|
||||
scrollIntoView(messageElement, { behavior: 'auto', block: 'start', container: 'nearest' })
|
||||
}
|
||||
},
|
||||
100
|
||||
@@ -141,7 +142,7 @@ const MessageAnchorLine: FC<MessageLineProps> = ({ messages }) => {
|
||||
return
|
||||
}
|
||||
|
||||
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
},
|
||||
[setSelectedMessage]
|
||||
)
|
||||
|
||||
@@ -10,6 +10,7 @@ import type { MultiModelMessageStyle } from '@renderer/store/settings'
|
||||
import type { Topic } from '@renderer/types'
|
||||
import type { Message } from '@renderer/types/newMessage'
|
||||
import { classNames } from '@renderer/utils'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import { Popover } from 'antd'
|
||||
import type { ComponentProps } from 'react'
|
||||
import { memo, useCallback, useEffect, useMemo, useState } from 'react'
|
||||
@@ -73,7 +74,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
|
||||
() => {
|
||||
const messageElement = document.getElementById(`message-${message.id}`)
|
||||
if (messageElement) {
|
||||
messageElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
scrollIntoView(messageElement, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
}
|
||||
},
|
||||
200
|
||||
@@ -132,7 +133,7 @@ const MessageGroup = ({ messages, topic, registerMessageElement }: Props) => {
|
||||
setSelectedMessage(message)
|
||||
} else {
|
||||
// 直接滚动
|
||||
element.scrollIntoView({ behavior: 'smooth', block: 'start' })
|
||||
scrollIntoView(element, { behavior: 'smooth', block: 'start', container: 'nearest' })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import type { RootState } from '@renderer/store'
|
||||
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
|
||||
import type { Message } from '@renderer/types/newMessage'
|
||||
import { MessageBlockType } from '@renderer/types/newMessage'
|
||||
import { scrollIntoView } from '@renderer/utils/dom'
|
||||
import type { FC } from 'react'
|
||||
import React, { useMemo, useRef } from 'react'
|
||||
import { useSelector } from 'react-redux'
|
||||
@@ -72,10 +73,10 @@ const MessageOutline: FC<MessageOutlineProps> = ({ message }) => {
|
||||
const parent = messageOutlineContainerRef.current?.parentElement
|
||||
const messageContentContainer = parent?.querySelector('.message-content-container')
|
||||
if (messageContentContainer) {
|
||||
const headingElement = messageContentContainer.querySelector(`#${id}`)
|
||||
const headingElement = messageContentContainer.querySelector<HTMLElement>(`#${id}`)
|
||||
if (headingElement) {
|
||||
const scrollBlock = ['horizontal', 'grid'].includes(message.multiModelMessageStyle ?? '') ? 'nearest' : 'start'
|
||||
headingElement.scrollIntoView({ behavior: 'smooth', block: scrollBlock })
|
||||
scrollIntoView(headingElement, { behavior: 'smooth', block: scrollBlock, container: 'nearest' })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,6 @@ import { Terminal } from 'lucide-react'
|
||||
import { ToolTitle } from './GenericTools'
|
||||
import type { BashToolInput as BashToolInputType, BashToolOutput as BashToolOutputType } from './types'
|
||||
|
||||
const MAX_TAG_LENGTH = 100
|
||||
|
||||
export function BashTool({
|
||||
input,
|
||||
output
|
||||
@@ -17,12 +15,10 @@ export function BashTool({
|
||||
// 如果有输出,计算输出行数
|
||||
const outputLines = output ? output.split('\n').length : 0
|
||||
|
||||
// 处理命令字符串的截断,添加空值检查
|
||||
// 处理命令字符串,添加空值检查
|
||||
const command = input?.command ?? ''
|
||||
const needsTruncate = command.length > MAX_TAG_LENGTH
|
||||
const displayCommand = needsTruncate ? `${command.slice(0, MAX_TAG_LENGTH)}...` : command
|
||||
|
||||
const tagContent = <Tag className="whitespace-pre-wrap break-all font-mono">{displayCommand}</Tag>
|
||||
const tagContent = <Tag className="!m-0 max-w-full truncate font-mono">{command}</Tag>
|
||||
|
||||
return {
|
||||
key: 'tool',
|
||||
@@ -34,16 +30,12 @@ export function BashTool({
|
||||
params={input?.description}
|
||||
stats={output ? `${outputLines} ${outputLines === 1 ? 'line' : 'lines'}` : undefined}
|
||||
/>
|
||||
<div className="mt-1">
|
||||
{needsTruncate ? (
|
||||
<Popover
|
||||
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono">{command}</div>}
|
||||
trigger="hover">
|
||||
{tagContent}
|
||||
</Popover>
|
||||
) : (
|
||||
tagContent
|
||||
)}
|
||||
<div className="mt-1 max-w-full">
|
||||
<Popover
|
||||
content={<div className="max-w-xl whitespace-pre-wrap break-all font-mono text-xs">{command}</div>}
|
||||
trigger="hover">
|
||||
{tagContent}
|
||||
</Popover>
|
||||
</div>
|
||||
</>
|
||||
),
|
||||
|
||||
@@ -18,9 +18,9 @@ export function ToolTitle({
|
||||
}) {
|
||||
return (
|
||||
<div className={`flex items-center gap-1 ${className}`}>
|
||||
{icon}
|
||||
{label && <span className="font-medium text-sm">{label}</span>}
|
||||
{params && <span className="flex-shrink-0 text-muted-foreground text-xs">{params}</span>}
|
||||
{icon && <span className="flex flex-shrink-0">{icon}</span>}
|
||||
{label && <span className="flex-shrink-0 font-medium text-sm">{label}</span>}
|
||||
{params && <span className="min-w-0 truncate text-muted-foreground text-xs">{params}</span>}
|
||||
{stats && <span className="flex-shrink-0 text-muted-foreground text-xs">{stats}</span>}
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
import { loggerService } from '@logger'
|
||||
import { useAppSelector } from '@renderer/store'
|
||||
import { selectPendingPermission } from '@renderer/store/toolPermissions'
|
||||
import type { NormalToolResponse } from '@renderer/types'
|
||||
import type { CollapseProps } from 'antd'
|
||||
import { Collapse } from 'antd'
|
||||
import { Collapse, Spin } from 'antd'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
// 导出所有类型
|
||||
export * from './types'
|
||||
@@ -83,17 +86,41 @@ function ToolContent({ toolName, input, output }: { toolName: AgentToolsType; in
|
||||
// 统一的组件渲染入口
|
||||
export function MessageAgentTools({ toolResponse }: { toolResponse: NormalToolResponse }) {
|
||||
const { arguments: args, response, tool, status } = toolResponse
|
||||
logger.info('Rendering agent tool response', {
|
||||
logger.debug('Rendering agent tool response', {
|
||||
tool: tool,
|
||||
arguments: args,
|
||||
status,
|
||||
response
|
||||
})
|
||||
|
||||
const pendingPermission = useAppSelector((state) =>
|
||||
selectPendingPermission(state.toolPermissions, toolResponse.toolCallId)
|
||||
)
|
||||
|
||||
if (status === 'pending') {
|
||||
return <ToolPermissionRequestCard toolResponse={toolResponse} />
|
||||
if (pendingPermission) {
|
||||
return <ToolPermissionRequestCard toolResponse={toolResponse} />
|
||||
}
|
||||
return <ToolPendingIndicator toolName={tool?.name} description={tool?.description} />
|
||||
}
|
||||
|
||||
return (
|
||||
<ToolContent toolName={tool.name as AgentToolsType} input={args as ToolInput} output={response as ToolOutput} />
|
||||
)
|
||||
}
|
||||
|
||||
function ToolPendingIndicator({ toolName, description }: { toolName?: string; description?: string }) {
|
||||
const { t } = useTranslation()
|
||||
const label = toolName || t('agent.toolPermission.toolPendingFallback', 'Tool')
|
||||
const detail = description?.trim() || t('agent.toolPermission.executing')
|
||||
|
||||
return (
|
||||
<div className="flex w-full max-w-xl items-center gap-3 rounded-xl border border-default-200 bg-default-100 px-4 py-3 shadow-sm">
|
||||
<Spin size="small" />
|
||||
<div className="flex flex-col gap-1">
|
||||
<span className="font-semibold text-default-700 text-sm">{label}</span>
|
||||
<span className="text-default-500 text-xs">{detail}</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -85,7 +85,8 @@ const ANTHROPIC_COMPATIBLE_PROVIDER_IDS = [
|
||||
SystemProviderIds.minimax,
|
||||
SystemProviderIds.silicon,
|
||||
SystemProviderIds.qiniu,
|
||||
SystemProviderIds.dmxapi
|
||||
SystemProviderIds.dmxapi,
|
||||
SystemProviderIds.ppio
|
||||
] as const
|
||||
type AnthropicCompatibleProviderId = (typeof ANTHROPIC_COMPATIBLE_PROVIDER_IDS)[number]
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import type { FetchChatCompletionParams } from '@renderer/types'
|
||||
import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
|
||||
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
|
||||
import { type Chunk, ChunkType } from '@renderer/types/chunk'
|
||||
import type { Message } from '@renderer/types/newMessage'
|
||||
import type { Message, ResponseError } from '@renderer/types/newMessage'
|
||||
import type { SdkModel } from '@renderer/types/sdk'
|
||||
import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
|
||||
import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
|
||||
@@ -476,7 +476,7 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
|
||||
} else {
|
||||
const abortId = uuid()
|
||||
const signal = readyToAbort(abortId)
|
||||
let chunkError
|
||||
let streamError: ResponseError | undefined
|
||||
const params: StreamTextParams = {
|
||||
system: assistant.prompt,
|
||||
prompt: 'hi',
|
||||
@@ -495,19 +495,18 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
|
||||
callType: 'check',
|
||||
onChunk: (chunk: Chunk) => {
|
||||
if (chunk.type === ChunkType.ERROR) {
|
||||
chunkError = chunk.error
|
||||
streamError = chunk.error
|
||||
} else {
|
||||
abortCompletion(abortId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try streaming check
|
||||
try {
|
||||
await ai.completions(model.id, params, config)
|
||||
} catch (e) {
|
||||
if (!isAbortError(e) && !isAbortError(chunkError)) {
|
||||
throw e
|
||||
if (!isAbortError(e) && !isAbortError(streamError)) {
|
||||
throw streamError ?? e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
|
||||
{
|
||||
key: 'cherry-studio',
|
||||
storage,
|
||||
version: 179,
|
||||
version: 180,
|
||||
blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
|
||||
migrate
|
||||
},
|
||||
|
||||
@@ -2906,6 +2906,20 @@ const migrateConfig = {
|
||||
logger.error('migrate 179 error', error as Error)
|
||||
return state
|
||||
}
|
||||
},
|
||||
'180': (state: RootState) => {
|
||||
try {
|
||||
state.llm.providers.forEach((provider) => {
|
||||
if (provider.id === SystemProviderIds.ppio) {
|
||||
provider.anthropicApiHost = 'https://api.ppinfra.com/anthropic'
|
||||
}
|
||||
})
|
||||
logger.info('migrate 180 success')
|
||||
return state
|
||||
} catch (error) {
|
||||
logger.error('migrate 180 error', error as Error)
|
||||
return state
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,8 @@ import type { CSSProperties } from 'react'
|
||||
export * from './file'
|
||||
export * from './note'
|
||||
|
||||
import type { MinimalModel } from '@shared/provider/types'
|
||||
|
||||
import type { StreamTextParams } from './aiCoreTypes'
|
||||
import type { Chunk } from './chunk'
|
||||
import type { FileMetadata } from './file'
|
||||
@@ -239,6 +241,7 @@ export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'functio
|
||||
|
||||
export type ModelTag = Exclude<ModelType, 'text'> | 'free'
|
||||
|
||||
// "image-generation" is also openai endpoint, but specifically for image generation.
|
||||
export type EndpointType = 'openai' | 'openai-response' | 'anthropic' | 'gemini' | 'image-generation' | 'jina-rerank'
|
||||
|
||||
export type ModelPricing = {
|
||||
@@ -256,7 +259,7 @@ export type ModelCapability = {
|
||||
isUserSelected?: boolean
|
||||
}
|
||||
|
||||
export type Model = {
|
||||
export type Model = MinimalModel & {
|
||||
id: string
|
||||
provider: string
|
||||
name: string
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user