Compare commits: fix/valida... → v1.2.2 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 6d46308915 |  |
|  | 55e5a5bf58 |  |
electron-builder config — updated release notes for v1.2.2 (translated from Chinese):

```diff
@@ -85,7 +85,9 @@ afterPack: scripts/after-pack.js
 afterSign: scripts/notarize.js
 releaseInfo:
   releaseNotes: |
+    Knowledge base and provider UI updates
+    Added the Dangbei mini program
+    Search engines can now be forced to override a model's built-in search capability
+    Fixed rendering failures for some formulas
+    Added support for grok-3 and Grok-3-mini
+    Assistants can be sorted by pinyin
+    Web search: added Baidu, Google, and Bing support (free to use)
+    Web search: added uBlacklist subscriptions
+    Performance optimizations for the QuickPanel
+    Fixed slow downloads of MCP dependency tools
```
package.json — version bump:

```diff
@@ -1,6 +1,6 @@
 {
   "name": "CherryStudio",
-  "version": "1.2.1",
+  "version": "1.2.2",
   "private": true,
   "description": "A powerful AI assistant for producer.",
   "main": "./out/main/index.js",
```
Inputbar component — MCP tool controls are now gated on function-calling support:

```diff
@@ -15,7 +15,7 @@ import {
 } from '@ant-design/icons'
 import { QuickPanelListItem, QuickPanelView, useQuickPanel } from '@renderer/components/QuickPanel'
 import TranslateButton from '@renderer/components/TranslateButton'
-import { isGenerateImageModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isGenerateImageModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
 import db from '@renderer/databases'
 import { useAssistant } from '@renderer/hooks/useAssistant'
 import { useKnowledgeBases } from '@renderer/hooks/useKnowledge'
@@ -119,7 +119,7 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
   const quickPanel = useQuickPanel()

   const showKnowledgeIcon = useSidebarIconShow('knowledge')
-  // const showMCPToolsIcon = isFunctionCallingModel(model)
+  const showMCPToolsIcon = isFunctionCallingModel(model)

   const [tokenCount, setTokenCount] = useState(0)

@@ -199,8 +199,10 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic, topic }) =
       userMessage.mentions = mentionModels
     }

-    if (!isEmpty(enabledMCPs) && !isEmpty(activedMcpServers)) {
-      userMessage.enabledMCPs = activedMcpServers.filter((server) => enabledMCPs?.some((s) => s.id === server.id))
+    if (isFunctionCallingModel(model)) {
+      if (!isEmpty(enabledMCPs) && !isEmpty(activedMcpServers)) {
+        userMessage.enabledMCPs = activedMcpServers.filter((server) => enabledMCPs?.some((s) => s.id === server.id))
+      }
     }

     userMessage.usage = await estimateMessageUsage(userMessage)
@@ -229,6 +231,7 @@
       inputEmpty,
       loading,
       mentionModels,
+      model,
       resizeTextArea,
       selectedKnowledgeBases,
       text,
@@ -344,16 +347,17 @@
         description: '',
         icon: <FileSearchOutlined />,
         isMenu: true,
-        disabled: files.length > 0,
+        disabled: !showKnowledgeIcon || files.length > 0,
         action: () => {
           knowledgeBaseButtonRef.current?.openQuickPanel()
         }
       },
       {
         label: t('settings.mcp.title'),
-        description: t('settings.mcp.not_support'),
+        description: showMCPToolsIcon ? '' : t('settings.mcp.not_support'),
         icon: <CodeOutlined />,
         isMenu: true,
         disabled: !showMCPToolsIcon,
         action: () => {
           mcpToolsButtonRef.current?.openQuickPanel()
         }
@@ -375,7 +379,7 @@
         }
       }
     ]
-  }, [files.length, model, openSelectFileMenu, t, text, translate])
+  }, [files.length, model, openSelectFileMenu, showKnowledgeIcon, showMCPToolsIcon, t, text, translate])

   const handleKeyDown = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
     const isEnterPressed = event.keyCode == 13
@@ -955,12 +959,14 @@
           disabled={files.length > 0}
         />
       )}
-      <MCPToolsButton
-        ref={mcpToolsButtonRef}
-        enabledMCPs={enabledMCPs}
-        toggelEnableMCP={toggelEnableMCP}
-        ToolbarButton={ToolbarButton}
-      />
+      {showMCPToolsIcon && (
+        <MCPToolsButton
+          ref={mcpToolsButtonRef}
+          enabledMCPs={enabledMCPs}
+          toggelEnableMCP={toggelEnableMCP}
+          ToolbarButton={ToolbarButton}
+        />
+      )}
       <GenerateImageButton
         model={model}
         assistant={assistant}
```
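The pattern here is that MCP availability is derived from the model's capabilities rather than toggled independently: the same `isFunctionCallingModel(model)` check drives the quick-panel entry, the toolbar button, and whether `enabledMCPs` is attached to the outgoing message. A minimal sketch of the idea (the `Model` shape and the heuristic body are illustrative assumptions, not the repository's definitions):

```typescript
// Sketch: derive UI availability from model capability.
// `Model` and the id heuristic are assumptions for illustration.
interface Model {
  id: string
}

const isFunctionCallingModel = (model: Model): boolean =>
  /gpt-4|claude|glm-4|qwen/i.test(model.id) // placeholder heuristic

interface ToolbarItem {
  label: string
  description: string
  disabled: boolean
}

function buildMcpToolbarItem(model: Model, t: (key: string) => string): ToolbarItem {
  const showMCPToolsIcon = isFunctionCallingModel(model)
  return {
    label: t('settings.mcp.title'),
    // Only explain *why* the entry is unavailable when it actually is.
    description: showMCPToolsIcon ? '' : t('settings.mcp.not_support'),
    disabled: !showMCPToolsIcon
  }
}
```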
MessageContent component — inline <tool_use> stripping is removed:

```diff
@@ -197,7 +197,7 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
     const content = `[@${model.name}](#) ${getBriefInfo(message.content)}`
     return <Markdown message={{ ...message, content }} />
   }
-  const toolUseRegex = /<tool_use>([\s\S]*?)<\/tool_use>/g

   return (
     <Fragment>
       <Flex gap="8px" wrap style={{ marginBottom: 10 }}>
@@ -205,7 +205,7 @@
       </Flex>
       <MessageThought message={message} />
       <MessageTools message={message} />
-      <Markdown message={{ ...message, content: processedContent.replace(toolUseRegex, '') }} />
+      <Markdown message={{ ...message, content: processedContent }} />
       {message.metadata?.generateImage && <MessageImage message={message} />}
       {message.translatedContent && (
         <Fragment>
```
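Under the old prompt-based protocol the model's text itself carried `<tool_use>` XML, so the renderer had to strip it; with native tool calls those blocks never appear in `processedContent`, and both the regex and the `replace` call can go. What the removed regex did, in isolation:

```typescript
// Behavior of the regex removed above, applied to a sample string.
const toolUseRegex = /<tool_use>([\s\S]*?)<\/tool_use>/g

const sample = 'Before <tool_use><name>search</name></tool_use> after'
console.log(sample.replace(toolUseRegex, '')) // -> "Before  after"
```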
AnthropicProvider — prompt-based tool use replaced with native tool calls:

```diff
@@ -1,5 +1,10 @@
 import Anthropic from '@anthropic-ai/sdk'
-import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources'
+import {
+  MessageCreateParamsNonStreaming,
+  MessageParam,
+  ToolResultBlockParam,
+  ToolUseBlock
+} from '@anthropic-ai/sdk/resources'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import { isReasoningModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
@@ -12,9 +17,13 @@ import {
 } from '@renderer/services/MessagesService'
 import { Assistant, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
-import { parseAndCallTools } from '@renderer/utils/mcp-tools'
-import { buildSystemPrompt } from '@renderer/utils/prompt'
-import { first, flatten, sum, takeRight } from 'lodash'
+import {
+  anthropicToolUseToMcpTool,
+  callMCPTool,
+  mcpToolsToAnthropicTools,
+  upsertMCPToolResponse
+} from '@renderer/utils/mcp-tools'
+import { first, flatten, isEmpty, sum, takeRight } from 'lodash'
 import OpenAI from 'openai'

 import { CompletionsParams } from '.'
@@ -173,21 +182,16 @@ export default class AnthropicProvider extends BaseProvider {

     const userMessages = flatten(userMessagesParams)
     const lastUserMessage = _messages.findLast((m) => m.role === 'user')
-    // const tools = mcpTools ? mcpToolsToAnthropicTools(mcpTools) : undefined
-
-    let systemPrompt = assistant.prompt
-    if (mcpTools && mcpTools.length > 0) {
-      systemPrompt = buildSystemPrompt(systemPrompt, mcpTools)
-    }
+    const tools = mcpTools ? mcpToolsToAnthropicTools(mcpTools) : undefined

     const body: MessageCreateParamsNonStreaming = {
       model: model.id,
       messages: userMessages,
-      // tools: isEmpty(tools) ? undefined : tools,
+      tools: isEmpty(tools) ? undefined : tools,
       max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
       temperature: this.getTemperature(assistant, model),
       top_p: this.getTopP(assistant, model),
-      system: systemPrompt,
+      system: assistant.prompt,
       // @ts-ignore thinking
       thinking: this.getReasoningEffort(assistant, model),
       ...this.getCustomParameters(assistant)
@@ -235,6 +239,7 @@

     const processStream = (body: MessageCreateParamsNonStreaming, idx: number) => {
       return new Promise<void>((resolve, reject) => {
+        const toolCalls: ToolUseBlock[] = []
         let hasThinkingContent = false
         this.sdk.messages
           .stream({ ...body, stream: true }, { signal })
@@ -287,11 +292,30 @@
             }
           })
         })
+          .on('contentBlock', (content) => {
+            if (content.type == 'tool_use') {
+              toolCalls.push(content)
+            }
+          })
           .on('finalMessage', async (message) => {
             const content = message.content[0]
             if (content && content.type === 'text') {
-              const toolResults = await parseAndCallTools(content.text, toolResponses, onChunk, idx, mcpTools)
-              if (toolResults.length > 0) {
+              if (toolCalls.length > 0) {
+                const toolCallResults: ToolResultBlockParam[] = []
+
+                for (const toolCall of toolCalls) {
+                  const mcpTool = anthropicToolUseToMcpTool(mcpTools, toolCall)
+                  if (mcpTool) {
+                    upsertMCPToolResponse(toolResponses, { tool: mcpTool, status: 'invoking', id: toolCall.id }, onChunk)
+                    const resp = await callMCPTool(mcpTool)
+                    toolCallResults.push({ type: 'tool_result', tool_use_id: toolCall.id, content: resp.content })
+                    upsertMCPToolResponse(
+                      toolResponses,
+                      { tool: mcpTool, status: 'done', response: resp, id: toolCall.id },
+                      onChunk
+                    )
+                  }
+                }
+
+                if (toolCallResults.length > 0) {
                   userMessages.push({
                     role: message.role,
                     content: message.content
@@ -299,10 +323,12 @@

                   userMessages.push({
                     role: 'user',
-                    content: toolResults.join('\n')
+                    content: toolCallResults
                   })

-                  const newBody = body
-                  newBody.messages = userMessages
+                  body.messages = userMessages

-                  await processStream(newBody, idx + 1)
+                  await processStream(body, idx + 1)
                 }
               }
```
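The new Anthropic flow is: collect `tool_use` content blocks while streaming, execute each through MCP, then append the assistant turn plus a user turn of `tool_result` blocks and stream again. A simplified sketch of that loop with stand-in types (the real code uses the Anthropic SDK's stream events and `callMCPTool`):

```typescript
// Stand-in types; the real code uses @anthropic-ai/sdk resource types.
type ToolUse = { id: string; name: string; input: unknown }
type ToolResult = { type: 'tool_result'; tool_use_id: string; content: string }
type Turn = { role: 'assistant' | 'user'; content: unknown }

// Assumed executor: in the app this is callMCPTool on the matched MCP tool.
declare function runTool(call: ToolUse): Promise<string>

async function toolLoop(
  send: (messages: Turn[]) => Promise<{ toolCalls: ToolUse[]; content: unknown }>,
  messages: Turn[]
): Promise<void> {
  const reply = await send(messages)
  if (reply.toolCalls.length === 0) return // plain answer, loop ends

  const results: ToolResult[] = []
  for (const call of reply.toolCalls) {
    results.push({ type: 'tool_result', tool_use_id: call.id, content: await runTool(call) })
  }
  // Echo the assistant turn, answer it with tool results, then recurse.
  messages.push({ role: 'assistant', content: reply.content })
  messages.push({ role: 'user', content: results })
  await toolLoop(send, messages)
}
```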
GeminiProvider — prompt-based tool use replaced with native function calling:

```diff
@@ -8,6 +8,8 @@ import {
 import {
   Content,
   FileDataPart,
+  FunctionCallPart,
+  FunctionResponsePart,
   GenerateContentStreamResult,
   GoogleGenerativeAI,
   HarmBlockThreshold,
@@ -16,8 +18,7 @@ import {
   Part,
   RequestOptions,
   SafetySetting,
-  TextPart,
-  Tool
+  TextPart
 } from '@google/generative-ai'
 import { isGemmaModel, isWebSearchModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
@@ -32,8 +33,12 @@ import {
 import WebSearchService from '@renderer/services/WebSearchService'
 import { Assistant, FileType, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
-import { parseAndCallTools } from '@renderer/utils/mcp-tools'
-import { buildSystemPrompt } from '@renderer/utils/prompt'
+import {
+  callMCPTool,
+  geminiFunctionCallToMcpTool,
+  mcpToolsToGeminiTools,
+  upsertMCPToolResponse
+} from '@renderer/utils/mcp-tools'
 import { MB } from '@shared/config/constant'
 import axios from 'axios'
 import { isEmpty, takeRight } from 'lodash'
@@ -225,14 +230,7 @@ export default class GeminiProvider extends BaseProvider {
       history.push(await this.getMessageContents(message))
     }

-    let systemInstruction = assistant.prompt
-
-    if (mcpTools && mcpTools.length > 0) {
-      systemInstruction = buildSystemPrompt(assistant.prompt || '', mcpTools)
-    }
-
-    // const tools = mcpToolsToGeminiTools(mcpTools)
-    const tools: Tool[] = []
+    const tools = mcpToolsToGeminiTools(mcpTools)
     const toolResponses: MCPToolResponse[] = []

     if (!WebSearchService.isOverwriteEnabled() && assistant.enableWebSearch && isWebSearchModel(model)) {
@@ -245,7 +243,7 @@
     const geminiModel = this.sdk.getGenerativeModel(
       {
         model: model.id,
-        ...(isGemmaModel(model) ? {} : { systemInstruction: systemInstruction }),
+        ...(isGemmaModel(model) ? {} : { systemInstruction: assistant.prompt }),
         safetySettings: this.getSafetySettings(model.id),
         tools: tools,
         generationConfig: {
@@ -270,7 +268,7 @@
         {
           text:
             '<start_of_turn>user\n' +
-            systemInstruction +
+            assistant.prompt +
             '<end_of_turn>\n' +
             '<start_of_turn>user\n' +
             messageContents.parts[0].text +
@@ -309,25 +307,7 @@
     const userMessagesStream = await chat.sendMessageStream(messageContents.parts, { signal })
     let time_first_token_millsec = 0

-    const processToolUses = async (content: string, idx: number) => {
-      const toolResults = await parseAndCallTools(content, toolResponses, onChunk, idx, mcpTools)
-      if (toolResults && toolResults.length > 0) {
-        history.push(messageContents)
-        const newChat = geminiModel.startChat({ history })
-        const newStream = await newChat.sendMessageStream(
-          [
-            {
-              text: toolResults.join('\n')
-            }
-          ],
-          { signal }
-        )
-        await processStream(newStream, idx + 1)
-      }
-    }
-
     const processStream = async (stream: GenerateContentStreamResult, idx: number) => {
       let content = ''
       for await (const chunk of stream.stream) {
         if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
@@ -337,8 +317,56 @@

         const time_completion_millsec = new Date().getTime() - start_time_millsec

-        content += chunk.text()
-        processToolUses(content, idx)
+        const functionCalls = chunk.functionCalls()
+
+        if (functionCalls) {
+          const fcallParts: FunctionCallPart[] = []
+          const fcRespParts: FunctionResponsePart[] = []
+          for (const call of functionCalls) {
+            console.log('Function call:', call)
+            fcallParts.push({ functionCall: call } as FunctionCallPart)
+            const mcpTool = geminiFunctionCallToMcpTool(mcpTools, call)
+            if (mcpTool) {
+              upsertMCPToolResponse(
+                toolResponses,
+                {
+                  tool: mcpTool,
+                  status: 'invoking',
+                  id: `${call.name}-${idx}`
+                },
+                onChunk
+              )
+              const toolCallResponse = await callMCPTool(mcpTool)
+              fcRespParts.push({
+                functionResponse: {
+                  name: mcpTool.id,
+                  response: toolCallResponse
+                }
+              })
+              upsertMCPToolResponse(
+                toolResponses,
+                {
+                  tool: mcpTool,
+                  status: 'done',
+                  response: toolCallResponse,
+                  id: `${call.name}-${idx}`
+                },
+                onChunk
+              )
+            }
+          }
+
+          if (fcRespParts) {
+            history.push(messageContents)
+            history.push({
+              role: 'model',
+              parts: fcallParts
+            })
+            const newChat = geminiModel.startChat({ history })
+            const newStream = await newChat.sendMessageStream(fcRespParts, { signal })
+            await processStream(newStream, idx + 1)
+          }
+        }

         onChunk({
           text: chunk.text(),
```
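The Gemini side mirrors the Anthropic loop: `chunk.functionCalls()` yields the calls, each is executed through MCP, and the model turn plus matching `functionResponse` parts are pushed into `history` before `startChat` runs again. A sketch of the round trip with stand-in shapes (illustrative, not the `@google/generative-ai` types):

```typescript
// Stand-in shapes mirroring the Gemini call/response parts (illustrative only).
type FunctionCall = { name: string; args: Record<string, unknown> }
type FunctionCallPart = { functionCall: FunctionCall }
type FunctionResponsePart = { functionResponse: { name: string; response: unknown } }

// Assumed executor standing in for callMCPTool on the matched MCP tool.
declare function execute(call: FunctionCall): Promise<unknown>

async function answerFunctionCalls(calls: FunctionCall[]): Promise<{
  modelTurn: FunctionCallPart[]
  userTurn: FunctionResponsePart[]
}> {
  const modelTurn: FunctionCallPart[] = []
  const userTurn: FunctionResponsePart[] = []
  for (const call of calls) {
    modelTurn.push({ functionCall: call })
    // The response parts are what the follow-up sendMessageStream receives.
    userTurn.push({ functionResponse: { name: call.name, response: await execute(call) } })
  }
  return { modelTurn, userTurn }
}
```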
OpenAIProvider — imports switch from the prompt builder to native tool-call helpers:

```diff
@@ -32,14 +32,21 @@ import {
 } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import { addImageFileToContents } from '@renderer/utils/formats'
-import { parseAndCallTools } from '@renderer/utils/mcp-tools'
-import { buildSystemPrompt } from '@renderer/utils/prompt'
+import {
+  callMCPTool,
+  mcpToolsToOpenAITools,
+  openAIToolsToMcpTool,
+  upsertMCPToolResponse
+} from '@renderer/utils/mcp-tools'
 import { isEmpty, takeRight } from 'lodash'
 import OpenAI, { AzureOpenAI } from 'openai'
 import {
   ChatCompletionAssistantMessageParam,
   ChatCompletionContentPart,
   ChatCompletionCreateParamsNonStreaming,
-  ChatCompletionMessageParam
+  ChatCompletionMessageParam,
+  ChatCompletionMessageToolCall,
+  ChatCompletionToolMessageParam
 } from 'openai/resources'

 import { CompletionsParams } from '.'
```
OpenAIProvider — new helpers: Zhipu GLM-4-alltools detection and tool-call argument cleanup:

```diff
@@ -296,6 +303,55 @@
     return model.id.startsWith('o1') || model.id.startsWith('o3')
   }

+  /**
+   * Check if the model is a Glm-4-alltools
+   * @param model - The model
+   * @returns True if the model is a Glm-4-alltools, false otherwise
+   */
+  private isZhipuTool(model: Model) {
+    return model.id.includes('glm-4-alltools')
+  }
+
+  /**
+   * Clean the tool call arguments
+   * @param toolCall - The tool call
+   * @returns The cleaned tool call
+   */
+  private cleanToolCallArgs(toolCall: ChatCompletionMessageToolCall): ChatCompletionMessageToolCall {
+    if (toolCall.function.arguments) {
+      let args = toolCall.function.arguments
+      const codeBlockRegex = /```(?:\w*\n)?([\s\S]*?)```/
+      const match = args.match(codeBlockRegex)
+      if (match) {
+        // Extract content from code block
+        let extractedArgs = match[1].trim()
+        // Clean function call format like tool_call(name1=value1,name2=value2)
+        const functionCallRegex = /^\s*\w+\s*\(([\s\S]*?)\)\s*$/
+        const functionMatch = extractedArgs.match(functionCallRegex)
+        if (functionMatch) {
+          // Try to convert parameters to JSON format
+          const params = functionMatch[1].split(',').filter(Boolean)
+          const paramsObj = {}
+          params.forEach((param) => {
+            const [name, value] = param.split('=').map((p) => p.trim())
+            if (name && value !== undefined) {
+              paramsObj[name] = value
+            }
+          })
+          extractedArgs = JSON.stringify(paramsObj)
+        }
+        toolCall.function.arguments = extractedArgs
+      }
+      args = toolCall.function.arguments
+      const firstBraceIndex = args.indexOf('{')
+      const lastBraceIndex = args.lastIndexOf('}')
+      if (firstBraceIndex !== -1 && lastBraceIndex !== -1 && firstBraceIndex < lastBraceIndex) {
+        toolCall.function.arguments = args.substring(firstBraceIndex, lastBraceIndex + 1)
+      }
+    }
+    return toolCall
+  }
+
   /**
    * Generate completions for the assistant
    * @param messages - The messages
```
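`cleanToolCallArgs` is defensive parsing for models that wrap their JSON arguments in a fenced code block or emit `tool_call(name=value, ...)` pseudo-syntax. Two illustrative inputs and the output I would expect from the regexes above (worth verifying against the real method):

```typescript
// Illustrative raw `arguments` strings and what the cleanup should produce.
const fenced = '```json\n{"query": "population of Shanghai"}\n```'
// -> '{"query": "population of Shanghai"}'
//    (code fence stripped; the brace-slicing pass keeps the JSON intact)

const pseudo = '```\ntool_call(query=Shanghai, limit=5)\n```'
// -> '{"query":"Shanghai","limit":"5"}'
//    (k=v pairs coerced into a JSON object; all values become strings)
```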
OpenAIProvider — the system message becomes optional and no longer embeds the tool protocol:

```diff
@@ -310,16 +366,14 @@
     const model = assistant.model || defaultModel
     const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
     messages = addImageFileToContents(messages)
-    let systemMessage = { role: 'system', content: assistant.prompt || '' }
+    let systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined

     if (isOpenAIoSeries(model)) {
       systemMessage = {
         role: 'developer',
         content: `Formatting re-enabled${systemMessage ? '\n' + systemMessage.content : ''}`
       }
     }
-    if (mcpTools && mcpTools.length > 0) {
-      systemMessage.content = buildSystemPrompt(systemMessage.content || '', mcpTools)
-    }

     const userMessages: ChatCompletionMessageParam[] = []
     const _messages = filterUserRoleStartMessages(
```
OpenAIProvider — the XML-based processToolUses loop is removed; native tools are built instead:

```diff
@@ -382,51 +436,14 @@
     const { signal } = abortController
     await this.checkIsCopilot()

+    const tools = mcpTools && mcpTools.length > 0 ? mcpToolsToOpenAITools(mcpTools) : undefined
+
     const reqMessages: ChatCompletionMessageParam[] = [systemMessage, ...userMessages].filter(
       Boolean
     ) as ChatCompletionMessageParam[]

     const toolResponses: MCPToolResponse[] = []
     let firstChunk = true

-    const processToolUses = async (content: string, idx: number) => {
-      const toolResults = await parseAndCallTools(content, toolResponses, onChunk, idx, mcpTools)
-
-      if (toolResults.length > 0) {
-        reqMessages.push({
-          role: 'assistant',
-          content: content
-        } as ChatCompletionMessageParam)
-        reqMessages.push({
-          role: 'user',
-          content: toolResults.join('\n')
-        } as ChatCompletionMessageParam)
-
-        const newStream = await this.sdk.chat.completions
-          // @ts-ignore key is not typed
-          .create(
-            {
-              model: model.id,
-              messages: reqMessages,
-              temperature: this.getTemperature(assistant, model),
-              top_p: this.getTopP(assistant, model),
-              max_tokens: maxTokens,
-              keep_alive: this.keepAliveTime,
-              stream: isSupportStreamOutput(),
-              // tools: tools,
-              ...getOpenAIWebSearchParams(assistant, model),
-              ...this.getReasoningEffort(assistant, model),
-              ...this.getProviderSpecificParameters(assistant, model),
-              ...this.getCustomParameters(assistant)
-            },
-            {
-              signal
-            }
-          )
-        await processStream(newStream, idx + 1)
-      }
-    }
-
     const processStream = async (stream: any, idx: number) => {
       if (!isSupportStreamOutput()) {
         const time_completion_millsec = new Date().getTime() - start_time_millsec
```
OpenAIProvider — a map for in-flight tool calls is added to the stream loop:

```diff
@@ -440,17 +457,14 @@
         }
       })
     }
+    const final_tool_calls = {} as Record<number, ChatCompletionMessageToolCall>

     let content = ''
     for await (const chunk of stream) {
       if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
         break
       }

       const delta = chunk.choices[0]?.delta
       if (delta?.content) {
         content += delta.content
       }

       if (delta?.reasoning_content || delta?.reasoning) {
         hasReasoningContent = true
```
OpenAIProvider — streamed tool-call deltas are accumulated by index:

```diff
@@ -472,6 +486,29 @@

       const finishReason = chunk.choices[0]?.finish_reason

+      if (delta?.tool_calls?.length) {
+        const chunkToolCalls = delta.tool_calls
+        for (const t of chunkToolCalls) {
+          const { index, id, function: fn, type } = t
+          const args = fn && typeof fn.arguments === 'string' ? fn.arguments : ''
+          if (!(index in final_tool_calls)) {
+            final_tool_calls[index] = {
+              id,
+              function: {
+                name: fn?.name,
+                arguments: args
+              },
+              type
+            } as ChatCompletionMessageToolCall
+          } else {
+            final_tool_calls[index].function.arguments += args
+          }
+        }
+        if (finishReason !== 'tool_calls') {
+          continue
+        }
+      }
+
       let webSearch: any[] | undefined = undefined
       if (assistant.enableWebSearch && isZhipuModel(model) && finishReason === 'stop') {
         webSearch = chunk?.web_search
```
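Streamed tool calls arrive as fragments keyed by `index`: the first fragment carries `id`, `type`, and the function name, and later fragments only append to the JSON `arguments` string. A self-contained sketch of the same accumulation:

```typescript
// Minimal shape of a streamed tool-call fragment (subset of the OpenAI delta).
type ToolCallDelta = {
  index: number
  id?: string
  type?: 'function'
  function?: { name?: string; arguments?: string }
}

type AccumulatedCall = { id?: string; name?: string; arguments: string }

function accumulate(deltas: ToolCallDelta[]): AccumulatedCall[] {
  const byIndex: Record<number, AccumulatedCall> = {}
  for (const d of deltas) {
    const args = d.function?.arguments ?? ''
    if (!(d.index in byIndex)) {
      byIndex[d.index] = { id: d.id, name: d.function?.name, arguments: args }
    } else {
      byIndex[d.index].arguments += args // later chunks only extend the JSON
    }
  }
  return Object.values(byIndex)
}

// Example: '{"city":' then '"Shanghai"}' reassemble into one argument string.
console.log(
  accumulate([
    { index: 0, id: 'call_1', function: { name: 'search', arguments: '{"city":' } },
    { index: 0, function: { arguments: '"Shanghai"}' } }
  ])
)
```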
OpenAIProvider — accumulated calls are executed through MCP and the request is replayed:

```diff
@@ -480,6 +517,102 @@
         webSearch = chunk?.search_info?.search_results
         firstChunk = true
       }

+      if (finishReason === 'tool_calls' || (finishReason === 'stop' && Object.keys(final_tool_calls).length > 0)) {
+        const toolCalls = Object.values(final_tool_calls).map(this.cleanToolCallArgs)
+        console.log('start invoke tools', toolCalls)
+        if (this.isZhipuTool(model)) {
+          reqMessages.push({
+            role: 'assistant',
+            content: `argments=${JSON.stringify(toolCalls[0].function.arguments)}`
+          })
+        } else {
+          reqMessages.push({
+            role: 'assistant',
+            tool_calls: toolCalls
+          } as ChatCompletionAssistantMessageParam)
+        }
+
+        for (const toolCall of toolCalls) {
+          const mcpTool = openAIToolsToMcpTool(mcpTools, toolCall)
+
+          if (!mcpTool) {
+            continue
+          }
+
+          upsertMCPToolResponse(toolResponses, { tool: mcpTool, status: 'invoking', id: toolCall.id }, onChunk)
+
+          const toolCallResponse = await callMCPTool(mcpTool)
+          const toolResponsContent: { type: string; text?: string; image_url?: { url: string } }[] = []
+          for (const content of toolCallResponse.content) {
+            if (content.type === 'text') {
+              toolResponsContent.push({
+                type: 'text',
+                text: content.text
+              })
+            } else if (content.type === 'image') {
+              toolResponsContent.push({
+                type: 'image_url',
+                image_url: { url: `data:${content.mimeType};base64,${content.data}` }
+              })
+            } else {
+              console.warn('Unsupported content type:', content.type)
+              toolResponsContent.push({
+                type: 'text',
+                text: 'unsupported content type: ' + content.type
+              })
+            }
+          }
+
+          const provider = lastUserMessage?.model?.provider
+          const modelName = lastUserMessage?.model?.name
+
+          if (
+            modelName?.toLocaleLowerCase().includes('gpt') ||
+            (provider === 'dashscope' && modelName?.toLocaleLowerCase().includes('qwen'))
+          ) {
+            reqMessages.push({
+              role: 'tool',
+              content: toolResponsContent,
+              tool_call_id: toolCall.id
+            } as ChatCompletionToolMessageParam)
+          } else {
+            reqMessages.push({
+              role: 'tool',
+              content: JSON.stringify(toolResponsContent),
+              tool_call_id: toolCall.id
+            } as ChatCompletionToolMessageParam)
+          }
+          upsertMCPToolResponse(
+            toolResponses,
+            { tool: mcpTool, status: 'done', response: toolCallResponse, id: toolCall.id },
+            onChunk
+          )
+        }
+        const newStream = await this.sdk.chat.completions
+          // @ts-ignore key is not typed
+          .create(
+            {
+              model: model.id,
+              messages: reqMessages,
+              temperature: this.getTemperature(assistant, model),
+              top_p: this.getTopP(assistant, model),
+              max_tokens: maxTokens,
+              keep_alive: this.keepAliveTime,
+              stream: isSupportStreamOutput(),
+              tools: tools,
+              ...getOpenAIWebSearchParams(assistant, model),
+              ...this.getReasoningEffort(assistant, model),
+              ...this.getProviderSpecificParameters(assistant, model),
+              ...this.getCustomParameters(assistant)
+            },
+            {
+              signal
+            }
+          )
+        await processStream(newStream, idx + 1)
+      }

       onChunk({
         text: delta?.content || '',
         reasoning_content: delta?.reasoning_content || delta?.reasoning || '',
@@ -496,10 +629,7 @@
         mcpToolResponse: toolResponses
       })
     }

-    await processToolUses(content, idx)
   }

   const stream = await this.sdk.chat.completions
     // @ts-ignore key is not typed
     .create(
@@ -511,7 +641,7 @@
       max_tokens: maxTokens,
       keep_alive: this.keepAliveTime,
       stream: isSupportStreamOutput(),
-      // tools: tools,
+      tools: tools,
       ...getOpenAIWebSearchParams(assistant, model),
       ...this.getReasoningEffort(assistant, model),
       ...this.getProviderSpecificParameters(assistant, model),
```
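The replay step above answers each accumulated call with a `role: 'tool'` message before re-issuing the request. Note the compatibility fork: GPT models and DashScope Qwen receive structured content parts, everything else receives the same parts serialized to a JSON string. A reduced sketch of that branch (shapes are simplified stand-ins, not the SDK types):

```typescript
// Reduced sketch of the tool-result branch above; shapes are stand-ins.
type ToolMessage = { role: 'tool'; tool_call_id: string; content: unknown }

function toolResultMessage(
  toolCallId: string,
  parts: Array<{ type: string; text?: string }>,
  modelName: string,
  provider?: string
): ToolMessage {
  const structured =
    modelName.toLowerCase().includes('gpt') ||
    (provider === 'dashscope' && modelName.toLowerCase().includes('qwen'))
  return {
    role: 'tool',
    tool_call_id: toolCallId,
    // Structured parts for backends known to accept them, JSON text otherwise.
    content: structured ? parts : JSON.stringify(parts)
  }
}
```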
fetchChatCompletion — a debug log is enabled:

```diff
@@ -250,7 +250,7 @@ export async function fetchChatCompletion({
         }
       }
     }
-    // console.log('message', message)
+    console.log('message', message)
   } catch (error: any) {
     if (isAbortError(error)) {
       message.status = 'paused'
```
Store migration (migrateConfig) — subscription sources are initialized:

```diff
@@ -1198,6 +1198,13 @@ const migrateConfig = {
     addWebSearchProvider(state, 'local-google')
     addWebSearchProvider(state, 'local-bing')
     addWebSearchProvider(state, 'local-baidu')

+    if (state.websearch) {
+      if (isEmpty(state.websearch.subscribeSources)) {
+        state.websearch.subscribeSources = []
+      }
+    }
+
     const qiniuProvider = state.llm.providers.find((provider) => provider.id === 'qiniu')
     if (qiniuProvider && isEmpty(qiniuProvider.models)) {
       qiniuProvider.models = SYSTEM_MODELS.qiniu
```
WebSearchState — subscription sources added (comments translated from Chinese):

```diff
@@ -18,6 +18,7 @@ export interface WebSearchState {
   maxResults: number
   // List of domains to exclude
   excludeDomains: string[]
+  // List of subscription sources
+  subscribeSources: SubscribeSource[]
   // Whether enhanced search mode is enabled
   enhanceMode: boolean
```
filterResultWithBlacklist — defensive optional chaining (comment translated from Chinese):

```diff
@@ -206,8 +206,9 @@ export async function filterResultWithBlacklist(
   websearch: WebSearchState
 ): Promise<WebSearchResponse> {
+  console.log('filterResultWithBlacklist', response)

   // Return the original results when there are no results or no blacklist rules
-  if (!response.results?.length || (!websearch.excludeDomains.length && !websearch.subscribeSources.length)) {
+  if (!response.results?.length || (!websearch?.excludeDomains?.length && !websearch?.subscribeSources?.length)) {
     return response
   }
```
MCP tool utilities — an unused import is pruned:

```diff
@@ -21,7 +21,7 @@ import { addMCPServer } from '@renderer/store/mcp'
 import { MCPServer, MCPTool, MCPToolResponse } from '@renderer/types'
 import { ChatCompletionMessageToolCall, ChatCompletionTool } from 'openai/resources'

-import { ChunkCallbackData, CompletionsParams } from '../providers/AiProvider'
+import { ChunkCallbackData } from '../providers/AiProvider'

 const ensureValidSchema = (obj: Record<string, any>): FunctionDeclarationSchemaProperty => {
   // Filter out unsupported keys for Gemini
```
MCP tool utilities — the XML tool-use parser and executor are removed:

```diff
@@ -375,87 +375,3 @@ export function getMcpServerByTool(tool: MCPTool) {
   const servers = store.getState().mcp.servers
   return servers.find((s) => s.id === tool.serverId)
 }
-
-export function parseToolUse(content: string, mcpTools: MCPTool[]): MCPToolResponse[] {
-  if (!content || !mcpTools || mcpTools.length === 0) {
-    return []
-  }
-  const toolUsePattern =
-    /<tool_use>([\s\S]*?)<name>([\s\S]*?)<\/name>([\s\S]*?)<arguments>([\s\S]*?)<\/arguments>([\s\S]*?)<\/tool_use>/g
-  const tools: MCPToolResponse[] = []
-  let match
-  let idx = 0
-  // Find all tool use blocks
-  while ((match = toolUsePattern.exec(content)) !== null) {
-    // const fullMatch = match[0]
-    const toolName = match[2].trim()
-    const toolArgs = match[4].trim()
-
-    // Try to parse the arguments as JSON
-    let parsedArgs
-    try {
-      parsedArgs = JSON.parse(toolArgs)
-    } catch (error) {
-      // If parsing fails, use the string as is
-      parsedArgs = toolArgs
-    }
-    // console.log(`Parsed arguments for tool "${toolName}":`, parsedArgs)
-    const mcpTool = mcpTools.find((tool) => tool.id === toolName)
-    if (!mcpTool) {
-      console.error(`Tool "${toolName}" not found in MCP tools`)
-      continue
-    }
-
-    // Add to tools array
-    tools.push({
-      id: `${toolName}-${idx++}`, // Unique ID for each tool use
-      tool: {
-        ...mcpTool,
-        inputSchema: parsedArgs
-      },
-      status: 'pending'
-    })
-
-    // Remove the tool use block from the content
-    // content = content.replace(fullMatch, '')
-  }
-  return tools
-}
-
-export async function parseAndCallTools(
-  content: string,
-  toolResponses: MCPToolResponse[],
-  onChunk: CompletionsParams['onChunk'],
-  idx: number,
-  mcpTools?: MCPTool[]
-): Promise<string[]> {
-  const toolResults: string[] = []
-  // process tool use
-  const tools = parseToolUse(content, mcpTools || [])
-  if (!tools || tools.length === 0) {
-    return toolResults
-  }
-  for (let i = 0; i < tools.length; i++) {
-    const tool = tools[i]
-    upsertMCPToolResponse(toolResponses, { id: `${tool.id}-${idx}-${i}`, tool: tool.tool, status: 'invoking' }, onChunk)
-  }
-
-  const toolPromises = tools.map(async (tool, i) => {
-    const toolCallResponse = await callMCPTool(tool.tool)
-    const result = `
-<tool_use_result>
-  <name>${tool.id}</name>
-  <result>${JSON.stringify(toolCallResponse)}</result>
-</tool_use_result>
-`.trim()
-    upsertMCPToolResponse(
-      toolResponses,
-      { id: `${tool.id}-${idx}-${i}`, tool: tool.tool, status: 'done', response: toolCallResponse },
-      onChunk
-    )
-    return result
-  })
-
-  toolResults.push(...(await Promise.all(toolPromises)))
-  return toolResults
-}
```
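The removed parser is still a useful reference for prompt-based tool use: it pulled `<name>`/`<arguments>` pairs out of `<tool_use>` blocks with a single global regex and fell back to the raw string when the arguments were not valid JSON. Its contract in miniature (self-contained, with a hypothetical reply string):

```typescript
// Miniature round trip for the removed XML protocol.
const reply = [
  'Let me check.',
  '<tool_use>',
  '  <name>search</name>',
  '  <arguments>{"query": "Population Shanghai"}</arguments>',
  '</tool_use>'
].join('\n')

const pattern =
  /<tool_use>([\s\S]*?)<name>([\s\S]*?)<\/name>([\s\S]*?)<arguments>([\s\S]*?)<\/arguments>([\s\S]*?)<\/tool_use>/g

for (const m of reply.matchAll(pattern)) {
  // -> "search" { query: "Population Shanghai" }
  console.log(m[2].trim(), JSON.parse(m[4].trim()))
}
```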
System prompt templates (prompt.ts) — the whole file is deleted:

```diff
@@ -1,158 +0,0 @@
-import { MCPTool } from '@renderer/types'
-
-export const SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \
-You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
-
-## Tool Use Formatting
-
-Tool use is formatted using XML-style tags. The tool name is enclosed in opening and closing tags, and each parameter is similarly enclosed within its own set of tags. Here's the structure:
-
-<tool_use>
-  <name>{tool_name}</name>
-  <arguments>{json_arguments}</arguments>
-</tool_use>
-
-The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
-<tool_use>
-  <name>python_interpreter</name>
-  <arguments>{"code": "5 + 3 + 1294.678"}</arguments>
-</tool_use>
-
-The user will respond with the result of the tool use, which should be formatted as follows:
-
-<tool_use_result>
-  <name>{tool_name}</name>
-  <result>{result}</result>
-</tool_use_result>
-
-The result should be a string, which can represent a file or any other output type. You can use this result as input for the next action.
-For example, if the result of the tool use is an image file, you can use it in the next action like this:
-
-<tool_use>
-  <name>image_transformer</name>
-  <arguments>{"image": "image_1.jpg"}</arguments>
-</tool_use>
-
-Always adhere to this format for the tool use to ensure proper parsing and execution.
-
-## Tool Use Examples
-{{ TOOL_USE_EXAMPLES }}
-
-## Tool Use Available Tools
-Above example were using notional tools that might not exist for you. You only have access to these tools:
-{{ AVAILABLE_TOOLS }}
-
-## Tool Use Rules
-Here are the rules you should always follow to solve your task:
-1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
-2. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself.
-3. If no tool call is needed, just answer the question directly.
-4. Never re-do a tool call that you previously did with the exact same parameters.
-5. For tool use, MARK SURE use XML tag format as shown in the examples above. Do not use any other format.
-
-# User Instructions
-{{ USER_SYSTEM_PROMPT }}
-
-Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
-`
-
-export const ToolUseExamples = `
-Here are a few examples using notional tools:
----
-User: Generate an image of the oldest person in this document.
-
-Assistant: I can use the document_qa tool to find out who the oldest person is in the document.
-<tool_use>
-  <name>document_qa</name>
-  <arguments>{"document": "document.pdf", "question": "Who is the oldest person mentioned?"}</arguments>
-</tool_use>
-
-User: <tool_use_result>
-  <name>document_qa</name>
-  <result>John Doe, a 55 year old lumberjack living in Newfoundland.</result>
-</tool_use_result>
-
-Assistant: I can use the image_generator tool to create a portrait of John Doe.
-<tool_use>
-  <name>image_generator</name>
-  <arguments>{"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}</arguments>
-</tool_use>
-
-User: <tool_use_result>
-  <name>image_generator</name>
-  <result>image.png</result>
-</tool_use_result>
-
-Assistant: the image is generated as image.png
-
----
-User: "What is the result of the following operation: 5 + 3 + 1294.678?"
-
-Assistant: I can use the python_interpreter tool to calculate the result of the operation.
-<tool_use>
-  <name>python_interpreter</name>
-  <arguments>{"code": "5 + 3 + 1294.678"}</arguments>
-</tool_use>
-
-User: <tool_use_result>
-  <name>python_interpreter</name>
-  <result>1302.678</result>
-</tool_use_result>
-
-Assistant: The result of the operation is 1302.678.
-
----
-User: "Which city has the highest population , Guangzhou or Shanghai?"
-
-Assistant: I can use the search tool to find the population of Guangzhou.
-<tool_use>
-  <name>search</name>
-  <arguments>{"query": "Population Guangzhou"}</arguments>
-</tool_use>
-
-User: <tool_use_result>
-  <name>search</name>
-  <result>Guangzhou has a population of 15 million inhabitants as of 2021.</result>
-</tool_use_result>
-
-Assistant: I can use the search tool to find the population of Shanghai.
-<tool_use>
-  <name>search</name>
-  <arguments>{"query": "Population Shanghai"}</arguments>
-</tool_use>
-
-User: <tool_use_result>
-  <name>search</name>
-  <result>26 million (2019)</result>
-</tool_use_result>
-Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
-`
-
-export const AvailableTools = (tools: MCPTool[]) => {
-  const availableTools = tools
-    .map((tool) => {
-      return `
-  <tool>
-    <name>${tool.id}</name>
-    <description>${tool.description}</description>
-    <arguments>
-      ${tool.inputSchema ? JSON.stringify(tool.inputSchema) : ''}
-    </arguments>
-  </tool>
-`
-    })
-    .join('\n')
-  return `<tools>
-${availableTools}
-</tools>`
-}
-
-export const buildSystemPrompt = (userSystemPrompt: string, tools: MCPTool[]): string => {
-  if (tools && tools.length > 0) {
-    return SYSTEM_PROMPT.replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt)
-      .replace('{{ TOOL_USE_EXAMPLES }}', ToolUseExamples)
-      .replace('{{ AVAILABLE_TOOLS }}', AvailableTools(tools))
-  }
-
-  return userSystemPrompt
-}
```
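For the record, the deleted builder was a straight template substitution; a minimal usage sketch (the tool value is a made-up example, and `buildSystemPrompt` is the function removed above):

```typescript
// Hypothetical MCPTool-like value, for illustration only.
const searchTool = {
  id: 'search',
  description: 'Web search',
  inputSchema: { type: 'object', properties: { query: { type: 'string' } } }
}

// Before this release, providers inlined the tool protocol into the system
// prompt and then parsed <tool_use> blocks back out of the completion text:
//   const system = buildSystemPrompt(assistant.prompt || '', [searchTool])
// After it, the provider SDKs carry tools natively and this file is gone.
```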