Compare commits

...

25 Commits

Author SHA1 Message Date
kangfenmao
117069e450 chore(version): 0.3.0 2024-07-21 22:03:49 +08:00
kangfenmao
c5965dc696 fix: assistant settings bugs 2024-07-21 21:57:08 +08:00
kangfenmao
4169a2ef35 feat: add assistant model temperature maxTokens contextCount 2024-07-21 17:50:50 +08:00
kangfenmao
75c37632d4 feat: change default assistant name
# Conflicts:
#	src/renderer/src/i18n/index.ts
2024-07-21 10:51:33 +08:00
亢奋猫
3f5c151a11 Update README.md 2024-07-20 15:10:49 +08:00
kangfenmao
d049e36c46 0.2.9 2024-07-20 12:47:29 +08:00
kangfenmao
d05fc1c9be chore(version): v0.2.9 2024-07-20 12:47:19 +08:00
kangfenmao
f33317a3fb fix: send message setting position 2024-07-20 11:34:52 +08:00
kangfenmao
f2b5ed09c0 feat(provider): add AiHubMix provider 2024-07-20 11:29:24 +08:00
kangfenmao
81e66dde0e 0.2.8 2024-07-20 00:57:02 +08:00
kangfenmao
f76388d979 chore(version): v0.2.8 2024-07-20 00:56:52 +08:00
kangfenmao
9e542f813c feat: add custom llm provider 2024-07-20 00:50:46 +08:00
kangfenmao
5ede95cf2e 0.2.7 2024-07-19 15:57:16 +08:00
kangfenmao
fd8b15ebbe chore(version): 0.2.7 2024-07-19 15:52:52 +08:00
kangfenmao
5a636e7614 refactor: ProviderSDK 2024-07-19 15:49:08 +08:00
kangfenmao
13c73a3de1 fix: use activeAssistant's id for fetching assistant and model data 2024-07-19 15:39:49 +08:00
kangfenmao
31284a6e23 feat: add anthropic provider 2024-07-19 15:34:34 +08:00
kangfenmao
c4394b925d feat(settings/components): introduce password input for API key to enhance security 2024-07-19 13:38:44 +08:00
kangfenmao
93a5739d87 feat(assistants.json): Introduce new assistants for translation and summarization 2024-07-19 13:34:21 +08:00
kangfenmao
f23c4a0afa feat: add DashScope provider 2024-07-19 12:28:00 +08:00
kangfenmao
8723c251b1 Update Website 2024-07-19 09:38:29 +08:00
亢奋猫
a9634fd684 Update README.md 2024-07-18 16:46:33 +08:00
亢奋猫
53757626f2 Update README.md 2024-07-18 16:41:09 +08:00
kangfenmao
83af70e460 feat(website): cherry studio -> cherry ai 2024-07-18 13:02:46 +08:00
亢奋猫
3377aae0ff Update README.md 2024-07-17 23:08:13 +08:00
49 changed files with 1474 additions and 444 deletions

View File

@@ -1,12 +1,14 @@
# Cherry Studio
Cherry Studio is a desktop client for multiple cutting-edge LLM models, available on Windows, Mac and Linux.
🍒 Cherry Studio is a desktop client that supports multiple Large Language Model (LLM) providers, available on Windows, Mac and Linux.
# Screenshot
![image.png](https://s2.loli.net/2024/07/16/IAVSOorsfFQyGhM.png)
![image](https://github.com/user-attachments/assets/1763dc38-bece-4d24-9c21-ed82f6142694)
![image.png](https://s2.loli.net/2024/07/16/IQPz12OajfNoBTV.png)
![](https://github.com/user-attachments/assets/18c10eed-4711-4975-bf9c-b274c61924f3)
![](https://github.com/user-attachments/assets/7395ebf2-64f8-46fa-aa48-63293516c320)
# Feature

View File

@@ -56,5 +56,6 @@ electronDownload:
afterSign: scripts/notarize.js
releaseInfo:
releaseNotes: |
🆕 修复百川 API KEY 网址没有显示问题
📢 新的智能体中心
支持设置模型 Temperature 参数
支持设置上下文数量
输入框增加 Token 消耗预估

View File

@@ -1,6 +1,6 @@
{
"name": "cherry-studio",
"version": "0.2.6",
"version": "0.3.0",
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
"author": "kangfenmao@qq.com",
@@ -29,6 +29,7 @@
"electron-window-state": "^5.0.3"
},
"devDependencies": {
"@anthropic-ai/sdk": "^0.24.3",
"@electron-toolkit/eslint-config-prettier": "^2.0.0",
"@electron-toolkit/eslint-config-ts": "^1.0.1",
"@electron-toolkit/tsconfig": "^1.0.1",
@@ -55,6 +56,7 @@
"eslint-plugin-react": "^7.34.3",
"eslint-plugin-react-hooks": "^4.6.2",
"eslint-plugin-unused-imports": "^4.0.0",
"gpt-tokens": "^1.3.6",
"i18next": "^23.11.5",
"localforage": "^1.10.0",
"lodash": "^4.17.21",

View File

@@ -30,7 +30,8 @@ function createWindow() {
webPreferences: {
preload: join(__dirname, '../preload/index.js'),
sandbox: false,
devTools: !app.isPackaged
devTools: !app.isPackaged,
webSecurity: false
}
})

View File

@@ -1,5 +1,24 @@
# CHANGES LOG
### v0.3.0 - 2024-07-21
- Supports setting the model Temperature parameter
- Support for setting the number of contexts
- Token consumption estimation added to the input box
### v0.2.9 - 2024-07-20
- 📢 Add AiHubMix provider
### v0.2.8 - 2024-07-20
- 🆕 Feature: Add customized service providers
### v0.2.7 - 2024-07-19
- 📢 Add DashScope Provider
- 📢 Add Anthropic Provider
### v0.2.6 - 2024-07-17
- 🆕 Fixed the issue of the BaiChuan API KEY not displaying when clicking to obtain the URL

View File

@@ -1,5 +1,24 @@
# 更新日志
### v0.3.0 - 2024-07-21
- 支持设置模型 Temperature 参数
- 支持设置上下文数量
- 输入框增加 Token 消耗预估
### v0.2.9 - 2024-07-20
- 📢 新增 AiHubMix 服务提供商
### v0.2.8 - 2024-07-20
- 🆕 新功能: 可以添加自定义服务提供商了
### v0.2.7 - 2024-07-19
- 📢 新增阿里云灵积服务商
- 📢 新增 Anthropic 服务商
### v0.2.6 - 2024-07-17
- 🆕 修复百川 API KEY 点击获取网址没有显示问题

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.6 KiB

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

View File

@@ -38,7 +38,7 @@
--topic-list-width: 250px;
--settings-width: var(--assistants-width);
--status-bar-height: 40px;
--input-bar-height: 120px;
--input-bar-height: 125px;
}
*,

View File

@@ -61,7 +61,11 @@ const TopViewContainer: React.FC<Props> = ({ children }) => {
<div style={{ display: 'flex', flex: 1, position: 'absolute', width: '100%', height: '100%' }}>
<div style={{ position: 'absolute', width: '100%', height: '100%' }} onClick={onPop} />
{elements.map(({ element: Element, key }) =>
typeof Element === 'function' ? <Element key={`TOPVIEW_${key}`} /> : Element
typeof Element === 'function' ? (
<Element key={`TOPVIEW_${key}`} />
) : (
<div key={`TOPVIEW_${key}`}>{Element}</div>
)
)}
</div>
)}

View File

@@ -39,18 +39,20 @@ const NavbarLeftContainer = styled.div`
display: flex;
flex-direction: row;
align-items: center;
font-size: 14px;
font-weight: bold;
color: var(--color-text-1);
`
const NavbarCenterContainer = styled.div`
flex: 1;
display: flex;
align-items: center;
border-right: 1px solid var(--color-border);
padding: 0 20px;
font-size: 14px;
font-weight: bold;
color: var(--color-text-1);
text-align: center;
border-right: 1px solid var(--color-border);
padding: 0 20px;
`
const NavbarRightContainer = styled.div`

View File

@@ -44,14 +44,13 @@ const Container = styled.div`
display: flex;
flex-direction: column;
align-items: center;
padding: 12px 0;
padding: 8px 0;
min-width: var(--sidebar-width);
min-height: 100%;
-webkit-app-region: drag !important;
background-color: #1f1f1f;
border-right: 0.5px solid var(--color-border);
margin-top: var(--navbar-height);
padding-bottom: calc(var(--navbar-height) + 6px);
padding-top: var(--navbar-height);
`
const AvatarImg = styled.img`
@@ -60,6 +59,7 @@ const AvatarImg = styled.img`
height: 28px;
background-color: var(--color-background-soft);
margin: 5px 0;
margin-top: 12px;
`
const MainMenus = styled.div`
display: flex;

View File

@@ -209,7 +209,7 @@
},
{
"id": 27,
"name": "🇨🇳 翻译成中文",
"name": "🇨🇳 翻译成中文 - Chinese",
"emoji": "🇨🇳",
"group": "语言",
"prompt": "你是一个好用的翻译助手。请将我的英文翻译成中文,将所有非中文的翻译成中文。我发给你所有的话都是需要翻译的内容,你只需要回答翻译结果。翻译结果请符合中文的语言习惯。",
@@ -217,7 +217,7 @@
},
{
"id": 28,
"name": "🌐 翻译成英文",
"name": "🌐 翻译成英文 - English",
"emoji": "🌐",
"group": "语言",
"prompt": "你是一个好用的翻译助手。请将我的中文翻译成英文,将所有非中文的翻译成英文。我发给你所有的话都是需要翻译的内容,你只需要回答翻译结果。翻译结果请符合英文的语言习惯。",
@@ -233,7 +233,7 @@
},
{
"id": 30,
"name": "📖 文章总结",
"name": "📖 文章总结 - Summarize",
"emoji": "📖",
"group": "阅读",
"prompt": "总结下面的文章,给出总结、摘要、观点三个部分内容,其中观点部分要使用列表列出,使用 Markdown 回复",

View File

@@ -0,0 +1,2 @@
export const DEFAULT_TEMPERATURE = 0.7
export const DEFAULT_CONEXTCOUNT = 5

View File

@@ -1,6 +1,6 @@
import { Model } from '@renderer/types'
type SystemModel = Model & { defaultEnabled: boolean }
type SystemModel = Model & { enabled: boolean }
export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
openai: [
@@ -9,28 +9,28 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'openai',
name: 'GPT-3.5 Turbo',
group: 'GPT 3.5',
defaultEnabled: true
enabled: true
},
{
id: 'gpt-4-turbo',
provider: 'openai',
name: ' GPT-4 Turbo',
group: 'GPT 4',
defaultEnabled: true
enabled: true
},
{
id: 'gpt-4',
provider: 'openai',
name: ' GPT-4',
group: 'GPT 4',
defaultEnabled: true
enabled: true
},
{
id: 'gpt-4o',
provider: 'openai',
name: ' GPT-4o',
group: 'GPT 4o',
defaultEnabled: true
enabled: true
}
],
silicon: [
@@ -39,112 +39,112 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'silicon',
name: 'Qwen2-7B-Instruct',
group: 'Qwen2',
defaultEnabled: true
enabled: true
},
{
id: 'Qwen/Qwen2-1.5B-Instruct',
provider: 'silicon',
name: 'Qwen2-1.5B-Instruct',
group: 'Qwen2',
defaultEnabled: false
enabled: false
},
{
id: 'Qwen/Qwen1.5-7B-Chat',
provider: 'silicon',
name: 'Qwen1.5-7B-Chat',
group: 'Qwen1.5',
defaultEnabled: false
enabled: false
},
{
id: 'Qwen/Qwen2-72B-Instruct',
provider: 'silicon',
name: 'Qwen2-72B-Instruct',
group: 'Qwen2',
defaultEnabled: true
enabled: true
},
{
id: 'Qwen/Qwen2-57B-A14B-Instruct',
provider: 'silicon',
name: 'Qwen2-57B-A14B-Instruct',
group: 'Qwen2',
defaultEnabled: false
enabled: false
},
{
id: 'Qwen/Qwen1.5-110B-Chat',
provider: 'silicon',
name: 'Qwen1.5-110B-Chat',
group: 'Qwen1.5',
defaultEnabled: false
enabled: false
},
{
id: 'Qwen/Qwen1.5-32B-Chat',
provider: 'silicon',
name: 'Qwen1.5-32B-Chat',
group: 'Qwen1.5',
defaultEnabled: false
enabled: false
},
{
id: 'Qwen/Qwen1.5-14B-Chat',
provider: 'silicon',
name: 'Qwen1.5-14B-Chat',
group: 'Qwen1.5',
defaultEnabled: false
enabled: false
},
{
id: 'deepseek-ai/DeepSeek-V2-Chat',
provider: 'silicon',
name: 'DeepSeek-V2-Chat',
group: 'DeepSeek',
defaultEnabled: false
enabled: false
},
{
id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
provider: 'silicon',
name: 'DeepSeek-Coder-V2-Instruct',
group: 'DeepSeek',
defaultEnabled: false
enabled: false
},
{
id: 'deepseek-ai/deepseek-llm-67b-chat',
provider: 'silicon',
name: 'Deepseek-LLM-67B-Chat',
group: 'DeepSeek',
defaultEnabled: false
enabled: false
},
{
id: 'THUDM/glm-4-9b-chat',
provider: 'silicon',
name: 'GLM-4-9B-Chat',
group: 'GLM',
defaultEnabled: true
enabled: true
},
{
id: 'THUDM/chatglm3-6b',
provider: 'silicon',
name: 'GhatGLM3-6B',
group: 'GLM',
defaultEnabled: false
enabled: false
},
{
id: '01-ai/Yi-1.5-9B-Chat-16K',
provider: 'silicon',
name: 'Yi-1.5-9B-Chat-16K',
group: 'Yi',
defaultEnabled: false
enabled: false
},
{
id: '01-ai/Yi-1.5-6B-Chat',
provider: 'silicon',
name: 'Yi-1.5-6B-Chat',
group: 'Yi',
defaultEnabled: false
enabled: false
},
{
id: '01-ai/Yi-1.5-34B-Chat-16K',
provider: 'silicon',
name: 'Yi-1.5-34B-Chat-16K',
group: 'Yi',
defaultEnabled: false
enabled: false
}
],
deepseek: [
@@ -153,14 +153,14 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'deepseek',
name: 'DeepSeek Chat',
group: 'DeepSeek Chat',
defaultEnabled: true
enabled: true
},
{
id: 'deepseek-coder',
provider: 'deepseek',
name: 'DeepSeek Coder',
group: 'DeepSeek Coder',
defaultEnabled: true
enabled: true
}
],
yi: [
@@ -169,42 +169,42 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'yi',
name: 'Yi-Large',
group: 'Yi',
defaultEnabled: false
enabled: false
},
{
id: 'yi-large-turbo',
provider: 'yi',
name: 'Yi-Large-Turbo',
group: 'Yi',
defaultEnabled: true
enabled: true
},
{
id: 'yi-large-rag',
provider: 'yi',
name: 'Yi-Large-Rag',
group: 'Yi',
defaultEnabled: false
enabled: false
},
{
id: 'yi-medium',
provider: 'yi',
name: 'Yi-Medium',
group: 'Yi',
defaultEnabled: true
enabled: true
},
{
id: 'yi-medium-200k',
provider: 'yi',
name: 'Yi-Medium-200k',
group: 'Yi',
defaultEnabled: false
enabled: false
},
{
id: 'yi-spark',
provider: 'yi',
name: 'Yi-Spark',
group: 'Yi',
defaultEnabled: false
enabled: false
}
],
zhipu: [
@@ -213,42 +213,42 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'zhipu',
name: 'GLM-4-0520',
group: 'GLM',
defaultEnabled: true
enabled: true
},
{
id: 'glm-4',
provider: 'zhipu',
name: 'GLM-4',
group: 'GLM',
defaultEnabled: false
enabled: false
},
{
id: 'glm-4-airx',
provider: 'zhipu',
name: 'GLM-4-AirX',
group: 'GLM',
defaultEnabled: false
enabled: false
},
{
id: 'glm-4-air',
provider: 'zhipu',
name: 'GLM-4-Air',
group: 'GLM',
defaultEnabled: true
enabled: true
},
{
id: 'glm-4v',
provider: 'zhipu',
name: 'GLM-4V',
group: 'GLM',
defaultEnabled: false
enabled: false
},
{
id: 'glm-4-alltools',
provider: 'zhipu',
name: 'GLM-4-AllTools',
group: 'GLM',
defaultEnabled: false
enabled: false
}
],
moonshot: [
@@ -257,88 +257,21 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'moonshot',
name: 'Moonshot V1 8k',
group: 'Moonshot V1',
defaultEnabled: true
enabled: true
},
{
id: 'moonshot-v1-32k',
provider: 'moonshot',
name: 'Moonshot V1 32k',
group: 'Moonshot V1',
defaultEnabled: true
enabled: true
},
{
id: 'moonshot-v1-128k',
provider: 'moonshot',
name: 'Moonshot V1 128k',
group: 'Moonshot V1',
defaultEnabled: true
}
],
openrouter: [
{
id: 'google/gemma-2-9b-it:free',
provider: 'openrouter',
name: 'Google: Gemma 2 9B',
group: 'Gemma',
defaultEnabled: true
},
{
id: 'microsoft/phi-3-mini-128k-instruct:free',
provider: 'openrouter',
name: 'Phi-3 Mini 128K Instruct',
group: 'Phi',
defaultEnabled: true
},
{
id: 'microsoft/phi-3-medium-128k-instruct:free',
provider: 'openrouter',
name: 'Phi-3 Medium 128K Instruct',
group: 'Phi',
defaultEnabled: true
},
{
id: 'meta-llama/llama-3-8b-instruct:free',
provider: 'openrouter',
name: 'Meta: Llama 3 8B Instruct',
group: 'Llama3',
defaultEnabled: true
},
{
id: 'mistralai/mistral-7b-instruct:free',
provider: 'openrouter',
name: 'Mistral: Mistral 7B Instruct',
group: 'Mistral',
defaultEnabled: true
}
],
groq: [
{
id: 'llama3-8b-8192',
provider: 'groq',
name: 'LLaMA3 8B',
group: 'Llama3',
defaultEnabled: false
},
{
id: 'llama3-70b-8192',
provider: 'groq',
name: 'LLaMA3 70B',
group: 'Llama3',
defaultEnabled: true
},
{
id: 'mixtral-8x7b-32768',
provider: 'groq',
name: 'Mixtral 8x7B',
group: 'Mixtral',
defaultEnabled: false
},
{
id: 'gemma-7b-it',
provider: 'groq',
name: 'Gemma 7B',
group: 'Gemma',
defaultEnabled: false
enabled: true
}
],
baichuan: [
@@ -347,21 +280,157 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
provider: 'baichuan',
name: 'Baichuan4',
group: 'Baichuan4',
defaultEnabled: true
enabled: true
},
{
id: 'Baichuan3-Turbo',
provider: 'baichuan',
name: 'Baichuan3 Turbo',
group: 'Baichuan3',
defaultEnabled: true
enabled: true
},
{
id: 'Baichuan3-Turbo-128k',
provider: 'baichuan',
name: 'Baichuan3 Turbo 128k',
group: 'Baichuan3',
defaultEnabled: true
enabled: true
}
],
dashscope: [
{
id: 'qwen-turbo',
provider: 'dashscope',
name: 'Qwen Turbo',
group: 'Qwen',
enabled: true
},
{
id: 'qwen-plus',
provider: 'dashscope',
name: 'Qwen Plus',
group: 'Qwen',
enabled: true
},
{
id: 'qwen-max',
provider: 'dashscope',
name: 'Qwen Max',
group: 'Qwen',
enabled: true
}
],
aihubmix: [
{
id: 'gpt-4o-mini',
provider: 'aihubmix',
name: 'GPT-4o Mini',
group: 'GPT-4o',
enabled: true
},
{
id: 'aihubmix-Llama-3-70B-Instruct',
provider: 'aihubmix',
name: 'Llama 3 70B Instruct',
group: 'Llama3',
enabled: true
}
],
openrouter: [
{
id: 'google/gemma-2-9b-it:free',
provider: 'openrouter',
name: 'Google: Gemma 2 9B',
group: 'Gemma',
enabled: true
},
{
id: 'microsoft/phi-3-mini-128k-instruct:free',
provider: 'openrouter',
name: 'Phi-3 Mini 128K Instruct',
group: 'Phi',
enabled: true
},
{
id: 'microsoft/phi-3-medium-128k-instruct:free',
provider: 'openrouter',
name: 'Phi-3 Medium 128K Instruct',
group: 'Phi',
enabled: true
},
{
id: 'meta-llama/llama-3-8b-instruct:free',
provider: 'openrouter',
name: 'Meta: Llama 3 8B Instruct',
group: 'Llama3',
enabled: true
},
{
id: 'mistralai/mistral-7b-instruct:free',
provider: 'openrouter',
name: 'Mistral: Mistral 7B Instruct',
group: 'Mistral',
enabled: true
}
],
groq: [
{
id: 'llama3-8b-8192',
provider: 'groq',
name: 'LLaMA3 8B',
group: 'Llama3',
enabled: false
},
{
id: 'llama3-70b-8192',
provider: 'groq',
name: 'LLaMA3 70B',
group: 'Llama3',
enabled: true
},
{
id: 'mixtral-8x7b-32768',
provider: 'groq',
name: 'Mixtral 8x7B',
group: 'Mixtral',
enabled: false
},
{
id: 'gemma-7b-it',
provider: 'groq',
name: 'Gemma 7B',
group: 'Gemma',
enabled: false
}
],
anthropic: [
{
id: 'claude-3-5-sonnet-20240620',
provider: 'anthropic',
name: 'Claude 3.5 Sonnet',
group: 'Claude 3.5',
enabled: true
},
{
id: 'claude-3-opus-20240229',
provider: 'anthropic',
name: 'Claude 3 Opus',
group: 'Claude 3',
enabled: true
},
{
id: 'claude-3-sonnet-20240229',
provider: 'anthropic',
name: 'Claude 3 Sonnet',
group: 'Claude 3',
enabled: true
},
{
id: 'claude-3-haiku-20240307',
provider: 'anthropic',
name: 'Claude 3 Haiku',
group: 'Claude 3',
enabled: true
}
]
}

View File

@@ -1,3 +1,88 @@
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.jpeg'
import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
import YiProviderLogo from '@renderer/assets/images/providers/yi.svg'
import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.jpeg'
import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
import BaichuanProviderLogo from '@renderer/assets/images/providers/baichuan.png'
import DashScopeProviderLogo from '@renderer/assets/images/providers/dashscope.png'
import AnthropicProviderLogo from '@renderer/assets/images/providers/anthropic.jpeg'
import AiHubMixProviderLogo from '@renderer/assets/images/providers/aihubmix.jpg'
import ChatGPTModelLogo from '@renderer/assets/images/models/chatgpt.jpeg'
import ChatGLMModelLogo from '@renderer/assets/images/models/chatglm.jpeg'
import DeepSeekModelLogo from '@renderer/assets/images/models/deepseek.png'
import GemmaModelLogo from '@renderer/assets/images/models/gemma.jpeg'
import QwenModelLogo from '@renderer/assets/images/models/qwen.jpeg'
import YiModelLogo from '@renderer/assets/images/models/yi.svg'
import LlamaModelLogo from '@renderer/assets/images/models/llama.jpeg'
import MixtralModelLogo from '@renderer/assets/images/models/mixtral.jpeg'
import MoonshotModelLogo from '@renderer/assets/images/providers/moonshot.jpeg'
import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
import BaichuanModelLogo from '@renderer/assets/images/models/baichuan.png'
import ClaudeModelLogo from '@renderer/assets/images/models/claude.png'
export function getProviderLogo(providerId: string) {
switch (providerId) {
case 'openai':
return OpenAiProviderLogo
case 'silicon':
return SiliconFlowProviderLogo
case 'deepseek':
return DeepSeekProviderLogo
case 'yi':
return YiProviderLogo
case 'groq':
return GroqProviderLogo
case 'zhipu':
return ZhipuProviderLogo
case 'ollama':
return OllamaProviderLogo
case 'moonshot':
return MoonshotProviderLogo
case 'openrouter':
return OpenRouterProviderLogo
case 'baichuan':
return BaichuanProviderLogo
case 'dashscope':
return DashScopeProviderLogo
case 'anthropic':
return AnthropicProviderLogo
case 'aihubmix':
return AiHubMixProviderLogo
default:
return undefined
}
}
export function getModelLogo(modelId: string) {
const logoMap = {
gpt: ChatGPTModelLogo,
glm: ChatGLMModelLogo,
deepseek: DeepSeekModelLogo,
qwen: QwenModelLogo,
gemma: GemmaModelLogo,
'yi-': YiModelLogo,
llama: LlamaModelLogo,
mixtral: MixtralModelLogo,
mistral: MixtralModelLogo,
moonshot: MoonshotModelLogo,
phi: MicrosoftModelLogo,
baichuan: BaichuanModelLogo,
claude: ClaudeModelLogo
}
for (const key in logoMap) {
if (modelId.toLowerCase().includes(key)) {
return logoMap[key]
}
}
return undefined
}
export const PROVIDER_CONFIG = {
openai: {
websites: {
@@ -47,6 +132,22 @@ export const PROVIDER_CONFIG = {
models: 'https://platform.moonshot.cn/docs/intro#%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8'
}
},
baichuan: {
websites: {
official: 'https://www.baichuan-ai.com/',
apiKey: 'https://platform.baichuan-ai.com/console/apikey',
docs: 'https://platform.baichuan-ai.com/docs',
models: 'https://platform.baichuan-ai.com/price'
}
},
dashscope: {
websites: {
official: 'https://dashscope.aliyun.com/',
apiKey: 'https://help.aliyun.com/zh/dashscope/developer-reference/acquisition-and-configuration-of-api-key',
docs: 'https://help.aliyun.com/zh/dashscope/',
models: 'https://dashscope.console.aliyun.com/model'
}
},
openrouter: {
websites: {
official: 'https://openrouter.ai/',
@@ -70,12 +171,20 @@ export const PROVIDER_CONFIG = {
models: 'https://ollama.com/library'
}
},
baichuan: {
anthropic: {
websites: {
official: 'https://www.baichuan-ai.com/',
apiKey: 'https://platform.baichuan-ai.com/console/apikey',
docs: 'https://platform.baichuan-ai.com/docs',
models: 'https://platform.baichuan-ai.com/price'
official: 'https://anthropic.com/',
apiKey: 'https://console.anthropic.com/settings/keys',
docs: 'https://docs.anthropic.com/en/docs',
models: 'https://docs.anthropic.com/en/docs/about-claude/models'
}
},
aihubmix: {
websites: {
official: 'https://aihubmix.com/',
apiKey: 'https://aihubmix.com/token',
docs: 'https://doc.aihubmix.com/',
models: 'https://aihubmix.com/models'
}
}
}

View File

@@ -3,17 +3,28 @@ import {
addModel as _addModel,
removeModel as _removeModel,
updateProvider as _updateProvider,
updateProviders as _updateProviders
updateProviders as _updateProviders,
addProvider,
removeProvider
} from '@renderer/store/llm'
import { Assistant, Model, Provider } from '@renderer/types'
import { useDefaultModel } from './useAssistant'
import { createSelector } from '@reduxjs/toolkit'
const selectEnabledProviders = createSelector(
(state) => state.llm.providers,
(providers) => providers.filter((p) => p.enabled)
)
export function useProviders() {
const providers = useAppSelector((state) => state.llm.providers.filter((p) => p.enabled))
const providers = useAppSelector(selectEnabledProviders)
const dispatch = useAppDispatch()
return {
providers,
addProvider: (provider: Provider) => dispatch(addProvider(provider)),
removeProvider: (provider: Provider) => dispatch(removeProvider(provider)),
updateProvider: (provider: Provider) => dispatch(_updateProvider(provider)),
updateProviders: (providers: Provider[]) => dispatch(_updateProviders(providers))
}
}
@@ -22,6 +33,14 @@ export function useSystemProviders() {
return useAppSelector((state) => state.llm.providers.filter((p) => p.isSystem))
}
export function useUserProviders() {
return useAppSelector((state) => state.llm.providers.filter((p) => !p.isSystem))
}
export function useAllProviders() {
return useAppSelector((state) => state.llm.providers)
}
export function useProvider(id: string) {
const provider = useAppSelector((state) => state.llm.providers.find((p) => p.id === id) as Provider)
const dispatch = useAppDispatch()

View File

@@ -44,7 +44,7 @@ const resources = {
'chat.completion.paused': 'Chat completion paused'
},
assistant: {
'default.name': 'Default Assistant',
'default.name': '😀 Default Assistant',
'default.description': "Hello, I'm Default Assistant. You can start chatting with me right away",
'default.topic.name': 'Default Topic',
'topics.title': 'Topics',
@@ -64,7 +64,17 @@ const resources = {
'input.clear.content': 'Are you sure to clear all messages?',
'input.placeholder': 'Type your message here...',
'input.send': 'Send',
'input.pause': 'Pause'
'input.pause': 'Pause',
'input.settings': 'Settings',
'input.estimated_tokens': 'Estimated tokens: ',
'settings.temperature': 'Temperature',
'settings.temperature.tip':
'Lower values make the model more creative and unpredictable, while higher values make it more deterministic and precise.',
'settings.conext_count': 'Context',
'settings.conext_count.tip': 'The number of previous messages to keep in the context.',
'settings.reset': 'Reset',
'settings.set_as_default': 'Apply to default assistant',
'settings.max': 'Max'
},
apps: {
title: 'Agents'
@@ -79,7 +89,10 @@ const resources = {
zhipu: 'ZHIPU AI',
groq: 'Groq',
ollama: 'Ollama',
baichuan: 'Baichuan'
baichuan: 'Baichuan',
dashscope: 'DashScope',
anthropic: 'Anthropic',
aihubmix: 'AiHubMix'
},
settings: {
title: 'Settings',
@@ -99,7 +112,6 @@ const resources = {
'models.default_assistant_model': 'Default Assistant Model',
'models.topic_naming_model': 'Topic Naming Model',
'models.add.add_model': 'Add Model',
'models.add.provider_name.placeholder': 'Provider Name',
'models.add.model_id.placeholder': 'Required e.g. gpt-3.5-turbo',
'models.add.model_id': 'Model ID',
'models.add.model_id.tooltip': 'Example: gpt-3.5-turbo',
@@ -110,12 +122,17 @@ const resources = {
'models.add.group_name.placeholder': 'Optional e.g. ChatGPT',
'models.empty': 'No models found',
'assistant.title': 'Default Assistant',
'assistant.model_params': 'Model Parameters',
'about.description': 'A powerful AI assistant for producer',
'about.updateNotAvailable': 'You are using the latest version',
'about.checkingUpdate': 'Checking for updates...',
'about.updateError': 'Update error',
'about.checkUpdate': 'Check Update',
'about.downloading': 'Downloading...'
'about.downloading': 'Downloading...',
'provider.delete.title': 'Delete Provider',
'provider.delete.content': 'Are you sure you want to delete this provider?',
'provider.edit.name': 'Provider Name',
'provider.edit.name.placeholder': 'Example: OpenAI'
}
}
},
@@ -156,11 +173,11 @@ const resources = {
'error.enter.api.host': '请输入您的 API 地址',
'error.enter.model': '请选择一个模型',
'api.connection.failed': '连接失败',
'api.connection.successful': '连接成功',
'api.connection.success': '连接成功',
'chat.completion.paused': '会话已停止'
},
assistant: {
'default.name': '默认助手',
'default.name': '😃 默认助手 - Assistant',
'default.description': '你好,我是默认助手。你可以立刻开始跟我聊天。',
'default.topic.name': '默认话题',
'topics.title': '话题',
@@ -180,7 +197,18 @@ const resources = {
'input.clear.content': '确定要清除所有消息吗?',
'input.placeholder': '在这里输入消息...',
'input.send': '发送',
'input.pause': '暂停'
'input.pause': '暂停',
'input.settings': '设置',
'input.estimated_tokens': '预估消耗',
'settings.temperature': '模型温度',
'settings.temperature.tip':
'模型生成文本的随机程度。值越大,回复内容越赋有多样性、创造性、随机性;设为 0 根据事实回答。日常聊天建议设置为 0.7',
'settings.conext_count': '上下文数',
'settings.conext_count.tip':
'要保留在上下文中的消息数量,数值越大,上下文越长,消耗的 token 越多。普通聊天建议 5-10代码生成建议 5-10',
'settings.reset': '重置',
'settings.set_as_default': '应用到默认助手',
'settings.max': '不限'
},
apps: {
title: '智能体'
@@ -195,11 +223,14 @@ const resources = {
zhipu: '智谱AI',
groq: 'Groq',
ollama: 'Ollama',
baichuan: '百川'
baichuan: '百川',
dashscope: '阿里云灵积',
anthropic: 'Anthropic',
aihubmix: 'AiHubMix'
},
settings: {
title: '设置',
general: '常规',
general: '常规设置',
provider: '模型提供商',
model: '模型设置',
assistant: '默认助手',
@@ -215,7 +246,6 @@ const resources = {
'models.default_assistant_model': '默认助手模型',
'models.topic_naming_model': '话题命名模型',
'models.add.add_model': '添加模型',
'models.add.provider_name.placeholder': '必填 例如 OpenAI',
'models.add.model_id.placeholder': '必填 例如 gpt-3.5-turbo',
'models.add.model_id': '模型 ID',
'models.add.model_id.tooltip': '例如 gpt-3.5-turbo',
@@ -226,12 +256,17 @@ const resources = {
'models.add.group_name.placeholder': '例如 ChatGPT',
'models.empty': '没有模型',
'assistant.title': '默认助手',
'assistant.model_params': '模型参数',
'about.description': '一个为创造者而生的 AI 助手',
'about.updateNotAvailable': '你的软件已是最新版本',
'about.checkingUpdate': '正在检查更新...',
'about.updateError': '更新出错',
'about.checkUpdate': '检查更新',
'about.downloading': '正在下载更新...'
'about.downloading': '正在下载更新...',
'provider.delete.title': '删除提供商',
'provider.delete.content': '确定要删除此模型提供商吗?',
'provider.edit.name': '模型提供商名称',
'provider.edit.name.placeholder': '例如 OpenAI'
}
}
}

View File

@@ -0,0 +1,180 @@
import { QuestionCircleOutlined } from '@ant-design/icons'
import { DEFAULT_CONEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
import { useAssistants } from '@renderer/hooks/useAssistant'
import { Assistant } from '@renderer/types'
import { Button, Col, InputNumber, Popover, Row, Slider, Tooltip } from 'antd'
import { debounce } from 'lodash'
import { FC, PropsWithChildren, useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface Props {
assistant: Assistant
}
const PopoverContent: FC<Props> = ({ assistant }) => {
const { updateAssistant } = useAssistants()
const [temperature, setTemperature] = useState(assistant.settings?.temperature ?? DEFAULT_TEMPERATURE)
const [contextCount, setConextCount] = useState(assistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
const { t } = useTranslation()
const onUpdateAssistantSettings = useCallback(
debounce(
({ _temperature, _contextCount }: { _temperature?: number; _contextCount?: number }) => {
updateAssistant({
...assistant,
settings: {
...assistant.settings,
temperature: _temperature ?? temperature,
contextCount: _contextCount ?? contextCount
}
})
},
1000,
{ leading: false, trailing: true }
),
[]
)
const onTemperatureChange = (value) => {
if (!isNaN(value as number)) {
setTemperature(value)
onUpdateAssistantSettings({ _temperature: value })
}
}
const onConextCountChange = (value) => {
if (!isNaN(value as number)) {
setConextCount(value)
onUpdateAssistantSettings({ _contextCount: value })
}
}
const onReset = () => {
setTemperature(DEFAULT_TEMPERATURE)
setConextCount(DEFAULT_CONEXTCOUNT)
updateAssistant({
...assistant,
settings: {
...assistant.settings,
temperature: DEFAULT_TEMPERATURE,
contextCount: DEFAULT_CONEXTCOUNT
}
})
}
useEffect(() => {
setTemperature(assistant.settings?.temperature ?? DEFAULT_TEMPERATURE)
setConextCount(assistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
}, [assistant])
return (
<Container>
<Row align="middle" style={{ marginBottom: 10 }} gutter={20}>
<Col span={6}>
<Row align="middle" justify="end">
<Label>{t('assistant.settings.temperature')}</Label>
<Tooltip title={t('assistant.settings.temperature.tip')}>
<QuestionIcon />
</Tooltip>
</Row>
</Col>
<Col span={14}>
<Slider
min={0}
max={1.2}
onChange={onTemperatureChange}
value={typeof temperature === 'number' ? temperature : 0}
marks={{ 0: '0', 0.7: '0.7', 1: '1', 1.2: '1.2' }}
step={0.1}
/>
</Col>
<Col span={3}>
<InputNumber
min={0}
max={1.2}
style={{ width: 50, marginLeft: 5, textAlign: 'center' }}
step={0.1}
value={temperature}
onChange={onTemperatureChange}
controls={false}
/>
</Col>
</Row>
<Row align="middle" style={{ marginBottom: 10 }} gutter={20}>
<Col span={6}>
<Row align="middle" justify="end">
<Label>{t('assistant.settings.conext_count')}</Label>
<Tooltip title={t('assistant.settings.conext_count.tip')}>
<QuestionIcon />
</Tooltip>
</Row>
</Col>
<Col span={14}>
<Slider
min={0}
max={20}
marks={{ 0: '0', 5: '5', 10: '10', 15: '15', 20: t('assistant.settings.max') }}
onChange={onConextCountChange}
value={typeof contextCount === 'number' ? contextCount : 0}
step={1}
/>
</Col>
<Col span={3}>
<InputNumber
min={0}
max={20}
style={{ width: 50, marginLeft: 5, textAlign: 'center' }}
step={1}
value={contextCount}
onChange={onConextCountChange}
controls={false}
/>
</Col>
</Row>
<Row justify="center">
<Button onClick={onReset}>{t('assistant.settings.reset')}</Button>
</Row>
</Container>
)
}
const AssistantSettings: FC<Props & PropsWithChildren> = ({ children, assistant }) => {
const [open, setOpen] = useState(false)
const { t } = useTranslation()
return (
<Popover content={<PopoverContent assistant={assistant} />} trigger="click" onOpenChange={setOpen}>
{open ? (
children
) : (
<Tooltip placement="top" title={t('assistant.input.settings')} arrow>
{children}
</Tooltip>
)}
</Popover>
)
}
const Container = styled.div`
display: flex;
flex-direction: column;
margin-bottom: 8px;
width: 420px;
padding: 5px;
`
const Label = styled.p`
margin: 0;
font-size: 14px;
font-weight: bold;
margin-right: 5px;
`
// Small help icon paired with a Tooltip that explains the setting.
const QuestionIcon = styled(QuestionCircleOutlined)`
font-size: 14px;
cursor: pointer;
color: var(--color-text-3);
`
export default AssistantSettings

View File

@@ -24,11 +24,9 @@ const Assistants: FC<Props> = ({ activeAssistant, setActiveAssistant, onCreateAs
const { t } = useTranslation()
const onDelete = (assistant: Assistant) => {
const _assistant = last(assistants.filter((a) => a.id !== assistant.id))
_assistant ? setActiveAssistant(_assistant) : onCreateAssistant()
removeAssistant(assistant.id)
setTimeout(() => {
const _assistant = last(assistants.filter((a) => a.id !== assistant.id))
_assistant ? setActiveAssistant(_assistant) : onCreateAssistant()
}, 0)
}
const items: MenuProps['items'] = [

View File

@@ -16,10 +16,6 @@ const Chat: FC<Props> = (props) => {
const { assistant } = useAssistant(props.assistant.id)
const { activeTopic, setActiveTopic } = useActiveTopic(assistant)
if (!assistant) {
return null
}
return (
<Container id="chat">
<Flex vertical flex={1} justify="space-between">

View File

@@ -1,7 +1,7 @@
import { EVENT_NAMES, EventEmitter } from '@renderer/services/event'
import { Assistant, Message, Topic } from '@renderer/types'
import { uuid } from '@renderer/utils'
import { FC, useCallback, useEffect, useRef, useState } from 'react'
import { estimateInputTokenCount, uuid } from '@renderer/utils'
import { FC, useCallback, useEffect, useMemo, useRef, useState } from 'react'
import styled from 'styled-components'
import { MoreOutlined } from '@ant-design/icons'
import { Button, Popconfirm, Tooltip } from 'antd'
@@ -9,6 +9,7 @@ import { useShowRightSidebar } from '@renderer/hooks/useStore'
import { useAssistant } from '@renderer/hooks/useAssistant'
import {
ClearOutlined,
ControlOutlined,
FullscreenExitOutlined,
FullscreenOutlined,
HistoryOutlined,
@@ -16,7 +17,7 @@ import {
PlusCircleOutlined
} from '@ant-design/icons'
import TextArea, { TextAreaRef } from 'antd/es/input/TextArea'
import { isEmpty } from 'lodash'
import { debounce, isEmpty } from 'lodash'
import SendMessageSetting from './SendMessageSetting'
import { useSettings } from '@renderer/hooks/useSettings'
import dayjs from 'dayjs'
@@ -24,6 +25,7 @@ import store, { useAppSelector } from '@renderer/store'
import { getDefaultTopic } from '@renderer/services/assistant'
import { useTranslation } from 'react-i18next'
import { setGenerating } from '@renderer/store/runtime'
import AssistantSettings from './AssistantSettings'
interface Props {
assistant: Assistant
@@ -36,6 +38,7 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
const { addTopic } = useAssistant(assistant.id)
const { sendMessageShortcut } = useSettings()
const [expended, setExpend] = useState(false)
const [estimateTokenCount, setEstimateTokenCount] = useState(0)
const generating = useAppSelector((state) => state.runtime.generating)
const inputRef = useRef<TextAreaRef>(null)
@@ -65,6 +68,8 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
setText('')
}
const inputTokenCount = useMemo(() => estimateInputTokenCount(text), [text])
const handleKeyDown = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
if (sendMessageShortcut === 'Enter' && event.key === 'Enter') {
if (event.shiftKey) {
@@ -108,11 +113,13 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
}, [addNewTopic, generating])
useEffect(() => {
const _setEstimateTokenCount = debounce(setEstimateTokenCount, 100, { leading: false, trailing: true })
const unsubscribes = [
EventEmitter.on(EVENT_NAMES.EDIT_MESSAGE, (message: Message) => {
setText(message.content)
inputRef.current?.focus()
})
}),
EventEmitter.on(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, _setEstimateTokenCount)
]
return () => unsubscribes.forEach((unsub) => unsub())
}, [])
@@ -148,6 +155,11 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
</ToolbarButton>
</Popconfirm>
</Tooltip>
<AssistantSettings assistant={assistant}>
<ToolbarButton type="text">
<ControlOutlined />
</ToolbarButton>
</AssistantSettings>
<Tooltip placement="top" title={expended ? t('assistant.input.collapse') : t('assistant.input.expand')} arrow>
<ToolbarButton type="text" onClick={() => setExpend(!expended)}>
{expended ? <FullscreenExitOutlined /> : <FullscreenOutlined />}
@@ -177,9 +189,13 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
autoFocus
contextMenu="true"
variant="borderless"
styles={{ textarea: { paddingLeft: 0 } }}
showCount
ref={inputRef}
styles={{ textarea: { paddingLeft: 0 } }}
/>
<TextCount>
{t('assistant.input.estimated_tokens')}: {`${inputTokenCount}/${estimateTokenCount}`}
</TextCount>
</Container>
)
}
@@ -192,6 +208,7 @@ const Container = styled.div`
border-top: 0.5px solid var(--color-border);
padding: 5px 15px;
transition: all 0.3s ease;
position: relative;
`
const Textarea = styled(TextArea)`
@@ -235,4 +252,12 @@ const ToolbarButton = styled(Button)`
}
`
const TextCount = styled.div`
position: absolute;
right: 8px;
bottom: 8px;
font-size: 11px;
color: var(--color-text-3);
`
export default Inputbar

View File

@@ -7,7 +7,7 @@ import { CopyOutlined, DeleteOutlined, EditOutlined } from '@ant-design/icons'
import Markdown from 'react-markdown'
import CodeBlock from './CodeBlock'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/event'
import { getModelLogo } from '@renderer/services/provider'
import { getModelLogo } from '@renderer/config/provider'
import Logo from '@renderer/assets/images/logo.png'
import { SyncOutlined } from '@ant-design/icons'
import { firstLetter } from '@renderer/utils'
@@ -104,8 +104,8 @@ const MessageItem: FC<Props> = ({ message, index, showMenu, onDeleteMessage }) =
<MessageMetadata>{message.modelId}</MessageMetadata>
{message.usage && (
<>
<MessageMetadata style={{ textTransform: 'uppercase' }}>
tokens used: {message.usage.total_tokens} (IN:{message.usage.prompt_tokens}/OUT:
<MessageMetadata>
Tokens: {message.usage.total_tokens} (IN:{message.usage.prompt_tokens}/OUT:
{message.usage.completion_tokens})
</MessageMetadata>
</>

View File

@@ -7,7 +7,7 @@ import MessageItem from './Message'
import { reverse } from 'lodash'
import { fetchChatCompletion, fetchMessagesSummary } from '@renderer/services/api'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { runAsyncFunction } from '@renderer/utils'
import { estimateHistoryTokenCount, runAsyncFunction } from '@renderer/utils'
import LocalStorage from '@renderer/services/storage'
import { useProviderByAssistant } from '@renderer/hooks/useProvider'
import { t } from 'i18next'
@@ -22,7 +22,7 @@ const Messages: FC<Props> = ({ assistant, topic }) => {
const [lastMessage, setLastMessage] = useState<Message | null>(null)
const { updateTopic } = useAssistant(assistant.id)
const provider = useProviderByAssistant(assistant)
const messagesRef = useRef<HTMLDivElement>(null)
const containerRef = useRef<HTMLDivElement>(null)
const assistantDefaultMessage: Message = {
id: 'assistant',
@@ -95,11 +95,15 @@ const Messages: FC<Props> = ({ assistant, topic }) => {
}, [topic.id])
useEffect(() => {
messagesRef.current?.scrollTo({ top: 100000, behavior: 'auto' })
containerRef.current?.scrollTo({ top: 100000, behavior: 'auto' })
}, [messages])
useEffect(() => {
EventEmitter.emit(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, estimateHistoryTokenCount(assistant, messages))
}, [assistant, messages])
return (
<Container id="messages" key={assistant.id} ref={messagesRef}>
<Container id="messages" key={assistant.id} ref={containerRef}>
{lastMessage && <MessageItem message={lastMessage} />}
{reverse([...messages]).map((message, index) => (
<MessageItem key={message.id} message={message} showMenu index={index} onDeleteMessage={onDeleteMessage} />

View File

@@ -13,15 +13,16 @@ interface Props {
}
const Navigation: FC<Props> = ({ activeAssistant }) => {
const { providers } = useProviders()
const { assistant } = useAssistant(activeAssistant.id)
const { model, setModel } = useAssistant(activeAssistant.id)
const { providers } = useProviders()
const { t } = useTranslation()
const items: MenuProps['items'] = providers
.filter((p) => p.models.length > 0)
.map((p) => ({
key: p.id,
label: t(`provider.${p.id}`),
label: p.isSystem ? t(`provider.${p.id}`) : p.name,
type: 'group',
children: p.models.map((m) => ({
key: m.id,
@@ -33,7 +34,7 @@ const Navigation: FC<Props> = ({ activeAssistant }) => {
return (
<NavbarCenter style={{ border: 'none', padding: '0 15px' }}>
{activeAssistant?.name}
{assistant?.name}
<DropdownMenu menu={{ items, style: { maxHeight: '80vh', overflow: 'auto' } }} trigger={['click']}>
<Button size="small" type="primary" ghost style={{ fontSize: '11px' }}>
{model ? model.name : t('button.select_model')}

View File

@@ -28,7 +28,7 @@ const SendMessageSetting: FC<Props> = ({ children }) => {
return (
<Dropdown
menu={{ items: sendSettingItems, selectable: true, defaultSelectedKeys: [sendMessageShortcut] }}
placement="top"
placement="topRight"
trigger={['click']}
arrow>
{children}

View File

@@ -82,12 +82,8 @@ const Topics: FC<Props> = ({ assistant, activeTopic, setActiveTopic }) => {
}
}
if (!showRightSidebar) {
return null
}
return (
<Container className={showRightSidebar ? '' : 'collapsed'}>
<Container style={{ display: showRightSidebar ? 'block' : 'none' }}>
<TopicTitle>
<span>
{t('assistant.topics.title')} ({assistant.topics.length})

View File

@@ -1,15 +1,66 @@
import { FC } from 'react'
import { SettingContainer, SettingDivider, SettingSubtitle, SettingTitle } from './components'
import { Input } from 'antd'
import TextArea from 'antd/es/input/TextArea'
import { QuestionCircleOutlined } from '@ant-design/icons'
import { DEFAULT_CONEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
import { useDefaultAssistant } from '@renderer/hooks/useAssistant'
import { Button, Col, Input, InputNumber, Row, Slider, Tooltip } from 'antd'
import TextArea from 'antd/es/input/TextArea'
import { FC, useCallback, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import { SettingContainer, SettingDivider, SettingSubtitle, SettingTitle } from './components'
import { debounce } from 'lodash'
const AssistantSettings: FC = () => {
const { defaultAssistant, updateDefaultAssistant } = useDefaultAssistant()
const [temperature, setTemperature] = useState(defaultAssistant.settings?.temperature ?? DEFAULT_TEMPERATURE)
const [contextCount, setConextCount] = useState(defaultAssistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
const { t } = useTranslation()
const onUpdateAssistantSettings = useCallback(
debounce(
({ _temperature, _contextCount }: { _temperature?: number; _contextCount?: number }) => {
updateDefaultAssistant({
...defaultAssistant,
settings: {
...defaultAssistant.settings,
temperature: _temperature ?? temperature,
contextCount: _contextCount ?? contextCount
}
})
},
1000,
{ leading: false, trailing: true }
),
[]
)
const onTemperatureChange = (value) => {
if (!isNaN(value as number)) {
setTemperature(value)
onUpdateAssistantSettings({ _temperature: value })
}
}
const onConextCountChange = (value) => {
if (!isNaN(value as number)) {
setConextCount(value)
onUpdateAssistantSettings({ _contextCount: value })
}
}
const onReset = () => {
setTemperature(DEFAULT_TEMPERATURE)
setConextCount(DEFAULT_CONEXTCOUNT)
updateDefaultAssistant({
...defaultAssistant,
settings: {
...defaultAssistant.settings,
temperature: DEFAULT_TEMPERATURE,
contextCount: DEFAULT_CONEXTCOUNT
}
})
}
return (
<SettingContainer>
<SettingTitle>{t('settings.assistant.title')}</SettingTitle>
@@ -27,8 +78,82 @@ const AssistantSettings: FC = () => {
value={defaultAssistant.prompt}
onChange={(e) => updateDefaultAssistant({ ...defaultAssistant, prompt: e.target.value })}
/>
<SettingDivider />
<SettingSubtitle style={{ marginTop: 0 }}>{t('settings.assistant.model_params')}</SettingSubtitle>
<Row align="middle">
<Label>{t('assistant.settings.temperature')}</Label>
<Tooltip title={t('assistant.settings.temperature.tip')}>
<QuestionIcon />
</Tooltip>
</Row>
<Row align="middle" style={{ marginBottom: 10 }} gutter={20}>
<Col span={22}>
<Slider
min={0}
max={1.2}
onChange={onTemperatureChange}
value={typeof temperature === 'number' ? temperature : 0}
marks={{ 0: '0', 0.7: '0.7', 1: '1', 1.2: '1.2' }}
step={0.1}
/>
</Col>
<Col span={2}>
<InputNumber
min={0}
max={1.2}
step={0.1}
value={temperature}
onChange={onTemperatureChange}
style={{ width: '100%' }}
/>
</Col>
</Row>
<Row align="middle">
<Label>{t('assistant.settings.conext_count')}</Label>
<Tooltip title={t('assistant.settings.conext_count.tip')}>
<QuestionIcon />
</Tooltip>
</Row>
<Row align="middle" style={{ marginBottom: 10 }} gutter={20}>
<Col span={22}>
<Slider
min={0}
max={20}
marks={{ 0: '0', 5: '5', 10: '10', 15: '15', 20: t('assistant.settings.max') }}
onChange={onConextCountChange}
value={typeof contextCount === 'number' ? contextCount : 0}
step={1}
/>
</Col>
<Col span={2}>
<InputNumber
min={0}
max={20}
step={1}
value={contextCount}
onChange={onConextCountChange}
style={{ width: '100%' }}
/>
</Col>
</Row>
<Button onClick={onReset} style={{ width: 100 }}>
{t('assistant.settings.reset')}
</Button>
</SettingContainer>
)
}
const Label = styled.p`
margin: 0;
font-size: 14px;
font-weight: bold;
margin-right: 5px;
`
const QuestionIcon = styled(QuestionCircleOutlined)`
font-size: 14px;
cursor: pointer;
color: var(--color-text-3);
`
export default AssistantSettings

View File

@@ -10,7 +10,7 @@ import { setAvatar } from '@renderer/store/runtime'
import { useSettings } from '@renderer/hooks/useSettings'
import { setLanguage } from '@renderer/store/settings'
import { useTranslation } from 'react-i18next'
import i18next from 'i18next'
import i18n from '@renderer/i18n'
const GeneralSettings: FC = () => {
const avatar = useAvatar()
@@ -20,7 +20,7 @@ const GeneralSettings: FC = () => {
const onSelectLanguage = (value: string) => {
dispatch(setLanguage(value))
i18next.changeLanguage(value)
i18n.changeLanguage(value)
localStorage.setItem('language', value)
}

View File

@@ -16,7 +16,7 @@ const ModelSettings: FC = () => {
const selectOptions = providers
.filter((p) => p.models.length > 0)
.map((p) => ({
label: t(`provider.${p.id}`),
label: p.isSystem ? t(`provider.${p.id}`) : p.name,
title: p.name,
options: p.models.map((m) => ({
label: m.name,

View File

@@ -1,21 +1,26 @@
import { DragDropContext, Draggable, Droppable, DropResult } from '@hello-pangea/dnd'
import { useProviders, useSystemProviders } from '@renderer/hooks/useProvider'
import { getProviderLogo } from '@renderer/services/provider'
import { useAllProviders, useProviders } from '@renderer/hooks/useProvider'
import { getProviderLogo } from '@renderer/config/provider'
import { Provider } from '@renderer/types'
import { droppableReorder } from '@renderer/utils'
import { Avatar, Tag } from 'antd'
import { droppableReorder, generateColorFromChar, getFirstCharacter, uuid } from '@renderer/utils'
import { Avatar, Button, Dropdown, MenuProps, Tag } from 'antd'
import { FC, useState } from 'react'
import styled from 'styled-components'
import ProviderSetting from './components/ProviderSetting'
import { useTranslation } from 'react-i18next'
import { PlusOutlined } from '@ant-design/icons'
import { DeleteOutlined, EditOutlined } from '@ant-design/icons'
import AddProviderPopup from './components/AddProviderPopup'
const ProviderSettings: FC = () => {
const providers = useSystemProviders()
const { updateProviders } = useProviders()
const providers = useAllProviders()
const { updateProviders, addProvider, removeProvider, updateProvider } = useProviders()
const [selectedProvider, setSelectedProvider] = useState<Provider>(providers[0])
const { t } = useTranslation()
const [dragging, setDragging] = useState(false)
const onDragEnd = (result: DropResult) => {
setDragging(false)
if (result.destination) {
const sourceIndex = result.source.index
const destIndex = result.destination.index
@@ -24,37 +29,109 @@ const ProviderSettings: FC = () => {
}
}
const onAddProvider = async () => {
const prividerName = await AddProviderPopup.show()
if (!prividerName) {
return
}
const provider = {
id: uuid(),
name: prividerName,
apiKey: '',
apiHost: '',
models: [],
enabled: false,
isSystem: false
} as Provider
addProvider(provider)
setSelectedProvider(provider)
}
const getDropdownMenus = (provider: Provider): MenuProps['items'] => {
return [
{
label: t('common.edit'),
key: 'edit',
icon: <EditOutlined />,
async onClick() {
const name = await AddProviderPopup.show(provider)
name && updateProvider({ ...provider, name })
}
},
{
label: t('common.delete'),
key: 'delete',
icon: <DeleteOutlined />,
danger: true,
async onClick() {
window.modal.confirm({
title: t('settings.provider.delete.title'),
content: t('settings.provider.delete.content'),
okButtonProps: { danger: true },
okText: t('common.delete'),
onOk: () => {
setSelectedProvider(providers.filter((p) => p.isSystem)[0])
removeProvider(provider)
}
})
}
}
]
}
return (
<Container>
<ProviderListContainer>
<DragDropContext onDragEnd={onDragEnd}>
<Droppable droppableId="droppable">
{(provided) => (
<div {...provided.droppableProps} ref={provided.innerRef}>
{providers.map((provider, index) => (
<Draggable key={`draggable_${provider.id}_${index}`} draggableId={provider.id} index={index}>
{(provided) => (
<div ref={provided.innerRef} {...provided.draggableProps} {...provided.dragHandleProps}>
<ProviderListItem
key={JSON.stringify(provider)}
className={provider.id === selectedProvider?.id ? 'active' : ''}
onClick={() => setSelectedProvider(provider)}>
<Avatar src={getProviderLogo(provider.id)} size={22} />
<ProviderItemName>{t(`provider.${provider.id}`)}</ProviderItemName>
{provider.enabled && (
<Tag color="green" style={{ marginLeft: 'auto' }}>
ON
</Tag>
)}
</ProviderListItem>
</div>
)}
</Draggable>
))}
</div>
)}
</Droppable>
</DragDropContext>
<ProviderList>
<DragDropContext onDragStart={() => setDragging(true)} onDragEnd={onDragEnd}>
<Droppable droppableId="droppable">
{(provided) => (
<div {...provided.droppableProps} ref={provided.innerRef}>
{providers.map((provider, index) => (
<Draggable key={`draggable_${provider.id}_${index}`} draggableId={provider.id} index={index}>
{(provided) => (
<div ref={provided.innerRef} {...provided.draggableProps} {...provided.dragHandleProps}>
<Dropdown
menu={{ items: provider.isSystem ? [] : getDropdownMenus(provider) }}
trigger={['contextMenu']}>
<ProviderListItem
key={JSON.stringify(provider)}
className={provider.id === selectedProvider?.id ? 'active' : ''}
onClick={() => setSelectedProvider(provider)}>
{provider.isSystem && <Avatar src={getProviderLogo(provider.id)} size={28} />}
{!provider.isSystem && (
<Avatar
size={28}
style={{ backgroundColor: generateColorFromChar(provider.name), minWidth: 28 }}>
{getFirstCharacter(provider.name)}
</Avatar>
)}
<ProviderItemName>
{provider.isSystem ? t(`provider.${provider.id}`) : provider.name}
</ProviderItemName>
{provider.enabled && (
<Tag color="green" style={{ marginLeft: 'auto' }}>
ON
</Tag>
)}
</ProviderListItem>
</Dropdown>
</div>
)}
</Draggable>
))}
</div>
)}
</Droppable>
</DragDropContext>
</ProviderList>
{!dragging && (
<AddButtonWrapper>
<Button type="dashed" style={{ width: '100%' }} icon={<PlusOutlined />} onClick={onAddProvider} />
</AddButtonWrapper>
)}
</ProviderListContainer>
<ProviderSetting provider={selectedProvider} key={JSON.stringify(selectedProvider)} />
</Container>
@@ -65,22 +142,30 @@ const Container = styled.div`
width: 100%;
display: flex;
flex-direction: row;
justify-content: space-between;
`
const ProviderListContainer = styled.div`
display: flex;
flex-direction: column;
width: var(--assistants-width);
height: 100%;
height: calc(100vh - var(--navbar-height));
border-right: 0.5px solid var(--color-border);
padding: 10px;
padding: 10px 8px;
overflow-y: auto;
`
const ProviderList = styled.div`
display: flex;
flex: 1;
flex-direction: column;
`
const ProviderListItem = styled.div`
display: flex;
flex-direction: row;
align-items: center;
padding: 6px 10px;
padding: 5px 8px;
margin-bottom: 5px;
width: 100%;
cursor: pointer;
@@ -99,6 +184,17 @@ const ProviderListItem = styled.div`
const ProviderItemName = styled.div`
margin-left: 10px;
font-weight: bold;
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
`
const AddButtonWrapper = styled.div`
height: 50px;
flex-direction: row;
justify-content: center;
align-items: center;
padding: 10px 0;
`
export default ProviderSettings

View File

@@ -48,8 +48,8 @@ const PopupContainer: React.FC<Props> = ({ title, provider, resolve }) => {
}
const model: Model = {
id: values.id,
provider: provider.id,
id: values.id,
name: values.name ? values.name : values.id.toUpperCase(),
group: getDefaultGroupName(values.group || values.id)
}
@@ -75,9 +75,6 @@ const PopupContainer: React.FC<Props> = ({ title, provider, resolve }) => {
colon={false}
style={{ marginTop: 25 }}
onFinish={onFinish}>
<Form.Item name="provider" label={t('common.provider')} initialValue={provider.id} rules={[{ required: true }]}>
<Input placeholder={t('settings.models.add.provider_name.placeholder')} disabled />
</Form.Item>
<Form.Item
name="id"
label={t('settings.models.add.model_id')}
@@ -86,13 +83,17 @@ const PopupContainer: React.FC<Props> = ({ title, provider, resolve }) => {
<Input
placeholder={t('settings.models.add.model_id.placeholder')}
spellCheck={false}
maxLength={50}
onChange={(e) => {
form.setFieldValue('name', e.target.value.toUpperCase())
form.setFieldValue('group', getDefaultGroupName(e.target.value))
}}
/>
</Form.Item>
<Form.Item name="name" label={t('settings.models.add.model_name')} tooltip="Example: GPT-3.5">
<Form.Item
name="name"
label={t('settings.models.add.model_name')}
tooltip={t('settings.models.add.model_name.placeholder')}>
<Input placeholder={t('settings.models.add.model_name.placeholder')} spellCheck={false} />
</Form.Item>
<Form.Item

View File

@@ -0,0 +1,72 @@
import { TopView } from '@renderer/components/TopView'
import { Provider } from '@renderer/types'
import { Input, Modal } from 'antd'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
interface Props {
provider?: Provider
resolve: (name: string) => void
}
/**
 * Modal body for adding/renaming a provider.
 * Resolves with the trimmed provider name on OK, or '' on cancel.
 */
const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
  const [open, setOpen] = useState(true)
  const [name, setName] = useState(provider?.name || '')
  const { t } = useTranslation()

  // Trim only when resolving: trimming inside onChange made it impossible to
  // type an interior space (e.g. "Open AI"), since each keystroke re-trimmed.
  const trimmedName = name.trim()
  const buttonDisabled = trimmedName.length === 0

  const onOk = () => {
    setOpen(false)
    resolve(trimmedName)
  }

  const onCancel = () => {
    setOpen(false)
    resolve('')
  }

  // afterClose safety net: the promise is already settled by onOk/onCancel in
  // the normal flow, so this extra resolve is a harmless no-op.
  const onClose = () => {
    resolve(trimmedName)
  }

  return (
    <Modal
      open={open}
      onOk={onOk}
      onCancel={onCancel}
      afterClose={onClose}
      width={360}
      closable={false}
      title={t('settings.provider.edit.name')}
      okButtonProps={{ disabled: buttonDisabled }}>
      <Input
        value={name}
        onChange={(e) => setName(e.target.value)}
        placeholder={t('settings.provider.edit.name.placeholder')}
        // Guard Enter the same way the OK button is guarded; previously Enter
        // could resolve an empty name while the OK button was disabled.
        onKeyDown={(e) => e.key === 'Enter' && !buttonDisabled && onOk()}
        maxLength={32}
      />
    </Modal>
  )
}
export default class AddProviderPopup {
static topviewId = 0
static hide() {
TopView.hide(this.topviewId)
}
static show(provider?: Provider) {
return new Promise<string>((resolve) => {
this.topviewId = TopView.show(
<PopupContainer
provider={provider}
resolve={(v) => {
resolve(v)
this.hide()
}}
/>
)
})
}
}

View File

@@ -2,7 +2,7 @@ import { LoadingOutlined, MinusOutlined, PlusOutlined, QuestionCircleOutlined }
import { SYSTEM_MODELS } from '@renderer/config/models'
import { useProvider } from '@renderer/hooks/useProvider'
import { fetchModels } from '@renderer/services/api'
import { getModelLogo } from '@renderer/services/provider'
import { getModelLogo } from '@renderer/config/provider'
import { Model, Provider } from '@renderer/types'
import { getDefaultGroupName, isFreeModel, runAsyncFunction } from '@renderer/utils'
import { Avatar, Button, Empty, Flex, Modal, Tag } from 'antd'
@@ -86,7 +86,7 @@ const PopupContainer: React.FC<Props> = ({ provider: _provider, resolve }) => {
return (
<Flex>
<ModelHeaderTitle>
{t(`provider.${provider.id}`)} {t('common.models')}
{provider.isSystem ? t(`provider.${provider.id}`) : provider.name} {t('common.models')}
</ModelHeaderTitle>
{loading && <LoadingOutlined size={20} />}
</Flex>

View File

@@ -5,7 +5,7 @@ import { Avatar, Button, Card, Divider, Flex, Input, Space, Switch } from 'antd'
import { useProvider } from '@renderer/hooks/useProvider'
import { groupBy } from 'lodash'
import { SettingContainer, SettingSubtitle, SettingTitle } from '.'
import { getModelLogo } from '@renderer/services/provider'
import { getModelLogo } from '@renderer/config/provider'
import { CheckOutlined, EditOutlined, ExportOutlined, LoadingOutlined, PlusOutlined } from '@ant-design/icons'
import AddModelPopup from './AddModelPopup'
import EditModelsPopup from './EditModelsPopup'
@@ -37,7 +37,7 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
const onUpdateApiKey = () => updateProvider({ ...provider, apiKey })
const onUpdateApiHost = () => updateProvider({ ...provider, apiHost })
const onManageModel = () => EditModelsPopup.show({ provider })
const onAddModel = () => AddModelPopup.show({ title: t('settings.models.add_model'), provider })
const onAddModel = () => AddModelPopup.show({ title: t('settings.models.add.add_model'), provider })
const onCheckApi = async () => {
setApiChecking(true)
@@ -59,7 +59,7 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
<SettingContainer>
<SettingTitle>
<Flex align="center">
<span>{t(`provider.${provider.id}`)}</span>
<span>{provider.isSystem ? t(`provider.${provider.id}`) : provider.name}</span>
{officialWebsite! && (
<Link target="_blank" href={providerConfig.websites.official}>
<ExportOutlined style={{ marginLeft: '8px', color: 'white', fontSize: '12px' }} />
@@ -75,13 +75,14 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
<Divider style={{ width: '100%', margin: '10px 0' }} />
<SettingSubtitle style={{ marginTop: 5 }}>{t('settings.provider.api_key')}</SettingSubtitle>
<Space.Compact style={{ width: '100%' }}>
<Input
<Input.Password
value={apiKey}
placeholder={t('settings.provider.api_key')}
onChange={(e) => setApiKey(e.target.value)}
onBlur={onUpdateApiKey}
spellCheck={false}
disabled={apiKeyDisabled}
type="password"
autoFocus={provider.enabled && apiKey === ''}
/>
{!apiKeyDisabled && (

View File

@@ -0,0 +1,144 @@
import { Assistant, Message, Provider } from '@renderer/types'
import OpenAI from 'openai'
import Anthropic from '@anthropic-ai/sdk'
import { getDefaultModel, getTopNamingModel } from './assistant'
import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } from 'openai/resources'
import { sum, takeRight } from 'lodash'
import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources'
import { EVENT_NAMES } from './event'
import { getAssistantSettings, removeQuotes } from '@renderer/utils'
/**
 * Unifies the OpenAI-style and Anthropic chat APIs behind one interface.
 * Which SDK is actually used is decided per call via `isAnthropic`.
 */
export default class ProviderSDK {
  provider: Provider
  openaiSdk: OpenAI
  anthropicSdk: Anthropic

  constructor(provider: Provider) {
    this.provider = provider
    const host = provider.apiHost
    // A host ending in '/' is used verbatim; otherwise assume an
    // OpenAI-style endpoint and append the '/v1/' prefix.
    const baseURL = host.endsWith('/') ? host : `${provider.apiHost}/v1/`
    this.anthropicSdk = new Anthropic({ apiKey: provider.apiKey, baseURL })
    this.openaiSdk = new OpenAI({ dangerouslyAllowBrowser: true, apiKey: provider.apiKey, baseURL })
  }

  // Anthropic speaks a different wire protocol; every public method branches on this.
  private get isAnthropic() {
    return this.provider.id === 'anthropic'
  }

  /**
   * Streams a chat completion. `onChunk` receives each text delta and, when
   * the stream finishes, one call carrying token usage.
   */
  public async completions(
    messages: Message[],
    assistant: Assistant,
    onChunk: ({ text, usage }: { text?: string; usage?: OpenAI.Completions.CompletionUsage }) => void
  ) {
    const defaultModel = getDefaultModel()
    const model = assistant.model || defaultModel
    const { contextCount } = getAssistantSettings(assistant)

    // contextCount history messages plus the newly sent one.
    const userMessages = takeRight(messages, contextCount + 1).map((message) => ({
      role: message.role,
      content: message.content
    }))

    if (this.isAnthropic) {
      // FIX: Anthropic rejects `role: 'system'` inside `messages`; the system
      // prompt must go in the top-level `system` parameter.
      await this.anthropicSdk.messages
        .stream({
          model: model.id,
          system: assistant.prompt || undefined,
          messages: userMessages as MessageParam[],
          max_tokens: 4096,
          temperature: assistant.settings?.temperature
        })
        .on('text', (text) => onChunk({ text: text || '' }))
        .on('finalMessage', (message) =>
          onChunk({
            usage: {
              prompt_tokens: message.usage.input_tokens,
              completion_tokens: message.usage.output_tokens,
              total_tokens: sum(Object.values(message.usage))
            }
          })
        )
    } else {
      const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
      const stream = await this.openaiSdk.chat.completions.create({
        model: model.id,
        messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
        stream: true,
        temperature: assistant.settings?.temperature
      })
      for await (const chunk of stream) {
        // The user hit "pause": stop consuming the stream immediately.
        if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
        onChunk({ text: chunk.choices[0]?.delta?.content || '', usage: chunk.usage })
      }
    }
  }

  /**
   * Asks the model for a short topic title from the last few messages.
   * Returns null when the Anthropic response carries no text block.
   */
  public async summaries(messages: Message[], assistant: Assistant): Promise<string | null> {
    const model = getTopNamingModel() || assistant.model || getDefaultModel()

    const userMessages = takeRight(messages, 5).map((message) => ({
      role: 'user',
      content: message.content
    }))

    const systemPrompt = '你是一名擅长会话的助理,你需要将用户的会话总结为 10 个字以内的标题,不要加标点符号'

    if (this.isAnthropic) {
      // FIX: same as completions() — system prompt is a top-level parameter
      // for Anthropic, not a message with role 'system'.
      const message = await this.anthropicSdk.messages.create({
        system: systemPrompt,
        messages: userMessages as Anthropic.Messages.MessageParam[],
        model: model.id,
        stream: false,
        max_tokens: 50
      })
      return message.content[0].type === 'text' ? message.content[0].text : null
    } else {
      const response = await this.openaiSdk.chat.completions.create({
        model: model.id,
        messages: [{ role: 'system', content: systemPrompt }, ...userMessages] as ChatCompletionMessageParam[],
        stream: false,
        max_tokens: 50
      })
      return removeQuotes(response.choices[0].message?.content || '')
    }
  }

  /**
   * Sends a minimal "hi" request to verify the API key/host/model.
   * Never throws; failures are reported through the `error` field.
   */
  public async check(): Promise<{ valid: boolean; error: Error | null }> {
    const model = this.provider.models[0]

    // FIX: a provider without any configured model used to crash here
    // (reading `.id` of undefined) instead of reporting a check failure.
    if (!model) {
      return { valid: false, error: new Error('No model configured for this provider') }
    }

    const body = {
      model: model.id,
      messages: [{ role: 'user', content: 'hi' }],
      max_tokens: 100,
      stream: false
    }

    try {
      if (this.isAnthropic) {
        const message = await this.anthropicSdk.messages.create(body as MessageCreateParamsNonStreaming)
        return { valid: message.content.length > 0, error: null }
      } else {
        const response = await this.openaiSdk.chat.completions.create(body as ChatCompletionCreateParamsNonStreaming)
        return { valid: Boolean(response?.choices[0].message), error: null }
      }
    } catch (error: any) {
      return { valid: false, error }
    }
  }

  /**
   * Lists the models the provider exposes. Anthropic has no list endpoint,
   * so it (and any request failure) yields an empty array.
   */
  public async models(): Promise<OpenAI.Models.Model[]> {
    try {
      if (this.isAnthropic) {
        return []
      }
      const response = await this.openaiSdk.models.list()
      return response.data
    } catch (error) {
      return []
    }
  }
}

View File

@@ -1,38 +1,30 @@
import { Assistant, Message, Provider, Topic } from '@renderer/types'
import { uuid } from '@renderer/utils'
import { EVENT_NAMES, EventEmitter } from './event'
import { ChatCompletionMessageParam, ChatCompletionSystemMessageParam } from 'openai/resources'
import OpenAI from 'openai'
import { getAssistantProvider, getDefaultModel, getProviderByModel, getTopNamingModel } from './assistant'
import { takeRight } from 'lodash'
import dayjs from 'dayjs'
import i18n from '@renderer/i18n'
import store from '@renderer/store'
import { setGenerating } from '@renderer/store/runtime'
import i18n from '@renderer/i18n'
import { Assistant, Message, Provider, Topic } from '@renderer/types'
import { getErrorMessage, uuid } from '@renderer/utils'
import dayjs from 'dayjs'
import { getAssistantProvider, getDefaultModel, getProviderByModel, getTopNamingModel } from './assistant'
import { EVENT_NAMES, EventEmitter } from './event'
import ProviderSDK from './ProviderSDK'
interface FetchChatCompletionParams {
export async function fetchChatCompletion({
messages,
topic,
assistant,
onResponse
}: {
messages: Message[]
topic: Topic
assistant: Assistant
onResponse: (message: Message) => void
}
const getOpenAiProvider = (provider: Provider) => {
const host = provider.apiHost
return new OpenAI({
dangerouslyAllowBrowser: true,
apiKey: provider.apiKey,
baseURL: host.endsWith('/') ? host : `${provider.apiHost}/v1/`
})
}
export async function fetchChatCompletion({ messages, topic, assistant, onResponse }: FetchChatCompletionParams) {
}) {
window.keyv.set(EVENT_NAMES.CHAT_COMPLETION_PAUSED, false)
const provider = getAssistantProvider(assistant)
const openaiProvider = getOpenAiProvider(provider)
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const providerSdk = new ProviderSDK(provider)
store.dispatch(setGenerating(true))
@@ -49,79 +41,36 @@ export async function fetchChatCompletion({ messages, topic, assistant, onRespon
onResponse({ ...message })
const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
const userMessages = takeRight(messages, 5).map((message) => ({
role: message.role,
content: message.content
}))
try {
const stream = await openaiProvider.chat.completions.create({
model: model.id,
messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
stream: true
await providerSdk.completions(messages, assistant, ({ text, usage }) => {
message.content = message.content + text || ''
message.usage = usage
onResponse({ ...message, status: 'pending' })
})
let content = ''
let usage: OpenAI.Completions.CompletionUsage | undefined = undefined
for await (const chunk of stream) {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
break
}
content = content + (chunk.choices[0]?.delta?.content || '')
chunk.usage && (usage = chunk.usage)
onResponse({ ...message, content, status: 'pending' })
}
message.content = content
message.usage = usage
} catch (error: any) {
message.content = `Error: ${error.message}`
}
const paused = window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)
message.status = paused ? 'paused' : 'success'
// Update message status
message.status = window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED) ? 'paused' : 'success'
// Emit chat completion event
EventEmitter.emit(EVENT_NAMES.AI_CHAT_COMPLETION, message)
// Reset generating state
store.dispatch(setGenerating(false))
return message
}
interface FetchMessagesSummaryParams {
messages: Message[]
assistant: Assistant
}
export async function fetchMessagesSummary({ messages, assistant }: FetchMessagesSummaryParams) {
export async function fetchMessagesSummary({ messages, assistant }: { messages: Message[]; assistant: Assistant }) {
const model = getTopNamingModel() || assistant.model || getDefaultModel()
const provider = getProviderByModel(model)
const openaiProvider = getOpenAiProvider(provider)
const userMessages: ChatCompletionMessageParam[] = takeRight(messages, 5).map((message) => ({
role: 'user',
content: message.content
}))
const systemMessage: ChatCompletionSystemMessageParam = {
role: 'system',
content:
'你是一名擅长会话的助理,你需要将用户的会话总结为 10 个字以内的标题,回复内容不需要用引号引起来,不需要在结尾加上句号。'
}
const response = await openaiProvider.chat.completions.create({
model: model.id,
messages: [systemMessage, ...userMessages],
stream: false
})
return response.choices[0].message?.content
const providerSdk = new ProviderSDK(provider)
return providerSdk.summaries(messages, assistant)
}
export async function checkApi(provider: Provider) {
const openaiProvider = getOpenAiProvider(provider)
const model = provider.models[0]
const key = 'api-check'
const style = { marginTop: '3vh' }
@@ -141,22 +90,9 @@ export async function checkApi(provider: Provider) {
return false
}
let valid = false
let errorMessage = ''
const providerSdk = new ProviderSDK(provider)
try {
const response = await openaiProvider.chat.completions.create({
model: model.id,
messages: [{ role: 'user', content: 'hi' }],
max_tokens: 100,
stream: false
})
valid = Boolean(response?.choices[0].message)
} catch (error) {
errorMessage = (error as Error).message
valid = false
}
const { valid, error } = await providerSdk.check()
window.message[valid ? 'success' : 'error']({
key: 'api-check',
@@ -164,17 +100,17 @@ export async function checkApi(provider: Provider) {
duration: valid ? 2 : 8,
content: valid
? i18n.t('message.api.connection.success')
: i18n.t('message.api.connection.failed') + ' ' + errorMessage
: i18n.t('message.api.connection.failed') + ' : ' + getErrorMessage(error)
})
return valid
}
export async function fetchModels(provider: Provider) {
const providerSdk = new ProviderSDK(provider)
try {
const openaiProvider = getOpenAiProvider(provider)
const response = await openaiProvider.models.list()
return response.data
return await providerSdk.models()
} catch (error) {
return []
}

View File

@@ -10,5 +10,6 @@ export const EVENT_NAMES = {
ADD_ASSISTANT: 'ADD_ASSISTANT',
EDIT_MESSAGE: 'EDIT_MESSAGE',
REGENERATE_MESSAGE: 'REGENERATE_MESSAGE',
CHAT_COMPLETION_PAUSED: 'CHAT_COMPLETION_PAUSED'
CHAT_COMPLETION_PAUSED: 'CHAT_COMPLETION_PAUSED',
ESTIMATED_TOKEN_COUNT: 'ESTIMATED_TOKEN_COUNT'
}

View File

@@ -1,73 +0,0 @@
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.jpeg'
import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
import YiProviderLogo from '@renderer/assets/images/providers/yi.svg'
import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.jpeg'
import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
import BaichuanProviderLogo from '@renderer/assets/images/providers/baichuan.png'
import ChatGPTModelLogo from '@renderer/assets/images/models/chatgpt.jpeg'
import ChatGLMModelLogo from '@renderer/assets/images/models/chatglm.jpeg'
import DeepSeekModelLogo from '@renderer/assets/images/models/deepseek.png'
import GemmaModelLogo from '@renderer/assets/images/models/gemma.jpeg'
import QwenModelLogo from '@renderer/assets/images/models/qwen.jpeg'
import YiModelLogo from '@renderer/assets/images/models/yi.svg'
import LlamaModelLogo from '@renderer/assets/images/models/llama.jpeg'
import MixtralModelLogo from '@renderer/assets/images/models/mixtral.jpeg'
import MoonshotModelLogo from '@renderer/assets/images/providers/moonshot.jpeg'
import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
import BaichuanModelLogo from '@renderer/assets/images/models/baichuan.png'
export function getProviderLogo(providerId: string) {
switch (providerId) {
case 'openai':
return OpenAiProviderLogo
case 'silicon':
return SiliconFlowProviderLogo
case 'deepseek':
return DeepSeekProviderLogo
case 'yi':
return YiProviderLogo
case 'groq':
return GroqProviderLogo
case 'zhipu':
return ZhipuProviderLogo
case 'ollama':
return OllamaProviderLogo
case 'moonshot':
return MoonshotProviderLogo
case 'openrouter':
return OpenRouterProviderLogo
case 'baichuan':
return BaichuanProviderLogo
default:
return undefined
}
}
/**
 * Pick a logo for a model by case-insensitive substring match against a
 * keyword table. The first matching keyword (in insertion order) wins;
 * undefined is returned when nothing matches.
 */
export function getModelLogo(modelId: string) {
  const keywordToLogo = {
    gpt: ChatGPTModelLogo,
    glm: ChatGLMModelLogo,
    deepseek: DeepSeekModelLogo,
    qwen: QwenModelLogo,
    gemma: GemmaModelLogo,
    'yi-': YiModelLogo,
    llama: LlamaModelLogo,
    mixtral: MixtralModelLogo,
    mistral: MixtralModelLogo,
    moonshot: MoonshotModelLogo,
    phi: MicrosoftModelLogo,
    baichuan: BaichuanModelLogo
  }
  const id = modelId.toLowerCase()
  const match = Object.entries(keywordToLogo).find(([keyword]) => id.includes(keyword))
  return match?.[1]
}

View File

@@ -19,7 +19,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 10,
version: 13,
blacklist: ['runtime'],
migrate
},

View File

@@ -18,7 +18,7 @@ const initialState: LlmState = {
name: 'OpenAI',
apiKey: '',
apiHost: 'https://api.openai.com',
models: SYSTEM_MODELS.openai.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.openai.filter((m) => m.enabled),
isSystem: true,
enabled: true
},
@@ -27,7 +27,7 @@ const initialState: LlmState = {
name: 'Silicon',
apiKey: '',
apiHost: 'https://api.siliconflow.cn',
models: SYSTEM_MODELS.silicon.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.silicon.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -36,7 +36,7 @@ const initialState: LlmState = {
name: 'deepseek',
apiKey: '',
apiHost: 'https://api.deepseek.com',
models: SYSTEM_MODELS.deepseek.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.deepseek.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -45,7 +45,7 @@ const initialState: LlmState = {
name: 'Yi',
apiKey: '',
apiHost: 'https://api.lingyiwanwu.com',
models: SYSTEM_MODELS.yi.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.yi.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -54,7 +54,7 @@ const initialState: LlmState = {
name: 'ZhiPu',
apiKey: '',
apiHost: 'https://open.bigmodel.cn/api/paas/v4/',
models: SYSTEM_MODELS.zhipu.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.zhipu.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -63,7 +63,43 @@ const initialState: LlmState = {
name: 'Moonshot AI',
apiKey: '',
apiHost: 'https://api.moonshot.cn',
models: SYSTEM_MODELS.moonshot.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.moonshot.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
{
id: 'baichuan',
name: 'BAICHUAN AI',
apiKey: '',
apiHost: 'https://api.baichuan-ai.com',
models: SYSTEM_MODELS.baichuan.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
{
id: 'dashscope',
name: 'DashScope',
apiKey: '',
apiHost: 'https://dashscope.aliyuncs.com/compatible-mode/v1/',
models: SYSTEM_MODELS.dashscope.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
{
id: 'anthropic',
name: 'Anthropic',
apiKey: '',
apiHost: 'https://api.anthropic.com/',
models: SYSTEM_MODELS.anthropic.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
{
id: 'aihubmix',
name: 'AiHubMix',
apiKey: '',
apiHost: 'https://aihubmix.com',
models: SYSTEM_MODELS.aihubmix.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -72,7 +108,7 @@ const initialState: LlmState = {
name: 'OpenRouter',
apiKey: '',
apiHost: 'https://openrouter.ai/api/v1/',
models: SYSTEM_MODELS.openrouter.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.openrouter.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -81,7 +117,7 @@ const initialState: LlmState = {
name: 'Groq',
apiKey: '',
apiHost: 'https://api.groq.com/openai',
models: SYSTEM_MODELS.groq.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.groq.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
@@ -93,15 +129,6 @@ const initialState: LlmState = {
models: [],
isSystem: true,
enabled: false
},
{
id: 'baichuan',
name: 'BAICHUAN AI',
apiKey: '',
apiHost: 'https://api.baichuan-ai.com',
models: SYSTEM_MODELS.baichuan.filter((m) => m.defaultEnabled),
isSystem: true,
enabled: false
}
]
}
@@ -119,8 +146,8 @@ const settingsSlice = createSlice({
addProvider: (state, action: PayloadAction<Provider>) => {
state.providers.push(action.payload)
},
removeProvider: (state, action: PayloadAction<{ id: string }>) => {
state.providers = state.providers.filter((p) => p.id !== action.payload.id && !p.isSystem)
removeProvider: (state, action: PayloadAction<Provider>) => {
state.providers = state.providers.filter((p) => p.id !== action.payload.id)
},
addModel: (state, action: PayloadAction<{ providerId: string; model: Model }>) => {
state.providers = state.providers.map((p) =>

View File

@@ -20,7 +20,7 @@ const migrate = createMigrate({
apiKey: '',
apiHost: 'https://api.lingyiwanwu.com',
isSystem: true,
models: SYSTEM_MODELS.yi.filter((m) => m.defaultEnabled)
models: SYSTEM_MODELS.yi.filter((m) => m.enabled)
}
]
}
@@ -40,7 +40,7 @@ const migrate = createMigrate({
apiKey: '',
apiHost: 'https://open.bigmodel.cn/api/paas/v4/',
isSystem: true,
models: SYSTEM_MODELS.zhipu.filter((m) => m.defaultEnabled)
models: SYSTEM_MODELS.zhipu.filter((m) => m.enabled)
}
]
}
@@ -80,7 +80,7 @@ const migrate = createMigrate({
apiKey: '',
apiHost: 'https://api.moonshot.cn',
isSystem: true,
models: SYSTEM_MODELS.moonshot.filter((m) => m.defaultEnabled)
models: SYSTEM_MODELS.moonshot.filter((m) => m.enabled)
}
]
}
@@ -99,7 +99,7 @@ const migrate = createMigrate({
name: 'OpenRouter',
apiKey: '',
apiHost: 'https://openrouter.ai/api/v1/',
models: SYSTEM_MODELS.openrouter.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.openrouter.filter((m) => m.enabled),
isSystem: true
}
]
@@ -150,7 +150,7 @@ const migrate = createMigrate({
...state.llm,
providers: state.llm.providers.map((provider) => {
if (provider.id === 'zhipu' && provider.models[0] && provider.models[0].id === 'llama3-70b-8192') {
provider.models = SYSTEM_MODELS.zhipu.filter((m) => m.defaultEnabled)
provider.models = SYSTEM_MODELS.zhipu.filter((m) => m.enabled)
}
return provider
})
@@ -170,13 +170,79 @@ const migrate = createMigrate({
name: 'BAICHUAN AI',
apiKey: '',
apiHost: 'https://api.baichuan-ai.com',
models: SYSTEM_MODELS.baichuan.filter((m) => m.defaultEnabled),
models: SYSTEM_MODELS.baichuan.filter((m) => m.enabled),
isSystem: true,
enabled: false
}
]
}
}
},
// @ts-ignore store type is unknown
'11': (state: RootState) => {
return {
...state,
llm: {
...state.llm,
providers: [
...state.llm.providers,
{
id: 'dashscope',
name: 'DashScope',
apiKey: '',
apiHost: 'https://dashscope.aliyuncs.com/compatible-mode/v1/',
models: SYSTEM_MODELS.dashscope.filter((m) => m.enabled),
isSystem: true,
enabled: false
},
{
id: 'anthropic',
name: 'Anthropic',
apiKey: '',
apiHost: 'https://api.anthropic.com/',
models: SYSTEM_MODELS.anthropic.filter((m) => m.enabled),
isSystem: true,
enabled: false
}
]
}
}
},
// @ts-ignore store type is unknown
'12': (state: RootState) => {
return {
...state,
llm: {
...state.llm,
providers: [
...state.llm.providers,
{
id: 'aihubmix',
name: 'AiHubMix',
apiKey: '',
apiHost: 'https://aihubmix.com',
models: SYSTEM_MODELS.aihubmix.filter((m) => m.enabled),
isSystem: true,
enabled: false
}
]
}
}
},
// @ts-ignore store type is unknown
'13': (state: RootState) => {
return {
...state,
assistants: {
...state.assistants,
defaultAssistant: {
...state.assistants.defaultAssistant,
name: ['Default Assistant', '默认助手'].includes(state.assistants.defaultAssistant.name)
? i18n.t(`assistant.default.name`)
: state.assistants.defaultAssistant.name
}
}
}
}
})

View File

@@ -7,6 +7,12 @@ export type Assistant = {
prompt: string
topics: Topic[]
model?: Model
settings?: AssistantSettings
}
export type AssistantSettings = {
contextCount: number
temperature: number
}
export type Message = {

View File

@@ -1,6 +1,9 @@
import { v4 as uuidv4 } from 'uuid'
import imageCompression from 'browser-image-compression'
import { Model } from '@renderer/types'
import { Assistant, AssistantSettings, Message, Model } from '@renderer/types'
import { GPTTokens } from 'gpt-tokens'
import { DEFAULT_CONEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
import { takeRight } from 'lodash'
export const runAsyncFunction = async (fn: () => void) => {
await fn()
@@ -108,3 +111,90 @@ export async function isDev() {
const isProd = await isProduction()
return !isProd
}
/**
 * Extract a human-readable message from an arbitrary error value.
 *
 * Handles plain strings, Error-like objects with a `message`, and wrapped
 * errors where the real cause lives in an `error` property (nested messages
 * take precedence over outer ones, matching the original lookup order).
 *
 * @param error - anything thrown or returned by an API client
 * @param depth - internal recursion counter; callers omit it. Guards against
 *                self-referential `error.error` chains, which previously
 *                caused unbounded recursion (stack overflow).
 * @returns the extracted message, or '' when none can be found
 */
export function getErrorMessage(error: any, depth: number = 0): string {
  // Bail out on nullish input or a pathological (possibly cyclic) chain
  if (!error || depth > 8) {
    return ''
  }
  if (typeof error === 'string') {
    return error
  }
  // Unwrap nested errors first so the innermost message wins
  if (error?.error) {
    return getErrorMessage(error.error, depth + 1)
  }
  if (error?.message) {
    return error.message
  }
  return ''
}
/**
 * Strip every single-quote and double-quote character from a string.
 * (Previously the parameter was implicitly `any`.)
 */
export function removeQuotes(str: string): string {
  return str.replace(/['"]+/g, '')
}
/**
 * Deterministically derive a `#rrggbb` hex color from a single character,
 * seeding a linear congruential generator with the character's code unit.
 *
 * Fix: the original returned the literal string "#NaNNaNNaN" for an empty
 * input, because `''.charCodeAt(0)` is NaN; we now fall back to black.
 *
 * @param char - a (non-empty) character; longer strings use the first code unit
 * @returns a lowercase hex color string, e.g. "#3fa2c1"
 */
export function generateColorFromChar(char: string): string {
  // Guard: charCodeAt(0) on an empty string yields NaN, which would
  // otherwise propagate into every channel of the color string.
  if (!char) {
    return '#000000'
  }
  // Use the character's Unicode value as the random seed
  const seed = char.charCodeAt(0)
  // LCG parameters (Numerical Recipes constants)
  const a = 1664525
  const c = 1013904223
  const m = Math.pow(2, 32)
  // Generate three chained pseudo-random values for R, G and B
  let r = (a * seed + c) % m
  let g = (a * r + c) % m
  let b = (a * g + c) % m
  // Scale each value to an integer in [0, 255]
  r = Math.floor((r / m) * 256)
  g = Math.floor((g / m) * 256)
  b = Math.floor((b / m) * 256)
  // Format as a two-digit, zero-padded hex triple
  const toHex = (v: number) => v.toString(16).padStart(2, '0')
  return `#${toHex(r)}${toHex(g)}${toHex(b)}`
}
/**
 * Return the first Unicode character of a string ('' for an empty string).
 *
 * Iterates by code point (for...of) rather than indexing, so surrogate
 * pairs (e.g. emoji) are returned whole instead of as a half-pair.
 * Fix: the original could fall off the end with an implicit `undefined`
 * return (widening the type to `string | undefined`); params were untyped.
 */
export function getFirstCharacter(str: string): string {
  for (const char of str) {
    return char
  }
  return ''
}
// Resolve the effective settings for an assistant, falling back to the
// app-wide defaults when the assistant has no explicit overrides.
// NOTE(review): DEFAULT_CONEXTCOUNT is spelled as imported from constants
// (likely a typo for CONTEXTCOUNT upstream) — renaming must happen there.
export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
const contextCount = assistant.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
return {
// A stored contextCount of exactly 20 is mapped to 100000 — appears to be
// a legacy "maximum context" sentinel; TODO confirm before relying on it.
contextCount: contextCount === 20 ? 100000 : contextCount,
temperature: assistant.settings?.temperature ?? DEFAULT_TEMPERATURE
}
}
/**
 * Estimate the token count of a single user message with the gpt-4o
 * tokenizer. The subtracted 7 appears to be the fixed chat-format
 * overhead GPTTokens adds for a one-message conversation — TODO confirm
 * against the gpt-tokens documentation.
 */
export function estimateInputTokenCount(text: string) {
  const counter = new GPTTokens({
    messages: [{ role: 'user', content: text }],
    model: 'gpt-4o'
  })
  return counter.usedTokens - 7
}
/**
 * Estimate the token count of the assistant's system prompt plus the most
 * recent `contextCount` messages, using the gpt-4o tokenizer. The
 * subtracted 7 presumably matches the fixed chat-format overhead — TODO
 * confirm against the gpt-tokens documentation.
 */
export function estimateHistoryTokenCount(assistant: Assistant, msgs: Message[]) {
  const { contextCount } = getAssistantSettings(assistant)
  // Only the role/content pairs matter for tokenization
  const recent = takeRight(msgs, contextCount).map(({ role, content }) => ({ role, content }))
  const counter = new GPTTokens({
    model: 'gpt-4o',
    messages: [{ role: 'system', content: assistant.prompt }, ...recent]
  })
  return counter.usedTokens - 7
}

View File

@@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta
name="description"
content="Cherry Studio 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
<meta name="keywords" content="Cherry Studio, AI 助手, GPT 客户端, 多模型, iOS, macOS, Windows, LLM" />
content="Cherry Studio AI 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
<meta name="keywords" content="Cherry Studio AI, AI 助手, GPT 客户端, 多模型, iOS, macOS, Windows, LLM" />
<meta name="author" content="kangfenmao" />
<link rel="canonical" href="https://easys.run/cherry-studio" />
<link rel="icon" type="image/png" href="https://easys.run/cherry-studio/logo.png" />
@@ -14,24 +14,24 @@
<!-- Open Graph / Facebook -->
<meta property="og:type" content="website" />
<meta property="og:url" content="https://easys.run/cherry-studio" />
<meta property="og:title" content="Cherry Studio - 多模型 AI 助手" />
<meta property="og:title" content="Cherry Studio AI - 多模型 AI 助手" />
<meta
property="og:description"
content="Cherry Studio 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
content="Cherry Studio AI 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
<meta property="og:image" content="https://github.com/kangfenmao/cherry-studio/blob/main/build/icon.png?raw=true" />
<!-- Twitter -->
<meta property="twitter:card" content="summary_large_image" />
<meta property="twitter:url" content="https://x.com/kangfenmao" />
<meta property="twitter:title" content="Cherry Studio - 多模型 AI 助手" />
<meta property="twitter:title" content="Cherry Studio AI - 多模型 AI 助手" />
<meta
property="twitter:description"
content="Cherry Studio 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
content="Cherry Studio AI 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。" />
<meta
property="twitter:image"
content="https://github.com/kangfenmao/cherry-studio/blob/main/build/icon.png?raw=true" />
<title>Cherry Studio - 多模型AI助手</title>
<title>Cherry Studio AI - 多模型AI助手</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans',
@@ -111,13 +111,13 @@
</head>
<body>
<a href="https://github.com/kangfenmao/cherry-studio" target="_blank">
<img src="https://easys.run/cherry-studio/logo.png" alt="Cherry Studio Logo" class="logo" />
<img src="https://easys.run/cherry-studio/logo.png" alt="Cherry Studio AI Logo" class="logo" />
</a>
<h1>Cherry Studio</h1>
<h1>Cherry Studio AI</h1>
<p class="description">Windows/macOS GPT 客户端</p>
<div class="download-buttons">
<a
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.2.6/Cherry-Studio-0.2.6-x64.dmg"
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.3.0/Cherry-Studio-0.3.0-x64.dmg"
class="download-btn">
<svg viewBox="0 0 384 512" width="24" height="24">
<path
@@ -127,7 +127,7 @@
macOS Intel
</a>
<a
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.2.6/Cherry-Studio-0.2.6-arm64.dmg"
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.3.0/Cherry-Studio-0.3.0-arm64.dmg"
class="download-btn">
<svg viewBox="0 0 384 512" width="24" height="24">
<path
@@ -137,7 +137,7 @@
macOS Apple Silicon
</a>
<a
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.2.6/Cherry-Studio-0.2.6-setup.exe"
href="https://github.com/kangfenmao/cherry-studio/releases/download/v0.3.0/Cherry-Studio-0.3.0-setup.exe"
class="download-btn">
<svg viewBox="0 0 448 512" width="24" height="24">
<path
@@ -148,8 +148,8 @@
</a>
</div>
<p class="new-app">
🎉 <a href="https://github.com/kangfenmao/cherry-studio" target="_blank">Cherry Studio</a> 最新版本
<a href="https://github.com/kangfenmao/cherry-studio/releases/tag/v0.2.6" target="_blank">v0.2.6</a> 发布啦!
🎉 <a href="https://github.com/kangfenmao/cherry-studio" target="_blank">Cherry Studio AI</a> 最新版本
<a href="https://github.com/kangfenmao/cherry-studio/releases/tag/v0.3.0" target="_blank">v0.3.0</a> 发布啦!
</p>
<div class="footer">
<a href="https://github.com/kangfenmao/cherry-studio" target="_blank">开源</a> |
@@ -161,10 +161,10 @@
{
"@context": "https://schema.org",
"@type": "SoftwareApplication",
"name": "Cherry Studio",
"name": "Cherry Studio AI",
"applicationCategory": "UtilitiesApplication",
"operatingSystem": "iOS, macOS, Windows",
"description": "Cherry Studio 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。",
"description": "Cherry Studio AI 是一款强大的多模型 AI 助手,支持 iOS、macOS 和 Windows 平台。快速切换多个先进的 LLM 模型,提升工作学习效率。",
"offers": {
"@type": "Offer",
"price": "0",

View File

@@ -87,6 +87,22 @@ __metadata:
languageName: node
linkType: hard
"@anthropic-ai/sdk@npm:^0.24.3":
version: 0.24.3
resolution: "@anthropic-ai/sdk@npm:0.24.3"
dependencies:
"@types/node": "npm:^18.11.18"
"@types/node-fetch": "npm:^2.6.4"
abort-controller: "npm:^3.0.0"
agentkeepalive: "npm:^4.2.1"
form-data-encoder: "npm:1.7.2"
formdata-node: "npm:^4.3.2"
node-fetch: "npm:^2.6.7"
web-streams-polyfill: "npm:^3.2.1"
checksum: 10c0/1c73c3df9637522da548d2cddfaf89513dac935c5cdb7c0b3db1c427c069a0de76df935bd189e477822063e9f944360e2d059827d5be4dca33bd388c61e97a30
languageName: node
linkType: hard
"@babel/code-frame@npm:^7.23.5, @babel/code-frame@npm:^7.24.2":
version: 7.24.2
resolution: "@babel/code-frame@npm:7.24.2"
@@ -3391,6 +3407,7 @@ __metadata:
version: 0.0.0-use.local
resolution: "cherry-studio@workspace:."
dependencies:
"@anthropic-ai/sdk": "npm:^0.24.3"
"@electron-toolkit/eslint-config-prettier": "npm:^2.0.0"
"@electron-toolkit/eslint-config-ts": "npm:^1.0.1"
"@electron-toolkit/preload": "npm:^3.0.0"
@@ -3423,6 +3440,7 @@ __metadata:
eslint-plugin-react: "npm:^7.34.3"
eslint-plugin-react-hooks: "npm:^4.6.2"
eslint-plugin-unused-imports: "npm:^4.0.0"
gpt-tokens: "npm:^1.3.6"
i18next: "npm:^23.11.5"
localforage: "npm:^1.10.0"
lodash: "npm:^4.17.21"
@@ -3780,6 +3798,13 @@ __metadata:
languageName: node
linkType: hard
"decimal.js@npm:^10.4.3":
version: 10.4.3
resolution: "decimal.js@npm:10.4.3"
checksum: 10c0/6d60206689ff0911f0ce968d40f163304a6c1bc739927758e6efc7921cfa630130388966f16bf6ef6b838cb33679fbe8e7a78a2f3c478afce841fd55ac8fb8ee
languageName: node
linkType: hard
"decode-named-character-reference@npm:^1.0.0":
version: 1.0.2
resolution: "decode-named-character-reference@npm:1.0.2"
@@ -5224,6 +5249,17 @@ __metadata:
languageName: node
linkType: hard
"gpt-tokens@npm:^1.3.6":
version: 1.3.6
resolution: "gpt-tokens@npm:1.3.6"
dependencies:
decimal.js: "npm:^10.4.3"
js-tiktoken: "npm:^1.0.10"
openai-chat-tokens: "npm:^0.2.8"
checksum: 10c0/0efc1da655a16a306df4f17646832693d7cbec569fe44d4fcc9d4a605f8614f1eb974e04b24a4e8c71095fe0fab6de7251a34c6e2d6805a5e1b5811eea37437b
languageName: node
linkType: hard
"graceful-fs@npm:^4.1.6, graceful-fs@npm:^4.2.0, graceful-fs@npm:^4.2.6":
version: 4.2.11
resolution: "graceful-fs@npm:4.2.11"
@@ -6050,6 +6086,15 @@ __metadata:
languageName: node
linkType: hard
"js-tiktoken@npm:^1.0.10, js-tiktoken@npm:^1.0.7":
version: 1.0.12
resolution: "js-tiktoken@npm:1.0.12"
dependencies:
base64-js: "npm:^1.5.1"
checksum: 10c0/7afb4826e21342386a1884754fbc1c1828f948c4dd0ab093bf778d1323e65343bd5343d15f7cda46af396f1fe4a0297739936149b7c40a0601eefe3fcaef8727
languageName: node
linkType: hard
"js-tokens@npm:^3.0.0 || ^4.0.0, js-tokens@npm:^4.0.0":
version: 4.0.0
resolution: "js-tokens@npm:4.0.0"
@@ -7177,6 +7222,15 @@ __metadata:
languageName: node
linkType: hard
"openai-chat-tokens@npm:^0.2.8":
version: 0.2.8
resolution: "openai-chat-tokens@npm:0.2.8"
dependencies:
js-tiktoken: "npm:^1.0.7"
checksum: 10c0/b415fda706b408f29b4584998990f29ad7f80f2ac1e84179a0976742ba8a80859fedeae5745a9bfe73443d95960b77328610074952ad198a18bc0e5c0ceb5b7b
languageName: node
linkType: hard
"openai@npm:^4.52.1":
version: 4.52.1
resolution: "openai@npm:4.52.1"