Compare commits

...

16 Commits

Author SHA1 Message Date
kangfenmao
74d5355e02 chore(version): 0.7.8 2024-09-25 12:52:42 +08:00
kangfenmao
bb137cc799 feat: auto-scroll to bottom on new messages, return null for empty suggestions
- Added functionality to automatically scroll to the bottom of the messages container upon receiving new messages.
- Return null when suggestions list is empty instead of displaying an empty container.
2024-09-25 12:33:03 +08:00
kangfenmao
6aee3d8088 feat: add hugging chat minapp 2024-09-25 12:09:51 +08:00
kangfenmao
51cedcb644 fix: auto scroll to bottom #120
close #120
2024-09-25 11:36:44 +08:00
kangfenmao
270c754c00 refactor: ant design styles separation 2024-09-25 11:29:38 +08:00
kangfenmao
8f68aca24c feat: add streaming output options #93 2024-09-25 11:23:45 +08:00
kangfenmao
93710c1e78 fix: gemini safety settings #110 2024-09-25 09:49:19 +08:00
kangfenmao
ac2a3fd38e docs(README.md): update readme 2024-09-25 00:26:14 +08:00
kangfenmao
750f1cd63d feat: new providers logos 2024-09-25 00:21:20 +08:00
kangfenmao
4413528d0e feat: add ocoolai provider 2024-09-25 00:21:20 +08:00
kangfenmao
e8b992c289 fix: transparent window 2024-09-25 00:21:20 +08:00
kangfenmao
938ff38aeb fix: windows ico icon 2024-09-25 00:20:10 +08:00
亢奋猫
77cb534e16 Update README.md 2024-09-25 00:20:10 +08:00
亢奋猫
58513b63a3 docs(README.md): add contributors 2024-09-25 00:20:10 +08:00
牡丹凤凰
712e7ff104 docs(README.md): add banners
Delete docs/images/1.png

update
2024-09-25 00:19:52 +08:00
kangfenmao
c68d283766 feat: improved layout and accessibility
- Added hidden overflow to HomePage content container for improved layout.
- Changed the upload list type to text to improve accessibility.
2024-09-25 00:00:34 +08:00
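
The two behavioral commits above (auto-scroll on new messages, hiding empty suggestions) are easiest to read as one pattern: scroll the container once when a message is sent, and return null instead of an empty wrapper. Below is a minimal sketch in the renderer's style; component and prop names are illustrative placeholders, not Cherry Studio's actual code (the real changes are in the Messages and Suggestions diffs further down).

// Sketch only: names here are placeholders, not the project's components.
import { FC, useRef } from 'react'

interface ChatMessage {
  id: string
  content: string
}

export const MessagesSketch: FC<{ messages: ChatMessage[]; onSend: (text: string) => Promise<void> }> = ({
  messages,
  onSend
}) => {
  const containerRef = useRef<HTMLDivElement>(null)

  const handleSend = async (text: string) => {
    await onSend(text)
    // Scroll once, right after the message is sent, instead of on every `messages` change.
    containerRef.current?.scrollTo({ top: containerRef.current.scrollHeight, behavior: 'auto' })
  }

  return (
    <div ref={containerRef} style={{ overflowY: 'auto' }}>
      {messages.map((m) => (
        <div key={m.id}>{m.content}</div>
      ))}
      <button onClick={() => handleSend('hello')}>Send</button>
    </div>
  )
}

export const SuggestionsSketch: FC<{ suggestions: string[] }> = ({ suggestions }) => {
  // Render nothing when there are no suggestions, rather than an empty padded container.
  if (suggestions.length === 0) {
    return null
  }
  return (
    <ul>
      {suggestions.map((s) => (
        <li key={s}>{s}</li>
      ))}
    </ul>
  )
}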
62 changed files with 2339 additions and 371 deletions

.gitignore (vendored, 1 change)
View File

@@ -41,6 +41,7 @@ Thumbs.db
node_modules
dist
out
build/icons
# ENV
.env

View File

@@ -1,3 +1,10 @@
<div align="center">
<a href="https://github.com/kangfenmao/cherry-studio/releases">
<img src="https://github.com/user-attachments/assets/7b4f2f78-5cbe-4be8-9aec-f98d8405a505" alt="banner" />
</a>
English | <a href="./docs/README_zh.md">中文</a>
</div>
# 🍒 Cherry Studio
Cherry Studio is a desktop client that supports multiple LLM providers, available on Windows, Mac and Linux.
@@ -57,6 +64,12 @@ $ yarn build:linux
[![Star History Chart](https://api.star-history.com/svg?repos=kangfenmao/cherry-studio&type=Timeline)](https://star-history.com/#kangfenmao/cherry-studio&Timeline)
# 🚀 Contributors
<a href="https://github.com/kangfenmao/cherry-studio/graphs/contributors">
<img src="https://contrib.rocks/image?repo=kangfenmao/cherry-studio" />
</a>
# 📃 License
[LICENSE](./LICENSE)

Binary file not shown. (Before: 39 KiB, After: 353 KiB)

Binary file not shown. (Before: 195 KiB, After: 210 KiB)

BIN build/logo.png (new file)
Binary file not shown. (After: 195 KiB)

View File

@@ -1,8 +1,16 @@
<div align="center">
<a href="https://github.com/kangfenmao/cherry-studio/releases">
<img src="https://github.com/user-attachments/assets/995910f3-177a-4d1e-97ea-04e3b009ba36" alt="banner"/>
</a>
English / <a href="https://github.com/kangfenmao/cherry-studio">中文</a>
</div>
# 🍒 Cherry Studio
Cherry Studio 是一款跨平台桌面客户端，支持多个大语言模型（LLM）服务商，兼容 Windows、Mac 和 Linux 系统，并拥有丰富的个性化选项与领先的功能设计。
# 🌠 界面
<img width="1582" alt="Xnip2024-09-23_15-01-53" src="https://github.com/user-attachments/assets/554aa31b-87b6-49fe-877d-af313e1608b0">
<img width="1582" alt="Xnip2024-09-23_15-02-27" src="https://github.com/user-attachments/assets/f43fb4c8-194a-4f46-8575-6db2bd136cb9">
<img width="1582" alt="Xnip2024-09-23_16-12-19" src="https://github.com/user-attachments/assets/82ce3cc1-5a0b-49aa-9fe4-0376d34be1f8">
@@ -10,9 +18,10 @@ Cherry Studio 是一款跨平台桌面客户端,支持多个大语言模型(
<img width="1582" alt="Xnip2024-09-23_16-11-50" src="https://github.com/user-attachments/assets/7413384e-a7c7-4525-96ea-ccd395d7e51a">
<img width="1582" alt="Xnip2024-09-23_16-12-59" src="https://github.com/user-attachments/assets/894b5e97-569f-4471-813c-c48d19455215">
# 🌟 特性
## 😌 轻松上手
🍏 Windows、Mac、Linux 跨平台支持
📦开箱即用,无需 Python 与 Docker
@@ -20,12 +29,15 @@ Cherry Studio 是一款跨平台桌面客户端,支持多个大语言模型(
🤝简洁、友好的界面与交互设计
## 🛠️多样化的 LLM 服务模式支持
☁️ 全面覆盖 LLM 云服务，支持自定义 api key 与模型管理（OpenAI、Gemini、Anthropic、硅基流动...）
🔗汇聚流行的 AI Web 服务，并计划通过功能增强提升体验（Claude、Perplexity、Poe、腾讯元宝、知乎直答...）
💻支持 Ollama 运行本地模型
## 📲个性化的功能体验
📄完整的 Markdown 与 Mermaid 渲染支持
🤖使用与创建智能体提升工作效率

View File

@@ -21,6 +21,8 @@ nsis:
shortcutName: ${productName}
uninstallDisplayName: ${productName}
createDesktopShortcut: always
allowToChangeInstallationDirectory: true
oneClick: false
mac:
entitlementsInherit: build/entitlements.mac.plist
extendInfo:
@@ -63,9 +65,15 @@ afterSign: scripts/notarize.js
releaseInfo:
releaseNotes: |
本次更新:
增加流式输出开关
Windows 安装程序支持修改安装位置了
服务商和小程序图标更新
增加 ocoolAI 服务商
小程序增加 HuggingChat
Gemini 模型回复安全级别关闭
修复 macOS 切换窗口透明不生效问题
修复消息回复完成界面会自动滚动到最底部的问题
近期更新:
全新应用图标
模型图标更新
近期更新:
支持话题导出为图片
支持粘贴图片和文件到聊天输入框
支持将对话移动到其他智能体了
支持 Linux ARM 架构

View File

@@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "0.7.7",
"version": "0.7.8",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
@@ -26,7 +26,8 @@
"build:mac": "dotenv electron-vite build && electron-builder --mac --publish never",
"build:linux": "dotenv electron-vite build && electron-builder --linux --publish never",
"release": "node scripts/version.js",
"publish": "yarn release patch push"
"publish": "yarn release patch push",
"generate:icons": "electron-icon-builder --input=./build/logo.png --output=build"
},
"dependencies": {
"@electron-toolkit/preload": "^3.0.0",
@@ -62,6 +63,7 @@
"electron": "^28.3.3",
"electron-builder": "^24.9.1",
"electron-devtools-installer": "^3.2.0",
"electron-icon-builder": "^2.0.1",
"electron-vite": "^2.0.0",
"emittery": "^1.0.3",
"emoji-picker-element": "^1.22.1",

Binary file not shown. (After: 12 KiB)

Binary file not shown. (After: 3.2 KiB)

View File

@@ -0,0 +1,14 @@
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#FFD21E"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
<path
fill="#32343D"
d="M19.63 12.48c.37.14.52.9.9.7.71-.38.98-1.27.6-1.98a1.46 1.46 0 0 0-1.98-.61 1.47 1.47 0 0 0-.6 1.99c.17.34.74-.21 1.08-.1ZM12.72 12.48c-.37.14-.52.9-.9.7a1.47 1.47 0 0 1-.6-1.98 1.46 1.46 0 0 1 1.98-.61c.71.38.98 1.27.6 1.99-.18.34-.74-.21-1.08-.1ZM16.24 19.55c2.89 0 3.82-2.58 3.82-3.9 0-1.33-1.71.7-3.82.7-2.1 0-3.8-2.03-3.8-.7 0 1.32.92 3.9 3.8 3.9Z"
/>
<path
fill="#FF323D"
d="M18.56 18.8c-.57.44-1.33.75-2.32.75-.92 0-1.65-.27-2.2-.68.3-.63.87-1.11 1.55-1.32.12-.03.24.17.36.38.12.2.24.4.37.4s.26-.2.39-.4.26-.4.38-.36a2.56 2.56 0 0 1 1.47 1.23Z"
/>
</svg>

(After: 810 B)

Binary file not shown. (After: 2.0 KiB)

Binary file not shown. (Before: 9.3 KiB, After: 34 KiB)

Binary file not shown. (Before: 6.8 KiB)

Binary file not shown. (After: 4.2 KiB)

Binary file not shown. (After: 3.4 KiB)

Binary file not shown. (Before: 20 KiB, After: 22 KiB)

Binary file not shown. (Before: 12 KiB, After: 8.5 KiB)

Binary file not shown. (After: 11 KiB)

View File

@@ -1,3 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M16 0C7.16 0 0 7.16 0 16C0 23.08 4.58 29.06 10.94 31.18C11.74 31.32 12.04 30.84 12.04 30.42C12.04 30.04 12.02 28.78 12.02 27.44C8 28.18 6.96 26.46 6.64 25.56C6.46 25.1 5.68 23.68 5 23.3C4.44 23 3.64 22.26 4.98 22.24C6.24 22.22 7.14 23.4 7.44 23.88C8.88 26.3 11.18 25.62 12.1 25.2C12.24 24.16 12.66 23.46 13.12 23.06C9.56 22.66 5.84 21.28 5.84 15.16C5.84 13.42 6.46 11.98 7.48 10.86C7.32 10.46 6.76 8.82 7.64 6.62C7.64 6.62 8.98 6.2 12.04 8.26C13.32 7.9 14.68 7.72 16.04 7.72C17.4 7.72 18.76 7.9 20.04 8.26C23.1 6.18 24.44 6.62 24.44 6.62C25.32 8.82 24.76 10.46 24.6 10.86C25.62 11.98 26.24 13.4 26.24 15.16C26.24 21.3 22.5 22.66 18.94 23.06C19.52 23.56 20.02 24.52 20.02 26.02C20.02 28.16 20 29.88 20 30.42C20 30.84 20.3 31.34 21.1 31.18C27.42 29.06 32 23.06 32 16C32 7.16 24.84 0 16 0V0Z" fill="#24292E"/>
</svg>

(Before: 959 B)

Binary file not shown. (After: 7.2 KiB)

Binary file not shown. (Before: 9.9 KiB, After: 10 KiB)

Binary file not shown. (After: 5.7 KiB)

Binary file not shown. (Before: 6.6 KiB, After: 5.0 KiB)

Binary file not shown. (After: 7.8 KiB)

Binary file not shown. (Before: 2.1 KiB, After: 7.7 KiB)

Binary file not shown. (After: 1.5 KiB)

Binary file not shown. (Before: 2.1 KiB)

Binary file not shown. (After: 6.2 KiB)

Binary file not shown. (Before: 12 KiB)

Binary file not shown. (After: 5.3 KiB)

View File

@@ -0,0 +1,73 @@
#inputbar .ant-input {
resize: none;
}
.chat-nav-dropdown {
.ant-dropdown-menu {
padding-bottom: 12px;
}
}
.ant-segmented-group {
gap: 4px;
}
.minapp-drawer {
max-width: calc(100vw - var(--sidebar-width));
.ant-drawer-content-wrapper {
box-shadow: none;
}
.ant-drawer-header {
position: absolute;
-webkit-app-region: drag;
min-height: calc(var(--navbar-height) + 0.5px);
width: calc(100vw - var(--sidebar-width));
margin-top: -0.5px;
border-bottom: none;
}
.ant-drawer-body {
padding: 0;
margin-top: var(--navbar-height);
overflow: hidden;
@extend #content-container;
}
.minapp-mask {
background-color: transparent !important;
}
}
.ant-drawer-header {
-webkit-app-region: no-drag;
}
.segmented-tab {
.ant-segmented-item-selected {
background-color: var(--color-background-mute);
}
.ant-segmented-item-label {
align-items: center;
display: flex;
flex-direction: row;
justify-content: center;
font-size: 13px;
}
.iconfont {
font-size: 13px;
margin-left: -2px;
}
.anticon-setting {
font-size: 12px;
}
.icon-business-smart-assistant {
margin-right: -2px;
}
.ant-segmented-item-icon + * {
margin-left: 4px;
}
}
.message-attachments {
.ant-upload-list-item:hover {
background-color: initial !important;
}
}

View File

@@ -1,5 +1,6 @@
@import './markdown.scss';
@import './scrollbar.scss';
@import './ant.scss';
@import '../fonts/icon-fonts/iconfont.css';
@import '../fonts/ubuntu/ubuntu.css';
@@ -41,7 +42,7 @@
--color-hover: rgba(40, 40, 40, 1);
--color-active: rgba(55, 55, 55, 1);
--navbar-background-mac: rgba(30, 30, 30, 0.4);
--navbar-background-mac: rgba(30, 30, 30, 0.6);
--navbar-background: rgba(30, 30, 30);
--navbar-height: 40px;
@@ -92,7 +93,7 @@ body[theme-mode='light'] {
--color-hover: var(--color-white-mute);
--color-active: var(--color-white-soft);
--navbar-background-mac: rgba(255, 255, 255, 0.4);
--navbar-background-mac: rgba(255, 255, 255, 0.6);
--navbar-background: rgba(255, 255, 255);
}
@@ -171,16 +172,6 @@ body[os='windows'] {
}
}
#inputbar .ant-input {
resize: none;
}
.chat-nav-dropdown {
.ant-dropdown-menu {
padding-bottom: 12px;
}
}
.loader {
width: 16px;
height: 16px;
@@ -193,10 +184,6 @@ body[os='windows'] {
animation: flash 0.5s ease-out infinite alternate;
}
.ant-segmented-group {
gap: 4px;
}
.drag {
-webkit-app-region: drag;
}
@@ -210,57 +197,3 @@ body[os='windows'] {
overflow: hidden;
text-overflow: ellipsis;
}
.minapp-drawer {
max-width: calc(100vw - var(--sidebar-width));
.ant-drawer-content-wrapper {
box-shadow: none;
}
.ant-drawer-header {
position: absolute;
-webkit-app-region: drag;
min-height: calc(var(--navbar-height) + 0.5px);
width: calc(100vw - var(--sidebar-width));
margin-top: -0.5px;
border-bottom: none;
}
.ant-drawer-body {
padding: 0;
margin-top: var(--navbar-height);
overflow: hidden;
@extend #content-container;
}
.minapp-mask {
background-color: transparent !important;
}
}
.ant-drawer-header {
-webkit-app-region: no-drag;
}
.segmented-tab {
.ant-segmented-item-selected {
background-color: var(--color-background-mute);
}
.ant-segmented-item-label {
align-items: center;
display: flex;
flex-direction: row;
justify-content: center;
font-size: 13px;
}
.iconfont {
font-size: 13px;
margin-left: -2px;
}
.anticon-setting {
font-size: 12px;
}
.icon-business-smart-assistant {
margin-right: -2px;
}
.ant-segmented-item-icon + * {
margin-left: 4px;
}
}

View File

@@ -99,11 +99,11 @@ const Container = styled.div`
`
const AvatarImg = styled(Avatar)`
width: 32px;
height: 32px;
width: 31px;
height: 31px;
background-color: var(--color-background-soft);
margin-bottom: ${isMac ? '12px' : '12px'};
margin-top: ${isMac ? '-5px' : '2px'};
margin-top: ${isMac ? '0px' : '2px'};
border: none;
cursor: pointer;
`

View File

@@ -1,103 +0,0 @@
import AiAssistantAppLogo from '@renderer/assets/images/apps/360-ai.png'
import AiSearchAppLogo from '@renderer/assets/images/apps/ai-search.png'
import BaiduAiAppLogo from '@renderer/assets/images/apps/baidu-ai.png'
import DevvAppLogo from '@renderer/assets/images/apps/devv.png'
import MetasoAppLogo from '@renderer/assets/images/apps/metaso.webp'
import PerplexityAppLogo from '@renderer/assets/images/apps/perplexity.webp'
import PoeAppLogo from '@renderer/assets/images/apps/poe.webp'
import SensetimeAppLogo from '@renderer/assets/images/apps/sensetime.png'
import SparkDeskAppLogo from '@renderer/assets/images/apps/sparkdesk.png'
import TiangongAiLogo from '@renderer/assets/images/apps/tiangong.png'
import TencentYuanbaoAppLogo from '@renderer/assets/images/apps/yuanbao.png'
import ZhihuAppLogo from '@renderer/assets/images/apps/zhihu.png'
import MinApp from '@renderer/components/MinApp'
import { PROVIDER_CONFIG } from '@renderer/config/provider'
import { MinAppType } from '@renderer/types'
const _apps: MinAppType[] = [
{
id: '360-ai-so',
name: '360AI搜索',
logo: AiSearchAppLogo,
url: 'https://so.360.com/'
},
{
id: '360-ai-bot',
name: 'AI 助手',
logo: AiAssistantAppLogo,
url: 'https://bot.360.com/'
},
{
id: 'baidu-ai-chat',
name: '文心一言',
logo: BaiduAiAppLogo,
url: 'https://yiyan.baidu.com/'
},
{
id: 'tencent-yuanbao',
name: '腾讯元宝',
logo: TencentYuanbaoAppLogo,
url: 'https://yuanbao.tencent.com/chat'
},
{
id: 'sensetime-chat',
name: '商量',
logo: SensetimeAppLogo,
url: 'https://chat.sensetime.com/wb/chat'
},
{
id: 'spark-desk',
name: 'SparkDesk',
logo: SparkDeskAppLogo,
url: 'https://xinghuo.xfyun.cn/desk'
},
{
id: 'metaso',
name: '秘塔AI搜索',
logo: MetasoAppLogo,
url: 'https://metaso.cn/'
},
{
id: 'poe',
name: 'Poe',
logo: PoeAppLogo,
url: 'https://poe.com'
},
{
id: 'perplexity',
name: 'perplexity',
logo: PerplexityAppLogo,
url: 'https://www.perplexity.ai/'
},
{
id: 'devv',
name: 'DEVV_',
logo: DevvAppLogo,
url: 'https://devv.ai/'
},
{
id: 'tiangong-ai',
name: '天工AI',
logo: TiangongAiLogo,
url: 'https://www.tiangong.cn/'
},
{
id: 'zhihu-zhiada',
name: '知乎直答',
logo: ZhihuAppLogo,
url: 'https://zhida.zhihu.com/'
}
]
export function getAllMinApps() {
const list: MinAppType[] = (Object.entries(PROVIDER_CONFIG) as any[])
.filter(([, config]) => config.app)
.map(([key, config]) => ({ id: key, ...config.app }))
.concat(_apps)
return list
}
export function startMinAppById(id: string) {
const app = getAllMinApps().find((app) => app?.id === id)
app && MinApp.start(app)
}

View File

@@ -0,0 +1,212 @@
import AiAssistantAppLogo from '@renderer/assets/images/apps/360-ai.png'
import AiSearchAppLogo from '@renderer/assets/images/apps/ai-search.png'
import BaiduAiAppLogo from '@renderer/assets/images/apps/baidu-ai.png'
import BaicuanAppLogo from '@renderer/assets/images/apps/baixiaoying.webp'
import DevvAppLogo from '@renderer/assets/images/apps/devv.png'
import DoubaoAppLogo from '@renderer/assets/images/apps/doubao.png'
import GeminiAppLogo from '@renderer/assets/images/apps/gemini.png'
import HuggingChatLogo from '@renderer/assets/images/apps/huggingchat.svg'
import KimiAppLogo from '@renderer/assets/images/apps/kimi.jpg'
import MetasoAppLogo from '@renderer/assets/images/apps/metaso.webp'
import PerplexityAppLogo from '@renderer/assets/images/apps/perplexity.webp'
import PoeAppLogo from '@renderer/assets/images/apps/poe.webp'
import SensetimeAppLogo from '@renderer/assets/images/apps/sensetime.png'
import SparkDeskAppLogo from '@renderer/assets/images/apps/sparkdesk.png'
import TiangongAiLogo from '@renderer/assets/images/apps/tiangong.png'
import WanZhiAppLogo from '@renderer/assets/images/apps/wanzhi.jpg'
import TencentYuanbaoAppLogo from '@renderer/assets/images/apps/yuanbao.png'
import YuewenAppLogo from '@renderer/assets/images/apps/yuewen.png'
import ZhihuAppLogo from '@renderer/assets/images/apps/zhihu.png'
import ClaudeAppLogo from '@renderer/assets/images/models/claude.png'
import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png'
import QwenModelLogo from '@renderer/assets/images/models/qwen.png'
import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
import MinApp from '@renderer/components/MinApp'
import { MinAppType } from '@renderer/types'
const _apps: MinAppType[] = [
{
id: 'openai',
name: 'ChatGPT',
url: 'https://chatgpt.com/',
logo: OpenAiProviderLogo,
bodered: true
},
{
id: 'gemini',
name: 'Gemini',
url: 'https://gemini.google.com/',
logo: GeminiAppLogo
},
{
id: 'silicon',
name: 'SiliconFlow',
url: 'https://cloud.siliconflow.cn/playground/chat',
logo: SiliconFlowProviderLogo
},
{
id: 'deepseek',
name: 'DeepSeek',
url: 'https://chat.deepseek.com/',
logo: DeepSeekProviderLogo
},
{
id: 'yi',
name: '万知',
url: 'https://www.wanzhi.com/',
logo: WanZhiAppLogo,
bodered: true
},
{
id: 'zhipu',
name: '智谱',
url: 'https://chatglm.cn/main/alltoolsdetail',
logo: ZhipuProviderLogo
},
{
id: 'moonshot',
name: 'Kimi',
url: 'https://kimi.moonshot.cn/',
logo: KimiAppLogo
},
{
id: 'baichuan',
name: '百小应',
url: 'https://ying.baichuan-ai.com/chat',
logo: BaicuanAppLogo
},
{
id: 'dashscope',
name: '通义千问',
url: 'https://tongyi.aliyun.com/qianwen/',
logo: QwenModelLogo
},
{
id: 'stepfun',
name: '跃问',
url: 'https://yuewen.cn/chats/new',
logo: YuewenAppLogo,
bodered: true
},
{
id: 'doubao',
name: '豆包',
url: 'https://www.doubao.com/chat/',
logo: DoubaoAppLogo
},
{
id: 'minimax',
name: '海螺',
url: 'https://hailuoai.com/',
logo: HailuoModelLogo
},
{
id: 'groq',
name: 'Groq',
url: 'https://chat.groq.com/',
logo: GroqProviderLogo
},
{
id: 'anthropic',
name: 'Claude',
url: 'https://claude.ai/',
logo: ClaudeAppLogo
},
{
id: '360-ai-so',
name: '360AI搜索',
logo: AiSearchAppLogo,
url: 'https://so.360.com/'
},
{
id: '360-ai-bot',
name: 'AI 助手',
logo: AiAssistantAppLogo,
url: 'https://bot.360.com/',
bodered: true
},
{
id: 'baidu-ai-chat',
name: '文心一言',
logo: BaiduAiAppLogo,
url: 'https://yiyan.baidu.com/'
},
{
id: 'tencent-yuanbao',
name: '腾讯元宝',
logo: TencentYuanbaoAppLogo,
url: 'https://yuanbao.tencent.com/chat',
bodered: true
},
{
id: 'sensetime-chat',
name: '商量',
logo: SensetimeAppLogo,
url: 'https://chat.sensetime.com/wb/chat',
bodered: true
},
{
id: 'spark-desk',
name: 'SparkDesk',
logo: SparkDeskAppLogo,
url: 'https://xinghuo.xfyun.cn/desk'
},
{
id: 'metaso',
name: '秘塔AI搜索',
logo: MetasoAppLogo,
url: 'https://metaso.cn/'
},
{
id: 'poe',
name: 'Poe',
logo: PoeAppLogo,
url: 'https://poe.com'
},
{
id: 'perplexity',
name: 'perplexity',
logo: PerplexityAppLogo,
url: 'https://www.perplexity.ai/'
},
{
id: 'devv',
name: 'DEVV_',
logo: DevvAppLogo,
url: 'https://devv.ai/'
},
{
id: 'tiangong-ai',
name: '天工AI',
logo: TiangongAiLogo,
url: 'https://www.tiangong.cn/',
bodered: true
},
{
id: 'zhihu-zhiada',
name: '知乎直答',
logo: ZhihuAppLogo,
url: 'https://zhida.zhihu.com/',
bodered: true
},
{
id: 'hugging-chat',
name: 'HuggingChat',
logo: HuggingChatLogo,
url: 'https://huggingface.co/chat/',
bodered: true
}
]
export function getAllMinApps() {
return _apps as MinAppType[]
}
export function startMinAppById(id: string) {
const app = getAllMinApps().find((app) => app?.id === id)
app && MinApp.start(app)
}

View File

@@ -260,6 +260,14 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
group: 'DeepSeek Coder'
}
],
ocoolai: [
{
id: 'gpt-4o',
provider: 'ocoolai',
name: 'OpenAI GPT-4o',
group: 'OpenAI'
}
],
github: [
{
id: 'gpt-4o',

View File

@@ -1,26 +1,22 @@
import BaicuanAppLogo from '@renderer/assets/images/apps/baixiaoying.webp'
import KimiAppLogo from '@renderer/assets/images/apps/kimi.jpg'
import YuewenAppLogo from '@renderer/assets/images/apps/yuewen.png'
import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png'
import QwenModelLogo from '@renderer/assets/images/models/qwen.png'
import AiHubMixProviderLogo from '@renderer/assets/images/providers/aihubmix.jpg'
import AnthropicProviderLogo from '@renderer/assets/images/providers/anthropic.jpeg'
import AnthropicProviderLogo from '@renderer/assets/images/providers/anthropic.png'
import BaichuanProviderLogo from '@renderer/assets/images/providers/baichuan.png'
import BytedanceProviderLogo from '@renderer/assets/images/providers/bytedance.png'
import DashScopeProviderLogo from '@renderer/assets/images/providers/dashscope.png'
import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
import DoubaoProviderLogo from '@renderer/assets/images/providers/doubao.png'
import GeminiProviderLogo from '@renderer/assets/images/providers/gemini.png'
import GithubProviderLogo from '@renderer/assets/images/providers/github.svg'
import GithubProviderLogo from '@renderer/assets/images/providers/github.png'
import GoogleProviderLogo from '@renderer/assets/images/providers/google.png'
import GraphRagProviderLogo from '@renderer/assets/images/providers/graph-rag.png'
import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
import MinimaxProviderLogo from '@renderer/assets/images/providers/minimax.png'
import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.png'
import OcoolAiProviderLogo from '@renderer/assets/images/providers/ocoolai.png'
import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
import StepFunProviderLogo from '@renderer/assets/images/providers/stepfun.png'
import YiProviderLogo from '@renderer/assets/images/providers/yi.png'
import StepProviderLogo from '@renderer/assets/images/providers/step.png'
import ZeroOneProviderLogo from '@renderer/assets/images/providers/zero-one.png'
import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
export function getProviderLogo(providerId: string) {
@@ -32,7 +28,7 @@ export function getProviderLogo(providerId: string) {
case 'deepseek':
return DeepSeekProviderLogo
case 'yi':
return YiProviderLogo
return ZeroOneProviderLogo
case 'groq':
return GroqProviderLogo
case 'zhipu':
@@ -52,17 +48,19 @@ export function getProviderLogo(providerId: string) {
case 'aihubmix':
return AiHubMixProviderLogo
case 'gemini':
return GeminiProviderLogo
return GoogleProviderLogo
case 'stepfun':
return StepFunProviderLogo
return StepProviderLogo
case 'doubao':
return DoubaoProviderLogo
return BytedanceProviderLogo
case 'graphrag-kylin-mountain':
return GraphRagProviderLogo
case 'minimax':
return MinimaxProviderLogo
case 'github':
return GithubProviderLogo
case 'ocoolai':
return OcoolAiProviderLogo
default:
return undefined
}
@@ -78,12 +76,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.openai.com/api-keys',
docs: 'https://platform.openai.com/docs',
models: 'https://platform.openai.com/docs/models'
},
app: {
id: 'openai',
name: 'ChatGPT',
url: 'https://chatgpt.com/',
logo: OpenAiProviderLogo
}
},
gemini: {
@@ -95,12 +87,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://aistudio.google.com/app/apikey',
docs: 'https://ai.google.dev/gemini-api/docs',
models: 'https://ai.google.dev/gemini-api/docs/models/gemini'
},
app: {
id: 'gemini',
name: 'Gemini',
url: 'https://gemini.google.com/',
logo: GeminiProviderLogo
}
},
silicon: {
@@ -112,12 +98,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://cloud.siliconflow.cn/account/ak?referrer=clxty1xuy0014lvqwh5z50i88',
docs: 'https://docs.siliconflow.cn/',
models: 'https://docs.siliconflow.cn/docs/model-names'
},
app: {
id: 'silicon',
name: 'SiliconFlow',
url: 'https://cloud.siliconflow.cn/playground/chat',
logo: SiliconFlowProviderLogo
}
},
deepseek: {
@@ -129,12 +109,17 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.deepseek.com/api_keys',
docs: 'https://platform.deepseek.com/api-docs/',
models: 'https://platform.deepseek.com/api-docs/'
}
},
ocoolai: {
api: {
url: 'https://one.ooo.cool'
},
app: {
id: 'deepseek',
name: 'DeepSeek',
url: 'https://chat.deepseek.com/',
logo: DeepSeekProviderLogo
websites: {
official: 'https://ocoolai.com/',
apiKey: 'https://one.ocoolai.com/token',
docs: 'https://docs.ooo.cool/',
models: 'https://docs.ooo.cool/guides/jiage/'
}
},
github: {
@@ -157,12 +142,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.lingyiwanwu.com/apikeys',
docs: 'https://platform.lingyiwanwu.com/docs',
models: 'https://platform.lingyiwanwu.com/docs#%E6%A8%A1%E5%9E%8B'
},
app: {
id: 'yi',
name: 'Yi',
url: 'https://www.wanzhi.com/',
logo: YiProviderLogo
}
},
zhipu: {
@@ -174,12 +153,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://open.bigmodel.cn/usercenter/apikeys',
docs: 'https://open.bigmodel.cn/dev/howuse/introduction',
models: 'https://open.bigmodel.cn/modelcenter/square'
},
app: {
id: 'zhipu',
name: '智谱',
url: 'https://chatglm.cn/main/alltoolsdetail',
logo: ZhipuProviderLogo
}
},
moonshot: {
@@ -191,12 +164,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.moonshot.cn/console/api-keys',
docs: 'https://platform.moonshot.cn/docs/',
models: 'https://platform.moonshot.cn/docs/intro#%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8'
},
app: {
id: 'moonshot',
name: 'Kimi',
url: 'https://kimi.moonshot.cn/',
logo: KimiAppLogo
}
},
baichuan: {
@@ -208,12 +175,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.baichuan-ai.com/console/apikey',
docs: 'https://platform.baichuan-ai.com/docs',
models: 'https://platform.baichuan-ai.com/price'
},
app: {
id: 'baichuan',
name: '百小应',
url: 'https://ying.baichuan-ai.com/chat',
logo: BaicuanAppLogo
}
},
dashscope: {
@@ -225,12 +186,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://help.aliyun.com/zh/dashscope/developer-reference/acquisition-and-configuration-of-api-key',
docs: 'https://help.aliyun.com/zh/dashscope/',
models: 'https://dashscope.console.aliyun.com/model'
},
app: {
id: 'dashscope',
name: '通义千问',
url: 'https://tongyi.aliyun.com/qianwen/',
logo: QwenModelLogo
}
},
stepfun: {
@@ -242,12 +197,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.stepfun.com/interface-key',
docs: 'https://platform.stepfun.com/docs/overview/concept',
models: 'https://platform.stepfun.com/docs/llm/text'
},
app: {
id: 'stepfun',
name: '跃问',
url: 'https://yuewen.cn/chats/new',
logo: YuewenAppLogo
}
},
doubao: {
@@ -259,12 +208,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://console.volcengine.com/ark/region:ark+cn-beijing/apiKey',
docs: 'https://www.volcengine.com/docs/82379/1182403',
models: 'https://console.volcengine.com/ark/region:ark+cn-beijing/endpoint'
},
app: {
id: 'doubao',
name: '豆包',
url: 'https://www.doubao.com/chat/',
logo: DoubaoProviderLogo
}
},
minimax: {
@@ -276,12 +219,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://platform.minimaxi.com/user-center/basic-information/interface-key',
docs: 'https://platform.minimaxi.com/document/Announcement',
models: 'https://platform.minimaxi.com/document/Models'
},
app: {
id: 'minimax',
name: '海螺',
url: 'https://hailuoai.com/',
logo: HailuoModelLogo
}
},
'graphrag-kylin-mountain': {
@@ -309,12 +246,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://console.groq.com/keys',
docs: 'https://console.groq.com/docs/quickstart',
models: 'https://console.groq.com/docs/models'
},
app: {
id: 'groq',
name: 'Groq',
url: 'https://chat.groq.com/',
logo: GroqProviderLogo
}
},
ollama: {
@@ -336,12 +267,6 @@ export const PROVIDER_CONFIG = {
apiKey: 'https://console.anthropic.com/settings/keys',
docs: 'https://docs.anthropic.com/en/docs',
models: 'https://docs.anthropic.com/en/docs/about-claude/models'
},
app: {
id: 'anthropic',
name: 'Claude',
url: 'https://claude.ai/',
logo: AnthropicProviderLogo
}
},
aihubmix: {

View File

@@ -14,6 +14,7 @@ interface ImportMeta {
declare global {
interface Window {
root: HTMLElement
message: MessageInstance
modal: HookAPI
keyv: KeyvStorage

View File

@@ -41,9 +41,7 @@ export function useAppInit() {
useEffect(() => {
const transparentWindow = windowStyle === 'transparent' && isMac && !minappShow
window.document.body.style.background = transparentWindow
? 'var(--navbar-background-mac)'
: 'var(--navbar-background)'
window.root.style.background = transparentWindow ? 'var(--navbar-background-mac)' : 'var(--navbar-background)'
}, [windowStyle, minappShow])
useEffect(() => {

View File

@@ -116,6 +116,9 @@ const resources = {
abbr: 'Assistant',
search: 'Search assistants...'
},
model: {
stream_output: 'Stream Output'
},
files: {
title: 'Files',
file: 'File',
@@ -159,7 +162,8 @@ const resources = {
doubao: 'Doubao',
minimax: 'MiniMax',
'graphrag-kylin-mountain': 'GraphRAG',
github: 'GitHub Models'
github: 'GitHub Models',
ocoolai: 'ocoolAI'
},
settings: {
title: 'Settings',
@@ -397,6 +401,9 @@ const resources = {
abbr: '助手',
search: '搜索助手'
},
model: {
stream_output: '流式输出'
},
files: {
title: '文件',
file: '文件',
@@ -440,7 +447,8 @@ const resources = {
doubao: '豆包',
minimax: 'MiniMax',
'graphrag-kylin-mountain': 'GraphRAG',
github: 'GitHub Models'
github: 'GitHub Models',
ocoolai: 'ocoolAI'
},
settings: {
title: '设置',

View File

@@ -1,5 +1,4 @@
import MinApp from '@renderer/components/MinApp'
import { useTheme } from '@renderer/context/ThemeProvider'
import { MinAppType } from '@renderer/types'
import { FC } from 'react'
import styled from 'styled-components'
@@ -9,15 +8,13 @@ interface Props {
}
const App: FC<Props> = ({ app }) => {
const { theme } = useTheme()
const onClick = () => {
MinApp.start(app)
}
return (
<Container onClick={onClick}>
<AppIcon src={app.logo} style={{ border: theme === 'dark' ? 'none' : '0.5px solid var(--color-border' }} />
<AppIcon src={app.logo} style={{ border: app.bodered ? '0.5px solid var(--color-border)' : 'none' }} />
<AppTitle>{app.name}</AppTitle>
</Container>
)

View File

@@ -1,7 +1,7 @@
import { SearchOutlined } from '@ant-design/icons'
import { Navbar, NavbarCenter } from '@renderer/components/app/Navbar'
import { Center } from '@renderer/components/Layout'
import { getAllMinApps } from '@renderer/config/minapp'
import { getAllMinApps } from '@renderer/config/minapps'
import { Empty, Input } from 'antd'
import { isEmpty } from 'lodash'
import { FC, useMemo, useState } from 'react'

View File

@@ -53,6 +53,7 @@ const ContentContainer = styled.div`
display: flex;
flex: 1;
flex-direction: row;
overflow: hidden;
`
export default HomePage

View File

@@ -2,7 +2,7 @@ import { SyncOutlined } from '@ant-design/icons'
import UserPopup from '@renderer/components/Popups/UserPopup'
import { FONT_FAMILY } from '@renderer/config/constant'
import { APP_NAME, AppLogo, isLocalAi } from '@renderer/config/env'
import { startMinAppById } from '@renderer/config/minapp'
import { startMinAppById } from '@renderer/config/minapps'
import { getModelLogo } from '@renderer/config/models'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useAssistant } from '@renderer/hooks/useAssistant'
@@ -27,10 +27,11 @@ interface Props {
message: Message
index?: number
total?: number
lastMessage?: boolean
onDeleteMessage?: (message: Message) => void
}
const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
const MessageItem: FC<Props> = ({ message, index, lastMessage, onDeleteMessage }) => {
const avatar = useAvatar()
const { t } = useTranslation()
const { assistant, setModel } = useAssistant(message.assistantId)
@@ -38,7 +39,7 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
const { userName, showMessageDivider, messageFont, fontSize } = useSettings()
const { theme } = useTheme()
const isLastMessage = index === 0
const isLastMessage = lastMessage || index === 0
const isAssistantMessage = message.role === 'assistant'
const getUserName = useCallback(() => {
@@ -56,6 +57,7 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
const avatarSource = useMemo(() => {
if (isLocalAi) return AppLogo
return message.modelId ? getModelLogo(message.modelId) : undefined
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [message.modelId, theme])
const avatarName = useMemo(() => firstLetter(assistant?.name).toUpperCase(), [assistant?.name])
@@ -105,18 +107,20 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
</MessageHeader>
<MessageContentContainer style={{ fontFamily, fontSize }}>
<MessageContent message={message} />
<MessageFooter style={{ border: messageBorder, flexDirection: isLastMessage ? 'row-reverse' : undefined }}>
<MessgeTokens message={message} />
<MessageMenubar
message={message}
model={model}
index={index}
isLastMessage={isLastMessage}
isAssistantMessage={isAssistantMessage}
setModel={setModel}
onDeleteMessage={onDeleteMessage}
/>
</MessageFooter>
{!lastMessage && (
<MessageFooter style={{ border: messageBorder, flexDirection: isLastMessage ? 'row-reverse' : undefined }}>
<MessgeTokens message={message} />
<MessageMenubar
message={message}
model={model}
index={index}
isLastMessage={isLastMessage}
isAssistantMessage={isAssistantMessage}
setModel={setModel}
onDeleteMessage={onDeleteMessage}
/>
</MessageFooter>
)}
</MessageContentContainer>
</MessageContainer>
)

View File

@@ -22,9 +22,9 @@ const MessageAttachments: FC<Props> = ({ message }) => {
}
return (
<Container style={{ marginTop: 2, marginBottom: 8 }}>
<Container style={{ marginTop: 2, marginBottom: 8 }} className="message-attachments">
<Upload
listType="picture"
listType="text"
disabled
fileList={message.files?.map((file) => ({
uid: file.id,

View File

@@ -73,6 +73,7 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
const unsubscribes = [
EventEmitter.on(EVENT_NAMES.SEND_MESSAGE, async (msg: Message) => {
await onSendMessage(msg)
containerRef.current?.scrollTo({ top: containerRef.current.scrollHeight, behavior: 'auto' })
fetchChatCompletion({
assistant,
messages: [...messages, msg],
@@ -173,10 +174,6 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
})
}, [topic.id])
useEffect(() => {
setTimeout(() => containerRef.current?.scrollTo({ top: containerRef.current.scrollHeight, behavior: 'auto' }), 0)
}, [messages])
useEffect(() => {
runAsyncFunction(async () => {
EventEmitter.emit(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, {
@@ -189,7 +186,7 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
return (
<Container id="messages" key={assistant.id} ref={containerRef}>
<Suggestions assistant={assistant} messages={messages} lastMessage={lastMessage} />
{lastMessage && <MessageItem key={lastMessage.id} message={lastMessage} />}
{lastMessage && <MessageItem key={lastMessage.id} message={lastMessage} lastMessage />}
{reverse([...messages]).map((message, index) => (
<MessageItem key={message.id} message={message} index={index} onDeleteMessage={onDeleteMessage} />
))}
@@ -207,6 +204,7 @@ const Container = styled.div`
max-height: calc(100vh - var(--input-bar-height) - var(--navbar-height));
padding: 10px 0;
background-color: var(--color-background);
padding-bottom: 20px;
`
export default Messages

View File

@@ -29,6 +29,7 @@ const SettingsTab: FC<Props> = (props) => {
const [contextCount, setConextCount] = useState(assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
const [fontSizeValue, setFontSizeValue] = useState(fontSize)
const { t } = useTranslation()
@@ -48,7 +49,8 @@ const SettingsTab: FC<Props> = (props) => {
temperature: settings.temperature ?? temperature,
contextCount: settings.contextCount ?? contextCount,
enableMaxTokens: settings.enableMaxTokens ?? enableMaxTokens,
maxTokens: settings.maxTokens ?? maxTokens
maxTokens: settings.maxTokens ?? maxTokens,
streamOutput: settings.streamOutput ?? streamOutput
})
}
@@ -80,7 +82,8 @@ const SettingsTab: FC<Props> = (props) => {
temperature: DEFAULT_TEMPERATURE,
contextCount: DEFAULT_CONEXTCOUNT,
enableMaxTokens: false,
maxTokens: DEFAULT_MAX_TOKENS
maxTokens: DEFAULT_MAX_TOKENS,
streamOutput: true
}
})
}
@@ -90,6 +93,7 @@ const SettingsTab: FC<Props> = (props) => {
setConextCount(assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
setEnableMaxTokens(assistant?.settings?.enableMaxTokens ?? false)
setMaxTokens(assistant?.settings?.maxTokens ?? DEFAULT_MAX_TOKENS)
setStreamOutput(assistant?.settings?.streamOutput ?? true)
}, [assistant])
return (
@@ -137,6 +141,18 @@ const SettingsTab: FC<Props> = (props) => {
/>
</Col>
</Row>
<SettingRow>
<SettingRowTitleSmall>{t('model.stream_output')}</SettingRowTitleSmall>
<Switch
size="small"
checked={streamOutput}
onChange={(checked) => {
setStreamOutput(checked)
onUpdateAssistantSettings({ streamOutput: checked })
}}
/>
</SettingRow>
<SettingDivider />
<Row align="middle" justify="space-between">
<HStack alignItems="center">
<Label>{t('chat.settings.max_tokens')}</Label>

View File

@@ -67,7 +67,7 @@ const Suggestions: FC<Props> = ({ assistant, messages, lastMessage }) => {
}
if (suggestions.length === 0) {
return <Container style={{ paddingBottom: 10 }} />
return null
}
return (

View File

@@ -28,7 +28,8 @@ const AssistantSettings: FC = () => {
temperature: settings.temperature ?? temperature,
contextCount: settings.contextCount ?? contextCount,
enableMaxTokens: settings.enableMaxTokens ?? enableMaxTokens,
maxTokens: settings.maxTokens ?? maxTokens
maxTokens: settings.maxTokens ?? maxTokens,
streamOutput: settings.streamOutput ?? true
}
})
}
@@ -63,7 +64,8 @@ const AssistantSettings: FC = () => {
temperature: DEFAULT_TEMPERATURE,
contextCount: DEFAULT_CONEXTCOUNT,
enableMaxTokens: false,
maxTokens: DEFAULT_MAX_TOKENS
maxTokens: DEFAULT_MAX_TOKENS,
streamOutput: true
}
})
}

View File

@@ -8,7 +8,7 @@ import {
} from '@ant-design/icons'
import VisionIcon from '@renderer/components/Icons/VisionIcon'
import { getModelLogo, isVisionModel } from '@renderer/config/models'
import { PROVIDER_CONFIG } from '@renderer/config/provider'
import { PROVIDER_CONFIG } from '@renderer/config/providers'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useProvider } from '@renderer/hooks/useProvider'
import { checkApi } from '@renderer/services/api'

View File

@@ -1,6 +1,6 @@
import { DeleteOutlined, EditOutlined, PlusOutlined } from '@ant-design/icons'
import { DragDropContext, Draggable, Droppable, DropResult } from '@hello-pangea/dnd'
import { getProviderLogo } from '@renderer/config/provider'
import { getProviderLogo } from '@renderer/config/providers'
import { useAllProviders, useProviders } from '@renderer/hooks/useProvider'
import { Provider } from '@renderer/types'
import { droppableReorder, generateColorFromChar, getFirstCharacter, uuid } from '@renderer/utils'
@@ -106,15 +106,15 @@ const ProvidersList: FC = () => {
className={provider.id === selectedProvider?.id ? 'active' : ''}
onClick={() => setSelectedProvider(provider)}>
{provider.isSystem && (
<Avatar shape="square" src={getProviderLogo(provider.id)} size={25} />
<ProviderLogo shape="square" src={getProviderLogo(provider.id)} size={25} />
)}
{!provider.isSystem && (
<Avatar
<ProviderLogo
size={25}
shape="square"
style={{ backgroundColor: generateColorFromChar(provider.name), minWidth: 25 }}>
{getFirstCharacter(provider.name)}
</Avatar>
</ProviderLogo>
)}
<ProviderItemName>
{provider.isSystem ? t(`provider.${provider.id}`) : provider.name}
@@ -190,6 +190,10 @@ const ProviderListItem = styled.div`
}
`
const ProviderLogo = styled(Avatar)`
border: 0.5px solid var(--color-border);
`
const ProviderItemName = styled.div`
margin-left: 10px;
overflow: hidden;

View File

@@ -55,7 +55,7 @@ export default class AnthropicProvider extends BaseProvider {
public async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams) {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, maxTokens } = getAssistantSettings(assistant)
const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
const userMessagesParams: MessageParam[] = []
const _messages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
@@ -72,16 +72,25 @@ export default class AnthropicProvider extends BaseProvider {
userMessages.shift()
}
const body: MessageCreateParamsNonStreaming = {
model: model.id,
messages: userMessages,
max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
temperature: assistant?.settings?.temperature,
system: assistant.prompt
}
if (!streamOutput) {
const message = await this.sdk.messages.create({ ...body, stream: false })
return onChunk({
text: message.content[0].type === 'text' ? message.content[0].text : '',
usage: message.usage
})
}
return new Promise<void>((resolve, reject) => {
const stream = this.sdk.messages
.stream({
model: model.id,
messages: userMessages,
max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
temperature: assistant?.settings?.temperature,
system: assistant.prompt,
stream: true
})
.stream({ ...body, stream: true })
.on('text', (text) => {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
stream.controller.abort()
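
Read as a whole, the Anthropic hunk above builds one request body and then branches on the new streamOutput setting: a blocking messages.create call when streaming is off, the SDK's stream helper otherwise. A condensed, hedged sketch of that pattern against @anthropic-ai/sdk follows; the client setup, model id and prompt are placeholders, not the project's values.

import Anthropic from '@anthropic-ai/sdk'

// Sketch only: gate between a blocking completion and a streamed one
// based on a per-assistant streamOutput flag.
export async function completeSketch(client: Anthropic, streamOutput: boolean, onText: (text: string) => void) {
  const body = {
    model: 'claude-3-5-sonnet-20240620', // placeholder model id
    max_tokens: 4096,
    messages: [{ role: 'user' as const, content: 'Hello' }]
  }

  if (!streamOutput) {
    const message = await client.messages.create({ ...body, stream: false })
    onText(message.content[0].type === 'text' ? message.content[0].text : '')
    return
  }

  const stream = client.messages.stream({ ...body, stream: true })
  stream.on('text', onText)
  await stream.finalMessage() // resolves once the streamed response is complete
}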

View File

@@ -1,4 +1,12 @@
import { Content, GoogleGenerativeAI, InlineDataPart, Part, TextPart } from '@google/generative-ai'
import {
Content,
GoogleGenerativeAI,
HarmBlockThreshold,
HarmCategory,
InlineDataPart,
Part,
TextPart
} from '@google/generative-ai'
import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/assistant'
import { EVENT_NAMES } from '@renderer/services/event'
import { filterContextMessages, filterMessages } from '@renderer/services/messages'
@@ -49,7 +57,7 @@ export default class GeminiProvider extends BaseProvider {
public async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams) {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, maxTokens } = getAssistantSettings(assistant)
const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
const userMessages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 1)))
onFilterMessages(userMessages)
@@ -68,11 +76,34 @@ export default class GeminiProvider extends BaseProvider {
generationConfig: {
maxOutputTokens: maxTokens,
temperature: assistant?.settings?.temperature
}
},
safetySettings: [
{ category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_NONE },
{
category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold: HarmBlockThreshold.BLOCK_NONE
},
{ category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
{ category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE }
]
})
const chat = geminiModel.startChat({ history })
const messageContents = await this.getMessageContents(userLastMessage!)
if (!streamOutput) {
const { response } = await chat.sendMessage(messageContents.parts)
onChunk({
text: response.candidates?.[0].content.parts[0].text,
usage: {
prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
total_tokens: response.usageMetadata?.totalTokenCount || 0
}
})
return
}
const userMessagesStream = await chat.sendMessageStream(messageContents.parts)
for await (const chunk of userMessagesStream.stream) {
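
For reference, the safety-settings change above amounts to passing a BLOCK_NONE threshold for each harm category when the model is created, plus a non-streaming path that reads token usage from usageMetadata. A self-contained sketch against @google/generative-ai; the API key, model id and prompt are placeholders.

import { GoogleGenerativeAI, HarmBlockThreshold, HarmCategory } from '@google/generative-ai'

// Sketch only: disable Gemini safety blocking for the four harm categories.
const genAI = new GoogleGenerativeAI('YOUR_API_KEY') // placeholder key
const model = genAI.getGenerativeModel({
  model: 'gemini-1.5-flash', // placeholder model id
  safetySettings: [
    { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_NONE },
    { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: HarmBlockThreshold.BLOCK_NONE },
    { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
    { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE }
  ]
})

export async function askOnceSketch(prompt: string) {
  // Non-streaming path (streamOutput === false): single sendMessage call, usage from usageMetadata.
  const chat = model.startChat({ history: [] })
  const { response } = await chat.sendMessage(prompt)
  return { text: response.text(), totalTokens: response.usageMetadata?.totalTokenCount ?? 0 }
}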

View File

@@ -28,7 +28,7 @@ export default class OpenAIProvider extends BaseProvider {
}
private isSupportStreamOutput(modelId: string): boolean {
if (this.provider.id === 'openai' && modelId.includes('o1-')) {
if (modelId.includes('o1-')) {
return false
}
return true
@@ -112,7 +112,7 @@ export default class OpenAIProvider extends BaseProvider {
async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, maxTokens } = getAssistantSettings(assistant)
const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
const userMessages: ChatCompletionMessageParam[] = []
@@ -124,16 +124,16 @@ export default class OpenAIProvider extends BaseProvider {
userMessages.push(await this.getMessageParam(message, model))
}
const isSupportStreamOutput = this.isSupportStreamOutput(model.id)
const isSupportStreamOutput = streamOutput && this.isSupportStreamOutput(model.id)
// @ts-ignore key is not typed
const stream = await this.sdk.chat.completions.create({
model: model.id,
messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
stream: isSupportStreamOutput,
temperature: assistant?.settings?.temperature,
max_tokens: maxTokens,
keep_alive: this.keepAliveTime
keep_alive: this.keepAliveTime,
stream: isSupportStreamOutput
})
if (!isSupportStreamOutput) {
@@ -263,8 +263,6 @@ export default class OpenAIProvider extends BaseProvider {
public async models(): Promise<OpenAI.Models.Model[]> {
try {
const response = await this.sdk.models.list()
if (this.provider.id === 'github') {
// @ts-ignore key is not typed
return response.body.map((model) => ({
@@ -275,6 +273,7 @@ export default class OpenAIProvider extends BaseProvider {
}))
}
const response = await this.sdk.models.list()
return response.data
} catch (error) {
return []
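
The completions hunk above folds the per-assistant streamOutput flag into the existing o1 check, so the same create call either streams chunks or returns a single completion. A short sketch of the two resulting paths with the openai SDK; the client setup, model id and prompt are illustrative.

import OpenAI from 'openai'

// Sketch only: honor a streamOutput preference while still forcing
// non-streaming for models that do not support it (o1-*).
export async function chatSketch(client: OpenAI, modelId: string, streamOutput: boolean, onText: (t: string) => void) {
  const useStream = streamOutput && !modelId.includes('o1-')
  const messages = [{ role: 'user' as const, content: 'Hello' }]

  if (!useStream) {
    const completion = await client.chat.completions.create({ model: modelId, messages, stream: false })
    onText(completion.choices[0].message.content ?? '')
    return
  }

  const stream = await client.chat.completions.create({ model: modelId, messages, stream: true })
  for await (const chunk of stream) {
    onText(chunk.choices[0]?.delta?.content ?? '')
  }
}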

View File

@@ -80,7 +80,8 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
contextCount: contextCount === 20 ? 100000 : contextCount,
temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE,
enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
maxTokens: getAssistantMaxTokens()
maxTokens: getAssistantMaxTokens(),
streamOutput: assistant?.settings?.streamOutput ?? true
}
}

View File

@@ -22,7 +22,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 25,
version: 26,
blacklist: ['runtime'],
migrate
},

View File

@@ -77,6 +77,15 @@ const initialState: LlmState = {
isSystem: true,
enabled: false
},
{
id: 'ocoolai',
name: 'ocoolAI',
apiKey: '',
apiHost: 'https://one.ooo.cool',
models: [],
isSystem: true,
enabled: false
},
{
id: 'github',
name: 'Github Models',

View File

@@ -413,6 +413,26 @@ const migrateConfig = {
]
}
}
},
'26': (state: RootState) => {
return {
...state,
llm: {
...state.llm,
providers: [
...state.llm.providers,
{
id: 'ocoolai',
name: 'ocoolAI',
apiKey: '',
apiHost: 'https://one.ooo.cool',
models: [],
isSystem: true,
enabled: false
}
]
}
}
}
}
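
The store hunk above bumps the persisted version from 25 to 26, and the migrate map gains a matching '26' entry that appends the ocoolai provider. The glue between the two is not part of this diff; what follows is a minimal sketch of how such a versioned migration map is typically wired with redux-persist's createMigrate. The helper name and rootReducer are assumptions, not the project's actual wiring.

import { createMigrate, persistReducer } from 'redux-persist'
import storage from 'redux-persist/lib/storage'

// Sketch only: each key is the version the migration upgrades persisted state to.
const migrations = {
  26: (state: any) => ({
    ...state,
    llm: {
      ...state.llm,
      providers: [
        ...state.llm.providers,
        { id: 'ocoolai', name: 'ocoolAI', apiKey: '', apiHost: 'https://one.ooo.cool', models: [], isSystem: true, enabled: false }
      ]
    }
  })
}

export const makePersistedReducer = (rootReducer: any) =>
  persistReducer(
    {
      key: 'cherry-studio',
      storage,
      version: 26, // must match the highest migration key
      blacklist: ['runtime'],
      migrate: createMigrate(migrations as any, { debug: false }) // cast kept loose for the sketch
    },
    rootReducer
  )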

View File

@@ -32,7 +32,7 @@ const initialState: SettingsState = {
messageFont: 'system',
showInputEstimatedTokens: false,
theme: ThemeMode.light,
windowStyle: 'opaque',
windowStyle: 'transparent',
fontSize: 14,
topicPosition: 'right',
pasteLongTextAsFile: true,

View File

@@ -16,6 +16,7 @@ export type AssistantSettings = {
temperature: number
maxTokens: number | undefined
enableMaxTokens: boolean
streamOutput: boolean
}
export type Message = {
@@ -85,6 +86,7 @@ export type MinAppType = {
name: string
logo: string
url: string
bodered?: boolean
}
export interface FileType {

yarn.lock: 1819 changes (file diff suppressed because it is too large)