Compare commits

...

22 Commits

Author | SHA1 | Message | Date
copilot-swe-agent[bot]
ab1a9f56f5 🎨 style: add missing semicolon for consistency
Co-authored-by: GeorgeDong32 <98630204+GeorgeDong32@users.noreply.github.com>
2025-11-04 12:50:54 +00:00
copilot-swe-agent[bot]
de1e2a94bd test: add tests for OpenAIAPIClient reasoning effort parameter format
Co-authored-by: GeorgeDong32 <98630204+GeorgeDong32@users.noreply.github.com>
2025-11-04 12:48:40 +00:00
copilot-swe-agent[bot]
044eac0cf9 🐛 fix: use correct reasoning parameter format for GitHub Copilot GPT-5 models
Co-authored-by: GeorgeDong32 <98630204+GeorgeDong32@users.noreply.github.com>
2025-11-04 12:44:34 +00:00
copilot-swe-agent[bot]
7fceb434b8 Initial plan 2025-11-04 12:29:14 +00:00
beyondkmp
5fea202a7d fix: add PowerMonitorService for system shutdown handling (#11115)
* feat: add PowerMonitorService for system shutdown handling

- Add PowerMonitorService to monitor system shutdown events
- Use @paymoapp/electron-shutdown-handler for Windows platform
- Use Electron's powerMonitor for macOS and Linux platforms
- Support registering multiple shutdown handlers via dependency injection
- Register shutdown handlers in ipc.ts to disable auto-update and save data

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* format code

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-11-04 18:56:09 +08:00
fullex
7dce1d776b feat: app's version history log (#11097)
* feat: integrate version tracking in app initialization

- Added versionService to record the current version during app startup.
- This change prepares for upcoming data refactoring in version 2.

* fix: lint from other PRs & format

* feat: enhance version tracking with meaningful change detection

- Updated VersionService to check for changes in version, OS, environment, packaged status, and install mode before recording a new entry.
- Improved logging to reflect whether version information has changed or remained the same.
2025-11-04 14:13:07 +08:00
beyondkmp
346af4d338 fix: add CherryAI provider support and update API host formatting (#11135)
* fix: add CherryAI provider support and update API host formatting

* format code

* add ut

* format code
2025-11-04 12:59:14 +08:00
Zephyr
abd5d3b96f feat: amazon bedrock request use bedrock api key (#10727)
* feat: amazon bedrock request use bedrock api key

* feat: ai-core/provider support bedrock api key

* refactor: extract AWS Bedrock auth type and remove redundant state

* feat: add bedrock reasoning support

Add AWS Bedrock-specific reasoning parameter handling to support Extended Thinking feature for Claude models via Bedrock API.

Changes:
- Add `buildBedrockProviderOptions` function in options.ts to handle Bedrock-specific provider options
- Add `getBedrockReasoningParams` function in reasoning.ts to generate reasoning config with budget tokens
- Register 'bedrock' case in provider options switch to route to Bedrock-specific builder
- Reuse `getAnthropicThinkingBudget` helper for consistent token budget calculation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* feat: add migration for Bedrock auth type and API key fields

* refactor: replace any type with BedrockRuntimeClientConfig in AWS Bedrock client

* fix: bug fix

* fix: lint error

* fix: bedrock reasoning

* chore: bump persisted reducer version to 171

* Update src/renderer/src/store/migrate.ts

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: icarus <eurfelux@gmail.com>
2025-11-03 21:05:10 +08:00
Phantom
49bd298d37 feat(InputbarTools): add reasoning effort button to quick panel (#10959)
Add new menu item with lightbulb icon that opens the reasoning effort quick panel when clicked
2025-11-03 20:36:52 +08:00
Phantom
714a28ac29 fix(QuickPanel): Hide the options that should be hidden in the quick panel. (#10931)
* feat(QuickPanel): add hidden property to list items

Add support for hiding QuickPanel items by introducing a hidden property. This allows conditional visibility of items like the knowledge base button based on application state.

* docs(types): clarify settings field comment in Assistant type
2025-11-03 20:34:24 +08:00
beyondkmp
0cf81c04c8 chore: update electron-builder.yml to exclude additional configuration files from build (#11129)
* chore: update electron-builder.yml to exclude additional configuration files from build

* delete all hide files
2025-11-03 17:54:29 +08:00
kangfenmao
4186e9c990 feat: add support for TopP in model capabilities and update parameter builder to utilize it 2025-11-03 16:37:12 +08:00
kangfenmao
d8f68a6056 feat: initialize painting model with first available option and update default provider to 'cherryin' 2025-11-03 15:12:58 +08:00
kangfenmao
11bf50e722 fix(i18n): improve label retrieval for paintings image size options 2025-11-03 14:45:21 +08:00
kangfenmao
32a84311aa feat: add SophNet LLM provider 2025-11-03 13:28:40 +08:00
beyondkmp
6eaa2b2461 refactor: remove main window dependency from PythonService and utilize WindowService for window management (#11116)
* refactor: remove main window dependency from PythonService and utilize WindowService for window management

* format code
2025-11-03 13:09:40 +08:00
defi-failure
9f00f00546 chore: update v1.7.0-beta.3 release notes (#11105)
* chore: update v1.7.0-beta.3 release notes

* fix(i18n): Auto update translations for PR #11105

* fix: code lint error

---------

Co-authored-by: GitHub Action <action@github.com>
2025-11-02 22:28:36 +08:00
SuYao
bd94d23343 refactor:Unify the naming of configuration fields in thinking, change to using underscore style. (#11106)
* refactor:Unify the naming of configuration fields in thinking, change to using underscore style.

* fix(i18n): Auto update translations for PR #11106

* chore: lint

* fix: typecheck

---------

Co-authored-by: GitHub Action <action@github.com>
2025-11-02 19:24:23 +08:00
chenxue
5f1c14e2c0 fix(aihubmix): fix default rules missing app code (#11100)
* add imagen

* Update aihubmix.ts

* fix type

---------

Co-authored-by: zhaochenxue <zhaochenxue@bixin.cn>
2025-11-02 17:03:05 +08:00
dependabot[bot]
cdc12d5092 ci(deps): bump actions/stale from 9 to 10 (#11088)
Bumps [actions/stale](https://github.com/actions/stale) from 9 to 10.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/v9...v10)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: '10'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-02 08:44:53 +08:00
dependabot[bot]
e5967fd874 ci(deps): bump actions/upload-artifact from 4 to 5 (#11089)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-02 08:44:41 +08:00
dependabot[bot]
e2f1d80697 ci(deps): bump actions/setup-node from 4 to 6 (#11090)
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4 to 6.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/setup-node
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-02 08:44:28 +08:00
56 changed files with 2164 additions and 2092 deletions

View File

@@ -27,7 +27,7 @@ jobs:
ref: ${{ github.event.pull_request.head.ref }}
- name: 📦 Setting Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
package-manager-cache: false

View File

@@ -54,7 +54,7 @@ jobs:
- name: Setup Node.js
if: steps.check_time.outputs.should_delay == 'false'
uses: actions/setup-node@v4
uses: actions/setup-node@v6
with:
node-version: '20'
@@ -121,7 +121,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v6
with:
node-version: '20'

View File

@@ -21,7 +21,7 @@ jobs:
contents: none
steps:
- name: Close needs-more-info issues
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
only-labels: 'needs-more-info'
@@ -42,7 +42,7 @@ jobs:
days-before-pr-close: -1
- name: Close inactive issues
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: ${{ env.daysBeforeStale }}

View File

@@ -56,7 +56,7 @@ jobs:
ref: main
- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20
@@ -208,7 +208,7 @@ jobs:
echo "总计: $(find renamed-artifacts -type f | wc -l) 个文件"
- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v5
with:
name: cherry-studio-nightly-${{ steps.date.outputs.date }}-${{ matrix.os }}
path: renamed-artifacts/*

View File

@@ -24,7 +24,7 @@ jobs:
uses: actions/checkout@v5
- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20

View File

@@ -47,7 +47,7 @@ jobs:
npm version "$VERSION" --no-git-tag-version --allow-same-version
- name: Install Node.js
uses: actions/setup-node@v5
uses: actions/setup-node@v6
with:
node-version: 20

View File

@@ -21,6 +21,8 @@ files:
- "**/*"
- "!**/{.vscode,.yarn,.yarn-lock,.github,.cursorrules,.prettierrc}"
- "!electron.vite.config.{js,ts,mjs,cjs}}"
- "!.*"
- "!components.json"
- "!**/{.eslintignore,.eslintrc.js,.eslintrc.json,.eslintcache,root.eslint.config.js,eslint.config.js,.eslintrc.cjs,.prettierignore,.prettierrc.yaml,eslint.config.mjs,dev-app-update.yml,CHANGELOG.md,README.md,biome.jsonc}"
- "!**/{.env,.env.*,.npmrc,pnpm-lock.yaml}"
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
@@ -161,6 +163,9 @@ releaseInfo:
- MCP Confirmation: Added confirmation modal when activating protocol-installed MCP servers
- Translation: Enhanced translation script with concurrency and validation
- Electron & Vite: Updated to Electron 38 and Vite 4.0.1
- QR Code Generation: Optimized performance for phone LAN export
- Enterprise Settings: Added enterprise section in About settings
- Assistant/Agent Popup: Enhanced UI for adding assistants and agents
Claude Code Tool Improvements:
- GlobTool: Now counts lines instead of files in output for better clarity
@@ -188,6 +193,9 @@ releaseInfo:
- Fixed reranker API error response capture
- Fixed right-click paste file content into inputbar
- Fixed minimax-m2 support in aiCore
- Fixed Azure embedding issues
- Fixed agent edit modal loading race condition
- Fixed debounced save cancellation on file path update
<!--LANG:zh-CN-->
v1.7.0-beta.3 新特性
@@ -218,6 +226,9 @@ releaseInfo:
- MCP 确认:添加激活协议安装的 MCP 服务器时的确认模态框
- 翻译:增强翻译脚本的并发和验证功能
- Electron & Vite更新至 Electron 38 和 Vite 4.0.1
- 二维码生成:优化手机局域网导出性能
- 企业设置:在关于设置中添加企业部分
- 助手/Agent 弹窗:增强添加助手和 Agent 的界面
Claude Code 工具改进:
- GlobTool现在计算行数而不是文件数提供更清晰的输出
@@ -245,4 +256,7 @@ releaseInfo:
- 修复 reranker API 错误响应捕获
- 修复右键粘贴文件内容到输入栏
- 修复 aiCore 中的 minimax-m2 支持
- 修复 Azure embedding 问题
- 修复 Agent 编辑模态框加载竞态条件
- 修复文件路径更新时防抖保存取消问题
<!--LANG:END-->

View File

@@ -82,6 +82,7 @@
"@libsql/client": "0.14.0",
"@libsql/win32-x64-msvc": "^0.4.7",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"@paymoapp/electron-shutdown-handler": "^1.1.2",
"@strongtz/win32-arm64-msvc": "^0.4.7",
"express": "^5.1.0",
"font-list": "^2.0.0",
@@ -113,9 +114,9 @@
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@anthropic-ai/sdk": "^0.41.0",
"@anthropic-ai/vertex-sdk": "patch:@anthropic-ai/vertex-sdk@npm%3A0.11.4#~/.yarn/patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
"@aws-sdk/client-bedrock": "^3.840.0",
"@aws-sdk/client-bedrock-runtime": "^3.840.0",
"@aws-sdk/client-s3": "^3.840.0",
"@aws-sdk/client-bedrock": "^3.910.0",
"@aws-sdk/client-bedrock-runtime": "^3.910.0",
"@aws-sdk/client-s3": "^3.910.0",
"@biomejs/biome": "2.2.4",
"@cherrystudio/ai-core": "workspace:^1.0.0-alpha.18",
"@cherrystudio/embedjs": "^0.1.31",

View File

@@ -21,6 +21,7 @@ import { appMenuService } from './services/AppMenuService'
import { configManager } from './services/ConfigManager'
import mcpService from './services/MCPService'
import { nodeTraceService } from './services/NodeTraceService'
import powerMonitorService from './services/PowerMonitorService'
import {
CHERRY_STUDIO_PROTOCOL,
handleProtocolUrl,
@@ -30,6 +31,7 @@ import {
import selectionService, { initSelectionService } from './services/SelectionService'
import { registerShortcuts } from './services/ShortcutService'
import { TrayService } from './services/TrayService'
import { versionService } from './services/VersionService'
import { windowService } from './services/WindowService'
import { initWebviewHotkeys } from './services/WebviewService'
@@ -110,6 +112,10 @@ if (!app.requestSingleInstanceLock()) {
// Some APIs can only be used after this event occurs.
app.whenReady().then(async () => {
// Record current version for tracking
// A preparation for v2 data refactoring
versionService.recordCurrentVersion()
initWebviewHotkeys()
// Set app user model id for windows
electronApp.setAppUserModelId(import.meta.env.VITE_MAIN_BUNDLE_ID || 'com.kangfenmao.CherryStudio')
@@ -127,6 +133,7 @@ if (!app.requestSingleInstanceLock()) {
appMenuService?.setupApplicationMenu()
nodeTraceService.init()
powerMonitorService.init()
app.on('activate', function () {
const mainWindow = windowService.getMainWindow()

View File

@@ -50,6 +50,7 @@ import * as NutstoreService from './services/NutstoreService'
import ObsidianVaultService from './services/ObsidianVaultService'
import { ocrService } from './services/ocr/OcrService'
import OvmsManager from './services/OvmsManager'
import powerMonitorService from './services/PowerMonitorService'
import { proxyManager } from './services/ProxyManager'
import { pythonService } from './services/PythonService'
import { FileServiceManager } from './services/remotefile/FileServiceManager'
@@ -115,8 +116,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
const appUpdater = new AppUpdater()
const notificationService = new NotificationService()
// Initialize Python service with main window
pythonService.setMainWindow(mainWindow)
// Register shutdown handlers
powerMonitorService.registerShutdownHandler(() => {
appUpdater.setAutoUpdate(false)
})
powerMonitorService.registerShutdownHandler(() => {
const mw = windowService.getMainWindow()
if (mw && !mw.isDestroyed()) {
mw.webContents.send(IpcChannel.App_SaveData)
}
})
const checkMainWindow = () => {
if (!mainWindow || mainWindow.isDestroyed()) {

View File

@@ -0,0 +1,112 @@
import { loggerService } from '@logger'
import { isLinux, isMac, isWin } from '@main/constant'
import ElectronShutdownHandler from '@paymoapp/electron-shutdown-handler'
import { BrowserWindow } from 'electron'
import { powerMonitor } from 'electron'
const logger = loggerService.withContext('PowerMonitorService')
type ShutdownHandler = () => void | Promise<void>
export class PowerMonitorService {
private static instance: PowerMonitorService
private initialized = false
private shutdownHandlers: ShutdownHandler[] = []
private constructor() {
// Private constructor to prevent direct instantiation
}
public static getInstance(): PowerMonitorService {
if (!PowerMonitorService.instance) {
PowerMonitorService.instance = new PowerMonitorService()
}
return PowerMonitorService.instance
}
/**
* Register a shutdown handler to be called when system shutdown is detected
* @param handler - The handler function to be called on shutdown
*/
public registerShutdownHandler(handler: ShutdownHandler): void {
this.shutdownHandlers.push(handler)
logger.info('Shutdown handler registered', { totalHandlers: this.shutdownHandlers.length })
}
/**
* Initialize power monitor to listen for shutdown events
*/
public init(): void {
if (this.initialized) {
logger.warn('PowerMonitorService already initialized')
return
}
if (isWin) {
this.initWindowsShutdownHandler()
} else if (isMac || isLinux) {
this.initElectronPowerMonitor()
}
this.initialized = true
logger.info('PowerMonitorService initialized', { platform: process.platform })
}
/**
* Execute all registered shutdown handlers
*/
private async executeShutdownHandlers(): Promise<void> {
logger.info('Executing shutdown handlers', { count: this.shutdownHandlers.length })
for (const handler of this.shutdownHandlers) {
try {
await handler()
} catch (error) {
logger.error('Error executing shutdown handler', error as Error)
}
}
}
/**
* Initialize shutdown handler for Windows using @paymoapp/electron-shutdown-handler
*/
private initWindowsShutdownHandler(): void {
try {
const zeroMemoryWindow = new BrowserWindow({ show: false })
// Set the window handle for the shutdown handler
ElectronShutdownHandler.setWindowHandle(zeroMemoryWindow.getNativeWindowHandle())
// Listen for shutdown event
ElectronShutdownHandler.on('shutdown', async () => {
logger.info('System shutdown event detected (Windows)')
// Execute all registered shutdown handlers
await this.executeShutdownHandlers()
// Release the shutdown block to allow the system to shut down
ElectronShutdownHandler.releaseShutdown()
})
logger.info('Windows shutdown handler registered')
} catch (error) {
logger.error('Failed to initialize Windows shutdown handler', error as Error)
}
}
/**
* Initialize power monitor for macOS and Linux using Electron's powerMonitor
*/
private initElectronPowerMonitor(): void {
try {
powerMonitor.on('shutdown', async () => {
logger.info('System shutdown event detected', { platform: process.platform })
// Execute all registered shutdown handlers
await this.executeShutdownHandlers()
})
logger.info('Electron powerMonitor shutdown listener registered')
} catch (error) {
logger.error('Failed to initialize Electron powerMonitor', error as Error)
}
}
}
// Default export as singleton instance
export default PowerMonitorService.getInstance()
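For reference, a minimal consumer sketch of the service above, mirroring how ipc.ts registers handlers in this PR; the import path and the handler body are illustrative assumptions, not part of the diff:

import powerMonitorService from './services/PowerMonitorService' // path assumed relative to src/main

async function flushPendingWrites(): Promise<void> {
  // Placeholder for real persistence work (e.g. saving window state).
}

// Handlers may be sync or async; they run sequentially when a shutdown event fires.
powerMonitorService.registerShutdownHandler(async () => {
  await flushPendingWrites()
})

// Called once during startup (after app.whenReady() in index.ts above);
// a second call is a no-op thanks to the `initialized` guard.
powerMonitorService.init()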

View File

@@ -1,8 +1,9 @@
import { randomUUID } from 'node:crypto'
import type { BrowserWindow } from 'electron'
import { ipcMain } from 'electron'
import { windowService } from './WindowService'
interface PythonExecutionRequest {
id: string
script: string
@@ -21,7 +22,6 @@ interface PythonExecutionResponse {
*/
export class PythonService {
private static instance: PythonService | null = null
private mainWindow: BrowserWindow | null = null
private pendingRequests = new Map<string, { resolve: (value: string) => void; reject: (error: Error) => void }>()
private constructor() {
@@ -51,10 +51,6 @@ export class PythonService {
})
}
public setMainWindow(mainWindow: BrowserWindow) {
this.mainWindow = mainWindow
}
/**
* Execute Python code by sending request to renderer PyodideService
*/
@@ -63,8 +59,8 @@ export class PythonService {
context: Record<string, any> = {},
timeout: number = 60000
): Promise<string> {
if (!this.mainWindow) {
throw new Error('Main window not set in PythonService')
if (!windowService.getMainWindow()) {
throw new Error('Main window not found')
}
return new Promise((resolve, reject) => {
@@ -95,7 +91,7 @@ export class PythonService {
// Send request to renderer
const request: PythonExecutionRequest = { id: requestId, script, context, timeout }
this.mainWindow?.webContents.send('python-execution-request', request)
windowService.getMainWindow()?.webContents.send('python-execution-request', request)
})
}
}

View File

@@ -0,0 +1,285 @@
import { loggerService } from '@logger'
import { app } from 'electron'
import fs from 'fs'
import path from 'path'
const logger = loggerService.withContext('VersionService')
type OS = 'win' | 'mac' | 'linux' | 'unknown'
type Environment = 'prod' | 'dev'
type Packaged = 'packaged' | 'unpackaged'
type Mode = 'install' | 'portable'
/**
* Version record stored in version.log
*/
interface VersionRecord {
version: string
os: OS
environment: Environment
packaged: Packaged
mode: Mode
timestamp: string
}
/**
* Service for tracking application version history
* Stores version information in userData/version.log for data migration and diagnostics
*/
class VersionService {
private readonly VERSION_LOG_FILE = 'version.log'
private versionLogPath: string | null = null
constructor() {
// Lazy initialization of path since app.getPath may not be available during construction
}
/**
* Gets the full path to version.log file
* @returns {string} Full path to version log file
*/
private getVersionLogPath(): string {
if (!this.versionLogPath) {
this.versionLogPath = path.join(app.getPath('userData'), this.VERSION_LOG_FILE)
}
return this.versionLogPath
}
/**
* Gets current operating system identifier
* @returns {OS} OS identifier
*/
private getCurrentOS(): OS {
switch (process.platform) {
case 'win32':
return 'win'
case 'darwin':
return 'mac'
case 'linux':
return 'linux'
default:
return 'unknown'
}
}
/**
* Gets current environment (production or development)
* @returns {Environment} Environment identifier
*/
private getCurrentEnvironment(): Environment {
return import.meta.env.MODE === 'production' ? 'prod' : 'dev'
}
/**
* Gets packaging status
* @returns {Packaged} Packaging status
*/
private getPackagedStatus(): Packaged {
return app.isPackaged ? 'packaged' : 'unpackaged'
}
/**
* Gets installation mode (install or portable)
* @returns {Mode} Installation mode
*/
private getInstallMode(): Mode {
return process.env.PORTABLE_EXECUTABLE_DIR !== undefined ? 'portable' : 'install'
}
/**
* Generates version log line for current application state
* @returns {string} Pipe-separated version record line
*/
private generateCurrentVersionLine(): string {
const version = app.getVersion()
const os = this.getCurrentOS()
const environment = this.getCurrentEnvironment()
const packaged = this.getPackagedStatus()
const mode = this.getInstallMode()
const timestamp = new Date().toISOString()
return `${version}|${os}|${environment}|${packaged}|${mode}|${timestamp}`
}
/**
* Parses a version log line into a VersionRecord object
* @param {string} line - Pipe-separated version record line
* @returns {VersionRecord | null} Parsed version record or null if invalid
*/
private parseVersionLine(line: string): VersionRecord | null {
try {
const parts = line.trim().split('|')
if (parts.length !== 6) {
return null
}
const [version, os, environment, packaged, mode, timestamp] = parts
// Validate data
if (
!version ||
!['win', 'mac', 'linux', 'unknown'].includes(os) ||
!['prod', 'dev'].includes(environment) ||
!['packaged', 'unpackaged'].includes(packaged) ||
!['install', 'portable'].includes(mode) ||
!timestamp
) {
return null
}
return {
version,
os: os as OS,
environment: environment as Environment,
packaged: packaged as Packaged,
mode: mode as Mode,
timestamp
}
} catch (error) {
logger.warn(`Failed to parse version line: ${line}`, error as Error)
return null
}
}
/**
* Reads the last 1KB from version.log and returns all lines
* Uses reverse reading from file end to avoid reading the entire file
* @returns {string[]} Array of version lines from the last 1KB
*/
private readLastVersionLines(): string[] {
const logPath = this.getVersionLogPath()
try {
if (!fs.existsSync(logPath)) {
return []
}
const stats = fs.statSync(logPath)
const fileSize = stats.size
if (fileSize === 0) {
return []
}
// Read from the end of the file, 1KB is enough to find previous version
// Typical line: "1.7.0-beta.3|win|prod|packaged|install|2025-01-15T08:30:00.000Z\n" (~70 bytes)
// 1KB can store ~14 lines, which is more than enough
const bufferSize = Math.min(1024, fileSize)
const buffer = Buffer.alloc(bufferSize)
const fd = fs.openSync(logPath, 'r')
try {
const startPosition = Math.max(0, fileSize - bufferSize)
fs.readSync(fd, buffer, 0, bufferSize, startPosition)
const content = buffer.toString('utf-8')
const lines = content
.trim()
.split('\n')
.filter((line) => line.trim())
return lines
} finally {
fs.closeSync(fd)
}
} catch (error) {
logger.error('Failed to read version log:', error as Error)
return []
}
}
/**
* Appends a version record line to version.log
* @param {string} line - Version record line to append
*/
private appendVersionLine(line: string): void {
const logPath = this.getVersionLogPath()
try {
fs.appendFileSync(logPath, line + '\n', 'utf-8')
logger.debug(`Version recorded: ${line}`)
} catch (error) {
logger.error('Failed to append version log:', error as Error)
}
}
/**
* Records the current version on application startup
* Only adds a new record if the version has changed since the last run
*/
recordCurrentVersion(): void {
try {
const currentLine = this.generateCurrentVersionLine()
const lines = this.readLastVersionLines()
// Add new record if this is the first run or version has changed
if (lines.length === 0) {
logger.info('First run detected, creating version log')
this.appendVersionLine(currentLine)
return
}
const lastLine = lines[lines.length - 1]
const lastRecord = this.parseVersionLine(lastLine)
const currentVersion = app.getVersion()
// Check if any meaningful field has changed (version, os, environment, packaged, mode)
const currentOS = this.getCurrentOS()
const currentEnvironment = this.getCurrentEnvironment()
const currentPackaged = this.getPackagedStatus()
const currentMode = this.getInstallMode()
const hasMeaningfulChange =
!lastRecord ||
lastRecord.version !== currentVersion ||
lastRecord.os !== currentOS ||
lastRecord.environment !== currentEnvironment ||
lastRecord.packaged !== currentPackaged ||
lastRecord.mode !== currentMode
if (hasMeaningfulChange) {
logger.info(`Version information changed, recording new entry`)
this.appendVersionLine(currentLine)
} else {
logger.debug(`Version information not changed, skip recording`)
}
} catch (error) {
logger.error('Failed to record current version:', error as Error)
}
}
/**
* Gets the previous version record (last record with different version than current)
* Reads from the last 1KB of version.log to find the most recent different version
* Useful for detecting version upgrades and running migrations
* @returns {VersionRecord | null} Previous version record or null if not available
*/
getPreviousVersion(): VersionRecord | null {
try {
const lines = this.readLastVersionLines()
if (lines.length === 0) {
return null
}
const currentVersion = app.getVersion()
// Read from the end backwards to find the first different version
for (let i = lines.length - 1; i >= 0; i--) {
const record = this.parseVersionLine(lines[i])
if (record && record.version !== currentVersion) {
return record
}
}
return null
} catch (error) {
logger.error('Failed to get previous version:', error as Error)
return null
}
}
}
/**
* Singleton instance of VersionService
*/
export const versionService = new VersionService()
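A short sketch of what ends up in userData/version.log and how a later migration step might consume it; the sample record and the upgrade hook are illustrative assumptions:

import { versionService } from './services/VersionService' // path assumed relative to a src/main caller

// Each record is one pipe-separated line, e.g.:
//   1.7.0-beta.3|win|prod|packaged|install|2025-01-15T08:30:00.000Z
const previous = versionService.getPreviousVersion()
if (previous) {
  // Hypothetical hook: gate one-time upgrade work on the version we came from.
  console.log(`Upgraded from ${previous.version} (${previous.os}, ${previous.mode})`)
}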

View File

@@ -1,5 +1,5 @@
import { loggerService } from '@logger'
import { WebSocketCandidatesResponse, WebSocketStatusResponse } from '@shared/config/types'
import type { WebSocketCandidatesResponse, WebSocketStatusResponse } from '@shared/config/types'
import * as fs from 'fs'
import { networkInterfaces } from 'os'
import * as path from 'path'

View File

@@ -0,0 +1,219 @@
import type { Assistant, Model, Provider } from '@renderer/types'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { OpenAIAPIClient } from '../openai/OpenAIApiClient'
// Mock dependencies
vi.mock('@renderer/config/models', () => ({
isSupportedReasoningEffortOpenAIModel: vi.fn((model: Model) => {
const modelId = model.id.toLowerCase()
return (
modelId.includes('gpt-5') ||
(modelId.includes('o1') && !modelId.includes('o1-preview') && !modelId.includes('o1-mini')) ||
modelId.includes('o3') ||
modelId.includes('o4')
)
}),
isSupportedReasoningEffortGrokModel: vi.fn((model: Model) => {
return model.id.toLowerCase().includes('grok')
}),
isSupportedReasoningEffortPerplexityModel: vi.fn((model: Model) => {
return model.id.toLowerCase().includes('sonar-deep-research')
}),
isSupportedReasoningEffortModel: vi.fn((model: Model) => {
const modelId = model.id.toLowerCase()
return (
modelId.includes('gpt-5') ||
modelId.includes('o1') ||
modelId.includes('o3') ||
modelId.includes('o4') ||
modelId.includes('grok') ||
modelId.includes('sonar-deep-research')
)
}),
isReasoningModel: vi.fn(() => true),
isOpenAIDeepResearchModel: vi.fn(() => false),
isSupportedThinkingTokenZhipuModel: vi.fn(() => false),
isDeepSeekHybridInferenceModel: vi.fn(() => false),
isSupportedThinkingTokenGeminiModel: vi.fn(() => false),
isSupportedThinkingTokenQwenModel: vi.fn(() => false),
isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
isSupportedThinkingTokenClaudeModel: vi.fn(() => false),
isSupportedThinkingTokenDoubaoModel: vi.fn(() => false),
isQwenReasoningModel: vi.fn(() => false),
isGrokReasoningModel: vi.fn(() => false),
isOpenAIReasoningModel: vi.fn(() => false),
isSupportedThinkingTokenModel: vi.fn(() => false),
isQwenAlwaysThinkModel: vi.fn(() => false),
isDoubaoThinkingAutoModel: vi.fn(() => false),
getThinkModelType: vi.fn(() => 'default'),
GEMINI_FLASH_MODEL_REGEX: /gemini.*flash/i,
MODEL_SUPPORTED_REASONING_EFFORT: {
default: ['low', 'medium', 'high'],
grok: ['low', 'high'],
perplexity: ['low', 'medium', 'high'],
gpt5: ['minimal', 'low', 'medium', 'high']
},
findTokenLimit: vi.fn()
}))
vi.mock('@renderer/config/providers', () => ({
isSupportEnableThinkingProvider: vi.fn(() => false)
}))
vi.mock('@renderer/hooks/useSettings', () => ({
getStoreSetting: vi.fn(() => ({
summaryText: 'off'
}))
}))
vi.mock('@renderer/types', () => ({
SystemProviderIds: {
groq: 'groq',
openrouter: 'openrouter',
dashscope: 'dashscope',
doubao: 'doubao',
silicon: 'silicon',
ppio: 'ppio',
poe: 'poe'
},
EFFORT_RATIO: {
minimal: 0.1,
low: 0.3,
medium: 0.5,
high: 0.8,
auto: 1
}
}))
describe('OpenAIAPIClient - Reasoning Effort', () => {
let client: OpenAIAPIClient
let provider: Provider
let assistant: Assistant
beforeEach(() => {
provider = {
id: 'copilot',
name: 'Github Copilot',
type: 'openai',
apiKey: 'test-key',
apiHost: 'https://api.githubcopilot.com/',
models: []
}
client = new OpenAIAPIClient(provider)
assistant = {
id: 'test-assistant',
name: 'Test Assistant',
emoji: '🤖',
prompt: 'You are a helpful assistant',
topics: [],
messages: [],
type: 'assistant',
regularPhrases: [],
settings: {
reasoning_effort: 'medium'
}
}
})
describe('GPT-5 models through GitHub Copilot', () => {
it('should return reasoning object format for gpt-5-mini', () => {
const model: Model = {
id: 'gpt-5-mini',
name: 'GPT-5 Mini',
provider: 'copilot',
group: 'openai'
}
const result = client.getReasoningEffort(assistant, model)
// Should use base class implementation which returns { reasoning: { effort, summary } }
expect(result).toHaveProperty('reasoning')
expect(result.reasoning).toHaveProperty('effort', 'medium')
expect(result.reasoning).toHaveProperty('summary')
expect(result).not.toHaveProperty('reasoning_effort')
})
it('should return reasoning object format for o1-2024-12-17', () => {
const model: Model = {
id: 'o1-2024-12-17',
name: 'O1',
provider: 'copilot',
group: 'openai'
}
const result = client.getReasoningEffort(assistant, model)
expect(result).toHaveProperty('reasoning')
expect(result.reasoning).toHaveProperty('effort', 'medium')
expect(result).not.toHaveProperty('reasoning_effort')
})
it('should return reasoning object format for o3-mini', () => {
const model: Model = {
id: 'o3-mini',
name: 'O3 Mini',
provider: 'copilot',
group: 'openai'
}
const result = client.getReasoningEffort(assistant, model)
expect(result).toHaveProperty('reasoning')
expect(result.reasoning).toHaveProperty('effort', 'medium')
expect(result).not.toHaveProperty('reasoning_effort')
})
})
describe('Non-OpenAI reasoning models', () => {
it('should return reasoning_effort format for Grok models', () => {
const model: Model = {
id: 'grok-3-mini',
name: 'Grok 3 Mini',
provider: 'grok',
group: 'xai'
}
const result = client.getReasoningEffort(assistant, model)
// Should use reasoning_effort for non-OpenAI models
expect(result).toHaveProperty('reasoning_effort', 'medium')
expect(result).not.toHaveProperty('reasoning')
})
it('should return reasoning_effort format for Perplexity models', () => {
const model: Model = {
id: 'sonar-deep-research',
name: 'Sonar Deep Research',
provider: 'perplexity',
group: 'perplexity'
}
const result = client.getReasoningEffort(assistant, model)
expect(result).toHaveProperty('reasoning_effort', 'medium')
expect(result).not.toHaveProperty('reasoning')
})
})
describe('When reasoning_effort is not set', () => {
beforeEach(() => {
assistant.settings = {}
})
it('should return empty object for GPT-5 models', () => {
const model: Model = {
id: 'gpt-5-mini',
name: 'GPT-5 Mini',
provider: 'copilot',
group: 'openai'
}
const result = client.getReasoningEffort(assistant, model)
expect(result).toEqual({})
})
})
})

View File

@@ -1,6 +1,7 @@
import { BedrockClient, ListFoundationModelsCommand, ListInferenceProfilesCommand } from '@aws-sdk/client-bedrock'
import {
BedrockRuntimeClient,
type BedrockRuntimeClientConfig,
ConverseCommand,
InvokeModelCommand,
InvokeModelWithResponseStreamCommand
@@ -11,6 +12,8 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import { findTokenLimit, isReasoningModel } from '@renderer/config/models'
import {
getAwsBedrockAccessKeyId,
getAwsBedrockApiKey,
getAwsBedrockAuthType,
getAwsBedrockRegion,
getAwsBedrockSecretAccessKey
} from '@renderer/hooks/useAwsBedrock'
@@ -75,32 +78,48 @@ export class AwsBedrockAPIClient extends BaseApiClient<
}
const region = getAwsBedrockRegion()
const accessKeyId = getAwsBedrockAccessKeyId()
const secretAccessKey = getAwsBedrockSecretAccessKey()
const authType = getAwsBedrockAuthType()
if (!region) {
throw new Error('AWS region is required. Please configure AWS-Region in extra headers.')
throw new Error('AWS region is required. Please configure AWS region in settings.')
}
if (!accessKeyId || !secretAccessKey) {
throw new Error('AWS credentials are required. Please configure AWS-Access-Key-ID and AWS-Secret-Access-Key.')
// Build client configuration based on auth type
let clientConfig: BedrockRuntimeClientConfig
if (authType === 'iam') {
// IAM credentials authentication
const accessKeyId = getAwsBedrockAccessKeyId()
const secretAccessKey = getAwsBedrockSecretAccessKey()
if (!accessKeyId || !secretAccessKey) {
throw new Error('AWS credentials are required. Please configure Access Key ID and Secret Access Key.')
}
clientConfig = {
region,
credentials: {
accessKeyId,
secretAccessKey
}
}
} else {
// API Key authentication
const awsBedrockApiKey = getAwsBedrockApiKey()
if (!awsBedrockApiKey) {
throw new Error('AWS Bedrock API Key is required. Please configure API Key in settings.')
}
clientConfig = {
region,
token: { token: awsBedrockApiKey },
authSchemePreference: ['httpBearerAuth']
}
}
const client = new BedrockRuntimeClient({
region,
credentials: {
accessKeyId,
secretAccessKey
}
})
const bedrockClient = new BedrockClient({
region,
credentials: {
accessKeyId,
secretAccessKey
}
})
const client = new BedrockRuntimeClient(clientConfig)
const bedrockClient = new BedrockClient(clientConfig)
this.sdkInstance = { client, bedrockClient, region }
return this.sdkInstance
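The two client configurations produced by the branch above look roughly as follows; the region and key values are placeholders:

import { BedrockRuntimeClient, type BedrockRuntimeClientConfig } from '@aws-sdk/client-bedrock-runtime'

// IAM-credential path (authType === 'iam')
const iamConfig: BedrockRuntimeClientConfig = {
  region: 'us-east-1',
  credentials: { accessKeyId: '<access-key-id>', secretAccessKey: '<secret-access-key>' }
}

// Bedrock API Key path: bearer-token auth, selected via authSchemePreference
const apiKeyConfig: BedrockRuntimeClientConfig = {
  region: 'us-east-1',
  token: { token: '<bedrock-api-key>' },
  authSchemePreference: ['httpBearerAuth']
}

const client = new BedrockRuntimeClient(apiKeyConfig)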

View File

@@ -192,7 +192,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
extra_body: {
google: {
thinking_config: {
thinkingBudget: 0
thinking_budget: 0
}
}
}
@@ -306,6 +306,13 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
// Grok models/Perplexity models/OpenAI models
if (isSupportedReasoningEffortModel(model)) {
// For OpenAI models (GPT-5, o1, o3, o4, etc), use the base class implementation
// which returns the correct { reasoning: { effort, summary } } format
if (isSupportedReasoningEffortOpenAIModel(model)) {
return super.getReasoningEffort(assistant, model);
}
// For non-OpenAI models (Grok, Perplexity, etc), use reasoning_effort parameter
// 检查模型是否支持所选选项
const modelType = getThinkModelType(model)
const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
@@ -327,8 +334,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
extra_body: {
google: {
thinking_config: {
thinkingBudget: -1,
includeThoughts: true
thinking_budget: -1,
include_thoughts: true
}
}
}
@@ -338,8 +345,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
extra_body: {
google: {
thinking_config: {
thinkingBudget: budgetTokens,
includeThoughts: true
thinking_budget: budgetTokens,
include_thoughts: true
}
}
}
@@ -670,7 +677,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
} else if (isClaudeReasoningModel(model) && reasoningEffort.thinking?.budget_tokens) {
suffix = ` --thinking_budget ${reasoningEffort.thinking.budget_tokens}`
} else if (isGeminiReasoningModel(model) && reasoningEffort.extra_body?.google?.thinking_config) {
suffix = ` --thinking_budget ${reasoningEffort.extra_body.google.thinking_config.thinkingBudget}`
suffix = ` --thinking_budget ${reasoningEffort.extra_body.google.thinking_config.thinking_budget}`
}
// FIXME: poe 不支持多个text part上传文本文件的时候用的不是file part而是text part因此会出问题
// 临时解决方案是强制poe用string content但是其实poe部分支持array
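In short, the fix above routes OpenAI-style reasoning models (GPT-5, o1, o3, o4) to the base-class format while Grok and Perplexity keep the flat Chat Completions parameter, as the new tests assert. A simplified sketch of that decision; the regex stand-in and the summary value are assumptions, not the real model checks:

import type { Assistant, Model } from '@renderer/types'

function reasoningPayloadSketch(assistant: Assistant, model: Model) {
  const effort = assistant.settings?.reasoning_effort
  if (!effort) return {}
  // The real code calls isSupportedReasoningEffortOpenAIModel(model); this regex is only a stand-in.
  const isOpenAIStyle = /gpt-5|o1|o3|o4/i.test(model.id)
  return isOpenAIStyle
    ? { reasoning: { effort, summary: 'auto' } } // base-class shape; summary value depends on settings
    : { reasoning_effort: effort } // Grok / Perplexity shape
}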

View File

@@ -85,6 +85,19 @@ export function supportsLargeFileUpload(model: Model): boolean {
})
}
/**
* 检查模型是否支持TopP
*/
export function supportsTopP(model: Model): boolean {
const provider = getProviderByModel(model)
if (provider?.type === 'anthropic' || model?.endpoint_type === 'anthropic') {
return false
}
return true
}
/**
* 获取提供商特定的文件大小限制
*/

View File

@@ -34,6 +34,7 @@ import { setupToolsConfig } from '../utils/mcp'
import { buildProviderOptions } from '../utils/options'
import { getAnthropicThinkingBudget } from '../utils/reasoning'
import { buildProviderBuiltinWebSearchConfig } from '../utils/websearch'
import { supportsTopP } from './modelCapabilities'
import { getTemperature, getTopP } from './modelParameters'
const logger = loggerService.withContext('parameterBuilder')
@@ -176,20 +177,27 @@ export async function buildStreamTextParams(
messages: sdkMessages,
maxOutputTokens: maxTokens,
temperature: getTemperature(assistant, model),
topP: getTopP(assistant, model),
abortSignal: options.requestOptions?.signal,
headers: options.requestOptions?.headers,
providerOptions,
stopWhen: stepCountIs(20),
maxRetries: 0
}
if (supportsTopP(model)) {
params.topP = getTopP(assistant, model)
}
if (tools) {
params.tools = tools
}
if (assistant.prompt) {
params.system = await replacePromptVariables(assistant.prompt, model.name)
}
logger.debug('params', params)
return {
params,
modelId: model.id,
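A minimal sketch of the topP gating introduced above, with the types and the provider lookup simplified for illustration:

// Simplified stand-ins; the real check lives in modelCapabilities.ts and uses getProviderByModel.
interface ModelLike { id: string; endpoint_type?: string }

function supportsTopPSketch(model: ModelLike, providerType?: string): boolean {
  // Anthropic providers/endpoints opt out of topP entirely.
  return !(providerType === 'anthropic' || model.endpoint_type === 'anthropic')
}

const params: Record<string, unknown> = { temperature: 0.7, maxOutputTokens: 4096 }
const model: ModelLike = { id: 'gpt-4o' }
if (supportsTopPSketch(model, 'openai')) {
  params.topP = 0.9 // the real builder uses getTopP(assistant, model)
}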

View File

@@ -21,10 +21,44 @@ vi.mock('@renderer/store', () => ({
}
}))
vi.mock('@renderer/utils/api', () => ({
formatApiHost: vi.fn((host, isSupportedAPIVersion = true) => {
if (isSupportedAPIVersion === false) {
return host // Return host as-is when isSupportedAPIVersion is false
}
return `${host}/v1` // Default behavior when isSupportedAPIVersion is true
}),
routeToEndpoint: vi.fn((host) => ({
baseURL: host,
endpoint: '/chat/completions'
}))
}))
vi.mock('@renderer/config/providers', async (importOriginal) => {
const actual = (await importOriginal()) as any
return {
...actual,
isCherryAIProvider: vi.fn(),
isAnthropicProvider: vi.fn(() => false),
isAzureOpenAIProvider: vi.fn(() => false),
isGeminiProvider: vi.fn(() => false),
isNewApiProvider: vi.fn(() => false)
}
})
vi.mock('@renderer/hooks/useVertexAI', () => ({
isVertexProvider: vi.fn(() => false),
isVertexAIConfigured: vi.fn(() => false),
createVertexProvider: vi.fn()
}))
import { isCherryAIProvider } from '@renderer/config/providers'
import { getProviderByModel } from '@renderer/services/AssistantService'
import type { Model, Provider } from '@renderer/types'
import { formatApiHost } from '@renderer/utils/api'
import { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '../constants'
import { providerToAiSdkConfig } from '../providerConfig'
import { getActualProvider, providerToAiSdkConfig } from '../providerConfig'
const createWindowKeyv = () => {
const store = new Map<string, string>()
@@ -46,11 +80,21 @@ const createCopilotProvider = (): Provider => ({
isSystem: true
})
const createModel = (id: string, name = id): Model => ({
const createModel = (id: string, name = id, provider = 'copilot'): Model => ({
id,
name,
provider: 'copilot',
group: 'copilot'
provider,
group: provider
})
const createCherryAIProvider = (): Provider => ({
id: 'cherryai',
type: 'openai',
name: 'CherryAI',
apiKey: 'test-key',
apiHost: 'https://api.cherryai.com',
models: [],
isSystem: false
})
describe('Copilot responses routing', () => {
@@ -87,3 +131,67 @@ describe('Copilot responses routing', () => {
expect(config.options.headers?.['Copilot-Integration-Id']).toBe(COPILOT_DEFAULT_HEADERS['Copilot-Integration-Id'])
})
})
describe('CherryAI provider configuration', () => {
beforeEach(() => {
;(globalThis as any).window = {
...(globalThis as any).window,
keyv: createWindowKeyv()
}
vi.clearAllMocks()
})
it('formats CherryAI provider apiHost with false parameter', () => {
const provider = createCherryAIProvider()
const model = createModel('gpt-4', 'GPT-4', 'cherryai')
// Mock the functions to simulate CherryAI provider detection
vi.mocked(isCherryAIProvider).mockReturnValue(true)
vi.mocked(getProviderByModel).mockReturnValue(provider)
// Call getActualProvider which should trigger formatProviderApiHost
const actualProvider = getActualProvider(model)
// Verify that formatApiHost was called with false as the second parameter
expect(formatApiHost).toHaveBeenCalledWith('https://api.cherryai.com', false)
expect(actualProvider.apiHost).toBe('https://api.cherryai.com')
})
it('does not format non-CherryAI provider with false parameter', () => {
const provider = {
id: 'openai',
type: 'openai',
name: 'OpenAI',
apiKey: 'test-key',
apiHost: 'https://api.openai.com',
models: [],
isSystem: false
} as Provider
const model = createModel('gpt-4', 'GPT-4', 'openai')
// Mock the functions to simulate non-CherryAI provider
vi.mocked(isCherryAIProvider).mockReturnValue(false)
vi.mocked(getProviderByModel).mockReturnValue(provider)
// Call getActualProvider
const actualProvider = getActualProvider(model)
// Verify that formatApiHost was called with default parameters (true)
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
})
it('handles CherryAI provider with empty apiHost', () => {
const provider = createCherryAIProvider()
provider.apiHost = ''
const model = createModel('gpt-4', 'GPT-4', 'cherryai')
vi.mocked(isCherryAIProvider).mockReturnValue(true)
vi.mocked(getProviderByModel).mockReturnValue(provider)
const actualProvider = getActualProvider(model)
expect(formatApiHost).toHaveBeenCalledWith('', false)
expect(actualProvider.apiHost).toBe('')
})
})

View File

@@ -52,7 +52,7 @@ const AIHUBMIX_RULES: RuleSet = {
}
}
],
fallbackRule: (provider: Provider) => provider
fallbackRule: (provider: Provider) => extraProviderConfig(provider)
}
export const aihubmixProviderCreator = provider2Provider.bind(null, AIHUBMIX_RULES)

View File

@@ -9,11 +9,14 @@ import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
import {
isAnthropicProvider,
isAzureOpenAIProvider,
isCherryAIProvider,
isGeminiProvider,
isNewApiProvider
} from '@renderer/config/providers'
import {
getAwsBedrockAccessKeyId,
getAwsBedrockApiKey,
getAwsBedrockAuthType,
getAwsBedrockRegion,
getAwsBedrockSecretAccessKey
} from '@renderer/hooks/useAwsBedrock'
@@ -98,6 +101,8 @@ function formatProviderApiHost(provider: Provider): Provider {
formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost)
} else if (isVertexProvider(formatted)) {
formatted.apiHost = formatVertexApiHost(formatted)
} else if (isCherryAIProvider(formatted)) {
formatted.apiHost = formatApiHost(formatted.apiHost, false)
} else {
formatted.apiHost = formatApiHost(formatted.apiHost)
}
@@ -192,9 +197,15 @@ export function providerToAiSdkConfig(
// bedrock
if (aiSdkProviderId === 'bedrock') {
const authType = getAwsBedrockAuthType()
extraOptions.region = getAwsBedrockRegion()
extraOptions.accessKeyId = getAwsBedrockAccessKeyId()
extraOptions.secretAccessKey = getAwsBedrockSecretAccessKey()
if (authType === 'apiKey') {
extraOptions.apiKey = getAwsBedrockApiKey()
} else {
extraOptions.accessKeyId = getAwsBedrockAccessKeyId()
extraOptions.secretAccessKey = getAwsBedrockSecretAccessKey()
}
}
// google-vertex
if (aiSdkProviderId === 'google-vertex' || aiSdkProviderId === 'google-vertex-anthropic') {
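Based on the behavior the new tests expect, CherryAI hosts bypass the /v1 suffix while other OpenAI-compatible providers keep it; a sketch of that difference (the production formatApiHost in @renderer/utils/api may handle more cases):

// Illustrative reimplementation of the mocked behavior from the tests above.
function formatApiHostSketch(host: string, isSupportedAPIVersion = true): string {
  return isSupportedAPIVersion ? `${host}/v1` : host
}

formatApiHostSketch('https://api.openai.com')           // 'https://api.openai.com/v1'
formatApiHostSketch('https://api.cherryai.com', false)  // 'https://api.cherryai.com' (CherryAI branch)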

View File

@@ -17,6 +17,7 @@ import { getAiSdkProviderId } from '../provider/factory'
import { buildGeminiGenerateImageParams } from './image'
import {
getAnthropicReasoningParams,
getBedrockReasoningParams,
getCustomParameters,
getGeminiReasoningParams,
getOpenAIReasoningParams,
@@ -127,6 +128,9 @@ export function buildProviderOptions(
case 'google-vertex-anthropic':
providerSpecificOptions = buildAnthropicProviderOptions(assistant, model, capabilities)
break
case 'bedrock':
providerSpecificOptions = buildBedrockProviderOptions(assistant, model, capabilities)
break
default:
// 对于其他 provider使用通用的构建逻辑
providerSpecificOptions = {
@@ -266,6 +270,32 @@ function buildXAIProviderOptions(
return providerOptions
}
/**
* Build Bedrock providerOptions
*/
function buildBedrockProviderOptions(
assistant: Assistant,
model: Model,
capabilities: {
enableReasoning: boolean
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
const { enableReasoning } = capabilities
let providerOptions: Record<string, any> = {}
if (enableReasoning) {
const reasoningParams = getBedrockReasoningParams(assistant, model)
providerOptions = {
...providerOptions,
...reasoningParams
}
}
return providerOptions
}
/**
* 构建通用的 providerOptions用于其他 provider
*/

View File

@@ -98,7 +98,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
extra_body: {
google: {
thinking_config: {
thinkingBudget: 0
thinking_budget: 0
}
}
}
@@ -259,8 +259,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
extra_body: {
google: {
thinking_config: {
thinkingBudget: -1,
includeThoughts: true
thinking_budget: -1,
include_thoughts: true
}
}
}
@@ -270,8 +270,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
extra_body: {
google: {
thinking_config: {
thinkingBudget: budgetTokens,
includeThoughts: true
thinking_budget: budgetTokens ?? -1,
include_thoughts: true
}
}
}
@@ -431,8 +431,8 @@ export function getGeminiReasoningParams(assistant: Assistant, model: Model): Re
if (reasoningEffort === undefined) {
return {
thinkingConfig: {
includeThoughts: false,
...(GEMINI_FLASH_MODEL_REGEX.test(model.id) ? { thinkingBudget: 0 } : {})
include_thoughts: false,
...(GEMINI_FLASH_MODEL_REGEX.test(model.id) ? { thinking_budget: 0 } : {})
}
}
}
@@ -442,7 +442,7 @@ export function getGeminiReasoningParams(assistant: Assistant, model: Model): Re
if (effortRatio > 1) {
return {
thinkingConfig: {
includeThoughts: true
include_thoughts: true
}
}
}
@@ -452,8 +452,8 @@ export function getGeminiReasoningParams(assistant: Assistant, model: Model): Re
return {
thinkingConfig: {
...(budget > 0 ? { thinkingBudget: budget } : {}),
includeThoughts: true
...(budget > 0 ? { thinking_budget: budget } : {}),
include_thoughts: true
}
}
}
@@ -485,6 +485,34 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Recor
}
}
/**
* Get Bedrock reasoning parameters
*/
export function getBedrockReasoningParams(assistant: Assistant, model: Model): Record<string, any> {
if (!isReasoningModel(model)) {
return {}
}
const reasoningEffort = assistant?.settings?.reasoning_effort
if (reasoningEffort === undefined) {
return {}
}
// Only apply thinking budget for Claude reasoning models
if (!isSupportedThinkingTokenClaudeModel(model)) {
return {}
}
const budgetTokens = getAnthropicThinkingBudget(assistant, model)
return {
reasoningConfig: {
type: 'enabled',
budgetTokens: budgetTokens
}
}
}
/**
* 获取自定义参数
* 从 assistant 设置中提取自定义参数
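For Claude reasoning models on Bedrock, the options built by getBedrockReasoningParams flow into providerOptions roughly as below; the budget value is a placeholder (the real one comes from getAnthropicThinkingBudget):

// Shape passed through buildBedrockProviderOptions when reasoning is enabled.
const bedrockProviderOptions = {
  reasoningConfig: {
    type: 'enabled',
    budgetTokens: 4096 // placeholder; derived from the assistant's reasoning_effort in practice
  }
}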

View File

@@ -0,0 +1,13 @@
<svg xmlns="http://www.w3.org/2000/svg" width="33" height="38" viewBox="0 0 33 38" fill="none">
<g clip-path="url(#clip0_4321_9943)">
<path d="M1.51221 6.59813C1.51221 4.09263 3.54331 2.06152 6.04881 2.06152H27.9757C30.4812 2.06152 32.5123 4.09263 32.5123 6.59813C32.5123 9.10362 30.4812 11.1347 27.9757 11.1347H6.04881C3.54331 11.1347 1.51221 9.10362 1.51221 6.59813Z" fill="#6200EE"/>
<path d="M3.38905 3.56467C5.26076 1.89906 8.12831 2.06615 9.79391 3.93785L22.1493 17.8221C23.8149 19.6938 23.6478 22.5614 21.7761 24.227C19.9044 25.8926 17.0369 25.7255 15.3713 23.8538L3.01586 9.96953C1.35026 8.09782 1.51734 5.23027 3.38905 3.56467Z" fill="#6200EE"/>
<path d="M1.51221 20.9643C1.51221 18.4588 3.54331 16.4277 6.04881 16.4277H18.9025C21.408 16.4277 23.4391 18.4588 23.4391 20.9643C23.4391 23.4698 21.408 25.5009 18.9025 25.5009H6.04881C3.54331 25.5009 1.51221 23.4698 1.51221 20.9643Z" fill="#6200EE"/>
<path d="M10.5854 32.3052C10.5854 34.8107 8.55431 36.8418 6.04881 36.8418C3.54331 36.8418 1.51221 34.8107 1.51221 32.3052C1.51221 29.7997 3.54331 27.7686 6.04881 27.7686C8.55431 27.7686 10.5854 29.7997 10.5854 32.3052Z" fill="#BF7AFF"/>
</g>
<defs>
<clipPath id="clip0_4321_9943">
<rect width="32.5124" height="36.9029" fill="white" transform="translate(0 0.548828)"/>
</clipPath>
</defs>
</svg>


View File

@@ -5,7 +5,7 @@ import { Spinner } from '@heroui/spinner'
import { loggerService } from '@logger'
import { AppLogo } from '@renderer/config/env'
import { SettingHelpText, SettingRow } from '@renderer/pages/settings'
import { WebSocketCandidatesResponse } from '@shared/config/types'
import type { WebSocketCandidatesResponse } from '@shared/config/types'
import { QRCodeSVG } from 'qrcode.react'
import { useCallback, useEffect, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'

View File

@@ -64,6 +64,7 @@ export type QuickPanelListItem = {
isSelected?: boolean
isMenu?: boolean
disabled?: boolean
hidden?: boolean
/**
* 固定显示项:不参与过滤,始终出现在列表顶部。
* 例如“清除”按钮可设置为 alwaysVisible从而在有匹配项时始终可见

View File

@@ -143,7 +143,8 @@ export const QuickPanelView: React.FC<Props> = ({ setInputText }) => {
prevSymbolRef.current = ctx.symbol
// 固定项置顶 + 过滤后的普通项
return [...pinnedItems, ...filteredNormalItems]
const pinnedFiltered = [...pinnedItems, ...filteredNormalItems]
return pinnedFiltered.filter((item) => !item.hidden)
}, [ctx.isVisible, ctx.symbol, ctx.list, searchText])
const canForwardAndBackward = useMemo(() => {
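A sketch of how a caller can use the new hidden flag: the item stays registered but is dropped by the !item.hidden filter above. Field names other than hidden are illustrative:

// Hypothetical quick-panel entry; `hasKnowledgeBases` is a stand-in condition.
const hasKnowledgeBases = false
const knowledgeItem = {
  label: 'Knowledge Base',
  description: 'Attach a knowledge base to the message',
  hidden: !hasKnowledgeBases,
  action: () => {}
}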

View File

@@ -27,6 +27,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
],
cherryin: [],
vertexai: [],
sophnet: [],
'302ai': [
{
id: 'deepseek-chat',

View File

@@ -46,6 +46,7 @@ import Ph8ProviderLogo from '@renderer/assets/images/providers/ph8.png'
import PPIOProviderLogo from '@renderer/assets/images/providers/ppio.png'
import QiniuProviderLogo from '@renderer/assets/images/providers/qiniu.webp'
import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
import SophnetProviderLogo from '@renderer/assets/images/providers/sophnet.svg'
import StepProviderLogo from '@renderer/assets/images/providers/step.png'
import TencentCloudProviderLogo from '@renderer/assets/images/providers/tencent-cloud-ti.png'
import TogetherProviderLogo from '@renderer/assets/images/providers/together.png'
@@ -246,6 +247,16 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
isSystem: true,
enabled: false
},
sophnet: {
id: 'sophnet',
name: 'SophNet',
type: 'openai',
apiKey: '',
apiHost: 'https://www.sophnet.com/api/open-apis/v1',
models: [],
isSystem: true,
enabled: false
},
ppio: {
id: 'ppio',
name: 'PPIO',
@@ -729,7 +740,8 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
poe: 'poe', // use svg icon component
aionly: AiOnlyProviderLogo,
longcat: LongCatProviderLogo,
huggingface: HuggingfaceProviderLogo
huggingface: HuggingfaceProviderLogo,
sophnet: SophnetProviderLogo
} as const
export function getProviderLogo(providerId: string) {
@@ -808,6 +820,17 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
models: 'https://ai.burncloud.com/pricing'
}
},
sophnet: {
api: {
url: 'https://www.sophnet.com/api/open-apis/v1'
},
websites: {
official: 'https://sophnet.com',
apiKey: 'https://sophnet.com/#/project/key',
docs: 'https://sophnet.com/docs/component/introduce.html',
models: 'https://sophnet.com/#/model/list'
}
},
ppio: {
api: {
url: 'https://api.ppinfra.com/v3/openai'
@@ -1463,6 +1486,10 @@ export const isNewApiProvider = (provider: Provider) => {
return ['new-api', 'cherryin'].includes(provider.id) || provider.type === 'new-api'
}
export function isCherryAIProvider(provider: Provider): boolean {
return provider.id === 'cherryai'
}
/**
* 判断是否为 OpenAI 兼容的提供商
* @param {Provider} provider 提供商对象

View File

@@ -1,5 +1,12 @@
import store, { useAppSelector } from '@renderer/store'
import { setAwsBedrockAccessKeyId, setAwsBedrockRegion, setAwsBedrockSecretAccessKey } from '@renderer/store/llm'
import {
setAwsBedrockAccessKeyId,
setAwsBedrockApiKey,
setAwsBedrockAuthType,
setAwsBedrockRegion,
setAwsBedrockSecretAccessKey
} from '@renderer/store/llm'
import type { AwsBedrockAuthType } from '@renderer/types'
import { useDispatch } from 'react-redux'
export function useAwsBedrockSettings() {
@@ -8,8 +15,10 @@ export function useAwsBedrockSettings() {
return {
...settings,
setAuthType: (authType: AwsBedrockAuthType) => dispatch(setAwsBedrockAuthType(authType)),
setAccessKeyId: (accessKeyId: string) => dispatch(setAwsBedrockAccessKeyId(accessKeyId)),
setSecretAccessKey: (secretAccessKey: string) => dispatch(setAwsBedrockSecretAccessKey(secretAccessKey)),
setApiKey: (apiKey: string) => dispatch(setAwsBedrockApiKey(apiKey)),
setRegion: (region: string) => dispatch(setAwsBedrockRegion(region))
}
}
@@ -18,6 +27,10 @@ export function getAwsBedrockSettings() {
return store.getState().llm.settings.awsBedrock
}
export function getAwsBedrockAuthType() {
return store.getState().llm.settings.awsBedrock.authType
}
export function getAwsBedrockAccessKeyId() {
return store.getState().llm.settings.awsBedrock.accessKeyId
}
@@ -26,6 +39,10 @@ export function getAwsBedrockSecretAccessKey() {
return store.getState().llm.settings.awsBedrock.secretAccessKey
}
export function getAwsBedrockApiKey() {
return store.getState().llm.settings.awsBedrock.apiKey
}
export function getAwsBedrockRegion() {
return store.getState().llm.settings.awsBedrock.region
}

View File

@@ -85,7 +85,8 @@ const providerKeyMap = {
poe: 'provider.poe',
aionly: 'provider.aionly',
longcat: 'provider.longcat',
huggingface: 'provider.huggingface'
huggingface: 'provider.huggingface',
sophnet: 'provider.sophnet'
} as const
/**
@@ -238,7 +239,7 @@ const paintingsImageSizeOptionsKeyMap = {
} as const
export const getPaintingsImageSizeOptionsLabel = (key: string): string => {
return getLabel(paintingsImageSizeOptionsKeyMap, key)
return paintingsImageSizeOptionsKeyMap[key] ? getLabel(paintingsImageSizeOptionsKeyMap, key) : key
}
const paintingsQualityOptionsKeyMap = {

View File

@@ -2482,6 +2482,7 @@
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "SiliconFlow",
"sophnet": "SophNet",
"stepfun": "StepFun",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Together",
@@ -4259,6 +4260,12 @@
"aws-bedrock": {
"access_key_id": "AWS Access Key ID",
"access_key_id_help": "Your AWS Access Key ID for accessing AWS Bedrock services",
"api_key": "Bedrock API Key",
"api_key_help": "Your AWS Bedrock API Key for authentication",
"auth_type": "Authentication Type",
"auth_type_api_key": "Bedrock API Key",
"auth_type_help": "Choose between IAM credentials or Bedrock API Key authentication",
"auth_type_iam": "IAM Credentials",
"description": "AWS Bedrock is Amazon's fully managed foundation model service that supports various advanced large language models",
"region": "AWS Region",
"region_help": "Your AWS service region, e.g., us-east-1",

View File

@@ -2482,6 +2482,7 @@
"qiniu": "七牛云 AI 推理",
"qwenlm": "QwenLM",
"silicon": "硅基流动",
"sophnet": "SophNet",
"stepfun": "阶跃星辰",
"tencent-cloud-ti": "腾讯云 TI",
"together": "Together",
@@ -4259,6 +4260,12 @@
"aws-bedrock": {
"access_key_id": "AWS 访问密钥 ID",
"access_key_id_help": "您的 AWS 访问密钥 ID用于访问 AWS Bedrock 服务",
"api_key": "Bedrock API 密钥",
"api_key_help": "您的 AWS Bedrock API 密钥,用于身份验证",
"auth_type": "认证方式",
"auth_type_api_key": "Bedrock API 密钥",
"auth_type_help": "选择使用 IAM 凭证或 Bedrock API 密钥进行身份验证",
"auth_type_iam": "IAM 凭证",
"description": "AWS Bedrock 是亚马逊提供的全托管基础模型服务,支持多种先进的大语言模型",
"region": "AWS 区域",
"region_help": "您的 AWS 服务区域,例如 us-east-1",

View File

@@ -2482,6 +2482,7 @@
"qiniu": "七牛雲 AI 推理",
"qwenlm": "QwenLM",
"silicon": "SiliconFlow",
"sophnet": "SophNet",
"stepfun": "StepFun",
"tencent-cloud-ti": "騰訊雲 TI",
"together": "Together",
@@ -4259,6 +4260,12 @@
"aws-bedrock": {
"access_key_id": "AWS 存取密鑰 ID",
"access_key_id_help": "您的 AWS 存取密鑰 ID用於存取 AWS Bedrock 服務",
"api_key": "Bedrock API 金鑰",
"api_key_help": "您的 AWS Bedrock API 金鑰,用於身份驗證",
"auth_type": "認證方式",
"auth_type_api_key": "Bedrock API 金鑰",
"auth_type_help": "選擇使用 IAM 憑證或 Bedrock API 金鑰進行身份驗證",
"auth_type_iam": "IAM 憑證",
"description": "AWS Bedrock 是亞馬遜提供的全托管基础模型服務,支持多種先進的大語言模型",
"region": "AWS 區域",
"region_help": "您的 AWS 服務區域,例如 us-east-1",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Bewältigen Sie komplexe Aufgaben mit verschiedenen Werkzeugen",
"error": {
"failed": "Agent hinzufügen fehlgeschlagen",
"invalid_agent": "Ungültiger Agent"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Tägliche Gespräche und schnelle Fragen & Antworten",
"title": "Assistent hinzufügen"
},
"option": {
"title": "Typ auswählen"
},
"topic": {
"title": "Neues Thema erstellen"
}
@@ -2471,12 +2476,13 @@
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8 Großmodell-Plattform",
"ph8": "PH8",
"poe": "Poe",
"ppio": "PPIO Cloud",
"qiniu": "Qiniu Cloud KI-Inferenz",
"qwenlm": "QwenLM",
"silicon": "SiliconFlow",
"sophnet": "SophNet",
"stepfun": "StepFun",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Together",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Χειριστείτε πολύπλοκες εργασίες με διάφορα εργαλεία",
"error": {
"failed": "Αποτυχία προσθήκης πράκτορα",
"invalid_agent": "Μη έγκυρος Agent"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Καθημερινές συνομιλίες και γρήγορες ερωταπαντήσεις",
"title": "Προσθήκη βοηθού"
},
"option": {
"title": "Επιλέξτε Τύπο"
},
"topic": {
"title": "Δημιουργία νέου θέματος"
}
@@ -2471,12 +2476,13 @@
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "Πλατφόρμα Ανοιχτής Μεγάλης Μοντέλου PH8",
"ph8": "PH8",
"poe": "Poe",
"ppio": "PPIO Piao Yun",
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "Σιδηρική Παρουσία",
"sophnet": "SophNet",
"stepfun": "Βήμα Ουράς",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Together",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Maneja tareas complejas con varias herramientas",
"error": {
"failed": "Error al añadir agente",
"invalid_agent": "Agent inválido"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Conversaciones diarias y preguntas y respuestas rápidas",
"title": "Agregar asistente"
},
"option": {
"title": "Seleccionar Tipo"
},
"topic": {
"title": "Crear nuevo tema"
}
@@ -2471,12 +2476,13 @@
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplejidad",
"ph8": "Plataforma Abierta de Grandes Modelos PH8",
"ph8": "PH8",
"poe": "Poe",
"ppio": "PPIO Cloud Piao",
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "Silicio Fluido",
"sophnet": "SophNet",
"stepfun": "Función Salto",
"tencent-cloud-ti": "Tencent Nube TI",
"together": "Juntos",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Gérez des tâches complexes avec divers outils",
"error": {
"failed": "Échec de l'ajout de l'agent",
"invalid_agent": "Agent invalide"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Conversations quotidiennes et Q&R rapides",
"title": "Ajouter un assistant"
},
"option": {
"title": "Sélectionner le type"
},
"topic": {
"title": "Nouveau sujet"
}
@@ -2471,12 +2476,13 @@
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexité",
"ph8": "Plateforme ouverte de grands modèles PH8",
"ph8": "PH8",
"poe": "Poe",
"ppio": "PPIO Cloud Piou",
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "Silicium Fluide",
"sophnet": "SophNet",
"stepfun": "Échelon Étoile",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Ensemble",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "さまざまなツールを使って複雑なタスクを処理する",
"error": {
"failed": "エージェントの追加に失敗しました",
"invalid_agent": "無効なエージェント"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "日常会話と簡単なQ&A",
"title": "アシスタントを追加"
},
"option": {
"title": "種類を選択"
},
"topic": {
"title": "新しいトピック"
}
@@ -2477,6 +2482,7 @@
"qiniu": "七牛云 AI 推理",
"qwenlm": "QwenLM",
"silicon": "SiliconFlow",
"sophnet": "SophNet",
"stepfun": "StepFun",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Together",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Lide com tarefas complexas usando várias ferramentas",
"error": {
"failed": "Falha ao adicionar agente",
"invalid_agent": "Agent inválido"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Conversas diárias e perguntas e respostas rápidas",
"title": "Adicionar assistente"
},
"option": {
"title": "Selecionar Tipo"
},
"topic": {
"title": "Novo Tópico"
}
@@ -2471,12 +2476,13 @@
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexidade",
"ph8": "Plataforma Aberta de Grandes Modelos PH8",
"ph8": "PH8",
"poe": "Poe",
"ppio": "PPIO Nuvem Piao",
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "Silício em Fluxo",
"sophnet": "SophNet",
"stepfun": "Função de Passo Estelar",
"tencent-cloud-ti": "Nuvem TI da Tencent",
"together": "Juntos",

View File

@@ -1,6 +1,7 @@
{
"agent": {
"add": {
"description": "Справляйтесь со сложными задачами с помощью различных инструментов",
"error": {
"failed": "Не удалось добавить агента",
"invalid_agent": "Недействительный агент"
@@ -547,8 +548,12 @@
"chat": {
"add": {
"assistant": {
"description": "Ежедневные разговоры и быстрые вопросы и ответы",
"title": "Добавить ассистента"
},
"option": {
"title": "Выберите тип"
},
"topic": {
"title": "Новый топик"
}
@@ -2477,6 +2482,7 @@
"qiniu": "Qiniu AI",
"qwenlm": "QwenLM",
"silicon": "SiliconFlow",
"sophnet": "SophNet",
"stepfun": "StepFun",
"tencent-cloud-ti": "Tencent Cloud TI",
"together": "Together",

View File

@@ -2,6 +2,7 @@ import type { DropResult } from '@hello-pangea/dnd'
import { DragDropContext, Draggable, Droppable } from '@hello-pangea/dnd'
import { loggerService } from '@logger'
import { ActionIconButton } from '@renderer/components/Buttons'
import { MdiLightbulbOn } from '@renderer/components/Icons'
import type { QuickPanelListItem } from '@renderer/components/QuickPanel'
import {
isAnthropicModel,
@@ -230,6 +231,15 @@ const InputbarTools = ({
quickPhrasesButtonRef.current?.openQuickPanel()
}
},
{
label: t('assistants.settings.reasoning_effort.label'),
description: '',
icon: <MdiLightbulbOn />,
isMenu: true,
action: () => {
thinkingButtonRef.current?.openQuickPanel()
}
},
{
label: t('assistants.presets.edit.model.select.title'),
description: '',
@@ -245,6 +255,7 @@ const InputbarTools = ({
icon: <FileSearch />,
isMenu: true,
disabled: files.length > 0,
hidden: !showKnowledgeBaseButton,
action: () => {
knowledgeBaseButtonRef.current?.openQuickPanel()
}
@@ -312,7 +323,7 @@ const InputbarTools = ({
translate()
}
}
]
] satisfies QuickPanelListItem[]
}
const handleDragEnd = (result: DropResult) => {

View File

@@ -482,6 +482,13 @@ const NewApiPage: FC<{ Options: string[] }> = ({ Options }) => {
}
}, [])
// if painting.model is not set, set it to the first model in modelOptions
useEffect(() => {
if (!painting.model && modelOptions.length > 0) {
updatePaintingState({ model: modelOptions[0].value })
}
}, [modelOptions, painting.model, updatePaintingState])
return (
<Container>
<Navbar>

View File

@@ -3,6 +3,7 @@ import { isNewApiProvider } from '@renderer/config/providers'
import { useAllProviders } from '@renderer/hooks/useProvider'
import { useAppDispatch } from '@renderer/store'
import { setDefaultPaintingProvider } from '@renderer/store/settings'
import { updateTab } from '@renderer/store/tabs'
import type { PaintingProvider, SystemProviderId } from '@renderer/types'
import type { FC } from 'react'
import { useEffect, useMemo, useState } from 'react'
@@ -25,11 +26,11 @@ const PaintingsRoutePage: FC = () => {
const provider = params['*']
const dispatch = useAppDispatch()
const providers = useAllProviders()
const Options = useMemo(() => {
return [...BASE_OPTIONS, ...providers.filter((p) => isNewApiProvider(p)).map((p) => p.id)]
}, [providers])
const [ovmsStatus, setOvmsStatus] = useState<'not-installed' | 'not-running' | 'running'>('not-running')
const Options = useMemo(() => [...BASE_OPTIONS, ...providers.filter(isNewApiProvider).map((p) => p.id)], [providers])
const newApiProviders = useMemo(() => providers.filter(isNewApiProvider), [providers])
useEffect(() => {
const checkStatus = async () => {
const status = await window.api.ovms.getStatus()
@@ -44,25 +45,24 @@ const PaintingsRoutePage: FC = () => {
logger.debug(`defaultPaintingProvider: ${provider}`)
if (provider && validOptions.includes(provider)) {
dispatch(setDefaultPaintingProvider(provider as PaintingProvider))
dispatch(updateTab({ id: 'paintings', updates: { path: `/paintings/${provider}` } }))
}
}, [provider, dispatch, validOptions])
return (
<Routes>
<Route path="*" element={<ZhipuPage Options={validOptions} />} />
<Route path="*" element={<NewApiPage Options={validOptions} />} />
<Route path="/zhipu" element={<ZhipuPage Options={validOptions} />} />
<Route path="/aihubmix" element={<AihubmixPage Options={validOptions} />} />
<Route path="/silicon" element={<SiliconPage Options={validOptions} />} />
<Route path="/dmxapi" element={<DmxapiPage Options={validOptions} />} />
<Route path="/tokenflux" element={<TokenFluxPage Options={validOptions} />} />
<Route path="/ovms" element={<OvmsPage Options={validOptions} />} />
{/* new-api family providers are mounted dynamically below */}
{providers
.filter((p) => isNewApiProvider(p))
.map((p) => (
<Route key={p.id} path={`/${p.id}`} element={<NewApiPage Options={validOptions} />} />
))}
<Route path="/new-api" element={<NewApiPage Options={validOptions} />} />
{/* new-api family providers are mounted dynamically below */}
{newApiProviders.map((p) => (
<Route key={p.id} path={`/${p.id}`} element={<NewApiPage Options={validOptions} />} />
))}
</Routes>
)
}

View File

@@ -1,7 +1,7 @@
import { HStack } from '@renderer/components/Layout'
import { PROVIDER_URLS } from '@renderer/config/providers'
import { useAwsBedrockSettings } from '@renderer/hooks/useAwsBedrock'
import { Alert, Input } from 'antd'
import { Alert, Input, Radio } from 'antd'
import type { FC } from 'react'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
@@ -10,14 +10,25 @@ import { SettingHelpLink, SettingHelpText, SettingHelpTextRow, SettingSubtitle }
const AwsBedrockSettings: FC = () => {
const { t } = useTranslation()
const { accessKeyId, secretAccessKey, region, setAccessKeyId, setSecretAccessKey, setRegion } =
useAwsBedrockSettings()
const {
authType,
accessKeyId,
secretAccessKey,
apiKey,
region,
setAuthType,
setAccessKeyId,
setSecretAccessKey,
setApiKey,
setRegion
} = useAwsBedrockSettings()
const providerConfig = PROVIDER_URLS['aws-bedrock']
const apiKeyWebsite = providerConfig?.websites?.apiKey
const [localAccessKeyId, setLocalAccessKeyId] = useState(accessKeyId)
const [localSecretAccessKey, setLocalSecretAccessKey] = useState(secretAccessKey)
const [localApiKey, setLocalApiKey] = useState(apiKey)
const [localRegion, setLocalRegion] = useState(region)
return (
@@ -25,39 +36,75 @@ const AwsBedrockSettings: FC = () => {
<SettingSubtitle style={{ marginTop: 5 }}>{t('settings.provider.aws-bedrock.title')}</SettingSubtitle>
<Alert type="info" style={{ marginTop: 5 }} message={t('settings.provider.aws-bedrock.description')} showIcon />
<SettingSubtitle style={{ marginTop: 5 }}>{t('settings.provider.aws-bedrock.access_key_id')}</SettingSubtitle>
<Input
value={localAccessKeyId}
placeholder="Access Key ID"
onChange={(e) => setLocalAccessKeyId(e.target.value)}
onBlur={() => setAccessKeyId(localAccessKeyId)}
style={{ marginTop: 5 }}
/>
{/* Authentication Type Selector */}
<SettingSubtitle style={{ marginTop: 15 }}>{t('settings.provider.aws-bedrock.auth_type')}</SettingSubtitle>
<Radio.Group value={authType} onChange={(e) => setAuthType(e.target.value)} style={{ marginTop: 5 }}>
<Radio value="iam">{t('settings.provider.aws-bedrock.auth_type_iam')}</Radio>
<Radio value="apiKey">{t('settings.provider.aws-bedrock.auth_type_api_key')}</Radio>
</Radio.Group>
<SettingHelpTextRow>
<SettingHelpText>{t('settings.provider.aws-bedrock.access_key_id_help')}</SettingHelpText>
<SettingHelpText>{t('settings.provider.aws-bedrock.auth_type_help')}</SettingHelpText>
</SettingHelpTextRow>
<SettingSubtitle style={{ marginTop: 5 }}>{t('settings.provider.aws-bedrock.secret_access_key')}</SettingSubtitle>
<Input.Password
value={localSecretAccessKey}
placeholder="Secret Access Key"
onChange={(e) => setLocalSecretAccessKey(e.target.value)}
onBlur={() => setSecretAccessKey(localSecretAccessKey)}
style={{ marginTop: 5 }}
spellCheck={false}
/>
{apiKeyWebsite && (
<SettingHelpTextRow style={{ justifyContent: 'space-between' }}>
<HStack>
<SettingHelpLink target="_blank" href={apiKeyWebsite}>
{t('settings.provider.get_api_key')}
</SettingHelpLink>
</HStack>
<SettingHelpText>{t('settings.provider.aws-bedrock.secret_access_key_help')}</SettingHelpText>
</SettingHelpTextRow>
{/* IAM Credentials Fields */}
{authType === 'iam' && (
<>
<SettingSubtitle style={{ marginTop: 15 }}>
{t('settings.provider.aws-bedrock.access_key_id')}
</SettingSubtitle>
<Input
value={localAccessKeyId}
placeholder="Access Key ID"
onChange={(e) => setLocalAccessKeyId(e.target.value)}
onBlur={() => setAccessKeyId(localAccessKeyId)}
style={{ marginTop: 5 }}
/>
<SettingHelpTextRow>
<SettingHelpText>{t('settings.provider.aws-bedrock.access_key_id_help')}</SettingHelpText>
</SettingHelpTextRow>
<SettingSubtitle style={{ marginTop: 15 }}>
{t('settings.provider.aws-bedrock.secret_access_key')}
</SettingSubtitle>
<Input.Password
value={localSecretAccessKey}
placeholder="Secret Access Key"
onChange={(e) => setLocalSecretAccessKey(e.target.value)}
onBlur={() => setSecretAccessKey(localSecretAccessKey)}
style={{ marginTop: 5 }}
spellCheck={false}
/>
{apiKeyWebsite && (
<SettingHelpTextRow style={{ justifyContent: 'space-between' }}>
<HStack>
<SettingHelpLink target="_blank" href={apiKeyWebsite}>
{t('settings.provider.get_api_key')}
</SettingHelpLink>
</HStack>
<SettingHelpText>{t('settings.provider.aws-bedrock.secret_access_key_help')}</SettingHelpText>
</SettingHelpTextRow>
)}
</>
)}
<SettingSubtitle style={{ marginTop: 5 }}>{t('settings.provider.aws-bedrock.region')}</SettingSubtitle>
{authType === 'apiKey' && (
<>
<SettingSubtitle style={{ marginTop: 15 }}>{t('settings.provider.aws-bedrock.api_key')}</SettingSubtitle>
<Input.Password
value={localApiKey}
placeholder="Bedrock API Key"
onChange={(e) => setLocalApiKey(e.target.value)}
onBlur={() => setApiKey(localApiKey)}
style={{ marginTop: 5 }}
spellCheck={false}
/>
<SettingHelpTextRow>
<SettingHelpText>{t('settings.provider.aws-bedrock.api_key_help')}</SettingHelpText>
</SettingHelpTextRow>
</>
)}
<SettingSubtitle style={{ marginTop: 15 }}>{t('settings.provider.aws-bedrock.region')}</SettingSubtitle>
<Input
value={localRegion}
placeholder="us-east-1"

View File

@@ -131,10 +131,11 @@ const PopupContainer: React.FC<Props> = ({ providerId, resolve }) => {
(model: Model) => {
if (!isEmpty(model.name)) {
if (isNewApiProvider(provider)) {
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
const endpointTypes = model.supported_endpoint_types
if (endpointTypes && endpointTypes.length > 0) {
addModel({
...model,
endpoint_type: model.supported_endpoint_types[0],
endpoint_type: endpointTypes.includes('image-generation') ? 'image-generation' : endpointTypes[0],
supported_text_delta: !isNotSupportedTextDelta(model)
})
} else {

View File

@@ -230,8 +230,10 @@ vi.mock('@renderer/store/llm.ts', () => {
location: ''
},
awsBedrock: {
authType: 'iam',
accessKeyId: '',
secretAccessKey: '',
apiKey: '',
region: ''
}
}

View File

@@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 169,
version: 171,
blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
migrate
},

View File

@@ -3,7 +3,7 @@ import { createSlice } from '@reduxjs/toolkit'
import { isLocalAi } from '@renderer/config/env'
import { SYSTEM_MODELS } from '@renderer/config/models'
import { SYSTEM_PROVIDERS } from '@renderer/config/providers'
import type { Model, Provider } from '@renderer/types'
import type { AwsBedrockAuthType, Model, Provider } from '@renderer/types'
import { uniqBy } from 'lodash'
type LlmSettings = {
@@ -25,8 +25,10 @@ type LlmSettings = {
location: string
}
awsBedrock: {
authType: AwsBedrockAuthType
accessKeyId: string
secretAccessKey: string
apiKey: string
region: string
}
}
@@ -68,8 +70,10 @@ export const initialState: LlmState = {
location: ''
},
awsBedrock: {
authType: 'iam',
accessKeyId: '',
secretAccessKey: '',
apiKey: '',
region: ''
}
}
@@ -197,12 +201,18 @@ const llmSlice = createSlice({
setVertexAIServiceAccountClientEmail: (state, action: PayloadAction<string>) => {
state.settings.vertexai.serviceAccount.clientEmail = action.payload
},
setAwsBedrockAuthType: (state, action: PayloadAction<AwsBedrockAuthType>) => {
state.settings.awsBedrock.authType = action.payload
},
setAwsBedrockAccessKeyId: (state, action: PayloadAction<string>) => {
state.settings.awsBedrock.accessKeyId = action.payload
},
setAwsBedrockSecretAccessKey: (state, action: PayloadAction<string>) => {
state.settings.awsBedrock.secretAccessKey = action.payload
},
setAwsBedrockApiKey: (state, action: PayloadAction<string>) => {
state.settings.awsBedrock.apiKey = action.payload
},
setAwsBedrockRegion: (state, action: PayloadAction<string>) => {
state.settings.awsBedrock.region = action.payload
},
@@ -242,8 +252,10 @@ export const {
setVertexAILocation,
setVertexAIServiceAccountPrivateKey,
setVertexAIServiceAccountClientEmail,
setAwsBedrockAuthType,
setAwsBedrockAccessKeyId,
setAwsBedrockSecretAccessKey,
setAwsBedrockApiKey,
setAwsBedrockRegion,
updateModel
} = llmSlice.actions

View File

@@ -2783,6 +2783,28 @@ const migrateConfig = {
logger.error('migrate 169 error', error as Error)
return state
}
},
'170': (state: RootState) => {
try {
addProvider(state, 'sophnet')
state.llm.providers = moveProvider(state.llm.providers, 'sophnet', 17)
state.settings.defaultPaintingProvider = 'cherryin'
return state
} catch (error) {
logger.error('migrate 170 error', error as Error)
return state
}
},
'171': (state: RootState) => {
try {
addProvider(state, 'sophnet')
state.llm.providers = moveProvider(state.llm.providers, 'sophnet', 17)
state.settings.defaultPaintingProvider = 'cherryin'
return state
} catch (error) {
logger.error('migrate 171 error', error as Error)
return state
}
}
}
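
The numbered entries follow redux-persist's migration contract: on rehydrate, every migration whose key is greater than the persisted version runs in ascending order until the state reaches the bumped version 171. A minimal standalone sketch with redux-persist's stock createMigrate helper (this project wires its own migrate function, and the state transforms here are placeholders):

import { createMigrate } from 'redux-persist'

const migrations = {
  // Runs for any persisted state at version 169 or lower
  170: (state: any) => ({ ...state }),
  // Runs next for anything below 171, landing the state on the new version
  171: (state: any) => ({ ...state })
}

export const migrate = createMigrate(migrations, { debug: false })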

View File

@@ -390,7 +390,7 @@ export const initialState: SettingsState = {
localBackupSyncInterval: 0,
localBackupMaxBackups: 0,
localBackupSkipBackupFile: false,
defaultPaintingProvider: 'zhipu',
defaultPaintingProvider: 'cherryin',
s3: {
endpoint: '',
region: '',

View File

@@ -36,6 +36,7 @@ export type Assistant = {
description?: string
model?: Model
defaultModel?: Model
// This field should be considered as not Partial and not optional in v2
settings?: Partial<AssistantSettings>
messages?: AssistantMessage[]
/** enableWebSearch 代表使用模型内置网络搜索功能 */
@@ -280,7 +281,7 @@ export type PaintingParams = {
providerId?: string
}
export type PaintingProvider = 'zhipu' | 'aihubmix' | 'silicon' | 'dmxapi' | 'new-api' | 'ovms'
export type PaintingProvider = 'zhipu' | 'aihubmix' | 'silicon' | 'dmxapi' | 'new-api' | 'ovms' | 'cherryin'
export interface Painting extends PaintingParams {
model?: string

View File

@@ -73,6 +73,17 @@ export function isServiceTier(tier: string): tier is ServiceTier {
return isGroqServiceTier(tier) || isOpenAIServiceTier(tier)
}
export const AwsBedrockAuthTypes = {
iam: 'iam',
apiKey: 'apiKey'
} as const
export type AwsBedrockAuthType = keyof typeof AwsBedrockAuthTypes
export function isAwsBedrockAuthType(type: string): type is AwsBedrockAuthType {
return Object.hasOwn(AwsBedrockAuthTypes, type)
}
export type Provider = {
id: string
type: ProviderType
@@ -123,6 +134,7 @@ export const SystemProviderIds = {
cephalon: 'cephalon',
lanyun: 'lanyun',
ph8: 'ph8',
sophnet: 'sophnet',
openrouter: 'openrouter',
ollama: 'ollama',
ovms: 'ovms',
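
The frozen AwsBedrockAuthTypes map and the isAwsBedrockAuthType guard make it straightforward to narrow an arbitrary string, such as a value read back from storage, to the union before it reaches the reducer. A small sketch, assuming both exports resolve from '@renderer/types':

import { isAwsBedrockAuthType } from '@renderer/types'
import type { AwsBedrockAuthType } from '@renderer/types'

// Fall back to IAM credentials when the stored value is not a known auth type
export function normalizeAuthType(value: string): AwsBedrockAuthType {
  return isAwsBedrockAuthType(value) ? value : 'iam'
}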

View File

@@ -23,7 +23,6 @@ import type {
GoogleGenAI,
Model as GeminiModel,
SendMessageParameters,
ThinkingConfig,
Tool
} from '@google/genai'
@@ -92,7 +91,10 @@ export type ReasoningEffortOptionalParams = {
}
extra_body?: {
google?: {
thinking_config: ThinkingConfig
thinking_config: {
thinking_budget: number
include_thoughts?: boolean
}
}
}
// Add any other potential reasoning-related keys here if they exist
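
Replacing the imported ThinkingConfig with an inline type narrows extra_body.google.thinking_config to exactly the two snake_case fields shown above. An illustrative value of that shape (the budget number is made up):

const geminiReasoningParams = {
  extra_body: {
    google: {
      thinking_config: {
        thinking_budget: 2048, // tokens reserved for reasoning, value chosen for illustration
        include_thoughts: true // ask the model to surface thought summaries
      }
    }
  }
}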

yarn.lock: 2889 lines changed (file diff suppressed because it is too large)