diff --git a/electron-builder.yml b/electron-builder.yml
index 5e1f97f00..631b483cd 100644
--- a/electron-builder.yml
+++ b/electron-builder.yml
@@ -36,8 +36,11 @@ files:
   - '!**/*.{spec,test}.{js,jsx,ts,tsx}'
   - '!**/*.min.*.map'
   - '!**/*.d.ts'
+  - '!**/dist/es6/**'
+  - '!**/dist/demo/**'
+  - '!**/amd/**'
   - '!**/{.DS_Store,Thumbs.db,thumbs.db,__pycache__}'
-  - '!**/{LICENSE,LICENSE.txt,LICENSE-MIT.txt,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
+  - '!**/{LICENSE,license,LICENSE.*,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
   - '!node_modules/rollup-plugin-visualizer'
   - '!node_modules/js-tiktoken'
   - '!node_modules/@tavily/core/node_modules/js-tiktoken'
diff --git a/electron.vite.config.ts b/electron.vite.config.ts
index b6e0148e8..7f4a4e3a6 100644
--- a/electron.vite.config.ts
+++ b/electron.vite.config.ts
@@ -9,25 +9,7 @@ const visualizerPlugin = (type: 'renderer' | 'main') => {
 
 export default defineConfig({
   main: {
-    plugins: [
-      externalizeDepsPlugin({
-        exclude: [
-          '@cherrystudio/embedjs',
-          '@cherrystudio/embedjs-openai',
-          '@cherrystudio/embedjs-loader-web',
-          '@cherrystudio/embedjs-loader-markdown',
-          '@cherrystudio/embedjs-loader-msoffice',
-          '@cherrystudio/embedjs-loader-xml',
-          '@cherrystudio/embedjs-loader-pdf',
-          '@cherrystudio/embedjs-loader-sitemap',
-          '@cherrystudio/embedjs-libsql',
-          '@cherrystudio/embedjs-loader-image',
-          'p-queue',
-          'webdav'
-        ]
-      }),
-      ...visualizerPlugin('main')
-    ],
+    plugins: [externalizeDepsPlugin(), ...visualizerPlugin('main')],
     resolve: {
       alias: {
         '@main': resolve('src/main'),
diff --git a/src/renderer/src/providers/WebSearchProvider/BochaProvider.ts b/src/renderer/src/providers/WebSearchProvider/BochaProvider.ts
index 63ef9b0a9..6d2bc2401 100644
--- a/src/renderer/src/providers/WebSearchProvider/BochaProvider.ts
+++ b/src/renderer/src/providers/WebSearchProvider/BochaProvider.ts
@@ -1,6 +1,6 @@
 import { WebSearchState } from '@renderer/store/websearch'
 import { WebSearchProvider, WebSearchProviderResponse } from '@renderer/types'
-import { BochaSearchParams, BochaSearchResponse } from '@renderer/types/bocha'
+import { BochaSearchParams, BochaSearchResponse } from '@renderer/utils/bocha'
 
 import BaseWebSearchProvider from './BaseWebSearchProvider'
 
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index 4b29fb5ca..e85c45a6a 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -1,7 +1,7 @@
 import type { WebSearchResultBlock } from '@anthropic-ai/sdk/resources'
 import type { GroundingMetadata } from '@google/genai'
 import type OpenAI from 'openai'
-import React from 'react'
+import type { CSSProperties } from 'react'
 
 import type { Message } from './newMessage'
 
@@ -274,7 +274,7 @@ export type MinAppType = {
   url: string
   bodered?: boolean
   background?: string
-  style?: React.CSSProperties
+  style?: CSSProperties
   addTime?: string
   type?: 'Custom' | 'Default' // Added the 'type' property
 }
diff --git a/src/renderer/src/types/model.ts b/src/renderer/src/types/model.ts
deleted file mode 100644
index ed3763e2a..000000000
--- a/src/renderer/src/types/model.ts
+++ /dev/null
@@ -1,222 +0,0 @@
-import { z } from 'zod'
-
-export const InputType = z.enum(['text', 'image', 'audio', 'video', 'document'])
-export type InputType = z.infer<typeof InputType>
-
-export const OutputType = z.enum(['text', 'image', 'audio', 'video', 'vector'])
-export type OutputType = z.infer<typeof OutputType>
-
-export const OutputMode = z.enum(['sync', 'streaming'])
-export type OutputMode = z.infer<typeof OutputMode>
-
-export const ModelCapability = z.enum([
-  'audioGeneration',
-  'cache',
-  'codeExecution',
-  'embedding',
-  'fineTuning',
-  'imageGeneration',
-  'OCR',
-  'realTime',
-  'rerank',
-  'reasoning',
-  'streaming',
-  'structuredOutput',
-  'textGeneration',
-  'translation',
-  'transcription',
-  'toolUse',
-  'videoGeneration',
-  'webSearch'
-])
-export type ModelCapability = z.infer<typeof ModelCapability>
-
-export const ModelSchema = z
-  .object({
-    id: z.string(),
-    modelId: z.string(),
-    providerId: z.string(),
-    name: z.string(),
-    group: z.string(),
-    description: z.string().optional(),
-    owned_by: z.string().optional(),
-
-    supportedInputs: z.array(InputType),
-    supportedOutputs: z.array(OutputType),
-    supportedOutputModes: z.array(OutputMode),
-
-    limits: z
-      .object({
-        inputTokenLimit: z.number().optional(),
-        outputTokenLimit: z.number().optional(),
-        contextWindow: z.number().optional()
-      })
-      .optional(),
-
-    price: z
-      .object({
-        inputTokenPrice: z.number().optional(),
-        outputTokenPrice: z.number().optional()
-      })
-      .optional(),
-
-    capabilities: z.array(ModelCapability)
-  })
-  .refine(
-    (data) => {
-      // If the model supports streaming, it must support the streaming output mode
-      if (data.capabilities.includes('streaming') && !data.supportedOutputModes.includes('streaming')) {
-        return false
-      }
-
-      // If the model has OCR capability, it must support image or document input
-      if (
-        data.capabilities.includes('OCR') &&
-        !data.supportedInputs.includes('image') &&
-        !data.supportedInputs.includes('document')
-      ) {
-        return false
-      }
-
-      // If the model has image generation capability, it must support image output
-      if (data.capabilities.includes('imageGeneration') && !data.supportedOutputs.includes('image')) {
-        return false
-      }
-
-      // If the model has audio generation capability, it must support audio output
-      if (data.capabilities.includes('audioGeneration') && !data.supportedOutputs.includes('audio')) {
-        return false
-      }
-
-      // If the model has audio recognition capability, it must support audio input
-      if (
-        (data.capabilities.includes('transcription') || data.capabilities.includes('translation')) &&
-        !data.supportedInputs.includes('audio')
-      ) {
-        return false
-      }
-
-      // If the model has video generation capability, it must support video output
-      if (data.capabilities.includes('videoGeneration') && !data.supportedOutputs.includes('video')) {
-        return false
-      }
-
-      // If the model has embedding capability, it must support vector output
-      if (data.capabilities.includes('embedding') && !data.supportedOutputs.includes('vector')) {
-        return false
-      }
-
-      // If the model has toolUse, reasoning, streaming, cache, codeExecution, imageGeneration, audioGeneration, videoGeneration, or webSearch capability, it must support text input
-      if (
-        (data.capabilities.includes('toolUse') ||
-          data.capabilities.includes('reasoning') ||
-          data.capabilities.includes('streaming') ||
-          data.capabilities.includes('cache') ||
-          data.capabilities.includes('codeExecution') ||
-          data.capabilities.includes('imageGeneration') ||
-          data.capabilities.includes('audioGeneration') ||
-          data.capabilities.includes('videoGeneration') ||
-          data.capabilities.includes('webSearch')) &&
-        !data.supportedInputs.includes('text')
-      ) {
-        return false
-      }
-
-      // If the model has toolUse, reasoning, streaming, cache, codeExecution, OCR, textGeneration, translation, transcription, webSearch, or structuredOutput capability, it must support text output
-      if (
-        (data.capabilities.includes('toolUse') ||
-          data.capabilities.includes('reasoning') ||
-          data.capabilities.includes('streaming') ||
-          data.capabilities.includes('cache') ||
-          data.capabilities.includes('codeExecution') ||
-          data.capabilities.includes('OCR') ||
-          data.capabilities.includes('textGeneration') ||
-          data.capabilities.includes('translation') ||
-          data.capabilities.includes('transcription') ||
-          data.capabilities.includes('webSearch') ||
-          data.capabilities.includes('structuredOutput')) &&
-        !data.supportedOutputs.includes('text')
-      ) {
-        return false
-      }
-
-      return true
-    },
-    {
-      message: 'ModelCard has inconsistent capabilities and supported input/output type'
-    }
-  )
-
-export type ModelCard = z.infer<typeof ModelSchema>
-
-export function createModelCard(model: ModelCard): ModelCard {
-  return ModelSchema.parse(model)
-}
-
-export function supportesInputType(model: ModelCard, inputType: InputType) {
-  return model.supportedInputs.includes(inputType)
-}
-
-export function supportesOutputType(model: ModelCard, outputType: OutputType) {
-  return model.supportedOutputs.includes(outputType)
-}
-
-export function supportesOutputMode(model: ModelCard, outputMode: OutputMode) {
-  return model.supportedOutputModes.includes(outputMode)
-}
-
-export function supportesCapability(model: ModelCard, capability: ModelCapability) {
-  return model.capabilities.includes(capability)
-}
-
-export function isVisionModel(model: ModelCard) {
-  return supportesInputType(model, 'image')
-}
-
-export function isImageGenerationModel(model: ModelCard) {
-  return isVisionModel(model) && supportesCapability(model, 'imageGeneration')
-}
-
-export function isAudioModel(model: ModelCard) {
-  return supportesInputType(model, 'audio')
-}
-
-export function isAudioGenerationModel(model: ModelCard) {
-  return supportesCapability(model, 'audioGeneration')
-}
-
-export function isVideoModel(model: ModelCard) {
-  return supportesInputType(model, 'video')
-}
-
-export function isEmbedModel(model: ModelCard) {
-  return supportesOutputType(model, 'vector') && supportesCapability(model, 'embedding')
-}
-
-export function isTextEmbeddingModel(model: ModelCard) {
-  return isEmbedModel(model) && supportesInputType(model, 'text') && model.supportedInputs.length === 1
-}
-
-export function isMultiModalEmbeddingModel(model: ModelCard) {
-  return isEmbedModel(model) && model.supportedInputs.length > 1
-}
-
-export function isRerankModel(model: ModelCard) {
-  return supportesCapability(model, 'rerank')
-}
-
-export function isReasoningModel(model: ModelCard) {
-  return supportesCapability(model, 'reasoning')
-}
-
-export function isToolUseModel(model: ModelCard) {
-  return supportesCapability(model, 'toolUse')
-}
-
-export function isOnlyStreamingModel(model: ModelCard) {
-  return (
-    supportesCapability(model, 'streaming') &&
-    supportesOutputMode(model, 'streaming') &&
-    model.supportedOutputModes.length === 1
-  )
-}
diff --git a/src/renderer/src/types/newMessage.ts b/src/renderer/src/types/newMessage.ts
index 6005d75db..5e9a263ae 100644
--- a/src/renderer/src/types/newMessage.ts
+++ b/src/renderer/src/types/newMessage.ts
@@ -1,4 +1,4 @@
-import { CompletionUsage } from 'openai/resources'
+import type { CompletionUsage } from 'openai/resources'
 
 import type {
   Assistant,
diff --git a/src/renderer/src/types/bocha.ts b/src/renderer/src/utils/bocha.ts
similarity index 100%
rename from src/renderer/src/types/bocha.ts
rename to src/renderer/src/utils/bocha.ts