chore: update electron-builder configuration to refine file exclusion patterns

- Added exclusions for dependency build artifacts (dist/es6, dist/demo, and amd directories) to reduce the packaged app size.
- Broadened the license file exclusions to cover lowercase and extension variants (license, LICENSE.*).
Author: kangfenmao
Date: 2025-05-26 19:37:31 +08:00
Parent: 11daf93094
Commit: c77d627077
7 changed files with 9 additions and 246 deletions

View File

@@ -36,8 +36,11 @@ files:
   - '!**/*.{spec,test}.{js,jsx,ts,tsx}'
   - '!**/*.min.*.map'
   - '!**/*.d.ts'
+  - '!**/dist/es6/**'
+  - '!**/dist/demo/**'
+  - '!**/amd/**'
   - '!**/{.DS_Store,Thumbs.db,thumbs.db,__pycache__}'
-  - '!**/{LICENSE,LICENSE.txt,LICENSE-MIT.txt,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
+  - '!**/{LICENSE,license,LICENSE.*,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
   - '!node_modules/rollup-plugin-visualizer'
   - '!node_modules/js-tiktoken'
   - '!node_modules/@tavily/core/node_modules/js-tiktoken'
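The added entries are plain glob exclusions, so their effect can be previewed without running a full build. Below is a minimal sketch, assuming the minimatch package (electron-builder's own matcher may differ in edge cases such as dotfile handling), that checks sample dependency paths against the new rules:

// check-exclusions.ts — hypothetical helper, not part of this commit
import { minimatch } from 'minimatch'

const exclusions = [
  '**/dist/es6/**',
  '**/dist/demo/**',
  '**/amd/**',
  '**/{LICENSE,license,LICENSE.*,*.LICENSE.txt,NOTICE.txt,README.md,readme.md,CHANGELOG.md}'
]

const samples = [
  'node_modules/some-lib/dist/es6/index.js', // excluded by the new dist/es6 rule
  'node_modules/some-lib/dist/index.js', // still packaged
  'node_modules/some-lib/license', // now excluded by the lowercase variant
  'node_modules/some-lib/LICENSE.md' // now excluded via LICENSE.*
]

for (const file of samples) {
  const excluded = exclusions.some((pattern) => minimatch(file, pattern))
  console.log(`${file} -> ${excluded ? 'excluded' : 'packaged'}`)
}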

View File

@@ -9,25 +9,7 @@ const visualizerPlugin = (type: 'renderer' | 'main') => {
 export default defineConfig({
   main: {
-    plugins: [
-      externalizeDepsPlugin({
-        exclude: [
-          '@cherrystudio/embedjs',
-          '@cherrystudio/embedjs-openai',
-          '@cherrystudio/embedjs-loader-web',
-          '@cherrystudio/embedjs-loader-markdown',
-          '@cherrystudio/embedjs-loader-msoffice',
-          '@cherrystudio/embedjs-loader-xml',
-          '@cherrystudio/embedjs-loader-pdf',
-          '@cherrystudio/embedjs-loader-sitemap',
-          '@cherrystudio/embedjs-libsql',
-          '@cherrystudio/embedjs-loader-image',
-          'p-queue',
-          'webdav'
-        ]
-      }),
-      ...visualizerPlugin('main')
-    ],
+    plugins: [externalizeDepsPlugin(), ...visualizerPlugin('main')],
     resolve: {
       alias: {
         '@main': resolve('src/main'),
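Dropping the exclude list means externalizeDepsPlugin now treats every package.json dependency as external, so the embedjs packages, p-queue, and webdav are loaded from node_modules at runtime instead of being bundled into the main-process output. A rough sketch of the rollup option the plugin effectively derives (an approximation only; the real plugin also handles subpath imports and other cases):

// Approximate effect of externalizeDepsPlugin() with no exclusions:
// every runtime dependency is marked external, so rollup keeps the
// import/require calls instead of inlining the package source.
import { readFileSync } from 'node:fs'
import { builtinModules } from 'node:module'

const pkg = JSON.parse(readFileSync('./package.json', 'utf8'))
const external = [...builtinModules, ...Object.keys(pkg.dependencies ?? {})]

export const rollupOptions = { external }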

View File

@@ -1,6 +1,6 @@
 import { WebSearchState } from '@renderer/store/websearch'
 import { WebSearchProvider, WebSearchProviderResponse } from '@renderer/types'
-import { BochaSearchParams, BochaSearchResponse } from '@renderer/types/bocha'
+import { BochaSearchParams, BochaSearchResponse } from '@renderer/utils/bocha'
 import BaseWebSearchProvider from './BaseWebSearchProvider'

View File

@@ -1,7 +1,7 @@
 import type { WebSearchResultBlock } from '@anthropic-ai/sdk/resources'
 import type { GroundingMetadata } from '@google/genai'
 import type OpenAI from 'openai'
-import React from 'react'
+import type { CSSProperties } from 'react'
 import type { Message } from './newMessage'
@@ -274,7 +274,7 @@ export type MinAppType = {
   url: string
   bodered?: boolean
   background?: string
-  style?: React.CSSProperties
+  style?: CSSProperties
   addTime?: string
   type?: 'Custom' | 'Default' // Added the 'type' property
 }
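The switch to a type-only import matters because import type statements are erased from the emitted JavaScript: modules that only need the CSSProperties type no longer pull in the React runtime as a side effect. A minimal illustration (the exported type name here is hypothetical):

// Before: a value import, kept in the compiled output
// import React from 'react'
// style?: React.CSSProperties

// After: erased entirely at compile time
import type { CSSProperties } from 'react'

export type StyledApp = {
  style?: CSSProperties
}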

View File

@@ -1,222 +0,0 @@
import { z } from 'zod'
export const InputType = z.enum(['text', 'image', 'audio', 'video', 'document'])
export type InputType = z.infer<typeof InputType>
export const OutputType = z.enum(['text', 'image', 'audio', 'video', 'vector'])
export type OutputType = z.infer<typeof OutputType>
export const OutputMode = z.enum(['sync', 'streaming'])
export type OutputMode = z.infer<typeof OutputMode>
export const ModelCapability = z.enum([
'audioGeneration',
'cache',
'codeExecution',
'embedding',
'fineTuning',
'imageGeneration',
'OCR',
'realTime',
'rerank',
'reasoning',
'streaming',
'structuredOutput',
'textGeneration',
'translation',
'transcription',
'toolUse',
'videoGeneration',
'webSearch'
])
export type ModelCapability = z.infer<typeof ModelCapability>
export const ModelSchema = z
.object({
id: z.string(),
modelId: z.string(),
providerId: z.string(),
name: z.string(),
group: z.string(),
description: z.string().optional(),
owned_by: z.string().optional(),
supportedInputs: z.array(InputType),
supportedOutputs: z.array(OutputType),
supportedOutputModes: z.array(OutputMode),
limits: z
.object({
inputTokenLimit: z.number().optional(),
outputTokenLimit: z.number().optional(),
contextWindow: z.number().optional()
})
.optional(),
price: z
.object({
inputTokenPrice: z.number().optional(),
outputTokenPrice: z.number().optional()
})
.optional(),
capabilities: z.array(ModelCapability)
})
.refine(
(data) => {
// If the model has the 'streaming' capability, it must support the 'streaming' output mode
if (data.capabilities.includes('streaming') && !data.supportedOutputModes.includes('streaming')) {
return false
}
// If the model has the 'OCR' capability, it must accept image or document input
if (
data.capabilities.includes('OCR') &&
!data.supportedInputs.includes('image') &&
!data.supportedInputs.includes('document')
) {
return false
}
// If the model has the 'imageGeneration' capability, it must support image output
if (data.capabilities.includes('imageGeneration') && !data.supportedOutputs.includes('image')) {
return false
}
// If the model has the 'audioGeneration' capability, it must support audio output
if (data.capabilities.includes('audioGeneration') && !data.supportedOutputs.includes('audio')) {
return false
}
// If the model can transcribe or translate audio, it must accept audio input
if (
(data.capabilities.includes('transcription') || data.capabilities.includes('translation')) &&
!data.supportedInputs.includes('audio')
) {
return false
}
// If the model has the 'videoGeneration' capability, it must support video output
if (data.capabilities.includes('videoGeneration') && !data.supportedOutputs.includes('video')) {
return false
}
// If the model has the 'embedding' capability, it must support vector output
if (data.capabilities.includes('embedding') && !data.supportedOutputs.includes('vector')) {
return false
}
// If the model has any of the toolUse, reasoning, streaming, cache, codeExecution,
// imageGeneration, audioGeneration, videoGeneration, or webSearch capabilities,
// it must accept text input
if (
(data.capabilities.includes('toolUse') ||
data.capabilities.includes('reasoning') ||
data.capabilities.includes('streaming') ||
data.capabilities.includes('cache') ||
data.capabilities.includes('codeExecution') ||
data.capabilities.includes('imageGeneration') ||
data.capabilities.includes('audioGeneration') ||
data.capabilities.includes('videoGeneration') ||
data.capabilities.includes('webSearch')) &&
!data.supportedInputs.includes('text')
) {
return false
}
// If the model has any of the toolUse, reasoning, streaming, cache, codeExecution,
// OCR, textGeneration, translation, transcription, webSearch, or structuredOutput
// capabilities, it must support text output
if (
(data.capabilities.includes('toolUse') ||
data.capabilities.includes('reasoning') ||
data.capabilities.includes('streaming') ||
data.capabilities.includes('cache') ||
data.capabilities.includes('codeExecution') ||
data.capabilities.includes('OCR') ||
data.capabilities.includes('textGeneration') ||
data.capabilities.includes('translation') ||
data.capabilities.includes('transcription') ||
data.capabilities.includes('webSearch') ||
data.capabilities.includes('structuredOutput')) &&
!data.supportedOutputs.includes('text')
) {
return false
}
return true
},
{
message: 'ModelCard has inconsistent capabilities and supported input/output type'
}
)
export type ModelCard = z.infer<typeof ModelSchema>
export function createModelCard(model: ModelCard): ModelCard {
return ModelSchema.parse(model)
}
export function supportesInputType(model: ModelCard, inputType: InputType) {
return model.supportedInputs.includes(inputType)
}
export function supportesOutputType(model: ModelCard, outputType: OutputType) {
return model.supportedOutputs.includes(outputType)
}
export function supportesOutputMode(model: ModelCard, outputMode: OutputMode) {
return model.supportedOutputModes.includes(outputMode)
}
export function supportesCapability(model: ModelCard, capability: ModelCapability) {
return model.capabilities.includes(capability)
}
export function isVisionModel(model: ModelCard) {
return supportesInputType(model, 'image')
}
export function isImageGenerationModel(model: ModelCard) {
return isVisionModel(model) && supportesCapability(model, 'imageGeneration')
}
export function isAudioModel(model: ModelCard) {
return supportesInputType(model, 'audio')
}
export function isAudioGenerationModel(model: ModelCard) {
return supportesCapability(model, 'audioGeneration')
}
export function isVideoModel(model: ModelCard) {
return supportesInputType(model, 'video')
}
export function isEmbedModel(model: ModelCard) {
return supportesOutputType(model, 'vector') && supportesCapability(model, 'embedding')
}
export function isTextEmbeddingModel(model: ModelCard) {
return isEmbedModel(model) && supportesInputType(model, 'text') && model.supportedInputs.length === 1
}
export function isMultiModalEmbeddingModel(model: ModelCard) {
return isEmbedModel(model) && model.supportedInputs.length > 1
}
export function isRerankModel(model: ModelCard) {
return supportesCapability(model, 'rerank')
}
export function isReasoningModel(model: ModelCard) {
return supportesCapability(model, 'reasoning')
}
export function isToolUseModel(model: ModelCard) {
return supportesCapability(model, 'toolUse')
}
export function isOnlyStreamingModel(model: ModelCard) {
return (
supportesCapability(model, 'streaming') &&
supportesOutputMode(model, 'streaming') &&
model.supportedOutputModes.length === 1
)
}
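For reference, the deleted schema enforced cross-field consistency through the .refine step above: a card whose capabilities contradict its declared inputs or outputs is rejected at parse time. A short usage sketch of the removed API (the model values are hypothetical):

// Passes: the 'streaming' capability is backed by the 'streaming' output mode,
// and text input/output satisfy the toolUse and textGeneration rules.
const card = createModelCard({
  id: 'example-model',
  modelId: 'example-model',
  providerId: 'example-provider',
  name: 'Example Model',
  group: 'examples',
  supportedInputs: ['text', 'image'],
  supportedOutputs: ['text'],
  supportedOutputModes: ['sync', 'streaming'],
  capabilities: ['textGeneration', 'streaming', 'toolUse']
})

isVisionModel(card) // true: the card accepts image input
supportesCapability(card, 'reasoning') // false

// Throws a ZodError: 'imageGeneration' requires 'image' in supportedOutputs.
createModelCard({ ...card, capabilities: [...card.capabilities, 'imageGeneration'] })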

View File

@@ -1,4 +1,4 @@
-import { CompletionUsage } from 'openai/resources'
+import type { CompletionUsage } from 'openai/resources'
 import type {
   Assistant,