Compare commits

...

3 Commits

Author      SHA1        Message                                      Date
Martial BE  fc284cc1f0  🐛 fix return to first page after refresh    2023-12-29 17:02:09 +08:00
Martial BE  9c0a49b97a  add custom test model                        2023-12-29 16:23:25 +08:00
Martial BE  61c47a3b08  🎨 Support Baichuan2                         2023-12-29 15:23:05 +08:00
18 changed files with 257 additions and 46 deletions

View File

@@ -189,6 +189,7 @@ const (
ChannelTypeTencent = 23
ChannelTypeAzureSpeech = 24
ChannelTypeGemini = 25
ChannelTypeBaichuan = 26
)
var ChannelBaseURLs = []string{
@@ -218,6 +219,7 @@ var ChannelBaseURLs = []string{
"https://hunyuan.cloud.tencent.com", //23
"", //24
"", //25
"https://api.baichuan-ai.com", //26
}
const (

View File

@@ -101,6 +101,10 @@ var ModelRatio = map[string]float64{
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"Baichuan2-Turbo": 0.5715, // ¥0.008 / 1k tokens
"Baichuan2-Turbo-192k": 1.143, // ¥0.016 / 1k tokens
"Baichuan2-53B": 1.4286, // ¥0.02 / 1k tokens
"Baichuan-Text-Embedding": 0.0357, // ¥0.0005 / 1k tokens
}
func ModelRatio2JSONString() string {
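
As a quick sanity check on the new Baichuan entries: the ratios appear to follow this project's usual convention that a ratio of 1 equals $0.002 per 1K tokens, and at an assumed exchange rate of about ¥7/$ they match the CNY prices in the comments. A minimal sketch under those two assumptions (neither is stated in this diff):

// illustrative sketch, assuming ratio 1 == $0.002 / 1K tokens and ¥7 per $
func ratioToCNYPer1K(ratio float64) float64 {
	return ratio * 0.002 * 7
}

// ratioToCNYPer1K(0.5715) ≈ ¥0.008  (Baichuan2-Turbo)
// ratioToCNYPer1K(1.143)  ≈ ¥0.016  (Baichuan2-Turbo-192k)
// ratioToCNYPer1K(1.4286) ≈ ¥0.02   (Baichuan2-53B)
// ratioToCNYPer1K(0.0357) ≈ ¥0.0005 (Baichuan-Text-Embedding)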

View File

@@ -190,13 +190,13 @@ func countImageTokens(url string, detail string) (_ int, err error) {
func CountTokenInput(input any, model string) int {
switch v := input.(type) {
case string:
return CountTokenInput(v, model)
return CountTokenText(v, model)
case []string:
text := ""
for _, s := range v {
text += s
}
return CountTokenInput(text, model)
return CountTokenText(text, model)
}
return 0
}

View File

@@ -18,6 +18,10 @@ import (
)
func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (err error, openaiErr *types.OpenAIError) {
if channel.TestModel == "" {
return errors.New("请填写测速模型后再试"), nil
}
// create an http.Request
req, err := http.NewRequest("POST", "/v1/chat/completions", nil)
if err != nil {
@@ -28,26 +32,7 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = req
// create the mapping
channelTypeToModel := map[int]string{
common.ChannelTypePaLM: "PaLM-2",
common.ChannelTypeAnthropic: "claude-2",
common.ChannelTypeBaidu: "ERNIE-Bot",
common.ChannelTypeZhipu: "chatglm_lite",
common.ChannelTypeAli: "qwen-turbo",
common.ChannelType360: "360GPT_S2_V9",
common.ChannelTypeXunfei: "SparkDesk",
common.ChannelTypeTencent: "hunyuan",
common.ChannelTypeAzure: "gpt-3.5-turbo",
}
// get the model name from the mapping
model, ok := channelTypeToModel[channel.Type]
if !ok {
model = "gpt-3.5-turbo" // 默认值
}
request.Model = model
request.Model = channel.TestModel
provider := providers.GetProvider(channel, c)
if provider == nil {
@@ -69,13 +54,15 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
promptTokens := common.CountTokenMessages(request.Messages, request.Model)
Usage, openAIErrorWithStatusCode := chatProvider.ChatAction(&request, true, promptTokens)
if openAIErrorWithStatusCode != nil {
return nil, &openAIErrorWithStatusCode.OpenAIError
return errors.New(openAIErrorWithStatusCode.Message), &openAIErrorWithStatusCode.OpenAIError
}
if Usage.CompletionTokens == 0 {
return fmt.Errorf("channel %s, message 补全 tokens 非预期返回 0", channel.Name), nil
}
common.SysLog(fmt.Sprintf("测试模型 %s 返回内容为:%s", channel.Name, w.Body.String()))
return nil, nil
}
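
With this hunk the test uses the channel's own test_model instead of a hard-coded per-channel-type default, and returns an error when it is empty. A minimal usage sketch (the channel values below are illustrative; a real caller would also put a short test message in the request):

// illustrative only: exercise the updated testChannel with a configured test model
ch := &model.Channel{Name: "baichuan-cn", Type: common.ChannelTypeBaichuan, TestModel: "Baichuan2-Turbo"}
if err, openaiErr := testChannel(ch, types.ChatCompletionRequest{}); err != nil {
	common.SysLog(fmt.Sprintf("channel test failed: %v (%+v)", err, openaiErr))
}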

View File

@@ -26,6 +26,7 @@ type Channel struct {
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
Proxy string `json:"proxy" gorm:"type:varchar(255);default:''"`
TestModel string `json:"test_model" gorm:"type:varchar(50);default:''"`
}
func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {

View File

@@ -0,0 +1,30 @@
package baichuan
import (
"one-api/providers/base"
"one-api/providers/openai"
"github.com/gin-gonic/gin"
)
// define the provider factory
type BaichuanProviderFactory struct{}
// create a BaichuanProvider
// https://platform.baichuan-ai.com/docs/api
func (f BaichuanProviderFactory) Create(c *gin.Context) base.ProviderInterface {
return &BaichuanProvider{
OpenAIProvider: openai.OpenAIProvider{
BaseProvider: base.BaseProvider{
BaseURL: "https://api.baichuan-ai.com",
ChatCompletions: "/v1/chat/completions",
Embeddings: "/v1/embeddings",
Context: c,
},
},
}
}
type BaichuanProvider struct {
openai.OpenAIProvider
}

providers/baichuan/chat.go (new file, 100 additions)
View File

@@ -0,0 +1,100 @@
package baichuan
import (
"net/http"
"one-api/common"
"one-api/providers/openai"
"one-api/types"
"strings"
)
func (baichuanResponse *BaichuanChatResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
if baichuanResponse.Error.Message != "" {
errWithCode = &types.OpenAIErrorWithStatusCode{
OpenAIError: baichuanResponse.Error,
StatusCode: resp.StatusCode,
}
return
}
OpenAIResponse = types.ChatCompletionResponse{
ID: baichuanResponse.ID,
Object: baichuanResponse.Object,
Created: baichuanResponse.Created,
Model: baichuanResponse.Model,
Choices: baichuanResponse.Choices,
Usage: baichuanResponse.Usage,
}
return
}
// build the chat request body
func (p *BaichuanProvider) getChatRequestBody(request *types.ChatCompletionRequest) *BaichuanChatRequest {
messages := make([]BaichuanMessage, 0, len(request.Messages))
for i := 0; i < len(request.Messages); i++ {
message := request.Messages[i]
if message.Role == "system" || message.Role == "assistant" {
message.Role = "assistant"
} else {
message.Role = "user"
}
messages = append(messages, BaichuanMessage{
Content: message.StringContent(),
Role: strings.ToLower(message.Role),
})
}
return &BaichuanChatRequest{
Model: request.Model,
Messages: messages,
Stream: request.Stream,
Temperature: request.Temperature,
TopP: request.TopP,
TopK: request.N,
}
}
// chat
func (p *BaichuanProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
requestBody := p.getChatRequestBody(request)
fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
headers := p.GetRequestHeaders()
if request.Stream {
headers["Accept"] = "text/event-stream"
}
client := common.NewClient()
req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
if err != nil {
return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
}
if request.Stream {
openAIProviderChatStreamResponse := &openai.OpenAIProviderChatStreamResponse{}
var textResponse string
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
if errWithCode != nil {
return
}
usage = &types.Usage{
PromptTokens: promptTokens,
CompletionTokens: common.CountTokenText(textResponse, request.Model),
TotalTokens: promptTokens + common.CountTokenText(textResponse, request.Model),
}
} else {
baichuanResponse := &BaichuanChatResponse{}
errWithCode = p.SendRequest(req, baichuanResponse, false)
if errWithCode != nil {
return
}
usage = baichuanResponse.Usage
}
return
}

View File

@@ -0,0 +1,36 @@
package baichuan
import "one-api/providers/openai"
type BaichuanMessage struct {
Role string `json:"role"`
Content string `json:"content"`
}
type BaichuanKnowledgeBase struct {
Ids []string `json:"id"`
}
type BaichuanChatRequest struct {
Model string `json:"model"`
Messages []BaichuanMessage `json:"messages"`
Stream bool `json:"stream,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
WithSearchEnhance bool `json:"with_search_enhance,omitempty"`
KnowledgeBase BaichuanKnowledgeBase `json:"knowledge_base,omitempty"`
}
type BaichuanKnowledgeBaseResponse struct {
Cites []struct {
Title string `json:"title"`
Content string `json:"content"`
FileId string `json:"file_id"`
} `json:"cites"`
}
type BaichuanChatResponse struct {
openai.OpenAIProviderChatResponse
KnowledgeBase BaichuanKnowledgeBaseResponse `json:"knowledge_base,omitempty"`
}

View File

@@ -108,7 +108,7 @@ func (p *OpenAIProvider) GetRequestBody(request any, isModelMapped bool) (reques
}
// send a streaming request
func (p *OpenAIProvider) sendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) {
func (p *OpenAIProvider) SendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) {
defer req.Body.Close()
client := common.GetHttpClient(p.Channel.Proxy)

View File

@@ -46,7 +46,7 @@ func (p *OpenAIProvider) ChatAction(request *types.ChatCompletionRequest, isMode
if request.Stream {
openAIProviderChatStreamResponse := &OpenAIProviderChatStreamResponse{}
var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderChatStreamResponse)
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
if errWithCode != nil {
return
}

View File

@@ -47,7 +47,7 @@ func (p *OpenAIProvider) CompleteAction(request *types.CompletionRequest, isMode
if request.Stream {
// TODO
var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderCompletionResponse)
errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderCompletionResponse)
if errWithCode != nil {
return
}

View File

@@ -10,6 +10,7 @@ import (
"one-api/providers/api2gpt"
"one-api/providers/azure"
azurespeech "one-api/providers/azureSpeech"
"one-api/providers/baichuan"
"one-api/providers/baidu"
"one-api/providers/base"
"one-api/providers/claude"
@@ -52,6 +53,7 @@ func init() {
providerFactories[common.ChannelTypeAPI2GPT] = api2gpt.Api2gptProviderFactory{}
providerFactories[common.ChannelTypeAzureSpeech] = azurespeech.AzureSpeechProviderFactory{}
providerFactories[common.ChannelTypeGemini] = gemini.GeminiProviderFactory{}
providerFactories[common.ChannelTypeBaichuan] = baichuan.BaichuanProviderFactory{}
}

View File

@@ -65,6 +65,12 @@ export const CHANNEL_OPTIONS = {
value: 23,
color: 'default'
},
26: {
key: 26,
text: '百川',
value: 26,
color: 'orange'
},
24: {
key: 24,
text: 'Azure Speech',

View File

@@ -36,6 +36,7 @@ const validationSchema = Yup.object().shape({
key: Yup.string().when('is_edit', { is: false, then: Yup.string().required('密钥 不能为空') }),
other: Yup.string(),
proxy: Yup.string(),
test_model: Yup.string(),
models: Yup.array().min(1, '模型 不能为空'),
groups: Yup.array().min(1, '用户组 不能为空'),
base_url: Yup.string().when('type', {
@@ -90,7 +91,7 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
if (newInput) {
Object.keys(newInput).forEach((key) => {
if (
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined) ||
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined && values[key] !== '') ||
(Array.isArray(values[key]) && values[key].length > 0)
) {
return;
@@ -464,6 +465,29 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
<FormHelperText id="helper-tex-channel-proxy-label"> {inputPrompt.proxy} </FormHelperText>
)}
</FormControl>
{inputPrompt.test_model && (
<FormControl fullWidth error={Boolean(touched.test_model && errors.test_model)} sx={{ ...theme.typography.otherInput }}>
<InputLabel htmlFor="channel-test_model-label">{inputLabel.test_model}</InputLabel>
<OutlinedInput
id="channel-test_model-label"
label={inputLabel.test_model}
type="text"
value={values.test_model}
name="test_model"
onBlur={handleBlur}
onChange={handleChange}
inputProps={{}}
aria-describedby="helper-text-channel-test_model-label"
/>
{touched.test_model && errors.test_model ? (
<FormHelperText error id="helper-tex-channel-test_model-label">
{errors.test_model}
</FormHelperText>
) : (
<FormHelperText id="helper-tex-channel-test_model-label"> {inputPrompt.test_model} </FormHelperText>
)}
</FormControl>
)}
<DialogActions>
<Button onClick={onCancel}>取消</Button>
<Button disableElevation disabled={isSubmitting} type="submit" variant="contained" color="primary">

View File

@@ -116,7 +116,7 @@ export default function ChannelPage() {
if (success) {
showSuccess('操作成功完成!');
if (action === 'delete') {
await loadChannels(0);
await handleRefresh();
}
} else {
showError(message);
@@ -127,9 +127,7 @@ export default function ChannelPage() {
// handle refresh
const handleRefresh = async () => {
await loadChannels(0);
setActivePage(0);
setSearchKeyword('');
await loadChannels(activePage);
};
// handle testing all enabled channels

View File

@@ -6,6 +6,7 @@ const defaultConfig = {
base_url: '',
other: '',
proxy: '',
test_model: '',
model_mapping: '',
models: [],
groups: ['default']
@@ -17,6 +18,7 @@ const defaultConfig = {
key: '密钥',
other: '其他参数',
proxy: '代理地址',
test_model: '测速模型',
models: '模型',
model_mapping: '模型映射关系',
groups: '用户组'
@@ -28,6 +30,7 @@ const defaultConfig = {
key: '请输入渠道对应的鉴权密钥',
other: '',
proxy: '单独设置代理地址支持http和socks5例如http://127.0.0.1:1080',
test_model: '用于测试使用的模型,为空时无法测速,如gpt-3.5-turbo',
models: '请选择该渠道所支持的模型',
model_mapping:
'请输入要修改的模型映射关系格式为api请求模型ID:实际转发给渠道的模型ID使用JSON数组表示例如{"gpt-3.5": "gpt-35"}',
@@ -48,17 +51,20 @@ const typeConfig = {
},
11: {
input: {
models: ['PaLM-2']
models: ['PaLM-2'],
test_model: 'PaLM-2'
}
},
14: {
input: {
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1']
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1'],
test_model: 'claude-2'
}
},
15: {
input: {
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1']
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'],
test_model: 'ERNIE-Bot'
},
prompt: {
key: '按照如下格式输入APIKey|SecretKey'
@@ -66,7 +72,8 @@ const typeConfig = {
},
16: {
input: {
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite']
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'],
test_model: 'chatglm_lite'
}
},
17: {
@@ -84,7 +91,8 @@ const typeConfig = {
'qwen-plus-internet',
'qwen-max-internet',
'qwen-max-longcontext-internet'
]
],
test_model: 'qwen-turbo'
},
prompt: {
other: '请输入插件参数,即 X-DashScope-Plugin 请求头的取值'
@@ -104,7 +112,8 @@ const typeConfig = {
},
19: {
input: {
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1']
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'],
test_model: '360GPT_S2_V9'
}
},
22: {
@@ -114,7 +123,8 @@ const typeConfig = {
},
23: {
input: {
models: ['hunyuan']
models: ['hunyuan'],
test_model: 'hunyuan'
},
prompt: {
key: '按照如下格式输入AppId|SecretId|SecretKey'
@@ -125,11 +135,26 @@ const typeConfig = {
other: '版本号'
},
input: {
models: ['gemini-pro']
models: ['gemini-pro', 'gemini-pro-vision'],
test_model: 'gemini-pro'
},
prompt: {
other: '请输入版本号例如v1'
}
},
26: {
input: {
models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan2-53B', 'Baichuan-Text-Embedding'],
test_model: 'Baichuan2-Turbo'
}
},
24: {
input: {
models: ['tts-1', 'tts-1-hd']
},
prompt: {
test_model: ''
}
}
};

View File

@@ -108,7 +108,7 @@ export default function Token() {
if (success) {
showSuccess('操作成功完成!');
if (action === 'delete') {
await loadTokens(0);
await handleRefresh();
}
} else {
showError(message);
@@ -119,9 +119,7 @@ export default function Token() {
// handle refresh
const handleRefresh = async () => {
await loadTokens(0);
setActivePage(0);
setSearchKeyword('');
await loadTokens(activePage);
};
const handleOpenModal = (tokenId) => {

View File

@@ -109,9 +109,7 @@ export default function Users() {
// handle refresh
const handleRefresh = async () => {
await loadUsers(0);
setActivePage(0);
setSearchKeyword('');
await loadUsers(activePage);
};
const handleOpenModal = (userId) => {