Compare commits

...

8 Commits

Author SHA1 Message Date
JustSong
ed717211aa chore: adjust default rate limit config 2024-06-13 00:35:37 +08:00
JustSong
6ccf3f3cfc chore: add logger.SysLogf function 2024-06-13 00:28:56 +08:00
jinjianming
f74577141c fix: fix default token not created in some cases (#1510)
* Fix the issue where users registering via git, WeChat, etc. do not get a default token created

Fix the issue where users registering via git, WeChat, etc. do not get a default token created

* Fix the issue where users registering via git, WeChat, etc. do not get a default token created

Remove the regular-user registration code

* fix: do not block if error happened

---------

Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-06-13 00:20:48 +08:00
Buer
6aafb7a99e fix: channel edit settings key error (#1496) 2024-06-13 00:08:49 +08:00
Zhong Liu
c1971870fa fix: support for Spark Lite model (#1526)
* fix: Support for Spark Lite model

* fix: fix panic

* fix: fix xunfei version config

---------

Co-authored-by: JustSong <39998050+songquanpeng@users.noreply.github.com>
Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-06-13 00:07:26 +08:00
wagxuebing
f83894c83f fix: xunfei interface call 4001 error (#1499)
Co-authored-by: lynnssb <lynntobing@gmail.com>
2024-06-12 23:12:58 +08:00
fxsome
e9981fff36 feat: post all messages for cloudflare (#1515) 2024-06-08 13:34:23 +08:00
取梦为饮
98669d5d48 feat: add support for bytedance's doubao (#1438)
* Add support for Doubao large models

* chore: update channel options & add prompt

---------

Co-authored-by: 康龙彪 <longbiao.kang@i-tudou.com>
Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-06-08 13:26:26 +08:00
22 changed files with 142 additions and 86 deletions

View File

@@ -68,6 +68,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
+ [x] [Anthropic Claude 系列模型](https://anthropic.com) (支持 AWS Claude)
+ [x] [Google PaLM2/Gemini 系列模型](https://developers.generativeai.google)
+ [x] [Mistral 系列模型](https://mistral.ai/)
+ [x] [字节跳动豆包大模型](https://console.volcengine.com/ark/region:ark+cn-beijing/model)
+ [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
+ [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
+ [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
@@ -76,7 +77,6 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
+ [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729)
+ [x] [Moonshot AI](https://platform.moonshot.cn/)
+ [x] [百川大模型](https://platform.baichuan-ai.com)
+ [ ] [字节云雀大模型](https://www.volcengine.com/product/ark) (WIP)
+ [x] [MINIMAX](https://api.minimax.chat/)
+ [x] [Groq](https://wow.groq.com/)
+ [x] [Ollama](https://github.com/ollama/ollama)

View File

@@ -117,10 +117,10 @@ var ValidThemes = map[string]bool{
// All durations are in seconds
// Shouldn't be larger than RateLimitKeyExpirationDuration
var (
- GlobalApiRateLimitNum = env.Int("GLOBAL_API_RATE_LIMIT", 180)
+ GlobalApiRateLimitNum = env.Int("GLOBAL_API_RATE_LIMIT", 240)
GlobalApiRateLimitDuration int64 = 3 * 60
- GlobalWebRateLimitNum = env.Int("GLOBAL_WEB_RATE_LIMIT", 60)
+ GlobalWebRateLimitNum = env.Int("GLOBAL_WEB_RATE_LIMIT", 120)
GlobalWebRateLimitDuration int64 = 3 * 60
UploadRateLimitNum = 10
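The new defaults only apply when the corresponding environment variable is unset. A minimal sketch of what a helper like env.Int presumably does (the real implementation lives in common/env and may differ):

package env

import (
	"os"
	"strconv"
)

// Int reads an integer from the environment, falling back to
// defaultValue when the variable is unset or not a valid integer.
// Assumed implementation, for illustration only.
func Int(name string, defaultValue int) int {
	s := os.Getenv(name)
	if s == "" {
		return defaultValue
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return defaultValue
	}
	return n
}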

View File

@@ -43,11 +43,19 @@ func SysLog(s string) {
_, _ = fmt.Fprintf(gin.DefaultWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
}
func SysLogf(format string, a ...any) {
SysLog(fmt.Sprintf(format, a...))
}
func SysError(s string) {
t := time.Now()
_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
}
func SysErrorf(format string, a ...any) {
SysError(fmt.Sprintf(format, a...))
}
func Debug(ctx context.Context, msg string) {
if config.DebugEnabled {
logHelper(ctx, loggerDEBUG, msg)
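Both helpers are thin printf-style wrappers, so call sites can drop the explicit fmt.Sprintf. A usage sketch (the first call appears verbatim in the main.go change below; the second is a hypothetical example):

logger.SysLogf("One API %s started", common.Version)
logger.SysErrorf("failed to initialize database: %v", err) // hypothetical call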

View File

@@ -6,8 +6,6 @@ import (
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/ctxkey"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/common/random"
"github.com/songquanpeng/one-api/model"
"net/http"
@@ -111,7 +109,6 @@ func Logout(c *gin.Context) {
}
func Register(c *gin.Context) {
ctx := c.Request.Context()
if !config.RegisterEnabled {
c.JSON(http.StatusOK, gin.H{
"message": "管理员关闭了新用户注册",
@@ -176,28 +173,7 @@ func Register(c *gin.Context) {
})
return
}
go func() {
err := user.ValidateAndFill()
if err != nil {
logger.Errorf(ctx, "user.ValidateAndFill failed: %w", err)
return
}
cleanToken := model.Token{
UserId: user.Id,
Name: "default",
Key: random.GenerateKey(),
CreatedTime: helper.GetTimestamp(),
AccessedTime: helper.GetTimestamp(),
ExpiredTime: -1,
RemainQuota: -1,
UnlimitedQuota: true,
}
err = cleanToken.Insert()
if err != nil {
logger.Errorf(ctx, "cleanToken.Insert failed: %w", err)
return
}
}()
c.JSON(http.StatusOK, gin.H{
"success": true,
"message": "",

View File

@@ -24,7 +24,7 @@ var buildFS embed.FS
func main() {
logger.SetupLogger()
- logger.SysLog(fmt.Sprintf("One API %s started", common.Version))
+ logger.SysLogf("One API %s started", common.Version)
if os.Getenv("GIN_MODE") != "debug" {
gin.SetMode(gin.ReleaseMode)
}

View File

@@ -67,26 +67,28 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
c.Set(ctxkey.BaseURL, channel.GetBaseURL())
cfg, _ := channel.LoadConfig()
// this is for backward compatibility
- switch channel.Type {
- case channeltype.Azure:
- if cfg.APIVersion == "" {
- cfg.APIVersion = channel.Other
- }
- case channeltype.Xunfei:
- if cfg.APIVersion == "" {
- cfg.APIVersion = channel.Other
- }
- case channeltype.Gemini:
- if cfg.APIVersion == "" {
- cfg.APIVersion = channel.Other
- }
- case channeltype.AIProxyLibrary:
- if cfg.LibraryID == "" {
- cfg.LibraryID = channel.Other
- }
- case channeltype.Ali:
- if cfg.Plugin == "" {
- cfg.Plugin = channel.Other
+ if channel.Other != nil {
+ switch channel.Type {
+ case channeltype.Azure:
+ if cfg.APIVersion == "" {
+ cfg.APIVersion = *channel.Other
+ }
+ case channeltype.Xunfei:
+ if cfg.APIVersion == "" {
+ cfg.APIVersion = *channel.Other
+ }
+ case channeltype.Gemini:
+ if cfg.APIVersion == "" {
+ cfg.APIVersion = *channel.Other
+ }
+ case channeltype.AIProxyLibrary:
+ if cfg.LibraryID == "" {
+ cfg.LibraryID = *channel.Other
+ }
+ case channeltype.Ali:
+ if cfg.Plugin == "" {
+ cfg.Plugin = *channel.Other
+ }
+ }
+ }
c.Set(ctxkey.Config, cfg)
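Because Other is now a *string (see the model change below), a nil pointer, meaning no legacy value at all, can be told apart from an empty string, hence the nil guard before every dereference. The same pattern as a standalone sketch, with a hypothetical helper name:

// otherOrDefault illustrates the nil-guard used above: a nil pointer
// means the deprecated Other column carries no value to fall back on.
func otherOrDefault(other *string, fallback string) string {
	if other == nil || *other == "" {
		return fallback
	}
	return *other
}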

View File

@@ -27,7 +27,7 @@ type Channel struct {
TestTime int64 `json:"test_time" gorm:"bigint"`
ResponseTime int `json:"response_time"` // in milliseconds
BaseURL *string `json:"base_url" gorm:"column:base_url;default:''"`
- Other string `json:"other"` // DEPRECATED: please save config to field Config
+ Other *string `json:"other"` // DEPRECATED: please save config to field Config
Balance float64 `json:"balance"` // in USD
BalanceUpdatedTime int64 `json:"balance_updated_time" gorm:"bigint"`
Models string `json:"models"`

View File

@@ -6,6 +6,7 @@ import (
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/blacklist"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"
"github.com/songquanpeng/one-api/common/random"
"gorm.io/gorm"
@@ -140,6 +141,22 @@ func (user *User) Insert(inviterId int) error {
RecordLog(inviterId, LogTypeSystem, fmt.Sprintf("邀请用户赠送 %s", common.LogQuota(config.QuotaForInviter)))
}
}
// create default token
cleanToken := Token{
UserId: user.Id,
Name: "default",
Key: random.GenerateKey(),
CreatedTime: helper.GetTimestamp(),
AccessedTime: helper.GetTimestamp(),
ExpiredTime: -1,
RemainQuota: -1,
UnlimitedQuota: true,
}
result.Error = cleanToken.Insert()
if result.Error != nil {
// do not block
logger.SysError(fmt.Sprintf("create default token for user %d failed: %s", user.Id, result.Error.Error()))
}
return nil
}
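Unlike the goroutine removed from the Register handler above, the default token is now created synchronously inside Insert, and a failure is logged without failing registration. Condensed, the tail of Insert behaves like this sketch:

if err := cleanToken.Insert(); err != nil {
	// log, but do not block registration on a default-token failure
	logger.SysError(fmt.Sprintf("create default token for user %d failed: %s", user.Id, err.Error()))
}
return nil // the user row was already created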

View File

@@ -17,15 +17,21 @@ import (
)
func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
- lastMessage := textRequest.Messages[len(textRequest.Messages)-1]
- return &Request{
- MaxTokens: textRequest.MaxTokens,
- Prompt: lastMessage.StringContent(),
- Stream: textRequest.Stream,
- Temperature: textRequest.Temperature,
- }
+ var promptBuilder strings.Builder
+ for _, message := range textRequest.Messages {
+ promptBuilder.WriteString(message.StringContent())
+ promptBuilder.WriteString("\n") // add a newline to separate messages
+ }
+ return &Request{
+ MaxTokens: textRequest.MaxTokens,
+ Prompt: promptBuilder.String(),
+ Stream: textRequest.Stream,
+ Temperature: textRequest.Temperature,
+ }
}
func ResponseCloudflare2OpenAI(cloudflareResponse *Response) *openai.TextResponse {
choice := openai.TextResponseChoice{
Index: 0,

View File

@@ -0,0 +1,13 @@
package doubao
// https://console.volcengine.com/ark/region:ark+cn-beijing/model
var ModelList = []string{
"Doubao-pro-128k",
"Doubao-pro-32k",
"Doubao-pro-4k",
"Doubao-lite-128k",
"Doubao-lite-32k",
"Doubao-lite-4k",
"Doubao-embedding",
}

View File

@@ -0,0 +1,14 @@
package doubao
import (
"fmt"
"github.com/songquanpeng/one-api/relay/meta"
"github.com/songquanpeng/one-api/relay/relaymode"
)
func GetRequestURL(meta *meta.Meta) (string, error) {
if meta.Mode == relaymode.ChatCompletions {
return fmt.Sprintf("%s/api/v3/chat/completions", meta.BaseURL), nil
}
return "", fmt.Errorf("unsupported relay mode %d for doubao", meta.Mode)
}
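With the default base URL registered for channel 40 below (https://ark.cn-beijing.volces.com), a chat-completions request resolves to the v3 ark endpoint. A quick illustration, assuming only the meta.Meta fields used above:

m := &meta.Meta{Mode: relaymode.ChatCompletions, BaseURL: "https://ark.cn-beijing.volces.com"}
u, _ := doubao.GetRequestURL(m)
fmt.Println(u) // https://ark.cn-beijing.volces.com/api/v3/chat/completions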

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/songquanpeng/one-api/relay/adaptor"
"github.com/songquanpeng/one-api/relay/adaptor/doubao"
"github.com/songquanpeng/one-api/relay/adaptor/minimax"
"github.com/songquanpeng/one-api/relay/channeltype"
"github.com/songquanpeng/one-api/relay/meta"
@@ -45,6 +46,8 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
return GetFullRequestURL(meta.BaseURL, requestURL, meta.ChannelType), nil
case channeltype.Minimax:
return minimax.GetRequestURL(meta)
case channeltype.Doubao:
return doubao.GetRequestURL(meta)
default:
return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/ai360"
"github.com/songquanpeng/one-api/relay/adaptor/baichuan"
"github.com/songquanpeng/one-api/relay/adaptor/deepseek"
"github.com/songquanpeng/one-api/relay/adaptor/doubao"
"github.com/songquanpeng/one-api/relay/adaptor/groq"
"github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
"github.com/songquanpeng/one-api/relay/adaptor/minimax"
@@ -20,6 +21,7 @@ var CompatibleChannels = []int{
channeltype.Moonshot,
channeltype.Baichuan,
channeltype.Minimax,
channeltype.Doubao,
channeltype.Mistral,
channeltype.Groq,
channeltype.LingYiWanWu,
@@ -52,6 +54,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
return "deepseek", deepseek.ModelList
case channeltype.TogetherAI:
return "together.ai", togetherai.ModelList
case channeltype.Doubao:
return "doubao", doubao.ModelList
default:
return "openai", ModelList
}

View File

@@ -27,14 +27,6 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) error {
adaptor.SetupCommonRequestHeader(c, req, meta)
- version := parseAPIVersionByModelName(meta.ActualModelName)
- if version == "" {
- version = a.meta.Config.APIVersion
- }
- if version == "" {
- version = "v1.1"
- }
- a.meta.Config.APIVersion = version
// check DoResponse for auth part
return nil
}
@@ -69,6 +61,14 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
if a.request == nil {
return nil, openai.ErrorWrapper(errors.New("request is nil"), "request_is_nil", http.StatusBadRequest)
}
+ version := parseAPIVersionByModelName(meta.ActualModelName)
+ if version == "" {
+ version = a.meta.Config.APIVersion
+ }
+ if version == "" {
+ version = "v1.1"
+ }
+ a.meta.Config.APIVersion = version
if meta.IsStream {
err, usage = StreamHandler(c, meta, *a.request, splits[0], splits[1], splits[2])
} else {
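The fallback chain is unchanged; only its timing moved from SetupRequestHeader to DoResponse. Restated as a standalone function, assuming parseAPIVersionByModelName returns an empty string for unrecognized model names:

// resolveAPIVersion restates the fallback order used above:
// model name first, then the channel's configured version, then "v1.1".
func resolveAPIVersion(modelName, configured string) string {
	if v := parseAPIVersionByModelName(modelName); v != "" {
		return v
	}
	if configured != "" {
		return configured
	}
	return "v1.1"
}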

View File

@@ -5,7 +5,14 @@ import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"github.com/songquanpeng/one-api/common"
@@ -16,11 +23,6 @@ import (
"github.com/songquanpeng/one-api/relay/constant"
"github.com/songquanpeng/one-api/relay/meta"
"github.com/songquanpeng/one-api/relay/model"
"io"
"net/http"
"net/url"
"strings"
"time"
)
// https://console.xfyun.cn/services/cbm
@@ -28,11 +30,7 @@ import (
func requestOpenAI2Xunfei(request model.GeneralOpenAIRequest, xunfeiAppId string, domain string) *ChatRequest {
messages := make([]Message, 0, len(request.Messages))
- var lastToolCalls []model.Tool
for _, message := range request.Messages {
- if message.ToolCalls != nil {
- lastToolCalls = message.ToolCalls
- }
messages = append(messages, Message{
Role: message.Role,
Content: message.StringContent(),
@@ -45,9 +43,10 @@ func requestOpenAI2Xunfei(request model.GeneralOpenAIRequest, xunfeiAppId string
xunfeiRequest.Parameter.Chat.TopK = request.N
xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens
xunfeiRequest.Payload.Message.Text = messages
- if len(lastToolCalls) != 0 {
- for _, toolCall := range lastToolCalls {
- xunfeiRequest.Payload.Functions.Text = append(xunfeiRequest.Payload.Functions.Text, toolCall.Function)
+ if strings.HasPrefix(domain, "generalv3") {
+ xunfeiRequest.Payload.Functions = &Functions{
+ Text: request.Tools,
+ }
}
@@ -203,7 +202,7 @@ func Handler(c *gin.Context, meta *meta.Meta, textRequest model.GeneralOpenAIReq
}
}
if len(xunfeiResponse.Payload.Choices.Text) == 0 {
- return openai.ErrorWrapper(err, "xunfei_empty_response_detected", http.StatusInternalServerError), nil
+ return openai.ErrorWrapper(errors.New("xunfei empty response detected"), "xunfei_empty_response_detected", http.StatusInternalServerError), nil
}
xunfeiResponse.Payload.Choices.Text[0].Content = content

View File

@@ -9,6 +9,10 @@ type Message struct {
Content string `json:"content"`
}
type Functions struct {
Text []model.Tool `json:"text,omitempty"`
}
type ChatRequest struct {
Header struct {
AppId string `json:"app_id"`
@@ -26,9 +30,7 @@ type ChatRequest struct {
Message struct {
Text []Message `json:"text"`
} `json:"message"`
- Functions struct {
- Text []model.Function `json:"text,omitempty"`
- } `json:"functions,omitempty"`
+ Functions *Functions `json:"functions,omitempty"`
} `json:"payload"`
}
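Switching Functions from an inline struct to a *Functions pointer matters because encoding/json's omitempty never omits a zero-valued struct, only a nil pointer, so the old type always serialized a "functions" key even when no tools were requested, which Spark Lite apparently rejects. A self-contained sketch of the difference, using stand-in types:

package main

import (
	"encoding/json"
	"fmt"
)

type Functions struct {
	Text []string `json:"text,omitempty"`
}

type withValue struct {
	Functions Functions `json:"functions,omitempty"` // zero struct is still marshalled
}

type withPointer struct {
	Functions *Functions `json:"functions,omitempty"` // nil pointer is dropped
}

func main() {
	a, _ := json.Marshal(withValue{})
	b, _ := json.Marshal(withPointer{})
	fmt.Println(string(a)) // {"functions":{}}
	fmt.Println(string(b)) // {}
}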

View File

@@ -41,6 +41,6 @@ const (
Cloudflare
DeepL
TogetherAI
Doubao
Dummy
)

View File

@@ -41,6 +41,7 @@ var ChannelBaseURLs = []string{
"https://api.cloudflare.com", // 37
"https://api-free.deepl.com", // 38
"https://api.together.xyz", // 39
"https://ark.cn-beijing.volces.com", // 40
}
func init() {

View File

@@ -47,6 +47,12 @@ export const CHANNEL_OPTIONS = {
value: 28,
color: 'warning'
},
40: {
key: 40,
text: '字节跳动豆包',
value: 40,
color: 'primary'
},
15: {
key: 15,
text: '百度文心千帆',

View File

@@ -163,7 +163,7 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
values.other = 'v2.1';
}
if (values.key === '') {
- if (values.config.ak !== '' && values.config.sk !== '' && values.config.region !== '') {
+ if (values.config.ak && values.config.sk && values.config.region) {
values.key = `${values.config.ak}|${values.config.sk}|${values.config.region}`;
}
}

View File

@@ -6,6 +6,7 @@ export const CHANNEL_OPTIONS = [
{key: 11, text: 'Google PaLM2', value: 11, color: 'orange'},
{key: 24, text: 'Google Gemini', value: 24, color: 'orange'},
{key: 28, text: 'Mistral AI', value: 28, color: 'orange'},
{key: 40, text: '字节跳动豆包', value: 40, color: 'blue'},
{key: 15, text: '百度文心千帆', value: 15, color: 'blue'},
{key: 17, text: '阿里通义千问', value: 17, color: 'orange'},
{key: 18, text: '讯飞星火认知', value: 18, color: 'blue'},

View File

@@ -181,9 +181,6 @@ const EditChannel = () => {
if (localInputs.type === 3 && localInputs.other === '') {
localInputs.other = '2024-03-01-preview';
}
if (localInputs.type === 18 && localInputs.other === '') {
localInputs.other = 'v2.1';
}
let res;
localInputs.models = localInputs.models.join(',');
localInputs.group = localInputs.groups.join(',');
@@ -362,6 +359,13 @@ const EditChannel = () => {
</Message>
)
}
{
inputs.type === 40 && (
<Message>
对于豆包而言,需要手动去 <a target="_blank" href="https://console.volcengine.com/ark/region:ark+cn-beijing/endpoint">模型推理页面</a> 创建推理接入点,并以接入点名称作为模型名称,例如:`ep-20240608051426-tkxvl`
</Message>
)
}
<Form.Field>
<Form.Dropdown
label='模型'