Compare commits

...

13 Commits

Author SHA1 Message Date
JustSong 593e1926e9 feat: able to disable quota consumption recording (close #156) 2023-06-15 16:32:16 +08:00
quzard e87ad1f402 chore: remove -0613 suffix for Azure (#163) 2023-06-14 16:33:03 +08:00
JustSong 07cccdc8c0 docs: update issue template 2023-06-14 15:13:05 +08:00
JustSong f71f01662c docs: update issue template 2023-06-14 15:03:51 +08:00
JustSong 54d7a1c2e8 docs: update issue template 2023-06-14 15:02:36 +08:00
JustSong f426f31bd7 docs: update issue template 2023-06-14 14:59:24 +08:00
JustSong 2930577cd6 docs: update issue template 2023-06-14 14:51:48 +08:00
JustSong e09512177a docs: add issue templates 2023-06-14 14:48:31 +08:00
JustSong d6dbaff3c2 fix: fix file not committed 2023-06-14 12:52:56 +08:00
JustSong 7f9577a386 feat: now one channel can belong to multiple groups (close #153) 2023-06-14 12:14:08 +08:00
JustSong 38668e7331 chore: update gpt3.5 completion ratio 2023-06-14 09:41:06 +08:00
JustSong 323f3d263a feat: add new released models 2023-06-14 09:12:14 +08:00
JustSong 0c34ed4c61 docs: update README 2023-06-13 17:45:01 +08:00
14 changed files with 161 additions and 23 deletions

.github/ISSUE_TEMPLATE/bug_report.md (new file)

@@ -0,0 +1,23 @@
---
name: 报告问题
about: 使用简练详细的语言描述你遇到的问题
title: ''
labels: bug
assignees: ''
---
**例行检查**

+ [ ] 我已确认目前没有类似 issue
+ [ ] 我已确认我已升级到最新版本
+ [ ] 我理解并愿意跟进此 issue，协助测试和提供反馈
+ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,不遵循规则的 issue 可能会被无视或直接关闭

**问题描述**

**复现步骤**

**预期结果**

**相关截图**
如果没有的话,请删除此节。

.github/ISSUE_TEMPLATE/config.yml (new file)

@@ -0,0 +1,11 @@
blank_issues_enabled: false
contact_links:
  - name: 项目群聊
    url: https://openai.justsong.cn/
    about: 演示站首页有官方群聊信息
  - name: 赞赏支持
    url: https://iamazing.cn/page/reward
    about: 请作者喝杯咖啡,以激励作者持续开发
  - name: 付费部署或定制功能
    url: https://openai.justsong.cn/
    about: 加群后联系群主

View File

@@ -0,0 +1,18 @@
---
name: 功能请求
about: 使用简练详细的语言描述希望加入的新功能
title: ''
labels: enhancement
assignees: ''
---
**例行检查**

+ [ ] 我已确认目前没有类似 issue
+ [ ] 我已确认我已升级到最新版本
+ [ ] 我理解并愿意跟进此 issue，协助测试和提供反馈
+ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,不遵循规则的 issue 可能会被无视或直接关闭

**功能描述**

**应用场景**

View File

@@ -117,6 +117,8 @@ sudo certbot --nginx
sudo service nginx restart
```
+初始账号用户名为 `root`,密码为 `123456`
### 手动部署
1. 从 [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) 下载可执行文件或者从源码编译:
```shell

View File

@@ -35,6 +35,8 @@ var WeChatAuthEnabled = false
var TurnstileCheckEnabled = false
var RegisterEnabled = true
+var LogConsumeEnabled = true

var SMTPServer = ""
var SMTPPort = 587
var SMTPAccount = ""

View File

@@ -2,16 +2,23 @@ package common
import "encoding/json" import "encoding/json"
// ModelRatio
// https://platform.openai.com/docs/models/model-endpoint-compatibility // https://platform.openai.com/docs/models/model-endpoint-compatibility
// https://openai.com/pricing // https://openai.com/pricing
// TODO: when a new api is enabled, check the pricing here // TODO: when a new api is enabled, check the pricing here
// 1 === $0.002 / 1K tokens
var ModelRatio = map[string]float64{ var ModelRatio = map[string]float64{
"gpt-4": 15, "gpt-4": 15,
"gpt-4-0314": 15, "gpt-4-0314": 15,
"gpt-4-0613": 15,
"gpt-4-32k": 30, "gpt-4-32k": 30,
"gpt-4-32k-0314": 30, "gpt-4-32k-0314": 30,
"gpt-3.5-turbo": 1, // $0.002 / 1K tokens "gpt-4-32k-0613": 30,
"gpt-3.5-turbo-0301": 1, "gpt-3.5-turbo": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-0301": 0.75,
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"text-ada-001": 0.2, "text-ada-001": 0.2,
"text-babbage-001": 0.25, "text-babbage-001": 0.25,
"text-curie-001": 1, "text-curie-001": 1,

View File

@@ -71,6 +71,33 @@ func init() {
Root: "gpt-3.5-turbo-0301", Root: "gpt-3.5-turbo-0301",
Parent: nil, Parent: nil,
}, },
{
Id: "gpt-3.5-turbo-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-0613",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k",
Parent: nil,
},
{
Id: "gpt-3.5-turbo-16k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-3.5-turbo-16k-0613",
Parent: nil,
},
{ {
Id: "gpt-4", Id: "gpt-4",
Object: "model", Object: "model",
@@ -89,6 +116,15 @@ func init() {
Root: "gpt-4-0314", Root: "gpt-4-0314",
Parent: nil, Parent: nil,
}, },
{
Id: "gpt-4-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-0613",
Parent: nil,
},
{ {
Id: "gpt-4-32k", Id: "gpt-4-32k",
Object: "model", Object: "model",
@@ -107,6 +143,15 @@ func init() {
Root: "gpt-4-32k-0314", Root: "gpt-4-32k-0314",
Parent: nil, Parent: nil,
}, },
{
Id: "gpt-4-32k-0613",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "gpt-4-32k-0613",
Parent: nil,
},
{ {
Id: "text-embedding-ada-002", Id: "text-embedding-ada-002",
Object: "model", Object: "model",

View File

@@ -177,6 +177,7 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
	// https://github.com/songquanpeng/one-api/issues/67
	model_ = strings.TrimSuffix(model_, "-0301")
	model_ = strings.TrimSuffix(model_, "-0314")
+	model_ = strings.TrimSuffix(model_, "-0613")
	fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
} else if channelType == common.ChannelTypePaLM {
	err := relayPaLM(textRequest, c)
@@ -239,16 +240,15 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
defer func() {
	if consumeQuota {
		quota := 0
-		usingGPT4 := strings.HasPrefix(textRequest.Model, "gpt-4")
-		completionRatio := 1
-		if usingGPT4 {
+		completionRatio := 1.34 // default for gpt-3
+		if strings.HasPrefix(textRequest.Model, "gpt-4") {
			completionRatio = 2
		}
		if isStream {
			responseTokens := countTokenText(streamResponseText, textRequest.Model)
-			quota = promptTokens + responseTokens*completionRatio
+			quota = promptTokens + int(float64(responseTokens)*completionRatio)
		} else {
-			quota = textResponse.Usage.PromptTokens + textResponse.Usage.CompletionTokens*completionRatio
+			quota = textResponse.Usage.PromptTokens + int(float64(textResponse.Usage.CompletionTokens)*completionRatio)
		}
		quota = int(float64(quota) * ratio)
		if ratio != 0 && quota <= 0 {
@@ -260,7 +260,7 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
			common.SysError("Error consuming token remain quota: " + err.Error())
		}
		userId := c.GetInt("id")
-		model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("使用模型 %s 消耗 %d 点额度(模型倍率 %.2f,分组倍率 %.2f", textRequest.Model, quota, modelRatio, groupRatio))
+		model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("使用模型 %s 消耗 %d 点额度(模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f", textRequest.Model, quota, modelRatio, groupRatio, completionRatio))
	}
}()
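
Taken together, the hunks above change the billing math to quota = (promptTokens + completionTokens × completionRatio) × ratio, with completionRatio 1.34 for non-GPT-4 models and 2 for GPT-4. A standalone sketch of that formula, assuming the effective ratio is the product of the model and group ratios as the expanded log message (model ratio / group ratio / completion ratio) suggests; `computeQuota` is illustrative, not the actual relayHelper code:

```go
package main

import (
	"fmt"
	"strings"
)

// computeQuota mirrors the quota calculation in the hunk above as a pure function.
// Assumption: the effective ratio is modelRatio * groupRatio.
func computeQuota(promptTokens, completionTokens int, model string, modelRatio, groupRatio float64) int {
	completionRatio := 1.34 // new default for non-GPT-4 models
	if strings.HasPrefix(model, "gpt-4") {
		completionRatio = 2
	}
	quota := promptTokens + int(float64(completionTokens)*completionRatio)
	return int(float64(quota) * modelRatio * groupRatio)
}

func main() {
	// 1000 prompt + 1000 completion tokens on gpt-3.5-turbo (model ratio 0.75, default group ratio 1):
	// 1000 + 1340 = 2340, then 2340 * 0.75 = 1755 quota.
	fmt.Println(computeQuota(1000, 1000, "gpt-3.5-turbo", 0.75, 1))
}
```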

View File

@@ -30,15 +30,18 @@ func GetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
func (channel *Channel) AddAbilities() error {
	models_ := strings.Split(channel.Models, ",")
+	groups_ := strings.Split(channel.Group, ",")
	abilities := make([]Ability, 0, len(models_))
	for _, model := range models_ {
-		ability := Ability{
-			Group: channel.Group,
-			Model: model,
-			ChannelId: channel.Id,
-			Enabled: channel.Status == common.ChannelStatusEnabled,
+		for _, group := range groups_ {
+			ability := Ability{
+				Group: group,
+				Model: model,
+				ChannelId: channel.Id,
+				Enabled: channel.Status == common.ChannelStatusEnabled,
+			}
+			abilities = append(abilities, ability)
		}
-		abilities = append(abilities, ability)
	}
	return DB.Create(&abilities).Error
}
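
The net effect of this hunk (commit 7f9577a386) is that abilities become the cross product of a channel's groups and models rather than one row per model. A standalone sketch of that expansion, using a simplified stand-in for the Ability struct rather than the real GORM model:

```go
package main

import (
	"fmt"
	"strings"
)

// ability is a simplified stand-in for model.Ability, for illustration only.
type ability struct {
	Group     string
	Model     string
	ChannelId int
	Enabled   bool
}

// expandAbilities reproduces the nested loop above: one row per (model, group) pair.
func expandAbilities(channelId int, group string, models string, enabled bool) []ability {
	models_ := strings.Split(models, ",")
	groups_ := strings.Split(group, ",")
	abilities := make([]ability, 0, len(models_)*len(groups_))
	for _, model := range models_ {
		for _, g := range groups_ {
			abilities = append(abilities, ability{Group: g, Model: model, ChannelId: channelId, Enabled: enabled})
		}
	}
	return abilities
}

func main() {
	// A channel in groups "default,vip" serving two models yields 2 x 2 = 4 ability rows.
	for _, a := range expandAbilities(1, "default,vip", "gpt-3.5-turbo,gpt-4", true) {
		fmt.Printf("%+v\n", a)
	}
}
```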

View File

@@ -22,6 +22,9 @@ const (
)

func RecordLog(userId int, logType int, content string) {
+	if logType == LogTypeConsume && !common.LogConsumeEnabled {
+		return
+	}
	log := &Log{
		UserId: userId,
		CreatedAt: common.GetTimestamp(),

View File

@@ -34,6 +34,7 @@ func InitOptionMap() {
common.OptionMap["TurnstileCheckEnabled"] = strconv.FormatBool(common.TurnstileCheckEnabled) common.OptionMap["TurnstileCheckEnabled"] = strconv.FormatBool(common.TurnstileCheckEnabled)
common.OptionMap["RegisterEnabled"] = strconv.FormatBool(common.RegisterEnabled) common.OptionMap["RegisterEnabled"] = strconv.FormatBool(common.RegisterEnabled)
common.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(common.AutomaticDisableChannelEnabled) common.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(common.AutomaticDisableChannelEnabled)
common.OptionMap["LogConsumeEnabled"] = strconv.FormatBool(common.LogConsumeEnabled)
common.OptionMap["ChannelDisableThreshold"] = strconv.FormatFloat(common.ChannelDisableThreshold, 'f', -1, 64) common.OptionMap["ChannelDisableThreshold"] = strconv.FormatFloat(common.ChannelDisableThreshold, 'f', -1, 64)
common.OptionMap["SMTPServer"] = "" common.OptionMap["SMTPServer"] = ""
common.OptionMap["SMTPFrom"] = "" common.OptionMap["SMTPFrom"] = ""
@@ -134,6 +135,8 @@ func updateOptionMap(key string, value string) (err error) {
		common.RegisterEnabled = boolValue
	case "AutomaticDisableChannelEnabled":
		common.AutomaticDisableChannelEnabled = boolValue
+	case "LogConsumeEnabled":
+		common.LogConsumeEnabled = boolValue
	}
}
switch key {

View File

@@ -34,6 +34,7 @@ const SystemSetting = () => {
    TopUpLink: '',
    AutomaticDisableChannelEnabled: '',
    ChannelDisableThreshold: 0,
+    LogConsumeEnabled: '',
  });
  const [originInputs, setOriginInputs] = useState({});
  let [loading, setLoading] = useState(false);
@@ -68,6 +69,7 @@ const SystemSetting = () => {
      case 'TurnstileCheckEnabled':
      case 'RegisterEnabled':
      case 'AutomaticDisableChannelEnabled':
+      case 'LogConsumeEnabled':
        value = inputs[key] === 'true' ? 'false' : 'true';
        break;
      default:
@@ -349,6 +351,12 @@ const SystemSetting = () => {
          placeholder='为一个 JSON 文本,键为分组名称,值为倍率'
        />
      </Form.Group>
+      <Form.Checkbox
+        checked={inputs.LogConsumeEnabled === 'true'}
+        label='启用额度消费日志记录'
+        name='LogConsumeEnabled'
+        onChange={handleInputChange}
+      />
      <Form.Button onClick={submitOperationConfig}>保存运营设置</Form.Button>
      <Divider />
      <Header as='h3'>

View File

@@ -10,10 +10,17 @@ export function renderText(text, limit) {
export function renderGroup(group) {
  if (group === "") {
    return <Label>default</Label>
-  } else if (group === "vip" || group === "pro") {
-    return <Label color='yellow'>{group}</Label>
-  } else if (group === "svip" || group === "premium") {
-    return <Label color='red'>{group}</Label>
  }
-  return <Label>{group}</Label>
+  let groups = group.split(",");
+  groups.sort();
+  return <>
+    {groups.map((group) => {
+      if (group === "vip" || group === "pro") {
+        return <Label color='yellow'>{group}</Label>
+      } else if (group === "svip" || group === "premium") {
+        return <Label color='red'>{group}</Label>
+      }
+      return <Label>{group}</Label>
+    })}
+  </>
}

View File

@@ -15,8 +15,8 @@ const EditChannel = () => {
    key: '',
    base_url: '',
    other: '',
-    group: 'default',
    models: [],
+    groups: ['default']
  };
  const [batch, setBatch] = useState(false);
  const [inputs, setInputs] = useState(originInputs);
@@ -37,6 +37,11 @@ const EditChannel = () => {
      } else {
        data.models = data.models.split(",")
      }
+      if (data.group === "") {
+        data.groups = []
+      } else {
+        data.groups = data.group.split(",")
+      }
      setInputs(data);
    } else {
      showError(message);
@@ -94,6 +99,7 @@ const EditChannel = () => {
    }
    let res;
    localInputs.models = localInputs.models.join(",")
+    localInputs.group = localInputs.groups.join(",")
    if (isEdit) {
      res = await API.put(`/api/channel/`, { ...localInputs, id: parseInt(channelId) });
    } else {
@@ -185,14 +191,14 @@ const EditChannel = () => {
        <Form.Dropdown
          label='分组'
          placeholder={'请选择分组'}
-          name='group'
+          name='groups'
          fluid
-          search
+          multiple
          selection
          allowAdditions
          additionLabel={'请在系统设置页面编辑分组倍率以添加新的分组:'}
          onChange={handleInputChange}
-          value={inputs.group}
+          value={inputs.groups}
          autoComplete='new-password'
          options={groupOptions}
        />