Compare commits

..

12 Commits

Author SHA1 Message Date
Soulter
af48975a6b chore: v3.4.18 2025-02-03 16:14:27 +08:00
Soulter
6441b149ab fix: 修复主动概率回复关闭后仍然回复的问题 #317 2025-02-03 14:33:53 +08:00
Soulter
f8892881f8 fix: 尝试修复 gewechat 群聊收不到 at 的回复 #294 2025-02-03 14:28:14 +08:00
Soulter
228aec5401 perf: 移除了默认人格 2025-02-03 14:17:45 +08:00
Soulter
68ad48ff55 fix: 修复HTTP代理删除后不生效 #319 2025-02-03 14:11:50 +08:00
Soulter
541ba64032 fix: 调用Gemini API输出多余空行问题 #318 2025-02-03 13:27:56 +08:00
Soulter
2d870b798c feat: 添加硅基流动模版 2025-02-03 13:24:22 +08:00
Soulter
0f1fe1ab63 fix: 硅基流动 not a vlm 和 tool calling not supported 报错 #305 #291
perf: 安装和更新插件后全量重启避免奇奇怪怪的bug
feat: 支持 /tool off_all 停用所有函数工具
2025-02-03 13:20:49 +08:00
Soulter
73cc86ddb1 perf: 回复时艾特发送者之后添加空格或换行 #312 2025-02-03 12:04:26 +08:00
Soulter
23128f4be2 perf: 主动回复不支持 qq_official 的 hint 2025-02-03 12:00:05 +08:00
Soulter
92200d0e82 fix: docker容器内时区不对 2025-02-03 01:15:09 +08:00
Soulter
d6e8655792 fix: 报错时首先移除 tool 2025-02-02 23:17:59 +08:00
12 changed files with 121 additions and 46 deletions

View File

@@ -12,6 +12,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
RUN python -m pip install -r requirements.txt
EXPOSE 6185

View File

@@ -2,7 +2,7 @@
如需修改配置,请在 `data/cmd_config.json` 中修改或者在管理面板中可视化修改。
"""
VERSION = "3.4.17"
VERSION = "3.4.18"
DB_PATH = "data/data_v3.db"
# 默认配置
@@ -83,14 +83,7 @@ DEFAULT_CONFIG = {
"pip_install_arg": "",
"plugin_repo_mirror": "",
"knowledge_db": {},
"persona": [
{
"name": "default",
"prompt": "",
"begin_dialogs": [],
"mood_imitation_dialogs": [],
}
],
"persona": [],
}
@@ -387,6 +380,16 @@ CONFIG_METADATA_2 = {
"model": "glm-4-flash",
},
},
"硅基流动": {
"id": "siliconflow",
"type": "openai_chat_completion",
"enable": True,
"key": [],
"api_base": "https://api.siliconflow.cn/v1",
"model_config": {
"model": "deepseek-ai/DeepSeek-V3",
},
},
"llmtuner": {
"id": "llmtuner_default",
"type": "llm_tuner",
@@ -616,14 +619,14 @@ CONFIG_METADATA_2 = {
"description": "预设对话",
"type": "list",
"items": {"type": "string"},
"hint": "可选。在每个对话前会插入这些预设对话。对话需要成对(用户和助手),输入完一个角色之后按回车",
"hint": "可选。在每个对话前会插入这些预设对话。对话需要成对(用户和助手),输入完一个角色的内容之后按回车。需要偶数个对话",
"obvious_hint": True,
},
"mood_imitation_dialogs": {
"description": "对话风格模仿",
"type": "list",
"items": {"type": "string"},
"hint": "旨在让模型尽可能模仿学习到所填写的对话的语气风格。格式和 `预设对话` 一。对话需要成对(用户和助手),输入完一个角色之后按回车",
"hint": "旨在让模型尽可能模仿学习到所填写的对话的语气风格。格式和 `预设对话` 一样。对话需要成对(用户和助手),输入完一个角色的内容之后按回车。需要偶数个对话",
"obvious_hint": True,
},
},
@@ -696,7 +699,7 @@ CONFIG_METADATA_2 = {
"description": "启用主动回复",
"type": "bool",
"obvious_hint": True,
"hint": "启用后,会根据触发概率主动回复群聊内的对话。",
"hint": "启用后,会根据触发概率主动回复群聊内的对话。QQ官方API(qq_official)不可用",
},
"method": {
"description": "回复方法",

View File

@@ -25,9 +25,8 @@ class AstrBotCoreLifecycle:
self.astrbot_config = astrbot_config
self.db = db
if self.astrbot_config['http_proxy']:
os.environ['https_proxy'] = self.astrbot_config['http_proxy']
os.environ['http_proxy'] = self.astrbot_config['http_proxy']
os.environ['https_proxy'] = self.astrbot_config['http_proxy']
os.environ['http_proxy'] = self.astrbot_config['http_proxy']
async def initialize(self):
logger.info("AstrBot v"+ VERSION)

View File

@@ -83,6 +83,8 @@ class LLMRequestSubStage(Stage):
# text completion
event.set_result(MessageEventResult().message(llm_response.completion_text)
.set_result_content_type(ResultContentType.LLM_RESULT))
elif llm_response.role == 'err':
event.set_result(MessageEventResult().message(f"AstrBot 请求失败。\n错误信息: {llm_response.completion_text}"))
elif llm_response.role == 'tool':
# function calling
function_calling_result = {}

View File

@@ -103,8 +103,12 @@ class ResultDecorateStage:
if url:
result.chain = [Image.fromURL(url)]
# at 回复
if self.reply_with_mention and event.get_message_type() != MessageType.FRIEND_MESSAGE:
result.chain.insert(0, At(qq=event.get_sender_id(), name=event.get_sender_name()))
if len(result.chain) > 1 and isinstance(result.chain[1], Plain):
result.chain[1].text = "\n" + result.chain[1].text
# 引用回复
if self.reply_with_quote:
result.chain.insert(0, Reply(id=event.message_obj.message_id))

View File

@@ -95,6 +95,8 @@ class SimpleGewechatClient():
if f'<atuserlist><![CDATA[,{abm.self_id}]]>' in msg_source \
or f'<atuserlist><![CDATA[{abm.self_id}]]>' in msg_source:
at_me = True
if '在群聊中@了你' in d.get('PushContent', ''):
at_me = True
else:
abm.type = MessageType.FRIEND_MESSAGE
user_id = from_user_name

View File

@@ -190,7 +190,8 @@ class ProviderGoogleGenAI(Provider):
llm_response.role = "tool"
llm_response.tools_call_args.append(candidate['functionCall']['args'])
llm_response.tools_call_name.append(candidate['functionCall']['name'])
llm_response.completion_text = llm_response.completion_text.strip()
return llm_response

View File

@@ -1,6 +1,6 @@
import base64
import json
import re
import os
from openai import AsyncOpenAI, NOT_GIVEN
from openai.types.chat.chat_completion import ChatCompletion
@@ -94,26 +94,11 @@ class ProviderOpenAIOfficial(Provider):
if tool_list:
payloads['tools'] = tool_list
completion = None
try:
completion = await self.client.chat.completions.create(
**payloads,
stream=False
)
except BaseException as e:
# 处理不支持 Function Calling 的模型
if 'does not support Function Calling' in str(e) \
or 'does not support tools' in str(e) \
or 'Function call is not supported' in str(e): # siliconcloud
del payloads['tools']
logger.debug(f"模型 {self.model_name} 不支持 tools已自动移除")
completion = await self.client.chat.completions.create(
**payloads,
stream=False
)
else:
raise e
completion = await self.client.chat.completions.create(
**payloads,
stream=False
)
assert isinstance(completion, ChatCompletion)
logger.debug(f"completion: {completion}")
@@ -168,32 +153,86 @@ class ProviderOpenAIOfficial(Provider):
"messages": context_query,
**self.provider_config.get("model_config", {})
}
llm_response = None
try:
llm_response = await self._query(payloads, func_tool)
if kwargs.get("persist", True):
await self.save_history(contexts, new_record, session_id, llm_response)
return llm_response
except Exception as e:
if "maximum context length" in str(e):
# 重试 10 次
retry_cnt = 10
while retry_cnt > 0:
logger.warning("上下文长度超过限制。尝试弹出最早的记录然后重试。")
try:
await self.pop_record(session_id)
llm_response = await self._query(payloads, func_tool)
if kwargs.get("persist", True):
await self.save_history(contexts, new_record, session_id, llm_response)
return llm_response
break
except Exception as e:
if "maximum context length" in str(e):
retry_cnt -= 1
else:
raise e
if retry_cnt == 0:
llm_response = LLMResponse("err", "err: 请尝试 /reset 清除会话记录。")
elif "The model is not a VLM" in str(e): # siliconcloud
# 尝试删除所有 image
print(context_query)
new_contexts = await self._remove_image_from_context(context_query)
print(new_contexts)
payloads['messages'] = new_contexts
llm_response = await self._query(payloads, func_tool)
elif 'does not support Function Calling' in str(e) \
or 'does not support tools' in str(e) \
or 'Function call is not supported' in str(e) \
or 'Tool calling is not supported' in str(e): # siliconcloud
logger.info(f"{self.get_model()} 不支持函数调用工具调用,已经自动去除")
if 'tools' in payloads:
del payloads['tools']
llm_response = await self._query(payloads, None)
else:
logger.error(f"发生了错误。Provider 配置如下: {self.provider_config}")
if 'tool' in str(e).lower() and 'support' in str(e).lower():
logger.error(f"疑似该模型不支持函数调用工具调用。请输入 /tool off_all")
if 'Connection error.' in str(e):
proxy = os.environ.get("http_proxy", None)
if proxy:
logger.error(f"可能为代理原因,请检查代理是否正常。当前代理: {proxy}")
raise e
if kwargs.get("persist", True) and llm_response:
await self.save_history(contexts, new_record, session_id, llm_response)
return llm_response
async def _remove_image_from_context(self, contexts: List):
'''
从上下文中删除所有带有 image 的记录
'''
new_contexts = []
flag = False
for context in contexts:
if flag:
flag = False # 删除 image 后下一条LLM 响应)也要删除
continue
if isinstance(context['content'], list):
flag = True
# continue
new_content = []
for item in context['content']:
if isinstance(item, dict) and 'image_url' in item:
continue
new_content.append(item)
if not new_content:
# 用户只发了图片
new_content = [{"type": "text", "text": "[图片]"}]
context['content'] = new_content
new_contexts.append(context)
return new_contexts
async def save_history(self, contexts: List, new_record: dict, session_id: str, llm_response: LLMResponse):
if llm_response.role == "assistant" and session_id:

View File

@@ -56,6 +56,7 @@ class PluginRoute(Route):
try:
logger.info(f"正在安装插件 {repo_url}")
await self.plugin_manager.install_plugin(repo_url)
self.core_lifecycle.restart()
logger.info(f"安装插件 {repo_url} 成功。")
return Response().ok(None, "安装成功。").__dict__
except Exception as e:
@@ -70,6 +71,7 @@ class PluginRoute(Route):
file_path = f"data/temp/{file.filename}"
await file.save(file_path)
await self.plugin_manager.install_plugin_from_file(file_path)
self.core_lifecycle.restart()
logger.info(f"安装插件 {file.filename} 成功")
return Response().ok(None, "安装成功。").__dict__
except Exception as e:
@@ -94,6 +96,7 @@ class PluginRoute(Route):
try:
logger.info(f"正在更新插件 {plugin_name}")
await self.plugin_manager.update_plugin(plugin_name)
self.core_lifecycle.restart()
logger.info(f"更新插件 {plugin_name} 成功。")
return Response().ok(None, "更新成功。").__dict__
except Exception as e:

12
changelogs/v3.4.18.md Normal file
View File

@@ -0,0 +1,12 @@
# What's Changed
- fix: 修复主动概率回复关闭后仍然回复的问题 #317
- fix: 尝试修复 gewechat 群聊收不到 at 的回复 #294
- perf: 移除了默认人格
- fix: 修复HTTP代理删除后不生效 #319
- fix: 调用Gemini API输出多余空行问题 #318
- feat: 添加硅基流动模版
- fix: 硅基流动 not a vlm 和 tool calling not supported 报错 #305 #291
- perf: 回复时艾特发送者之后添加空格或换行 #312
- fix: docker容器内时区不对导致 reminder 时间错误
- perf: siliconcloud 不支持 tool 的模型

View File

@@ -27,6 +27,7 @@ class LongTermMemory:
self.image_caption_prompt = self.config["image_caption_prompt"]
self.active_reply = self.config["active_reply"]
self.enable_active_reply = self.active_reply.get("enable", False)
self.ar_method = self.active_reply["method"]
self.ar_possibility = self.active_reply["possibility_reply"]
self.ar_prompt = self.active_reply.get("prompt", "")
@@ -51,7 +52,7 @@ class LongTermMemory:
return response.completion_text
async def need_active_reply(self, event: AstrMessageEvent) -> bool:
if not self.active_reply:
if not self.enable_active_reply:
return False
if event.get_message_type() != MessageType.GROUP_MESSAGE:
return False

View File

@@ -91,7 +91,7 @@ class Main(star.Star):
active = " (启用)" if tool.active else "(停用)"
msg += f"- {tool.name}: {tool.description} {active}\n"
msg += "\n使用 /tool on/off <工具名> 激活或者停用工具。"
msg += "\n使用 /tool on/off <工具名> 激活或者停用函数工具。/tool off_all 停用所有函数工具。"
event.set_result(MessageEventResult().message(msg).use_t2i(False))
@tool.command("on")
@@ -107,6 +107,13 @@ class Main(star.Star):
event.set_result(MessageEventResult().message(f"停用工具 {tool_name} 成功。"))
else:
event.set_result(MessageEventResult().message(f"停用工具 {tool_name} 失败,未找到此工具。"))
@tool.command("off_all")
async def tool_all_off(self, event: AstrMessageEvent):
tm = self.context.get_llm_tool_manager()
for tool in tm.func_list:
self.context.deactivate_llm_tool(tool.name)
event.set_result(MessageEventResult().message(f"停用所有工具成功。"))
@filter.command("plugin")
async def plugin(self, event: AstrMessageEvent, oper1: str = None, oper2: str = None):