Compare commits

...

73 Commits

Author SHA1 Message Date
Soulter
4e2154feb7 fix(ci): repository name must be lowercase 2025-11-20 23:46:34 +08:00
Soulter
604958898c chore: bump version to 4.6.0 2025-11-20 23:41:20 +08:00
Soulter
a093f5ad0a fix(dependencies): specify upper version limit for google-genai 2025-11-20 23:32:05 +08:00
Soulter
a7e9a7f30c fix(gemini): ensure extra_content is not empty before processing 2025-11-20 23:30:19 +08:00
Soulter
5d1e9de096 Merge pull request #3678 from AstrBotDevs/refactor/webchat-session
refactor: Implement WebChat session management and migration
2025-11-20 17:23:10 +08:00
Soulter
89da4eb747 Merge branch 'master' into refactor/webchat-session 2025-11-20 17:21:48 +08:00
Soulter
8899a1dee1 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:19:45 +08:00
Soulter
384a687ec3 delete: remove useConversations composable 2025-11-20 17:15:47 +08:00
Soulter
70cfdd2f8b feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:15:04 +08:00
Soulter
bdbd2f009a delete: useConversations 2025-11-20 17:11:01 +08:00
Soulter
164e0d26e0 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:10:36 +08:00
Soulter
cb087b5ff9 refactor: update timestamp handling in session management and chat components 2025-11-20 17:02:01 +08:00
Soulter
1d3928d145 refactor(sqlite): remove auto-generation of session_id in insert method 2025-11-20 16:33:57 +08:00
Soulter
6dc3d161e7 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 16:30:05 +08:00
Soulter
e9805ba205 fix: anyio.ClosedResourceError when calling mcp tools (#3700)
* fix: anyio.ClosedResourceError when calling mcp tools

added reconnect mechanism

fixes: 3676

* fix(mcp_client): implement thread-safe reconnection using asyncio.Lock
2025-11-20 16:24:02 +08:00
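The two fixes above describe a reconnect-on-failure pattern. A minimal sketch of that idea, assuming a `session` object exposing `call_tool` and a private `_connect()` that rebuilds it (names hypothetical, not AstrBot's actual client):

```python
import asyncio

import anyio


class McpClientSketch:
    """Illustrative only: retry a tool call once after rebuilding a closed session."""

    def __init__(self) -> None:
        self._reconnect_lock = asyncio.Lock()  # serializes concurrent reconnects
        self._generation = 0                   # bumped on every successful reconnect
        self.session = None                    # MCP session, created by _connect()

    async def _connect(self) -> None:
        """Placeholder: open the transport and initialize self.session."""
        ...

    async def call_tool(self, name: str, arguments: dict):
        gen = self._generation
        try:
            return await self.session.call_tool(name, arguments)
        except anyio.ClosedResourceError:
            async with self._reconnect_lock:
                # Another waiter may already have reconnected while we blocked.
                if self._generation == gen:
                    await self._connect()
                    self._generation += 1
            return await self.session.call_tool(name, arguments)
```

The lock is what makes the retry safe under concurrency: many tool calls can fail at once, but only the first one rebuilds the session.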
Dt8333
d5280dcd88 fix(core.platform): fix message mix-ups when multiple WeCom intelligent bot adapters are enabled (#3693)
* fix(core.platform): fix message mix-ups when multiple WeCom intelligent bot adapters are enabled

Removed the global message queue in favor of one queue per adapter, and updated the related methods to match.

#3673

* chore: apply suggestions from code review

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>

---------

Co-authored-by: Soulter <37870767+Soulter@users.noreply.github.com>
Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>
2025-11-20 16:24:02 +08:00
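The queue change above is the crux of the fix: a module-level queue let one bot's worker consume another bot's callbacks. A hedged sketch of the per-instance layout (class and method names hypothetical):

```python
import asyncio


class WecomBotAdapterSketch:
    """Illustrative only: each adapter instance owns its own message queue."""

    def __init__(self, adapter_id: str) -> None:
        self.adapter_id = adapter_id
        self._queue: asyncio.Queue = asyncio.Queue()  # was: one global queue

    async def on_callback(self, payload: dict) -> None:
        await self._queue.put(payload)         # producer: this bot's webhook only

    async def run(self) -> None:
        while True:
            payload = await self._queue.get()  # consumer: this bot's worker only
            await self.handle_message(payload)

    async def handle_message(self, payload: dict) -> None:
        ...
```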
Dt8333
67a9663eff fix(dashboard.i18n): complete the missing i18n keys (#3699)
#3679
2025-11-20 16:24:02 +08:00
Soulter
77dd89b8eb feat: add support for gemini-3 series thought signature (#3698)
* feat: add support for gemini-3 series thought signature

* feat: refactor tools_call_extra_content to use a dictionary for better structure
2025-11-20 16:24:02 +08:00
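Gemini 3 models return an opaque `thought_signature` alongside tool calls that must be echoed back on the follow-up request. A hedged sketch of the dictionary shape the refactor suggests (attribute access follows the google-genai response layout loosely; not AstrBot's exact code):

```python
def collect_extra_content(parts) -> dict[str, dict]:
    """Map each tool call to the extra content that must travel with it."""
    extra: dict[str, dict] = {}
    for part in parts:  # parts of one google-genai response candidate
        fc = getattr(part, "function_call", None)
        if fc is not None:
            key = getattr(fc, "id", None) or fc.name
            extra[key] = {"thought_signature": getattr(part, "thought_signature", None)}
    return extra
```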
Soulter
8e511bf14b fix: build docker ci failed 2025-11-20 16:24:02 +08:00
Soulter
164a4226ea feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 16:07:09 +08:00
Soulter
6d6fefc435 fix: anyio.ClosedResourceError when calling mcp tools (#3700)
* fix: anyio.ClosedResourceError when calling mcp tools

added reconnect mechanism

fixes: 3676

* fix(mcp_client): implement thread-safe reconnection using asyncio.Lock
2025-11-20 16:01:22 +08:00
Soulter
aa59532287 refactor: implement migration for WebChat sessions by creating PlatformSession records from platform_message_history 2025-11-20 15:58:27 +08:00
Dt8333
8488c9aeab fix(core.platform): fix message mix-ups when multiple WeCom intelligent bot adapters are enabled (#3693)
* fix(core.platform): fix message mix-ups when multiple WeCom intelligent bot adapters are enabled

Removed the global message queue in favor of one queue per adapter, and updated the related methods to match.

#3673

* chore: apply suggestions from code review

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>

---------

Co-authored-by: Soulter <37870767+Soulter@users.noreply.github.com>
Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>
2025-11-19 21:44:38 +08:00
Dt8333
676f9fd4ff fix(dashboard.i18n): complete the missing i18n keys (#3699)
#3679
2025-11-19 21:36:34 +08:00
Soulter
1935ce4700 refactor: update session handling by replacing conversation_id with session_id in chat routes and components 2025-11-19 19:54:29 +08:00
Soulter
e760956353 refactor: enhance PlatformSession migration by adding display_name from Conversations and improve session item styling 2025-11-19 19:41:57 +08:00
Soulter
be3e5f3f8b refactor: update message history deletion logic to remove newer records based on offset 2025-11-19 19:41:25 +08:00
Soulter
cdf617feac refactor: optimize WebChat session migration by batch inserting records 2025-11-19 19:16:15 +08:00
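A hedged sketch of that batching idea, using sqlite3 and hypothetical table and column names rather than AstrBot's actual schema; the point is a few `executemany` round-trips instead of one INSERT per migrated session:

```python
import sqlite3


def migrate_webchat_sessions(conn: sqlite3.Connection, batch_size: int = 500) -> None:
    rows = conn.execute(
        "SELECT DISTINCT session_id, user_id FROM platform_message_history"
    ).fetchall()
    for i in range(0, len(rows), batch_size):
        conn.executemany(
            "INSERT OR IGNORE INTO platform_session (session_id, creator) VALUES (?, ?)",
            rows[i : i + batch_size],
        )
    conn.commit()
```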
Soulter
afb56cf707 feat: add support for gemini-3 series thought signature (#3698)
* feat: add support for gemini-3 series thought signature

* feat: refactor tools_call_extra_content to use a dictionary for better structure
2025-11-19 18:54:56 +08:00
Soulter
cd2556ab94 fix: build docker ci failed 2025-11-19 15:40:41 +08:00
Soulter
cf4a5d9ea4 refactor: change to platform session 2025-11-18 22:37:55 +08:00
Soulter
0747099cac fix: restore migration check for version 4.7 2025-11-18 22:07:43 +08:00
Soulter
323ec29b02 refactor: Implement WebChat session management and migration from version 4.6 to 4.7
- Added WebChatSession model for managing user sessions.
- Introduced methods for creating, retrieving, updating, and deleting WebChat sessions in the database.
- Updated core lifecycle to include migration from version 4.6 to 4.7, creating WebChat sessions from existing platform message history.
- Refactored chat routes to support new session-based architecture, replacing conversation-related endpoints with session endpoints.
- Updated frontend components to handle sessions instead of conversations, including session creation and management.
2025-11-18 22:04:26 +08:00
magisk317
ae81d70685 ci(docker-build): build nightly image everyday (#3120)
* ci: build test image on master pushes

* ci: split workflows for master test and release builds

* test ci

* test ci

* Update docker-image.yml

* test ci

Updated README to enhance deployment instructions.

* Make GHCR publishing optional in Docker workflow

* chore: Update DockerHub password secret in workflow

* Update .github/workflows/docker-image.yml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* chore: rename job to build nightly image in workflow

* feat: schedule the nightly build at 0:00 am everyday, if have new commits

* fix: update build-nightly-image job to trigger only on schedule events

* Update fetch-depth and enable fetch-tag in workflows

---------

Co-authored-by: Soulter <37870767+Soulter@users.noreply.github.com>
Co-authored-by: LIghtJUNction <lightjunction.me@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Soulter <905617992@qq.com>
2025-11-18 10:47:58 +08:00
RC-CHN
270c89c12f feat: Add URL document parser for knowledge base (#3622)
* feat: add document upload from a URL, with progress callbacks and error handling

* feat: add the frontend for uploading documents from a URL

* chore: add a warning for the URL upload feature to ensure users configure it correctly

* feat: add content cleaning, with cleaning settings and provider selection when uploading documents from a URL

* feat: update the content-cleaning system prompt with stronger information-extraction rules; mark the URL upload feature as beta

* style: format code

* perf: refine upload settings, tightening the disable logic and cleaning-provider validation for URL uploads

* refactor: use the built-in chunking module

* refactor: extract the prompt into a separate file

* feat: add a Tavily API Key configuration dialog to improve web-search configuration

* fix: update URL hint and warning messages for clarity in knowledge base upload settings

* fix: fix hot reload when setting tavily_key

---------

Co-authored-by: Soulter <905617992@qq.com>
2025-11-17 19:05:14 +08:00
Soulter
c7a58252fe feat: supports knowledge base agentic search (#3667)
* feat: supports knowledge base agentic search

* fix: correct formatting of system prompt in knowledge base results
2025-11-17 17:29:18 +08:00
Soulter
47ad8c86e5 docs: update translations of README 2025-11-17 12:50:01 +08:00
Soulter
937e879e5e chore: revise the issue template
Updated the bug report template to include English translations for all fields and improved clarity.
2025-11-17 11:35:24 +08:00
Soulter
1ecf26eead chore: revise PR template
Removed unnecessary comments and streamlined the pull request template.
2025-11-17 11:27:48 +08:00
Soulter
adbb84530a chore: bump version to 4.5.8 2025-11-17 09:58:02 +08:00
piexian
6cf169f4f2 fix: ImageURLPart typo (#3665)
* fix the format mismatch introduced by the new version update

Format produced by entities.py: {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}}
Format expected by ImageURLPart: {"type": "image_url", "image_url": "data:image/jpeg;base64,..."}

* Revert "fix the format mismatch introduced by the new version update"

This reverts commit 28b4791391.

* fix(core.agent): fix the ImageURLPart declaration so pydantic validation no longer fails.

---------

Co-authored-by: piexian <piexian@users.noreply.github.com>
Co-authored-by: Dt8333 <lb0016@foxmail.com>
2025-11-17 09:52:31 +08:00
Soulter
5ab9ea12c0 chore: bump version to 4.5.7 2025-11-16 14:01:25 +08:00
Soulter
fd9cb703db refactor: update ToolSet initialization to use Pydantic Field and clean up deprecated methods in Context 2025-11-16 12:13:11 +08:00
Soulter
388c1ab16d fix: ensure parameter properties are correctly handled in spec_to_func 2025-11-16 11:50:58 +08:00
Soulter
f867c2a271 feat: enhance parameter type handling in LLM tool registration with JSON schema support (#3655)
* feat: enhance parameter type handling in LLM tool registration with JSON schema support

* refactor: remove debug print statement from FunctionToolManager
2025-11-16 00:55:40 +08:00
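Together with the `spec_to_func` fix above, these commits concern turning Python function signatures into tool parameter schemas. A hedged sketch of annotation-to-JSON-schema mapping (AstrBot's actual handling is richer; the helper below is hypothetical):

```python
import inspect

_PY_TO_JSON = {str: "string", int: "integer", float: "number",
               bool: "boolean", list: "array", dict: "object"}


def build_parameters_schema(func) -> dict:
    """Derive an OpenAI-style parameters schema from a function signature."""
    props, required = {}, []
    for name, param in inspect.signature(func).parameters.items():
        ann = param.annotation
        # A dict annotation is treated as an inline JSON schema; plain types map.
        props[name] = ann if isinstance(ann, dict) else {
            "type": _PY_TO_JSON.get(ann, "string")
        }
        if param.default is inspect.Parameter.empty:
            required.append(name)
    return {"type": "object", "properties": props, "required": required}
```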
Soulter
605bb2cb90 refactor: disable debug logging for chunk delta in OpenAI provider 2025-11-15 22:29:06 +08:00
Soulter
5ea15dde5a feat: enhance LLM handoff tool execution with system prompt and run hooks 2025-11-15 22:26:13 +08:00
Soulter
3ca545c4c7 Merge pull request #3636 from AstrBotDevs/feat/context-llm-capability
refactor: better invoke the LLM / Agent capabilities
2025-11-15 21:41:42 +08:00
Soulter
e200835074 refactor: remove unused Message import and context_model initialization in LLMRequestSubStage 2025-11-15 21:36:54 +08:00
Soulter
3a90348353 Merge branch 'master' into feat/context-llm-capability 2025-11-15 21:34:54 +08:00
Soulter
5a11d8f0ee refactor: LLM response handling with reasoning content (#3632)
* refactor: LLM response handling with reasoning content

- Added a `show_reasoning` parameter to `run_agent` to control the display of reasoning content.
- Updated `LLMResponse` to include a `reasoning_content` field for storing reasoning text.
- Modified `WebChatMessageEvent` to handle and send reasoning content in streaming responses.
- Implemented reasoning extraction in various provider sources (e.g., OpenAI, Gemini).
- Updated the chat interface to display reasoning content in a collapsible format.
- Removed the deprecated `thinking_filter` package and its associated logic.
- Updated localization files to include new reasoning-related strings.

* feat: add Groq chat completion provider and associated configurations

* Update astrbot/core/provider/sources/gemini_source.py

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>

---------

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>
2025-11-15 21:31:03 +08:00
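On the extraction side, many OpenAI-compatible backends (DeepSeek, for example) stream the chain of thought in a non-standard `reasoning_content` delta field. A hedged sketch of splitting one streamed chunk, not AstrBot's exact provider code:

```python
def split_delta(delta) -> tuple[str, str]:
    """Return (reasoning_text, answer_text) for one streamed chunk delta."""
    reasoning = getattr(delta, "reasoning_content", None) or ""
    answer = getattr(delta, "content", None) or ""
    return reasoning, answer
```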
Soulter
824af5eeea fix: Provider.meta() error (#3647)
fixes: #3643
2025-11-15 21:30:05 +08:00
Dt8333
08ec787491 fix(core.platform): make DingTalk user-ID compliant with UMO (#3634) 2025-11-15 21:30:05 +08:00
Soulter
b062e83d54 refactor: remove redundant session lock management from message sending logic in RespondStage (#3645)
fixes: #3644

Co-authored-by: Dt8333 <lb0016@foxmail.com>
2025-11-15 21:30:05 +08:00
Soulter
17422ba9c3 feat: introduce messages field in agent RunContext 2025-11-15 21:15:20 +08:00
Soulter
6849af2bad refactor: LLM response handling with reasoning content (#3632)
* refactor: LLM response handling with reasoning content

- Added a `show_reasoning` parameter to `run_agent` to control the display of reasoning content.
- Updated `LLMResponse` to include a `reasoning_content` field for storing reasoning text.
- Modified `WebChatMessageEvent` to handle and send reasoning content in streaming responses.
- Implemented reasoning extraction in various provider sources (e.g., OpenAI, Gemini).
- Updated the chat interface to display reasoning content in a collapsible format.
- Removed the deprecated `thinking_filter` package and its associated logic.
- Updated localization files to include new reasoning-related strings.

* feat: add Groq chat completion provider and associated configurations

* Update astrbot/core/provider/sources/gemini_source.py

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>

---------

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>
2025-11-15 18:59:17 +08:00
Soulter
09c3da64f9 fix: Provider.meta() error (#3647)
fixes: #3643
2025-11-15 18:01:51 +08:00
Dt8333
2c8470e8ac fix(core.platform): make DingTalk user-ID compliant with UMO (#3634) 2025-11-15 17:31:03 +08:00
Soulter
c4ea3db73d refactor: remove redundant session lock management from message sending logic in RespondStage (#3645)
fixes: #3644

Co-authored-by: Dt8333 <lb0016@foxmail.com>
2025-11-15 16:39:49 +08:00
Soulter
89e79863f6 fix: ensure image_urls and system_prompt default to empty values in ProviderRequest 2025-11-14 22:45:55 +08:00
Soulter
d19945009f refactor: decouple the agent implementation and introduce helper context methods for calling the LLM 2025-11-14 19:17:24 +08:00
Soulter
c77256ee0e feat: add id field to ProviderMetaData and update provider manager to set provider ID 2025-11-14 12:35:30 +08:00
Soulter
7d823af627 refactor: update provider metadata handling and enhance ProviderMetaData structure 2025-11-13 19:53:23 +08:00
Soulter
3957861878 refactor: streamline llm processing logic (#3607)
* refactor: streamline llm processing logic

* perf: merge-nested-ifs

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>

* fix: ruff format

* refactor: remove unnecessary debug logs in FunctionToolExecutor and LLMRequestSubStage

---------

Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com>
2025-11-13 10:08:57 +08:00
Dt8333
6ac43c600e perf: improve streaming fallback strategy for platforms without streaming support (#3547)
* feat: modify tool_loop_agent_runner, adding a stream_to_general attribute.

Co-authored-by: aider (openai/gemini-2.5-flash-preview) <aider@aider.chat>

* refactor: optimize text_chat_stream to yield the complete message directly

Co-authored-by: aider (openai/gemini-2.5-flash-preview) <aider@aider.chat>

* feat(core): add a streaming_fallback option allowing streaming requests with non-streamed output

Added a streaming_fallback config, defaulting to false. A new PlatformMetadata field marks whether a platform supports true streaming output, and LLMRequest now checks whether the fallback is enabled.

#3431 #2793 #3014

* refactor(core): move stream_to_general out of toolLoopAgentRunner

* refactor(core.platform): rename the attribute in metadata

* fix: update streaming provider settings descriptions and add conditions

* fix: update streaming configuration to use unsupported_streaming_strategy and adjust related logic

* fix: remove support_streaming_message flag from WecomAIBotAdapter registration

* fix: update hint for non-streaming platform handling in configuration

* fix(core.pipeline): Update astrbot/core/pipeline/process_stage/method/llm_request.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix(core.pipeline): Update astrbot/core/pipeline/process_stage/method/llm_request.py

---------

Co-authored-by: aider (openai/gemini-2.5-flash-preview) <aider@aider.chat>
Co-authored-by: Soulter <37870767+Soulter@users.noreply.github.com>
Co-authored-by: Soulter <905617992@qq.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-12 18:01:20 +08:00
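The fallback itself amounts to draining the stream and sending one ordinary message. A minimal sketch reusing the `stream_to_general` name from the commit (the real logic sits in the pipeline and handles richer chunk types):

```python
from typing import AsyncIterator


async def stream_to_general(chunks: AsyncIterator[str]) -> str:
    """Consume a streaming LLM response; emit one complete message."""
    parts: list[str] = []
    async for chunk in chunks:
        parts.append(chunk)   # the upstream request can still stream
    return "".join(parts)     # sent as a single non-streamed reply
```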
RC-CHN
27af9ebb6b feat: changelog display improvement
* feat: add a modal for older versions' changelogs

* style: adjust the release-notes dialog styling and remove the background color
2025-11-12 14:54:03 +08:00
Soulter
b360c8446e feat: add default model selection chip in provider model selector 2025-11-10 13:04:28 +08:00
Soulter
6d00717655 feat: add streaming support with toggle in chat interface and adjust layout for mobile 2025-11-09 21:57:30 +08:00
Soulter
bb5f06498e perf: refine login page 2025-11-09 20:57:45 +08:00
Dt8333
aca5743ab6 feat: add the missing send_streaming method to some adapters (#3545)
Adds the missing method to Wechatpadpro and discord.
2025-11-09 16:00:24 +08:00
Soulter
6903032f7e fix: improve knowledge base chip display with truncation and styling (#3582)
fixes: #3546
2025-11-09 15:30:41 +08:00
nazo
1ce0ff87bd feat: supports to add custom headers for openai providers (#3581)
* feat: support custom request headers for OpenAI-family providers

* chore: add custom headers and extra body to config for zhipu

---------

Co-authored-by: Soulter <37870767+Soulter@users.noreply.github.com>
2025-11-09 15:12:52 +08:00
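For illustration: the OpenAI Python SDK already accepts per-client default headers, so a provider's configured headers can plausibly be forwarded like this (header names and URL are made up):

```python
from openai import AsyncOpenAI

client = AsyncOpenAI(
    api_key="sk-...",
    base_url="https://example.com/v1",
    default_headers={"X-Custom-Auth": "token"},  # sent with every request
)
```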
Soulter
e39d6bae0b fix: update JSON submission link in plugin publish template 2025-11-09 15:06:40 +08:00
117 changed files with 5872 additions and 3352 deletions

View File

@@ -16,7 +16,7 @@ body:
请将插件信息填写到下方的 JSON 代码块中。其中 `tags`(插件标签)和 `social_link`(社交链接)选填。
不熟悉 JSON ?可以从 [此](https://plugins.astrbot.app/submit) 生成 JSON ,生成后记得复制粘贴过来.
不熟悉 JSON ?可以从 [此](https://plugins.astrbot.app) 右下角提交。
- type: textarea
id: plugin-info

View File

@@ -1,46 +1,44 @@
name: '🐛 报告 Bug'
name: '🐛 Report Bug / 报告 Bug'
title: '[Bug]'
description: 提交报告帮助我们改进。
description: Submit bug report to help us improve. / 提交报告帮助我们改进。
labels: [ 'bug' ]
body:
- type: markdown
attributes:
value: |
感谢您抽出时间报告问题!请准确解释您的问题。如果可能,请提供一个可复现的片段(这有助于更快地解决问题)。请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
Thank you for taking the time to report this issue! Please describe your problem accurately. If possible, please provide a reproducible snippet (this will help resolve the issue more quickly). Please note that issues that are not detailed or have no logs will be closed immediately. Thank you for your understanding. / 感谢您抽出时间报告问题!请准确解释您的问题。如果可能,请提供一个可复现的片段(这有助于更快地解决问题)。请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
- type: textarea
attributes:
label: 发生了什么
description: 描述你遇到的异常
label: What happened / 发生了什么
description: Description
placeholder: >
一个清晰且具体的描述这个异常是什么。请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
Please provide a clear and specific description of what this exception is. Please note that issues that are not detailed or have no logs will be closed immediately. Thank you for your understanding. / 一个清晰且具体的描述这个异常是什么。请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
validations:
required: true
- type: textarea
attributes:
label: 如何复现?
label: Reproduce / 如何复现?
description: >
复现该问题的步骤
The steps to reproduce the issue. / 复现该问题的步骤
placeholder: >
: 1. 打开 '...'
Example: 1. Open '...'
validations:
required: true
- type: textarea
attributes:
label: AstrBot 版本、部署方式(如 Windows Docker Desktop 部署)、使用的提供商、使用的消息平台适配器
description: >
请提供您的 AstrBot 版本和部署方式。
label: AstrBot version, deployment method (e.g., Windows Docker Desktop deployment), provider used, and messaging platform used. / AstrBot 版本、部署方式(如 Windows Docker Desktop 部署)、使用的提供商、使用的消息平台适配器
placeholder: >
如: 3.1.8 Docker, 3.1.7 Windows启动器
Example: 4.5.7 Docker, 3.1.7 Windows Launcher
validations:
required: true
- type: dropdown
attributes:
label: 操作系统
label: OS
description: |
你在哪个操作系统上遇到了这个问题?
On which operating system did you encounter this problem? / 你在哪个操作系统上遇到了这个问题?
multiple: false
options:
- 'Windows'
@@ -53,30 +51,30 @@ body:
- type: textarea
attributes:
label: 报错日志
label: Logs / 报错日志
description: >
如报错日志、截图等。请提供完整的 Debug 级别的日志,不要介意它很长!请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
Please provide complete Debug-level logs, such as error logs and screenshots. Don't worry if they're long! Please note that issues with insufficient details or no logs will be closed immediately. Thank you for your understanding. / 如报错日志、截图等。请提供完整的 Debug 级别的日志,不要介意它很长!请注意,不详细 / 没有日志的 issue 会被直接关闭,谢谢理解。
placeholder: >
请提供完整的报错日志或截图。
Please provide a complete error log or screenshot. / 请提供完整的报错日志或截图。
validations:
required: true
- type: checkboxes
attributes:
label: 你愿意提交 PR 吗?
label: Are you willing to submit a PR? / 你愿意提交 PR 吗?
description: >
这不是必需的,但我们很乐意在贡献过程中为您提供指导特别是如果你已经很好地理解了如何实现修复。
This is not required, but we would be happy to provide guidance during the contribution process, especially if you already have a good understanding of how to implement the fix. / 这不是必需的,但我们很乐意在贡献过程中为您提供指导特别是如果你已经很好地理解了如何实现修复。
options:
- label: 是的,我愿意提交 PR!
- label: Yes!
- type: checkboxes
attributes:
label: Code of Conduct
options:
- label: >
我已阅读并同意遵守该项目的 [行为准则](https://docs.github.com/zh/site-policy/github-terms/github-community-code-of-conduct)。
I have read and agree to abide by the project's [Code of Conduct](https://docs.github.com/zh/site-policy/github-terms/github-community-code-of-conduct)。
required: true
- type: markdown
attributes:
value: "感谢您填写我们的表单!"
value: "Thank you for filling out our form! / 感谢您填写我们的表单!"

View File

@@ -1,44 +1,25 @@
<!-- 如果有的话,请指定此 PR 旨在解决的 ISSUE 编号。 -->
<!-- If applicable, please specify the ISSUE number this PR aims to resolve. -->
fixes #XYZ
---
### Motivation / 动机
<!--请描述此项更改的动机:它解决了什么问题?(例如:修复了 XX 错误,添加了 YY 功能)-->
<!--Please describe the motivation for this change: What problem does it solve? (e.g., Fixes XX bug, adds YY feature)-->
<!--Please describe the motivation for this change: What problem does it solve? (e.g., Fixes XX issue, adds YY feature)-->
<!--请描述此项更改的动机:它解决了什么问题?(例如:修复了 XX issue添加了 YY 功能)-->
### Modifications / 改动点
<!--请总结你的改动:哪些核心文件被修改了?实现了什么功能?-->
<!--Please summarize your changes: What core files were modified? What functionality was implemented?-->
### Verification Steps / 验证步骤
<!--请为审查者 (Reviewer) 提供清晰、可复现的验证步骤例如1. 导航到... 2. 点击...)。-->
<!--Please provide clear and reproducible verification steps for the Reviewer (e.g., 1. Navigate to... 2. Click...).-->
- [x] This is NOT a breaking change. / 这不是一个破坏性变更。
<!-- If your changes is a breaking change, please uncheck the checkbox above -->
### Screenshots or Test Results / 运行截图或测试结果
<!--请粘贴截图、GIF 或测试日志,作为执行“验证步骤”的证据,证明此改动有效。-->
<!--Please paste screenshots, GIFs, or test logs here as evidence of executing the "Verification Steps" to prove this change is effective.-->
### Compatibility & Breaking Changes / 兼容性与破坏性变更
<!--请说明此变更的兼容性:哪些是破坏性变更?哪些地方做了向后兼容处理?是否提供了数据迁移方法?-->
<!--Please explain the compatibility of this change: What are the breaking changes? What backward-compatible measures were taken? Are data migration paths provided?-->
- [ ] 这是一个破坏性变更 (Breaking Change)。/ This is a breaking change.
- [ ] 这不是一个破坏性变更。/ This is NOT a breaking change.
<!--请粘贴截图、GIF 或测试日志,作为执行“验证步骤”的证据,证明此改动有效。-->
---
### Checklist / 检查清单
<!--如果分支被合并,您的代码将服务于数万名用户!在提交前,请核查一下几点内容。-->
<!--If merged, your code will serve tens of thousands of users! Please double-check the following items before submitting.-->
<!--如果分支被合并,您的代码将服务于数万名用户!在提交前,请核查一下几点内容。-->
- [ ] 😊 如果 PR 中有新加入的功能,已经通过 Issue / 邮件等方式和作者讨论过。/ If there are new features added in the PR, I have discussed it with the authors through issues/emails, etc.
- [ ] 👀 我的更改经过了良好的测试,**并已在上方提供了“验证步骤”和“运行截图”**。/ My changes have been well-tested, **and "Verification Steps" and "Screenshots" have been provided above**.

View File

@@ -3,18 +3,125 @@ name: Docker Image CI/CD
on:
push:
tags:
- 'v*'
- "v*"
schedule:
# Run at 00:00 UTC every day
- cron: "0 0 * * *"
workflow_dispatch:
jobs:
publish-docker:
build-nightly-image:
if: github.event_name == 'schedule'
runs-on: ubuntu-latest
env:
DOCKER_HUB_USERNAME: ${{ secrets.DOCKER_HUB_USERNAME }}
GHCR_OWNER: soulter
HAS_GHCR_TOKEN: ${{ secrets.GHCR_GITHUB_TOKEN != '' }}
steps:
- name: Pull The Codes
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0 # Must be 0 so we can fetch tags
fetch-depth: 1
fetch-tag: true
- name: Check for new commits today
if: github.event_name == 'schedule'
id: check-commits
run: |
# Get commits from the last 24 hours
commits=$(git log --since="24 hours ago" --oneline)
if [ -z "$commits" ]; then
echo "No commits in the last 24 hours, skipping build"
echo "has_commits=false" >> $GITHUB_OUTPUT
else
echo "Found commits in the last 24 hours:"
echo "$commits"
echo "has_commits=true" >> $GITHUB_OUTPUT
fi
- name: Exit if no commits
if: github.event_name == 'schedule' && steps.check-commits.outputs.has_commits == 'false'
run: exit 0
- name: Build Dashboard
run: |
cd dashboard
npm install
npm run build
mkdir -p dist/assets
echo $(git rev-parse HEAD) > dist/assets/version
cd ..
mkdir -p data
cp -r dashboard/dist data/
- name: Determine test image tags
id: test-meta
run: |
short_sha=$(echo "${GITHUB_SHA}" | cut -c1-12)
build_date=$(date +%Y%m%d)
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
echo "build_date=$build_date" >> $GITHUB_OUTPUT
- name: Set QEMU
uses: docker/setup-qemu-action@v3
- name: Set Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Login to GitHub Container Registry
if: env.HAS_GHCR_TOKEN == 'true'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ env.GHCR_OWNER }}
password: ${{ secrets.GHCR_GITHUB_TOKEN }}
- name: Build nightly image tags list
id: test-tags
run: |
TAGS="${{ env.DOCKER_HUB_USERNAME }}/astrbot:nightly-latest
${{ env.DOCKER_HUB_USERNAME }}/astrbot:nightly-${{ steps.test-meta.outputs.build_date }}-${{ steps.test-meta.outputs.short_sha }}"
if [ "${{ env.HAS_GHCR_TOKEN }}" = "true" ]; then
TAGS="$TAGS
ghcr.io/${{ env.GHCR_OWNER }}/astrbot:nightly-latest
ghcr.io/${{ env.GHCR_OWNER }}/astrbot:nightly-${{ steps.test-meta.outputs.build_date }}-${{ steps.test-meta.outputs.short_sha }}"
fi
echo "tags<<EOF" >> $GITHUB_OUTPUT
echo "$TAGS" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
- name: Build and Push Nightly Image
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.test-tags.outputs.tags }}
- name: Post build notifications
run: echo "Test Docker image has been built and pushed successfully"
build-release-image:
if: github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v'))
runs-on: ubuntu-latest
env:
DOCKER_HUB_USERNAME: ${{ secrets.DOCKER_HUB_USERNAME }}
GHCR_OWNER: soulter
HAS_GHCR_TOKEN: ${{ secrets.GHCR_GITHUB_TOKEN != '' }}
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 1
fetch-tag: true
- name: Get latest tag (only on manual trigger)
id: get-latest-tag
@@ -27,21 +134,22 @@ jobs:
if: github.event_name == 'workflow_dispatch'
run: git checkout ${{ steps.get-latest-tag.outputs.latest_tag }}
- name: Check if version is pre-release
id: check-prerelease
- name: Compute release metadata
id: release-meta
run: |
if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
version="${{ steps.get-latest-tag.outputs.latest_tag }}"
else
version="${{ github.ref_name }}"
version="${GITHUB_REF#refs/tags/}"
fi
if [[ "$version" == *"beta"* ]] || [[ "$version" == *"alpha"* ]]; then
echo "is_prerelease=true" >> $GITHUB_OUTPUT
echo "Version $version is a pre-release, will not push latest tag"
echo "Version $version marked as pre-release"
else
echo "is_prerelease=false" >> $GITHUB_OUTPUT
echo "Version $version is a stable release, will push latest tag"
echo "Version $version marked as stable"
fi
echo "version=$version" >> $GITHUB_OUTPUT
- name: Build Dashboard
run: |
@@ -67,23 +175,24 @@ jobs:
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Login to GitHub Container Registry
if: env.HAS_GHCR_TOKEN == 'true'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: Soulter
username: ${{ env.GHCR_OWNER }}
password: ${{ secrets.GHCR_GITHUB_TOKEN }}
- name: Build and Push Docker to DockerHub and Github GHCR
- name: Build and Push Release Image
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ steps.check-prerelease.outputs.is_prerelease == 'false' && format('{0}/astrbot:latest', secrets.DOCKER_HUB_USERNAME) || '' }}
${{ secrets.DOCKER_HUB_USERNAME }}/astrbot:${{ github.event_name == 'workflow_dispatch' && steps.get-latest-tag.outputs.latest_tag || github.ref_name }}
${{ steps.check-prerelease.outputs.is_prerelease == 'false' && 'ghcr.io/soulter/astrbot:latest' || '' }}
ghcr.io/soulter/astrbot:${{ github.event_name == 'workflow_dispatch' && steps.get-latest-tag.outputs.latest_tag || github.ref_name }}
${{ steps.release-meta.outputs.is_prerelease == 'false' && format('{0}/astrbot:latest', env.DOCKER_HUB_USERNAME) || '' }}
${{ steps.release-meta.outputs.is_prerelease == 'false' && env.HAS_GHCR_TOKEN == 'true' && format('ghcr.io/{0}/astrbot:latest', env.GHCR_OWNER) || '' }}
${{ format('{0}/astrbot:{1}', env.DOCKER_HUB_USERNAME, steps.release-meta.outputs.version) }}
${{ env.HAS_GHCR_TOKEN == 'true' && format('ghcr.io/{0}/astrbot:{1}', env.GHCR_OWNER, steps.release-meta.outputs.version) || '' }}
- name: Post build notifications
run: echo "Docker image has been built and pushed successfully"
run: echo "Release Docker image has been built and pushed successfully"

View File

@@ -8,7 +8,7 @@
<div>
<a href="https://trendshift.io/repositories/12875" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12875" alt="Soulter%2FAstrBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
<a href="https://hellogithub.com/repository/AstrBotDevs/AstrBot" target="_blank"><img src="https://api.hellogithub.com/v1/widgets/recommend.svg?rid=d127d50cd5e54c5382328acc3bb25483&claim_uid=ZO9by7qCXgSd6Lp&t=1" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
<a href="https://hellogithub.com/repository/AstrBotDevs/AstrBot" target="_blank"><img src="https://api.hellogithub.com/v1/widgets/recommend.svg?rid=d127d50cd5e54c5382328acc3bb25483&claim_uid=ZO9by7qCXgSd6Lp&t=2" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
</div>
<br>
@@ -42,7 +42,7 @@ AstrBot 是一个开源的一站式 Agent 聊天机器人平台及开发框架
4. **插件扩展**。深度优化的插件机制,支持[开发插件](https://astrbot.app/dev/plugin.html)扩展功能,社区插件生态丰富。
5. **WebUI**。可视化配置和管理机器人,功能齐全。
## 部署方式
## 部署方式
#### Docker 部署(推荐 🥳)

View File

@@ -1,182 +1,233 @@
<p align="center">
![6e1279651f16d7fdf4727558b72bbaf1](https://github.com/user-attachments/assets/ead4c551-fc3c-48f7-a6f7-afbfdb820512)
![AstrBot-Logo-Simplified](https://github.com/user-attachments/assets/ffd99b6b-3272-4682-beaa-6fe74250f7d9)
</p>
<div align="center">
_✨ Easy-to-use Multi-platform LLM Chatbot & Development Framework ✨_
<br>
<div>
<a href="https://trendshift.io/repositories/12875" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12875" alt="Soulter%2FAstrBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/AstrBotDevs/AstrBot)](https://github.com/AstrBotDevs/AstrBot/releases/latest)
<img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="python">
<a href="https://hub.docker.com/r/soulter/astrbot"><img alt="Docker pull" src="https://img.shields.io/docker/pulls/soulter/astrbot"/></a>
<a href="https://qm.qq.com/cgi-bin/qm/qr?k=wtbaNx7EioxeaqS9z7RQWVXPIxg2zYr7&jump_from=webapi&authKey=vlqnv/AV2DbJEvGIcxdlNSpfxVy+8vVqijgreRdnVKOaydpc+YSw4MctmEbr0k5"><img alt="Static Badge" src="https://img.shields.io/badge/QQ群-630166526-purple"></a>
[![wakatime](https://wakatime.com/badge/user/915e5316-99c6-4563-a483-ef186cf000c9/project/018e705a-a1a7-409a-a849-3013485e6c8e.svg)](https://wakatime.com/badge/user/915e5316-99c6-4563-a483-ef186cf000c9/project/018e705a-a1a7-409a-a849-3013485e6c8e)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.soulter.top%2Fastrbot%2Fstats&query=v&label=7%E6%97%A5%E6%B6%88%E6%81%AF%E4%B8%8A%E8%A1%8C%E9%87%8F&cacheSeconds=3600)
[![codecov](https://codecov.io/gh/AstrBotDevs/AstrBot/graph/badge.svg?token=FF3P5967B8)](https://codecov.io/gh/AstrBotDevs/AstrBot)
<a href="https://astrbot.app/">Documentation</a>
<a href="https://github.com/AstrBotDevs/AstrBot/issues">Issue Tracking</a>
<a href="https://hellogithub.com/repository/AstrBotDevs/AstrBot" target="_blank"><img src="https://api.hellogithub.com/v1/widgets/recommend.svg?rid=d127d50cd5e54c5382328acc3bb25483&claim_uid=ZO9by7qCXgSd6Lp&t=2" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
</div>
AstrBot is a loosely coupled, asynchronous chatbot and development framework that supports multi-platform deployment, featuring an easy-to-use plugin system and comprehensive Large Language Model (LLM) integration capabilities.
<br>
## ✨ Key Features
<div>
<img src="https://img.shields.io/github/v/release/AstrBotDevs/AstrBot?style=for-the-badge&color=76bad9" href="https://github.com/AstrBotDevs/AstrBot/releases/latest">
<img src="https://img.shields.io/badge/python-3.10+-blue.svg?style=for-the-badge&color=76bad9" alt="python">
<a href="https://hub.docker.com/r/soulter/astrbot"><img alt="Docker pull" src="https://img.shields.io/docker/pulls/soulter/astrbot.svg?style=for-the-badge&color=76bad9"/></a>
<a href="https://qm.qq.com/cgi-bin/qm/qr?k=wtbaNx7EioxeaqS9z7RQWVXPIxg2zYr7&jump_from=webapi&authKey=vlqnv/AV2DbJEvGIcxdlNSpfxVy+8vVqijgreRdnVKOaydpc+YSw4MctmEbr0k5"><img alt="QQ_community" src="https://img.shields.io/badge/QQ群-775869627-purple?style=for-the-badge&color=76bad9"></a>
<a href="https://t.me/+hAsD2Ebl5as3NmY1"><img alt="Telegram_community" src="https://img.shields.io/badge/Telegram-AstrBot-purple?style=for-the-badge&color=76bad9"></a>
<img src="https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.soulter.top%2Fastrbot%2Fplugin-num&query=%24.result&suffix=%E4%B8%AA&style=for-the-badge&label=%E6%8F%92%E4%BB%B6%E5%B8%82%E5%9C%BA&cacheSeconds=3600">
</div>
1. **LLM Conversations** - Supports various LLMs including OpenAI API, Google Gemini, Llama, Deepseek, ChatGLM, etc. Enables local model deployment via Ollama/LLMTuner. Features multi-turn dialogues, personality contexts, multimodal capabilities (image understanding), and speech-to-text (Whisper).
2. **Multi-platform Integration** - Supports QQ (OneBot), QQ Channels, WeChat (Gewechat), Feishu, and Telegram. Planned support for DingTalk, Discord, WhatsApp, and Xiaomi Smart Speakers. Includes rate limiting, whitelisting, keyword filtering, and Baidu content moderation.
3. **Agent Capabilities** - Native support for code execution, natural language TODO lists, web search. Integrates with [Dify Platform](https://dify.ai/) for easy access to Dify assistants/knowledge bases/workflows.
4. **Plugin System** - Optimized plugin mechanism with minimal development effort. Supports multiple installed plugins.
5. **Web Dashboard** - Visual configuration management, plugin controls, logging, and WebChat interface for direct LLM interaction.
6. **High Stability & Modularity** - Event bus and pipeline architecture ensures high modularization and loose coupling.
<br>
> [!TIP]
> Dashboard Demo: [https://demo.astrbot.app/](https://demo.astrbot.app/)
> Username: `astrbot`, Password: `astrbot` (LLM not configured for chat page)
<a href="https://github.com/AstrBotDevs/AstrBot/blob/master/README.md">中文</a>
<a href="https://github.com/AstrBotDevs/AstrBot/blob/master/README_ja.md">日本語</a>
<a href="https://astrbot.app/">Documentation</a>
<a href="https://blog.astrbot.app/">Blog</a>
<a href="https://astrbot.featurebase.app/roadmap">Roadmap</a>
<a href="https://github.com/AstrBotDevs/AstrBot/issues">Issue Tracker</a>
</div>
## ✨ Deployment
AstrBot is an open-source all-in-one Agent chatbot platform and development framework.
#### Docker Deployment
## Key Features
See docs: [Deploy with Docker](https://astrbot.app/deploy/astrbot/docker.html#docker-deployment)
1. **LLM Conversations**. Supports integration with various large language model services. Features include multimodal capabilities, tool calling, MCP, native knowledge base, character personas, and more.
2. **Multi-Platform Support**. Integrates with QQ, WeChat Work, WeChat Official Accounts, Feishu, Telegram, DingTalk, Discord, KOOK, and other platforms. Supports rate limiting, whitelisting, and Baidu content moderation.
3. **Agent Capabilities**. Fully optimized agentic features including multi-turn tool calling, built-in sandboxed code executor, web search, and more.
4. **Plugin Extensions**. Deeply optimized plugin mechanism supporting [plugin development](https://astrbot.app/dev/plugin.html) to extend functionality, with a rich community plugin ecosystem.
5. **Web UI**. Visual configuration and management of your bot with comprehensive features.
#### Windows Installer
## Deployment Methods
Requires Python (>3.10). See docs: [Windows Installer Guide](https://astrbot.app/deploy/astrbot/windows.html)
#### Docker Deployment (Recommended 🥳)
#### Replit Deployment
We recommend deploying AstrBot using Docker or Docker Compose.
Please refer to the official documentation: [Deploy AstrBot with Docker](https://astrbot.app/deploy/astrbot/docker.html#%E4%BD%BF%E7%94%A8-docker-%E9%83%A8%E7%BD%B2-astrbot).
#### BT-Panel Deployment
AstrBot has partnered with BT-Panel and is now available in their marketplace.
Please refer to the official documentation: [BT-Panel Deployment](https://astrbot.app/deploy/astrbot/btpanel.html).
#### 1Panel Deployment
AstrBot has been officially listed on the 1Panel marketplace.
Please refer to the official documentation: [1Panel Deployment](https://astrbot.app/deploy/astrbot/1panel.html).
#### Deploy on RainYun
AstrBot has been officially listed on RainYun's cloud application platform with one-click deployment.
[![Deploy on RainYun](https://rainyun-apps.cn-nb1.rains3.com/materials/deploy-on-rainyun-en.svg)](https://app.rainyun.com/apps/rca/store/5994?ref=NjU1ODg0)
#### Deploy on Replit
Community-contributed deployment method.
[![Run on Repl.it](https://repl.it/badge/github/AstrBotDevs/AstrBot)](https://repl.it/github/AstrBotDevs/AstrBot)
#### Windows One-Click Installer
Please refer to the official documentation: [Deploy AstrBot with Windows One-Click Installer](https://astrbot.app/deploy/astrbot/windows.html).
#### CasaOS Deployment
Community-contributed method.
See docs: [CasaOS Deployment](https://astrbot.app/deploy/astrbot/casaos.html)
Community-contributed deployment method.
Please refer to the official documentation: [CasaOS Deployment](https://astrbot.app/deploy/astrbot/casaos.html).
#### Manual Deployment
See docs: [Source Code Deployment](https://astrbot.app/deploy/astrbot/cli.html)
First, install uv:
## ⚡ Platform Support
```bash
pip install uv
```
| Platform | Status | Details | Message Types |
| -------------------------------------------------------------- | ------ | ------------------- | ------------------- |
| QQ (Official Bot) | ✔ | Private/Group chats | Text, Images |
| QQ (OneBot) | ✔ | Private/Group chats | Text, Images, Voice |
| WeChat (Personal) | ✔ | Private/Group chats | Text, Images, Voice |
| [Telegram](https://github.com/AstrBotDevs/AstrBot_plugin_telegram) | ✔ | Private/Group chats | Text, Images |
| [WeChat Work](https://github.com/AstrBotDevs/AstrBot_plugin_wecom) | ✔ | Private chats | Text, Images, Voice |
| Feishu | ✔ | Group chats | Text, Images |
| WeChat Open Platform | 🚧 | Planned | - |
| Discord | 🚧 | Planned | - |
| WhatsApp | 🚧 | Planned | - |
| Xiaomi Speakers | 🚧 | Planned | - |
Install AstrBot via Git Clone:
## Provider Support Status
```bash
git clone https://github.com/AstrBotDevs/AstrBot && cd AstrBot
uv run main.py
```
| Name | Support | Type | Notes |
|---------------------------|---------|------------------------|-----------------------------------------------------------------------|
| OpenAI API | ✔ | Text Generation | Supports all OpenAI API-compatible services including DeepSeek, Google Gemini, GLM, Moonshot, Alibaba Cloud Bailian, Silicon Flow, xAI, etc. |
| Claude API | ✔ | Text Generation | |
| Google Gemini API | ✔ | Text Generation | |
| Dify | ✔ | LLMOps | |
| DashScope (Alibaba Cloud) | ✔ | LLMOps | |
| Ollama | ✔ | Model Loader | Local deployment for open-source LLMs (DeepSeek, Llama, etc.) |
| LM Studio | ✔ | Model Loader | Local deployment for open-source LLMs (DeepSeek, Llama, etc.) |
| LLMTuner | ✔ | Model Loader | Local loading of fine-tuned models (e.g. LoRA) |
| OneAPI | ✔ | LLM Distribution | |
| Whisper | ✔ | Speech-to-Text | Supports API and local deployment |
| SenseVoice | ✔ | Speech-to-Text | Local deployment |
| OpenAI TTS API | ✔ | Text-to-Speech | |
| Fishaudio | ✔ | Text-to-Speech | Project involving GPT-Sovits author |
Or refer to the official documentation: [Deploy AstrBot from Source](https://astrbot.app/deploy/astrbot/cli.html).
# 🦌 Roadmap
## 🌍 Community
> [!TIP]
> Suggestions welcome via Issues <3
### QQ Groups
- [ ] Ensure feature parity across all platform adapters
- [ ] Optimize plugin APIs
- [ ] Add default TTS services (e.g., GPT-Sovits)
- [ ] Enhance chat features with persistent memory
- [ ] i18n Planning
- Group 1: 322154837
- Group 3: 630166526
- Group 5: 822130018
- Group 6: 753075035
- Developer Group: 975206796
## ❤️ Contributions
### Telegram Group
All Issues/PRs welcome! Simply submit your changes to this project :)
<a href="https://t.me/+hAsD2Ebl5as3NmY1"><img alt="Telegram_community" src="https://img.shields.io/badge/Telegram-AstrBot-purple?style=for-the-badge&color=76bad9"></a>
For major features, please discuss via Issues first.
### Discord Server
## 🌟 Support
<a href="https://discord.gg/hAVk6tgV36"><img alt="Discord_community" src="https://img.shields.io/badge/Discord-AstrBot-purple?style=for-the-badge&color=76bad9"></a>
- Star this project!
- Support via [Afdian](https://afdian.com/a/soulter)
- WeChat support: [QR Code](https://drive.soulter.top/f/pYfA/d903f4fa49a496fda3f16d2be9e023b5.png)
## Supported Messaging Platforms
## ✨ Demos
**Officially Maintained**
> [!NOTE]
> Code executor file I/O currently tested with Napcat(QQ)/Lagrange(QQ)
- QQ (Official Platform & OneBot)
- Telegram
- WeChat Work Application & WeChat Work Intelligent Bot
- WeChat Customer Service & WeChat Official Accounts
- Feishu (Lark)
- DingTalk
- Slack
- Discord
- Satori
- Misskey
- WhatsApp (Coming Soon)
- LINE (Coming Soon)
<div align='center'>
**Community Maintained**
<img src="https://github.com/user-attachments/assets/4ee688d9-467d-45c8-99d6-368f9a8a92d8" width="600">
- [KOOK](https://github.com/wuyan1003/astrbot_plugin_kook_adapter)
- [VoceChat](https://github.com/HikariFroya/astrbot_plugin_vocechat)
- [Bilibili Direct Messages](https://github.com/Hina-Chat/astrbot_plugin_bilibili_adapter)
- [wxauto](https://github.com/luosheng520qaq/wxauto-repost-onebotv11)
_✨ Docker-based Sandboxed Code Executor (Beta) ✨_
## Supported Model Services
<img src="https://github.com/user-attachments/assets/0378f407-6079-4f64-ae4c-e97ab20611d2" height=500>
**LLM Services**
_✨ Multimodal Input, Web Search, Text-to-Image ✨_
- OpenAI and Compatible Services
- Anthropic
- Google Gemini
- Moonshot AI
- Zhipu AI
- DeepSeek
- Ollama (Self-hosted)
- LM Studio (Self-hosted)
- [CompShare](https://www.compshare.cn/?ytag=GPU_YY-gh_astrbot&referral_code=FV7DcGowN4hB5UuXKgpE74)
- [302.AI](https://share.302.ai/rr1M3l)
- [TokenPony](https://www.tokenpony.cn/3YPyf)
- [SiliconFlow](https://docs.siliconflow.cn/cn/usecases/use-siliconcloud-in-astrbot)
- [PPIO Cloud](https://ppio.com/user/register?invited_by=AIOONE)
- ModelScope
- OneAPI
<img src="https://github.com/user-attachments/assets/8ec12797-e70f-460a-959e-48eca39ca2bb" height=100>
**LLMOps Platforms**
_✨ Natural Language TODO Lists ✨_
- Dify
- Alibaba Cloud Bailian Applications
- Coze
<img src="https://github.com/user-attachments/assets/e137a9e1-340a-4bf2-bb2b-771132780735" height=150>
<img src="https://github.com/user-attachments/assets/480f5e82-cf6a-4955-a869-0d73137aa6e1" height=150>
**Speech-to-Text Services**
_✨ Plugin System Showcase ✨_
- OpenAI Whisper
- SenseVoice
<img src="https://github.com/user-attachments/assets/592a8630-14c7-4e06-b496-9c0386e4f36c" width=600>
**Text-to-Speech Services**
_✨ Web Dashboard ✨_
- OpenAI TTS
- Gemini TTS
- GPT-Sovits-Inference
- GPT-Sovits
- FishAudio
- Edge TTS
- Alibaba Cloud Bailian TTS
- Azure TTS
- Minimax TTS
- Volcano Engine TTS
![webchat](https://drive.soulter.top/f/vlsA/ezgif-5-fb044b2542.gif)
## ❤️ Contributing
_✨ Built-in Web Chat Interface ✨_
Issues and Pull Requests are always welcome! Feel free to submit your changes to this project :)
</div>
### How to Contribute
You can contribute by reviewing issues or helping with pull request reviews. Any issues or PRs are welcome to encourage community participation. Of course, these are just suggestions—you can contribute in any way you like. For adding new features, please discuss through an Issue first.
### Development Environment
AstrBot uses `ruff` for code formatting and linting.
```bash
git clone https://github.com/AstrBotDevs/AstrBot
pip install pre-commit
pre-commit install
```
## ❤️ Special Thanks
Special thanks to all Contributors and plugin developers for their contributions to AstrBot ❤️
<a href="https://github.com/AstrBotDevs/AstrBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=AstrBotDevs/AstrBot" />
</a>
Additionally, the birth of this project would not have been possible without the help of the following open-source projects:
- [NapNeko/NapCatQQ](https://github.com/NapNeko/NapCatQQ) - The amazing cat framework
## ⭐ Star History
> [!TIP]
> If this project helps you, please give it a star <3
> [!TIP]
> If this project has helped you in your life or work, or if you're interested in its future development, please give the project a Star. It's the driving force behind maintaining this open-source project <3
<div align="center">
[![Star History Chart](https://api.star-history.com/svg?repos=AstrBotDevs/AstrBot&type=Date)](https://star-history.com/#AstrBotDevs/AstrBot&Date)
[![Star History Chart](https://api.star-history.com/svg?repos=astrbotdevs/astrbot&type=Date)](https://star-history.com/#astrbotdevs/astrbot&Date)
</div>
## Disclaimer
1. Licensed under `AGPL-v3`.
2. WeChat integration uses [Gewechat](https://github.com/Devo919/Gewechat). Use at your own risk with non-critical accounts.
3. Users must comply with local laws and regulations.
<!-- ## ✨ ATRI [Beta]
Available as plugin: [astrbot_plugin_atri](https://github.com/AstrBotDevs/AstrBot_plugin_atri)
1. Qwen1.5-7B-Chat Lora model fine-tuned with ATRI character data
2. Long-term memory
3. Meme understanding & responses
4. TTS integration
-->
</details>
_私は、高性能ですから!_

View File

@@ -1,167 +1,233 @@
<p align="center">
![6e1279651f16d7fdf4727558b72bbaf1](https://github.com/user-attachments/assets/ead4c551-fc3c-48f7-a6f7-afbfdb820512)
![AstrBot-Logo-Simplified](https://github.com/user-attachments/assets/ffd99b6b-3272-4682-beaa-6fe74250f7d9)
</p>
<div align="center">
_✨ 簡単に使えるマルチプラットフォーム LLM チャットボットおよび開発フレームワーク ✨_
<br>
<div>
<a href="https://trendshift.io/repositories/12875" target="_blank"><img src="https://trendshift.io/api/badge/repositories/12875" alt="Soulter%2FAstrBot | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/AstrBotDevs/AstrBot)](https://github.com/AstrBotDevs/AstrBot/releases/latest)
<img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="python">
<a href="https://hub.docker.com/r/soulter/astrbot"><img alt="Docker pull" src="https://img.shields.io/docker/pulls/soulter/astrbot.svg"/></a>
<img alt="Static Badge" src="https://img.shields.io/badge/QQ群-630166526-purple">
[![wakatime](https://wakatime.com/badge/user/915e5316-99c6-4563-a483-ef186cf000c9/project/018e705a-a1a7-409a-a849-3013485e6c8e.svg)](https://wakatime.com/badge/user/915e5316-99c6-4563-a483-ef186cf000c9/project/018e705a-a1a7-409a-a849-3013485e6c8e)
![Dynamic JSON Badge](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.soulter.top%2Fastrbot%2Fstats&query=v&label=7%E6%97%A5%E6%B6%88%E6%81%AF%E4%B8%8A%E8%A1%8C%E9%87%8F&cacheSeconds=3600)
[![codecov](https://codecov.io/gh/AstrBotDevs/AstrBot/graph/badge.svg?token=FF3P5967B8)](https://codecov.io/gh/AstrBotDevs/AstrBot)
<a href="https://astrbot.app/">ドキュメントを見る</a>
<a href="https://github.com/AstrBotDevs/AstrBot/issues">問題を報告する</a>
<a href="https://hellogithub.com/repository/AstrBotDevs/AstrBot" target="_blank"><img src="https://api.hellogithub.com/v1/widgets/recommend.svg?rid=d127d50cd5e54c5382328acc3bb25483&claim_uid=ZO9by7qCXgSd6Lp&t=2" alt="FeaturedHelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a>
</div>
AstrBot は、疎結合、非同期、複数のメッセージプラットフォームに対応したデプロイ、使いやすいプラグインシステム、および包括的な大規模言語モデルLLM接続機能を備えたチャットボットおよび開発フレームワークです。
<br>
## ✨ 主な機能
<div>
<img src="https://img.shields.io/github/v/release/AstrBotDevs/AstrBot?style=for-the-badge&color=76bad9" href="https://github.com/AstrBotDevs/AstrBot/releases/latest">
<img src="https://img.shields.io/badge/python-3.10+-blue.svg?style=for-the-badge&color=76bad9" alt="python">
<a href="https://hub.docker.com/r/soulter/astrbot"><img alt="Docker pull" src="https://img.shields.io/docker/pulls/soulter/astrbot.svg?style=for-the-badge&color=76bad9"/></a>
<a href="https://qm.qq.com/cgi-bin/qm/qr?k=wtbaNx7EioxeaqS9z7RQWVXPIxg2zYr7&jump_from=webapi&authKey=vlqnv/AV2DbJEvGIcxdlNSpfxVy+8vVqijgreRdnVKOaydpc+YSw4MctmEbr0k5"><img alt="QQ_community" src="https://img.shields.io/badge/QQ群-775869627-purple?style=for-the-badge&color=76bad9"></a>
<a href="https://t.me/+hAsD2Ebl5as3NmY1"><img alt="Telegram_community" src="https://img.shields.io/badge/Telegram-AstrBot-purple?style=for-the-badge&color=76bad9"></a>
<img src="https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.soulter.top%2Fastrbot%2Fplugin-num&query=%24.result&suffix=%E4%B8%AA&style=for-the-badge&label=%E6%8F%92%E4%BB%B6%E5%B8%82%E5%9C%BA&cacheSeconds=3600">
</div>
1. **大規模言語モデルの対話**。OpenAI API、Google Gemini、Llama、Deepseek、ChatGLM など、さまざまな大規模言語モデルをサポートし、Ollama、LLMTuner を介してローカルにデプロイされた大規模モデルをサポートします。多輪対話、人格シナリオ、多モーダル機能を備え、画像理解、音声からテキストへの変換Whisperをサポートします。
2. **複数のメッセージプラットフォームの接続**。QQOneBot、QQ チャンネル、Feishu、Telegram への接続をサポートします。今後、DingTalk、Discord、WhatsApp、Xiaoai 音響をサポートする予定です。レート制限、ホワイトリスト、キーワードフィルタリング、Baidu コンテンツ監査をサポートします。
3. **エージェント**。一部のエージェント機能をネイティブにサポートし、コードエグゼキューター、自然言語タスク、ウェブ検索などを提供します。[Dify プラットフォーム](https://dify.ai/)と連携し、Dify スマートアシスタント、ナレッジベース、Dify ワークフローを簡単に接続できます。
4. **プラグインの拡張**。深く最適化されたプラグインメカニズムを備え、[プラグインの開発](https://astrbot.app/dev/plugin.html)をサポートし、機能を拡張できます。複数のプラグインのインストールをサポートします。
5. **ビジュアル管理パネル**。設定の視覚的な変更、プラグイン管理、ログの表示などをサポートし、設定の難易度を低減します。WebChat を統合し、パネル上で大規模モデルと対話できます。
6. **高い安定性と高いモジュール性**。イベントバスとパイプラインに基づくアーキテクチャ設計により、高度にモジュール化され、低結合です。
<br>
> [!TIP]
> 管理パネルのオンラインデモを体験する: [https://demo.astrbot.app/](https://demo.astrbot.app/)
>
> ユーザー名: `astrbot`, パスワード: `astrbot`。LLM が設定されていないため、チャットページで大規模モデルを使用することはできません。(デモのログインパスワードを変更しないでください 😭)
<a href="https://github.com/AstrBotDevs/AstrBot/blob/master/README.md">中文</a>
<a href="https://github.com/AstrBotDevs/AstrBot/blob/master/README_en.md">English</a>
<a href="https://astrbot.app/">ドキュメント</a>
<a href="https://blog.astrbot.app/">Blog</a>
<a href="https://astrbot.featurebase.app/roadmap">ロードマップ</a>
<a href="https://github.com/AstrBotDevs/AstrBot/issues">Issue</a>
</div>
## ✨ 使用方法
AstrBot は、オープンソースのオールインワン Agent チャットボットプラットフォーム及び開発フレームワークです。
#### Docker デプロイ
## 主な機能
公式ドキュメント [Docker を使用して AstrBot をデプロイする](https://astrbot.app/deploy/astrbot/docker.html#%E4%BD%BF%E7%94%A8-docker-%E9%83%A8%E7%BD%B2-astrbot) を参照してください
1. **大規模言語モデル対話**。多様な大規模言語モデルサービスとの統合をサポート。マルチモーダル、ツール呼び出し、MCP、ネイティブナレッジベース、キャラクター設定などの機能を搭載
2. **マルチメッセージプラットフォームサポート**。QQ、WeChat Work、WeChat公式アカウント、Feishu、Telegram、DingTalk、Discord、KOOK などのプラットフォームと統合可能。レート制限、ホワイトリスト、Baidu コンテンツ審査をサポート。
3. **Agent**。完全に最適化された Agentic 機能。マルチターンツール呼び出し、内蔵サンドボックスコード実行環境、Web 検索などの機能をサポート。
4. **プラグイン拡張**。深く最適化されたプラグインメカニズムで、[プラグイン開発](https://astrbot.app/dev/plugin.html)による機能拡張をサポート。豊富なコミュニティプラグインエコシステム。
5. **WebUI**。ビジュアル設定とボット管理、充実した機能。
#### Windows ワンクリックインストーラーのデプロイ
## デプロイ方法
コンピュータに Python>3.10)がインストールされている必要があります。公式ドキュメント [Windows ワンクリックインストーラーを使用して AstrBot をデプロイする](https://astrbot.app/deploy/astrbot/windows.html) を参照してください。
#### Docker デプロイ(推奨 🥳)
#### Replit デプロイ
Docker / Docker Compose を使用した AstrBot デプロイを推奨します。
公式ドキュメント [Docker を使用した AstrBot のデプロイ](https://astrbot.app/deploy/astrbot/docker.html#%E4%BD%BF%E7%94%A8-docker-%E9%83%A8%E7%BD%B2-astrbot) をご参照ください。
#### 宝塔パネルデプロイ
AstrBot は宝塔パネルと提携し、宝塔パネルに公開されています。
公式ドキュメント [宝塔パネルデプロイ](https://astrbot.app/deploy/astrbot/btpanel.html) をご参照ください。
#### 1Panel デプロイ
AstrBot は 1Panel 公式により 1Panel パネルに公開されています。
公式ドキュメント [1Panel デプロイ](https://astrbot.app/deploy/astrbot/1panel.html) をご参照ください。
#### 雨云でのデプロイ
AstrBot は雨云公式によりクラウドアプリケーションプラットフォームに公開され、ワンクリックでデプロイ可能です。
[![Deploy on RainYun](https://rainyun-apps.cn-nb1.rains3.com/materials/deploy-on-rainyun-en.svg)](https://app.rainyun.com/apps/rca/store/5994?ref=NjU1ODg0)
#### Replit でのデプロイ
コミュニティ貢献によるデプロイ方法。
[![Run on Repl.it](https://repl.it/badge/github/AstrBotDevs/AstrBot)](https://repl.it/github/AstrBotDevs/AstrBot)
#### Windows ワンクリックインストーラーデプロイ
公式ドキュメント [Windows ワンクリックインストーラーを使用した AstrBot のデプロイ](https://astrbot.app/deploy/astrbot/windows.html) をご参照ください。
#### CasaOS デプロイ
コミュニティが提供するデプロイ方法です
コミュニティ貢献によるデプロイ方法。
公式ドキュメント [ソースコードを使用して AstrBot をデプロイする](https://astrbot.app/deploy/astrbot/casaos.html) を参照してください。
公式ドキュメント [CasaOS デプロイ](https://astrbot.app/deploy/astrbot/casaos.html) を参照ください。
#### 手動デプロイ
公式ドキュメント [ソースコードを使用して AstrBot をデプロイする](https://astrbot.app/deploy/astrbot/cli.html) を参照してください。
まず uv をインストールします:
## ⚡ メッセージプラットフォームのサポート状況
```bash
pip install uv
```
| プラットフォーム | サポート状況 | 詳細 | メッセージタイプ |
| -------- | ------- | ------- | ------ |
| QQ(公式ロボットインターフェース) | ✔ | プライベートチャット、グループチャット、QQ チャンネルプライベートチャット、グループチャット | テキスト、画像 |
| QQ(OneBot) | ✔ | プライベートチャット、グループチャット | テキスト、画像、音声 |
| WeChat(個人アカウント) | ✔ | WeChat 個人アカウントのプライベートチャット、グループチャット | テキスト、画像、音声 |
| [Telegram](https://github.com/Soulter/astrbot_plugin_telegram) | ✔ | プライベートチャット、グループチャット | テキスト、画像 |
| [WeChat(企業 WeChat)](https://github.com/Soulter/astrbot_plugin_wecom) | ✔ | プライベートチャット | テキスト、画像、音声 |
| Feishu | ✔ | グループチャット | テキスト、画像 |
| WeChat 対話オープンプラットフォーム | 🚧 | 計画中 | - |
| Discord | 🚧 | 計画中 | - |
| WhatsApp | 🚧 | 計画中 | - |
| Xiaoai 音響 | 🚧 | 計画中 | - |
Git Clone で AstrBot をインストール:
# 🦌 今後のロードマップ
```bash
git clone https://github.com/AstrBotDevs/AstrBot && cd AstrBot
uv run main.py
```
> [!TIP]
> Issue でさらに多くの提案を歓迎します <3
または、公式ドキュメント [ソースコードから AstrBot をデプロイ](https://astrbot.app/deploy/astrbot/cli.html) をご参照ください。
- [ ] 現在のすべてのプラットフォームアダプターの機能の一貫性を確保し、改善する
- [ ] プラグインインターフェースの最適化
- [ ] GPT-Sovits などの TTS サービスをデフォルトでサポート
- [ ] "チャット強化" 部分を完成させ、永続的な記憶をサポート
- [ ] i18n の計画
## 🌍 Community

### QQ Groups

- Group 1: 322154837
- Group 3: 630166526
- Group 5: 822130018
- Group 6: 753075035
- Developer group: 975206796

### Telegram Group

<a href="https://t.me/+hAsD2Ebl5as3NmY1"><img alt="Telegram_community" src="https://img.shields.io/badge/Telegram-AstrBot-purple?style=for-the-badge&color=76bad9"></a>

### Discord Server

<a href="https://discord.gg/hAVk6tgV36"><img alt="Discord_community" src="https://img.shields.io/badge/Discord-AstrBot-purple?style=for-the-badge&color=76bad9"></a>

## 🌟 Support

- Give this project a Star!
- Support me on [Afdian](https://afdian.com/a/soulter)!
- Support me via [WeChat](https://drive.soulter.top/f/pYfA/d903f4fa49a496fda3f16d2be9e023b5.png)~

## ✨ Demo

> [!NOTE]
> File input/output for the code executor has so far been tested only with Napcat (QQ) and Lagrange (QQ).

<div align='center'>

<img src="https://github.com/user-attachments/assets/4ee688d9-467d-45c8-99d6-368f9a8a92d8" width="600">

_✨ Docker-based sandboxed code executor (in beta) ✨_

<img src="https://github.com/user-attachments/assets/0378f407-6079-4f64-ae4c-e97ab20611d2" height=500>

_✨ Multimodal input, web search, and configurable long-text-to-image rendering ✨_

<img src="https://github.com/user-attachments/assets/8ec12797-e70f-460a-959e-48eca39ca2bb" height=100>

_✨ Natural language tasks ✨_

<img src="https://github.com/user-attachments/assets/e137a9e1-340a-4bf2-bb2b-771132780735" height=150>
<img src="https://github.com/user-attachments/assets/480f5e82-cf6a-4955-a869-0d73137aa6e1" height=150>

_✨ Plugin system: a showcase of selected plugins ✨_

<img src="https://github.com/user-attachments/assets/592a8630-14c7-4e06-b496-9c0386e4f36c" width="600">

_✨ Admin panel ✨_

![webchat](https://drive.soulter.top/f/vlsA/ezgif-5-fb044b2542.gif)

_✨ Built-in Web Chat: talk to your bot online ✨_

</div>

## Supported Message Platforms

**Officially maintained**

- QQ (official platform & OneBot)
- Telegram
- WeChat Work applications & WeChat Work intelligent bots
- WeChat Customer Service & WeChat Official Accounts
- Feishu (Lark)
- DingTalk
- Slack
- Discord
- Satori
- Misskey
- WhatsApp (coming soon)
- LINE (coming soon)

**Community maintained**

- [KOOK](https://github.com/wuyan1003/astrbot_plugin_kook_adapter)
- [VoceChat](https://github.com/HikariFroya/astrbot_plugin_vocechat)
- [Bilibili direct messages](https://github.com/Hina-Chat/astrbot_plugin_bilibili_adapter)
- [wxauto](https://github.com/luosheng520qaq/wxauto-repost-onebotv11)

## Supported Model Services

**Large language model services**

- OpenAI and compatible services
- Anthropic
- Google Gemini
- Moonshot AI
- 智谱 AI (Zhipu AI)
- DeepSeek
- Ollama (self-hosted)
- LM Studio (self-hosted)
- [優云智算](https://www.compshare.cn/?ytag=GPU_YY-gh_astrbot&referral_code=FV7DcGowN4hB5UuXKgpE74)
- [302.AI](https://share.302.ai/rr1M3l)
- [小馬算力](https://www.tokenpony.cn/3YPyf)
- [硅基流動 (SiliconFlow)](https://docs.siliconflow.cn/cn/usercases/use-siliconcloud-in-astrbot)
- [PPIO 派欧云](https://ppio.com/user/register?invited_by=AIOONE)
- ModelScope
- OneAPI

**LLMOps platforms**

- Dify
- Alibaba Cloud 百炼 applications
- Coze

**Speech-to-text services**

- OpenAI Whisper
- SenseVoice

**Text-to-speech services**

- OpenAI TTS
- Gemini TTS
- GPT-Sovits-Inference
- GPT-Sovits
- FishAudio
- Edge TTS
- Alibaba Cloud 百炼 TTS
- Azure TTS
- Minimax TTS
- Volcano Engine TTS
## ❤️ Contributing

Issues and Pull Requests are very welcome! Just submit your changes to this project :)

### How to contribute

You can contribute by triaging Issues or helping review PRs (pull requests). Participation in any Issue or PR is welcome, and community contributions are encouraged. These are only suggestions, of course; you can contribute in whatever way suits you. For new features, please discuss them in an Issue first.

### Development environment

AstrBot uses `ruff` for code formatting and linting.

```bash
git clone https://github.com/AstrBotDevs/AstrBot
pip install pre-commit
pre-commit install
```

## ❤️ Special Thanks

Special thanks to all contributors and plugin developers for their contributions to AstrBot ❤️

<a href="https://github.com/AstrBotDevs/AstrBot/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=AstrBotDevs/AstrBot" />
</a>

This project also could not have come to life without the help of the following open-source projects:

- [NapNeko/NapCatQQ](https://github.com/NapNeko/NapCatQQ) - a wonderful cat-cat framework

## ⭐ Star History

> [!TIP]
> If this project helps you in your life or work, or you care about its future development, please give it a Star. It is the motivation that keeps this open-source project going <3

<div align="center">

[![Star History Chart](https://api.star-history.com/svg?repos=astrbotdevs/astrbot&type=Date)](https://star-history.com/#astrbotdevs/astrbot&Date)

</div>

## Sponsors

[<img src="https://api.gitsponsors.com/api/badge/img?id=575865240" height="20">](https://api.gitsponsors.com/api/badge/link?p=XEpbdGxlitw/RbcwiTX93UMzNK/jgDYC8NiSzamIPMoKvG2lBFmyXhSS/b0hFoWlBBMX2L5X5CxTDsUdyvcIEHTOfnkXz47UNOZvMwyt5CzbYpq0SEzsSV1OJF1cCo90qC/ZyYKYOWedal3MhZ3ikw==)

## Disclaimer

1. This project is released under the `AGPL-v3` open-source license.
2. Please comply with local laws and regulations when using this project.
<!-- ## ✨ ATRI [beta]

This feature is loaded as a plugin. Plugin repository: [astrbot_plugin_atri](https://github.com/Soulter/astrbot_plugin_atri)

1. A `Qwen1.5-7B-Chat Lora` model fine-tuned on the character lines of ATRI, the heroine of "ATRI ~ My Dear Moments", as the fine-tuning dataset.
2. Long-term memory
3. Meme understanding and replies
4. TTS
-->

</details>

_"Because I'm high performance!!"_

View File

@@ -36,7 +36,8 @@ from astrbot.core.star.config import *
# provider
from astrbot.core.provider import Provider, Personality, ProviderMetaData
from astrbot.core.provider import Provider, ProviderMetaData
from astrbot.core.db.po import Personality
# platform
from astrbot.core.platform import (

View File

@@ -1,4 +1,5 @@
from astrbot.core.provider import Personality, Provider, STTProvider
from astrbot.core.db.po import Personality
from astrbot.core.provider import Provider, STTProvider
from astrbot.core.provider.entities import (
LLMResponse,
ProviderMetaData,

View File

@@ -4,6 +4,14 @@ from contextlib import AsyncExitStack
from datetime import timedelta
from typing import Generic
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from astrbot import logger
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.utils.log_pipe import LogPipe
@@ -12,21 +20,24 @@ from .run_context import TContext
from .tool import FunctionTool
try:
import anyio
import mcp
from mcp.client.sse import sse_client
except (ModuleNotFoundError, ImportError):
logger.warning("警告: 缺少依赖库 'mcp',将无法使用 MCP 服务。")
logger.warning(
"Warning: Missing 'mcp' dependency, MCP services will be unavailable."
)
try:
from mcp.client.streamable_http import streamablehttp_client
except (ModuleNotFoundError, ImportError):
logger.warning(
"警告: 缺少依赖库 'mcp' 或者 mcp 库版本过低,无法使用 Streamable HTTP 连接方式。",
"Warning: Missing 'mcp' dependency or MCP library version too old, Streamable HTTP connection unavailable.",
)
def _prepare_config(config: dict) -> dict:
"""准备配置,处理嵌套格式"""
"""Prepare configuration, handle nested format"""
if config.get("mcpServers"):
first_key = next(iter(config["mcpServers"]))
config = config["mcpServers"][first_key]
@@ -35,7 +46,7 @@ def _prepare_config(config: dict) -> dict:
async def _quick_test_mcp_connection(config: dict) -> tuple[bool, str]:
"""快速测试 MCP 服务器可达性"""
"""Quick test MCP server connectivity"""
import aiohttp
cfg = _prepare_config(config.copy())
@@ -50,7 +61,7 @@ async def _quick_test_mcp_connection(config: dict) -> tuple[bool, str]:
elif "type" in cfg:
transport_type = cfg["type"]
else:
raise Exception("MCP 连接配置缺少 transport type 字段")
raise Exception("MCP connection config missing transport or type field")
async with aiohttp.ClientSession() as session:
if transport_type == "streamable_http":
@@ -91,7 +102,7 @@ async def _quick_test_mcp_connection(config: dict) -> tuple[bool, str]:
return False, f"HTTP {response.status}: {response.reason}"
except asyncio.TimeoutError:
return False, f"连接超时: {timeout}"
return False, f"Connection timeout: {timeout} seconds"
except Exception as e:
return False, f"{e!s}"
@@ -101,6 +112,7 @@ class MCPClient:
# Initialize session and client objects
self.session: mcp.ClientSession | None = None
self.exit_stack = AsyncExitStack()
self._old_exit_stacks: list[AsyncExitStack] = [] # Track old stacks for cleanup
self.name: str | None = None
self.active: bool = True
@@ -108,22 +120,32 @@ class MCPClient:
self.server_errlogs: list[str] = []
self.running_event = asyncio.Event()
async def connect_to_server(self, mcp_server_config: dict, name: str):
"""连接到 MCP 服务器
# Store connection config for reconnection
self._mcp_server_config: dict | None = None
self._server_name: str | None = None
self._reconnect_lock = asyncio.Lock() # Lock for thread-safe reconnection
self._reconnecting: bool = False # For logging and debugging
如果 `url` 参数存在:
1. 当 transport 指定为 `streamable_http` 时,使用 Streamable HTTP 连接方式。
1. 当 transport 指定为 `sse` 时,使用 SSE 连接方式。
2. 如果没有指定,默认使用 SSE 的方式连接到 MCP 服务。
async def connect_to_server(self, mcp_server_config: dict, name: str):
"""Connect to MCP server
If `url` parameter exists:
1. When transport is specified as `streamable_http`, use Streamable HTTP connection.
2. When transport is specified as `sse`, use SSE connection.
3. If not specified, default to SSE connection to MCP service.
Args:
mcp_server_config (dict): Configuration for the MCP server. See https://modelcontextprotocol.io/quickstart/server
"""
# Store config for reconnection
self._mcp_server_config = mcp_server_config
self._server_name = name
cfg = _prepare_config(mcp_server_config.copy())
def logging_callback(msg: str):
# 处理 MCP 服务的错误日志
# Handle MCP service error logs
print(f"MCP Server {name} Error: {msg}")
self.server_errlogs.append(msg)
@@ -137,7 +159,7 @@ class MCPClient:
elif "type" in cfg:
transport_type = cfg["type"]
else:
raise Exception("MCP 连接配置缺少 transport type 字段")
raise Exception("MCP connection config missing transport or type field")
if transport_type != "streamable_http":
# SSE transport method
@@ -193,7 +215,7 @@ class MCPClient:
)
def callback(msg: str):
# 处理 MCP 服务的错误日志
# Handle MCP service error logs
self.server_errlogs.append(msg)
stdio_transport = await self.exit_stack.enter_async_context(
@@ -222,10 +244,120 @@ class MCPClient:
self.tools = response.tools
return response
async def _reconnect(self) -> None:
"""Reconnect to the MCP server using the stored configuration.
Uses asyncio.Lock to ensure thread-safe reconnection in concurrent environments.
Raises:
Exception: raised when reconnection fails
"""
async with self._reconnect_lock:
# Check if already reconnecting (useful for logging)
if self._reconnecting:
logger.debug(
f"MCP Client {self._server_name} is already reconnecting, skipping"
)
return
if not self._mcp_server_config or not self._server_name:
raise Exception("Cannot reconnect: missing connection configuration")
self._reconnecting = True
try:
logger.info(
f"Attempting to reconnect to MCP server {self._server_name}..."
)
# Save old exit_stack for later cleanup (don't close it now to avoid cancel scope issues)
if self.exit_stack:
self._old_exit_stacks.append(self.exit_stack)
# Mark old session as invalid
self.session = None
# Create new exit stack for new connection
self.exit_stack = AsyncExitStack()
# Reconnect using stored config
await self.connect_to_server(self._mcp_server_config, self._server_name)
await self.list_tools_and_save()
logger.info(
f"Successfully reconnected to MCP server {self._server_name}"
)
except Exception as e:
logger.error(
f"Failed to reconnect to MCP server {self._server_name}: {e}"
)
raise
finally:
self._reconnecting = False
async def call_tool_with_reconnect(
self,
tool_name: str,
arguments: dict,
read_timeout_seconds: timedelta,
) -> mcp.types.CallToolResult:
"""Call MCP tool with automatic reconnection on failure, max 2 retries.
Args:
tool_name: tool name
arguments: tool arguments
read_timeout_seconds: read timeout
Returns:
MCP tool call result
Raises:
ValueError: MCP session is not available
anyio.ClosedResourceError: raised after reconnection failure
"""
@retry(
retry=retry_if_exception_type(anyio.ClosedResourceError),
stop=stop_after_attempt(2),
wait=wait_exponential(multiplier=1, min=1, max=3),
before_sleep=before_sleep_log(logger, logging.WARNING),
reraise=True,
)
async def _call_with_retry():
if not self.session:
raise ValueError("MCP session is not available for MCP function tools.")
try:
return await self.session.call_tool(
name=tool_name,
arguments=arguments,
read_timeout_seconds=read_timeout_seconds,
)
except anyio.ClosedResourceError:
logger.warning(
f"MCP tool {tool_name} call failed (ClosedResourceError), attempting to reconnect..."
)
# Attempt to reconnect
await self._reconnect()
# Reraise the exception to trigger tenacity retry
raise
return await _call_with_retry()
async def cleanup(self):
"""Clean up resources"""
await self.exit_stack.aclose()
self.running_event.set() # Set the running event to indicate cleanup is done
"""Clean up resources including old exit stacks from reconnections"""
# Set running_event first to unblock any waiting tasks
self.running_event.set()
# Close current exit stack
try:
await self.exit_stack.aclose()
except Exception as e:
logger.debug(f"Error closing current exit stack: {e}")
# Don't close old exit stacks as they may be in different task contexts
# They will be garbage collected naturally
# Just clear the list to release references
self._old_exit_stacks.clear()
class MCPTool(FunctionTool, Generic[TContext]):
@@ -246,14 +378,8 @@ class MCPTool(FunctionTool, Generic[TContext]):
async def call(
self, context: ContextWrapper[TContext], **kwargs
) -> mcp.types.CallToolResult:
session = self.mcp_client.session
if not session:
raise ValueError("MCP session is not available for MCP function tools.")
res = await session.call_tool(
name=self.mcp_tool.name,
return await self.mcp_client.call_tool_with_reconnect(
tool_name=self.mcp_tool.name,
arguments=kwargs,
read_timeout_seconds=timedelta(
seconds=context.tool_call_timeout,
),
read_timeout_seconds=timedelta(seconds=context.tool_call_timeout),
)
return res
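The retry wiring above is easy to get subtly wrong, so here is a minimal, self-contained sketch of the same tenacity pattern: retry only on a specific exception type, repair state inside the `except` block, then re-raise so tenacity schedules the next attempt. `FlakySession` and `ClosedResourceError` here are stand-ins for illustration, not the real AstrBot/anyio types.

```python
import asyncio
import logging

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger("retry-demo")


class ClosedResourceError(Exception):
    """Stand-in for anyio.ClosedResourceError."""


class FlakySession:
    """Hypothetical session that fails once, then works after reconnect()."""

    def __init__(self) -> None:
        self.alive = False

    async def reconnect(self) -> None:
        self.alive = True

    async def call(self) -> str:
        if not self.alive:
            raise ClosedResourceError("session closed")
        return "ok"


async def call_with_reconnect(session: FlakySession) -> str:
    @retry(
        retry=retry_if_exception_type(ClosedResourceError),
        stop=stop_after_attempt(2),  # first try + one retry, as in the code above
        wait=wait_exponential(multiplier=1, min=1, max=3),
        before_sleep=before_sleep_log(logger, logging.WARNING),
        reraise=True,
    )
    async def _call() -> str:
        try:
            return await session.call()
        except ClosedResourceError:
            await session.reconnect()  # repair state, then let tenacity retry
            raise

    return await _call()


print(asyncio.run(call_with_reconnect(FlakySession())))  # -> ok
```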

View File

@@ -76,7 +76,7 @@ class ImageURLPart(ContentPart):
"""The ID of the image, to allow LLMs to distinguish different images."""
type: str = "image_url"
image_url: str
image_url: ImageURL
class AudioURLPart(ContentPart):
@@ -119,6 +119,13 @@ class ToolCall(BaseModel):
"""The ID of the tool call."""
function: FunctionBody
"""The function body of the tool call."""
extra_content: dict[str, Any] | None = None
"""Extra metadata for the tool call."""
def model_dump(self, **kwargs: Any) -> dict[str, Any]:
if self.extra_content is None:
kwargs.setdefault("exclude", set()).add("extra_content")
return super().model_dump(**kwargs)
class ToolCallPart(BaseModel):
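The `model_dump` override above keeps `extra_content` out of serialized payloads whenever it is unset. A minimal sketch of the same idiom with plain pydantic v2 (the field set here is trimmed for illustration and omits the real `function` field):

```python
from typing import Any

from pydantic import BaseModel


class ToolCall(BaseModel):
    id: str
    extra_content: dict[str, Any] | None = None

    def model_dump(self, **kwargs: Any) -> dict[str, Any]:
        # Only serialize extra_content when it actually carries data.
        if self.extra_content is None:
            kwargs.setdefault("exclude", set()).add("extra_content")
        return super().model_dump(**kwargs)


print(ToolCall(id="a").model_dump())                          # {'id': 'a'}
print(ToolCall(id="b", extra_content={"k": 1}).model_dump())  # keeps extra_content
```

This keeps wire payloads backward compatible: consumers that never set the new field see exactly the old shape.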

View File

@@ -1,16 +1,21 @@
from dataclasses import dataclass
from typing import Any, Generic
from pydantic import Field
from pydantic.dataclasses import dataclass
from typing_extensions import TypeVar
from .message import Message
TContext = TypeVar("TContext", default=Any)
@dataclass
@dataclass(config={"arbitrary_types_allowed": True})
class ContextWrapper(Generic[TContext]):
"""A context for running an agent, which can be used to pass additional data or state."""
context: TContext
messages: list[Message] = Field(default_factory=list)
"""This field stores the llm message context for the agent run, agent runners will maintain this field automatically."""
tool_call_timeout: int = 60 # Default tool call timeout in seconds
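`ContextWrapper` becomes a pydantic dataclass so `messages` gets a validated per-instance default, and `arbitrary_types_allowed` is what lets a non-pydantic `context` object live inside it. A small self-contained sketch of the pattern (names are illustrative):

```python
from pydantic import Field
from pydantic.dataclasses import dataclass


class EngineHandle:
    """A plain class with no pydantic schema."""


@dataclass(config={"arbitrary_types_allowed": True})
class Wrapper:
    context: EngineHandle                               # allowed only via the config
    messages: list[str] = Field(default_factory=list)   # fresh list per instance
    tool_call_timeout: int = 60


w = Wrapper(context=EngineHandle())
w.messages.append("hello")
print(len(w.messages), w.tool_call_timeout)  # 1 60
```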

View File

@@ -40,6 +40,13 @@ class BaseAgentRunner(T.Generic[TContext]):
"""Process a single step of the agent."""
...
@abc.abstractmethod
async def step_until_done(
self, max_step: int
) -> T.AsyncGenerator[AgentResponse, None]:
"""Process steps until the agent is done."""
...
@abc.abstractmethod
def done(self) -> bool:
"""Check if the agent has completed its task.

View File

@@ -23,7 +23,7 @@ from astrbot.core.provider.entities import (
from astrbot.core.provider.provider import Provider
from ..hooks import BaseAgentRunHooks
from ..message import AssistantMessageSegment, ToolCallMessageSegment
from ..message import AssistantMessageSegment, Message, ToolCallMessageSegment
from ..response import AgentResponseData
from ..run_context import ContextWrapper, TContext
from ..tool_executor import BaseFunctionToolExecutor
@@ -55,6 +55,20 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
self.agent_hooks = agent_hooks
self.run_context = run_context
messages = []
# append existing messages in the run context
for msg in request.contexts:
messages.append(Message.model_validate(msg))
if request.prompt is not None:
m = await request.assemble_context()
messages.append(Message.model_validate(m))
if request.system_prompt:
messages.insert(
0,
Message(role="system", content=request.system_prompt),
)
self.run_context.messages = messages
def _transition_state(self, new_state: AgentState) -> None:
"""转换 Agent 状态"""
if self._state != new_state:
@@ -96,13 +110,22 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
type="streaming_delta",
data=AgentResponseData(chain=llm_response.result_chain),
)
else:
elif llm_response.completion_text:
yield AgentResponse(
type="streaming_delta",
data=AgentResponseData(
chain=MessageChain().message(llm_response.completion_text),
),
)
elif llm_response.reasoning_content:
yield AgentResponse(
type="streaming_delta",
data=AgentResponseData(
chain=MessageChain(type="reasoning").message(
llm_response.reasoning_content,
),
),
)
continue
llm_resp_result = llm_response
break # got final response
@@ -130,6 +153,13 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
# If there are no tool calls, transition to the done state
self.final_llm_resp = llm_resp
self._transition_state(AgentState.DONE)
# record the final assistant message
self.run_context.messages.append(
Message(
role="assistant",
content=llm_resp.completion_text or "",
),
)
try:
await self.agent_hooks.on_agent_done(self.run_context, llm_resp)
except Exception as e:
@@ -156,13 +186,16 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
yield AgentResponse(
type="tool_call",
data=AgentResponseData(
chain=MessageChain().message(f"🔨 调用工具: {tool_call_name}"),
chain=MessageChain(type="tool_call").message(
f"🔨 调用工具: {tool_call_name}"
),
),
)
async for result in self._handle_function_tools(self.req, llm_resp):
if isinstance(result, list):
tool_call_result_blocks = result
elif isinstance(result, MessageChain):
result.type = "tool_call_result"
yield AgentResponse(
type="tool_call_result",
data=AgentResponseData(chain=result),
@@ -175,8 +208,23 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
),
tool_calls_result=tool_call_result_blocks,
)
# record the assistant message with tool calls
self.run_context.messages.extend(
tool_calls_result.to_openai_messages_model()
)
self.req.append_tool_calls_result(tool_calls_result)
async def step_until_done(
self, max_step: int
) -> T.AsyncGenerator[AgentResponse, None]:
"""Process steps until the agent is done."""
step_count = 0
while not self.done() and step_count < max_step:
step_count += 1
async for resp in self.step():
yield resp
async def _handle_function_tools(
self,
req: ProviderRequest,

View File

@@ -4,12 +4,13 @@ from typing import Any, Generic
import jsonschema
import mcp
from deprecated import deprecated
from pydantic import model_validator
from pydantic import Field, model_validator
from pydantic.dataclasses import dataclass
from .run_context import ContextWrapper, TContext
ParametersType = dict[str, Any]
ToolExecResult = str | mcp.types.CallToolResult
@dataclass
@@ -55,15 +56,14 @@ class FunctionTool(ToolSchema, Generic[TContext]):
def __repr__(self):
return f"FuncTool(name={self.name}, parameters={self.parameters}, description={self.description})"
async def call(
self, context: ContextWrapper[TContext], **kwargs
) -> str | mcp.types.CallToolResult:
async def call(self, context: ContextWrapper[TContext], **kwargs) -> ToolExecResult:
"""Run the tool with the given arguments. The handler field has priority."""
raise NotImplementedError(
"FunctionTool.call() must be implemented by subclasses or set a handler."
)
@dataclass
class ToolSet:
"""A set of function tools that can be used in function calling.
@@ -71,8 +71,7 @@ class ToolSet:
convert the tools to different API formats (OpenAI, Anthropic, Google GenAI).
"""
def __init__(self, tools: list[FunctionTool] | None = None):
self.tools: list[FunctionTool] = tools or []
tools: list[FunctionTool] = Field(default_factory=list)
def empty(self) -> bool:
"""Check if the tool set is empty."""

View File

@@ -1,14 +1,19 @@
from dataclasses import dataclass
from pydantic import Field
from pydantic.dataclasses import dataclass
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.platform.astr_message_event import AstrMessageEvent
from astrbot.core.provider import Provider
from astrbot.core.provider.entities import ProviderRequest
from astrbot.core.star.context import Context
@dataclass
@dataclass(config={"arbitrary_types_allowed": True})
class AstrAgentContext:
provider: Provider
first_provider_request: ProviderRequest
curr_provider_request: ProviderRequest
streaming: bool
context: Context
"""The star context instance"""
event: AstrMessageEvent
"""The message event associated with the agent context."""
extra: dict[str, str] = Field(default_factory=dict)
"""Customized extra data."""
AgentContextWrapper = ContextWrapper[AstrAgentContext]

View File

@@ -0,0 +1,36 @@
from typing import Any
from mcp.types import CallToolResult
from astrbot.core.agent.hooks import BaseAgentRunHooks
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.tool import FunctionTool
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.pipeline.context_utils import call_event_hook
from astrbot.core.star.star_handler import EventType
class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
async def on_agent_done(self, run_context, llm_response):
# 执行事件钩子
await call_event_hook(
run_context.context.event,
EventType.OnLLMResponseEvent,
llm_response,
)
async def on_tool_end(
self,
run_context: ContextWrapper[AstrAgentContext],
tool: FunctionTool[Any],
tool_args: dict | None,
tool_result: CallToolResult | None,
):
run_context.context.event.clear_result()
class EmptyAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
pass
MAIN_AGENT_HOOKS = MainAgentHooks()
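Hook subclasses override only the callbacks they care about, as `MainAgentHooks` does above. A hedged sketch of a custom hook that logs when the agent finishes; the base class is stubbed here so the snippet runs standalone, and only the hook names follow the file above:

```python
import logging
from typing import Any

logger = logging.getLogger("hooks-demo")


class BaseAgentRunHooks:
    """Stand-in for astrbot.core.agent.hooks.BaseAgentRunHooks."""

    async def on_agent_done(self, run_context: Any, llm_response: Any) -> None:
        pass  # default: do nothing

    async def on_tool_end(
        self, run_context: Any, tool: Any,
        tool_args: dict | None, tool_result: Any,
    ) -> None:
        pass


class LoggingAgentHooks(BaseAgentRunHooks):
    async def on_agent_done(self, run_context: Any, llm_response: Any) -> None:
        # Called once the tool loop reaches its final LLM response.
        logger.info("agent finished: %r", llm_response)


LOGGING_HOOKS = LoggingAgentHooks()
```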

View File

@@ -0,0 +1,80 @@
import traceback
from collections.abc import AsyncGenerator
from astrbot.core import logger
from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.message.message_event_result import (
MessageChain,
MessageEventResult,
ResultContentType,
)
AgentRunner = ToolLoopAgentRunner[AstrAgentContext]
async def run_agent(
agent_runner: AgentRunner,
max_step: int = 30,
show_tool_use: bool = True,
stream_to_general: bool = False,
show_reasoning: bool = False,
) -> AsyncGenerator[MessageChain | None, None]:
step_idx = 0
astr_event = agent_runner.run_context.context.event
while step_idx < max_step:
step_idx += 1
try:
async for resp in agent_runner.step():
if astr_event.is_stopped():
return
if resp.type == "tool_call_result":
msg_chain = resp.data["chain"]
if msg_chain.type == "tool_direct_result":
# tool_direct_result marks content that an llm tool wants sent directly to the user
await astr_event.send(resp.data["chain"])
continue
# Leave other cases unhandled for now
continue
elif resp.type == "tool_call":
if agent_runner.streaming:
# Marks that the streaming response should be segmented
yield MessageChain(chain=[], type="break")
if show_tool_use:
await astr_event.send(resp.data["chain"])
continue
if stream_to_general and resp.type == "streaming_delta":
continue
if stream_to_general or not agent_runner.streaming:
content_typ = (
ResultContentType.LLM_RESULT
if resp.type == "llm_result"
else ResultContentType.GENERAL_RESULT
)
astr_event.set_result(
MessageEventResult(
chain=resp.data["chain"].chain,
result_content_type=content_typ,
),
)
yield
astr_event.clear_result()
elif resp.type == "streaming_delta":
chain = resp.data["chain"]
if chain.type == "reasoning" and not show_reasoning:
# display the reasoning content only when configured
continue
yield resp.data["chain"] # MessageChain
if agent_runner.done():
break
except Exception as e:
logger.error(traceback.format_exc())
err_msg = f"\n\nAstrBot 请求失败。\n错误类型: {type(e).__name__}\n错误信息: {e!s}\n\n请在控制台查看和分享错误详情。\n"
if agent_runner.streaming:
yield MessageChain().message(err_msg)
else:
astr_event.set_result(MessageEventResult().message(err_msg))
return
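`run_agent` is an async generator that yields either a `MessageChain` delta or `None` when the result was attached to the event instead, so callers must handle both cases. A sketch of the consumption pattern with the generator stubbed out (everything here is illustrative):

```python
import asyncio
from typing import AsyncGenerator


async def run_agent_stub() -> AsyncGenerator[str | None, None]:
    """Stand-in for run_agent(): yields deltas, or None when the
    result was set on the event instead of being yielded."""
    yield "partial "
    yield None  # caller should flush the event's result here
    yield "answer"


async def drive() -> None:
    async for chain in run_agent_stub():
        if chain is None:
            print("[flush event result]")
        else:
            print("delta:", chain)


asyncio.run(drive())
```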

View File

@@ -0,0 +1,246 @@
import asyncio
import inspect
import traceback
import typing as T
import mcp
from astrbot import logger
from astrbot.core.agent.handoff import HandoffTool
from astrbot.core.agent.mcp_client import MCPTool
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.tool import FunctionTool, ToolSet
from astrbot.core.agent.tool_executor import BaseFunctionToolExecutor
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.message.message_event_result import (
CommandResult,
MessageChain,
MessageEventResult,
)
from astrbot.core.provider.register import llm_tools
class FunctionToolExecutor(BaseFunctionToolExecutor[AstrAgentContext]):
@classmethod
async def execute(cls, tool, run_context, **tool_args):
"""执行函数调用。
Args:
event (AstrMessageEvent): 事件对象, 当 origin 为 local 时必须提供。
**kwargs: 函数调用的参数。
Returns:
AsyncGenerator[None | mcp.types.CallToolResult, None]
"""
if isinstance(tool, HandoffTool):
async for r in cls._execute_handoff(tool, run_context, **tool_args):
yield r
return
elif isinstance(tool, MCPTool):
async for r in cls._execute_mcp(tool, run_context, **tool_args):
yield r
return
else:
async for r in cls._execute_local(tool, run_context, **tool_args):
yield r
return
@classmethod
async def _execute_handoff(
cls,
tool: HandoffTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
input_ = tool_args.get("input")
# make toolset for the agent
tools = tool.agent.tools
if tools:
toolset = ToolSet()
for t in tools:
if isinstance(t, str):
_t = llm_tools.get_func(t)
if _t:
toolset.add_tool(_t)
elif isinstance(t, FunctionTool):
toolset.add_tool(t)
else:
toolset = None
ctx = run_context.context.context
event = run_context.context.event
umo = event.unified_msg_origin
prov_id = await ctx.get_current_chat_provider_id(umo)
llm_resp = await ctx.tool_loop_agent(
event=event,
chat_provider_id=prov_id,
prompt=input_,
system_prompt=tool.agent.instructions,
tools=toolset,
max_steps=30,
run_hooks=tool.agent.run_hooks,
)
yield mcp.types.CallToolResult(
content=[mcp.types.TextContent(type="text", text=llm_resp.completion_text)]
)
@classmethod
async def _execute_local(
cls,
tool: FunctionTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
event = run_context.context.event
if not event:
raise ValueError("Event must be provided for local function tools.")
is_override_call = False
for ty in type(tool).mro():
if "call" in ty.__dict__ and ty.__dict__["call"] is not FunctionTool.call:
is_override_call = True
break
# Check whether the tool has a run method
if not tool.handler and not hasattr(tool, "run") and not is_override_call:
raise ValueError("Tool must have a valid handler or override 'run' method.")
awaitable = None
method_name = ""
if tool.handler:
awaitable = tool.handler
method_name = "decorator_handler"
elif is_override_call:
awaitable = tool.call
method_name = "call"
elif hasattr(tool, "run"):
awaitable = getattr(tool, "run")
method_name = "run"
if awaitable is None:
raise ValueError("Tool must have a valid handler or override 'run' method.")
wrapper = call_local_llm_tool(
context=run_context,
handler=awaitable,
method_name=method_name,
**tool_args,
)
while True:
try:
resp = await asyncio.wait_for(
anext(wrapper),
timeout=run_context.tool_call_timeout,
)
if resp is not None:
if isinstance(resp, mcp.types.CallToolResult):
yield resp
else:
text_content = mcp.types.TextContent(
type="text",
text=str(resp),
)
yield mcp.types.CallToolResult(content=[text_content])
else:
# NOTE: the tool directly requested sending a message to the user here
# TODO: should we check whether event.get_result() is empty?
# If it is empty, no message was sent to the user and the return value is empty; return a special TextContent with content like "the tool returned no content"
if res := run_context.context.event.get_result():
if res.chain:
try:
await event.send(
MessageChain(
chain=res.chain,
type="tool_direct_result",
)
)
except Exception as e:
logger.error(
f"Tool 直接发送消息失败: {e}",
exc_info=True,
)
yield None
except asyncio.TimeoutError:
raise Exception(
f"tool {tool.name} execution timeout after {run_context.tool_call_timeout} seconds.",
)
except StopAsyncIteration:
break
@classmethod
async def _execute_mcp(
cls,
tool: FunctionTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
res = await tool.call(run_context, **tool_args)
if not res:
return
yield res
async def call_local_llm_tool(
context: ContextWrapper[AstrAgentContext],
handler: T.Callable[..., T.Awaitable[T.Any]],
method_name: str,
*args,
**kwargs,
) -> T.AsyncGenerator[T.Any, None]:
"""执行本地 LLM 工具的处理函数并处理其返回结果"""
ready_to_call = None # 一个协程或者异步生成器
trace_ = None
event = context.context.event
try:
if method_name == "run" or method_name == "decorator_handler":
ready_to_call = handler(event, *args, **kwargs)
elif method_name == "call":
ready_to_call = handler(context, *args, **kwargs)
else:
raise ValueError(f"未知的方法名: {method_name}")
except ValueError as e:
logger.error(f"调用本地 LLM 工具时出错: {e}", exc_info=True)
except TypeError:
logger.error("处理函数参数不匹配,请检查 handler 的定义。", exc_info=True)
except Exception as e:
trace_ = traceback.format_exc()
logger.error(f"调用本地 LLM 工具时出错: {e}\n{trace_}")
if not ready_to_call:
return
if inspect.isasyncgen(ready_to_call):
_has_yielded = False
try:
async for ret in ready_to_call:
# Step through the async generator; for each ret it yields, run the code below
# The return value can only be MessageEventResult or None (no return value)
_has_yielded = True
if isinstance(ret, (MessageEventResult, CommandResult)):
# If the return value is a MessageEventResult, set the result and continue
event.set_result(ret)
yield
else:
# If the return value is None, don't set a result and continue
# Proceed with the subsequent stages
yield ret
if not _has_yielded:
# If this async generator never reached a yield branch
yield
except Exception as e:
logger.error(f"Previous Error: {trace_}")
raise e
elif inspect.iscoroutine(ready_to_call):
# If it is just a coroutine, execute it directly
ret = await ready_to_call
if isinstance(ret, (MessageEventResult, CommandResult)):
event.set_result(ret)
yield
else:
yield ret
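`_execute_local` resolves, in order, a decorator-attached `handler`, an overridden `call`, or a legacy `run` method. A toy sketch of that resolution order; the `FunctionTool` below is a stub mirroring only the relevant surface, not the real class:

```python
import asyncio
from typing import Any, Callable, Optional


class FunctionTool:
    """Stub mirroring the resolution-relevant surface of the real class."""

    handler: Optional[Callable[..., Any]] = None

    async def call(self, context: Any, **kwargs: Any) -> Any:
        raise NotImplementedError


def resolve(tool: FunctionTool) -> tuple[Callable[..., Any], str]:
    if tool.handler:
        return tool.handler, "decorator_handler"
    # An override exists if any class in the MRO redefines `call`.
    for ty in type(tool).mro():
        if "call" in ty.__dict__ and ty.__dict__["call"] is not FunctionTool.call:
            return tool.call, "call"
    if hasattr(tool, "run"):
        return getattr(tool, "run"), "run"
    raise ValueError("Tool must have a valid handler or override 'run'.")


class EchoTool(FunctionTool):
    async def call(self, context: Any, **kwargs: Any) -> Any:
        return kwargs


fn, how = resolve(EchoTool())
print(how, asyncio.run(fn(None, text="hi")))  # call {'text': 'hi'}
```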

View File

@@ -4,7 +4,7 @@ import os
from astrbot.core.utils.astrbot_path import get_astrbot_data_path
VERSION = "4.5.6"
VERSION = "4.6.0"
DB_PATH = os.path.join(get_astrbot_data_path(), "data_v4.db")
# Default configuration
@@ -68,7 +68,7 @@ DEFAULT_CONFIG = {
"dequeue_context_length": 1,
"streaming_response": False,
"show_tool_use_status": False,
"streaming_segmented": False,
"unsupported_streaming_strategy": "realtime_segmenting",
"max_agent_step": 30,
"tool_call_timeout": 60,
},
@@ -137,6 +137,7 @@ DEFAULT_CONFIG = {
"kb_names": [], # 默认知识库名称列表
"kb_fusion_top_k": 20, # 知识库检索融合阶段返回结果数量
"kb_final_top_k": 5, # 知识库检索最终返回结果数量
"kb_agentic_mode": False,
}
@@ -740,6 +741,7 @@ CONFIG_METADATA_2 = {
"api_base": "https://api.openai.com/v1",
"timeout": 120,
"model_config": {"model": "gpt-4o-mini", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
"hint": "也兼容所有与 OpenAI API 兼容的服务。",
@@ -755,6 +757,7 @@ CONFIG_METADATA_2 = {
"api_base": "",
"timeout": 120,
"model_config": {"model": "gpt-4o-mini", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -768,6 +771,7 @@ CONFIG_METADATA_2 = {
"api_base": "https://api.x.ai/v1",
"timeout": 120,
"model_config": {"model": "grok-2-latest", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"xai_native_search": False,
"modalities": ["text", "image", "tool_use"],
@@ -799,6 +803,7 @@ CONFIG_METADATA_2 = {
"key": ["ollama"], # ollama 的 key 默认是 ollama
"api_base": "http://localhost:11434/v1",
"model_config": {"model": "llama3.1-8b", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -813,6 +818,7 @@ CONFIG_METADATA_2 = {
"model_config": {
"model": "llama-3.1-8b",
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -829,6 +835,7 @@ CONFIG_METADATA_2 = {
"model": "gemini-1.5-flash",
"temperature": 0.4,
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -870,6 +877,24 @@ CONFIG_METADATA_2 = {
"api_base": "https://api.deepseek.com/v1",
"timeout": 120,
"model_config": {"model": "deepseek-chat", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "tool_use"],
},
"Groq": {
"id": "groq_default",
"provider": "groq",
"type": "groq_chat_completion",
"provider_type": "chat_completion",
"enable": True,
"key": [],
"api_base": "https://api.groq.com/openai/v1",
"timeout": 120,
"model_config": {
"model": "openai/gpt-oss-20b",
"temperature": 0.4,
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "tool_use"],
},
@@ -883,6 +908,7 @@ CONFIG_METADATA_2 = {
"api_base": "https://api.302.ai/v1",
"timeout": 120,
"model_config": {"model": "gpt-4.1-mini", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -899,6 +925,7 @@ CONFIG_METADATA_2 = {
"model": "deepseek-ai/DeepSeek-V3",
"temperature": 0.4,
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -915,6 +942,7 @@ CONFIG_METADATA_2 = {
"model": "deepseek/deepseek-r1",
"temperature": 0.4,
},
"custom_headers": {},
"custom_extra_body": {},
},
"小马算力": {
@@ -930,6 +958,7 @@ CONFIG_METADATA_2 = {
"model": "kimi-k2-instruct-0905",
"temperature": 0.7,
},
"custom_headers": {},
"custom_extra_body": {},
},
"优云智算": {
@@ -944,6 +973,7 @@ CONFIG_METADATA_2 = {
"model_config": {
"model": "moonshotai/Kimi-K2-Instruct",
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -957,6 +987,7 @@ CONFIG_METADATA_2 = {
"timeout": 120,
"api_base": "https://api.moonshot.cn/v1",
"model_config": {"model": "moonshot-v1-8k", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -972,6 +1003,8 @@ CONFIG_METADATA_2 = {
"model_config": {
"model": "glm-4-flash",
},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
"Dify": {
@@ -1028,6 +1061,7 @@ CONFIG_METADATA_2 = {
"timeout": 120,
"api_base": "https://api-inference.modelscope.cn/v1",
"model_config": {"model": "Qwen/Qwen3-32B", "temperature": 0.4},
"custom_headers": {},
"custom_extra_body": {},
"modalities": ["text", "image", "tool_use"],
},
@@ -1040,6 +1074,7 @@ CONFIG_METADATA_2 = {
"key": [],
"api_base": "https://api.fastgpt.in/api/v1",
"timeout": 60,
"custom_headers": {},
"custom_extra_body": {},
},
"Whisper(API)": {
@@ -1321,6 +1356,12 @@ CONFIG_METADATA_2 = {
"render_type": "checkbox",
"hint": "模型支持的模态。如所填写的模型不支持图像,请取消勾选图像。",
},
"custom_headers": {
"description": "自定义添加请求头",
"type": "dict",
"items": {},
"hint": "此处添加的键值对将被合并到 OpenAI SDK 的 default_headers 中,用于自定义 HTTP 请求头。值必须为字符串。",
},
"custom_extra_body": {
"description": "自定义请求体参数",
"type": "dict",
@@ -1970,8 +2011,8 @@ CONFIG_METADATA_2 = {
"show_tool_use_status": {
"type": "bool",
},
"streaming_segmented": {
"type": "bool",
"unsupported_streaming_strategy": {
"type": "string",
},
"max_agent_step": {
"description": "工具调用轮数上限",
@@ -2106,6 +2147,7 @@ CONFIG_METADATA_2 = {
"kb_names": {"type": "list", "items": {"type": "string"}},
"kb_fusion_top_k": {"type": "int", "default": 20},
"kb_final_top_k": {"type": "int", "default": 5},
"kb_agentic_mode": {"type": "bool"},
},
},
}
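The new `custom_headers` / `custom_extra_body` keys added throughout the defaults above let each provider entry inject HTTP headers and extra request-body fields. A sketch of what a configured entry might look like; the values are illustrative, not defaults:

```python
# Hypothetical provider entry illustrating the two new keys.
provider_entry = {
    "id": "openai_default",
    "type": "openai_chat_completion",
    "key": ["sk-..."],
    "api_base": "https://api.openai.com/v1",
    "model_config": {"model": "gpt-4o-mini", "temperature": 0.4},
    # Merged into the OpenAI SDK's default_headers; values must be strings.
    "custom_headers": {"X-Org-Trace": "astrbot"},
    # Extra fields merged into the request body.
    "custom_extra_body": {"user": "astrbot-session"},
}
print(provider_entry["custom_headers"])
```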
@@ -2201,6 +2243,11 @@ CONFIG_METADATA_3 = {
"type": "int",
"hint": "从知识库中检索到的结果数量,越大可能获得越多相关信息,但也可能引入噪音。建议根据实际需求调整",
},
"kb_agentic_mode": {
"description": "Agentic 知识库检索",
"type": "bool",
"hint": "启用后,知识库检索将作为 LLM Tool由模型自主决定何时调用知识库进行查询。需要模型支持函数调用能力。",
},
},
},
"websearch": {
@@ -2276,9 +2323,15 @@ CONFIG_METADATA_3 = {
"description": "流式回复",
"type": "bool",
},
"provider_settings.streaming_segmented": {
"description": "不支持流式回复的平台采取分段输出",
"type": "bool",
"provider_settings.unsupported_streaming_strategy": {
"description": "不支持流式回复的平台",
"type": "string",
"options": ["realtime_segmenting", "turn_off"],
"hint": "选择在不支持流式回复的平台上的处理方式。实时分段回复会在系统接收流式响应检测到诸如标点符号等分段点时,立即发送当前已接收的内容",
"labels": ["实时分段回复", "关闭流式回复"],
"condition": {
"provider_settings.streaming_response": True,
},
},
"provider_settings.max_context_length": {
"description": "最多携带对话轮数",

View File

@@ -22,6 +22,7 @@ from astrbot.core.config.default import VERSION
from astrbot.core.conversation_mgr import ConversationManager
from astrbot.core.db import BaseDatabase
from astrbot.core.db.migration.migra_45_to_46 import migrate_45_to_46
from astrbot.core.db.migration.migra_webchat_session import migrate_webchat_session
from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager
from astrbot.core.persona_mgr import PersonaManager
from astrbot.core.pipeline.scheduler import PipelineContext, PipelineScheduler
@@ -103,6 +104,13 @@ class AstrBotCoreLifecycle:
logger.error(f"Migration from version 4.5 to 4.6 failed: {e!s}")
logger.error(traceback.format_exc())
# migration for webchat session
try:
await migrate_webchat_session(self.db)
except Exception as e:
logger.error(f"Migration for webchat session failed: {e!s}")
logger.error(traceback.format_exc())
# 初始化事件队列
self.event_queue = Queue()

View File

@@ -13,6 +13,7 @@ from astrbot.core.db.po import (
ConversationV2,
Persona,
PlatformMessageHistory,
PlatformSession,
PlatformStat,
Preference,
Stats,
@@ -183,7 +184,7 @@ class BaseDatabase(abc.ABC):
user_id: str,
offset_sec: int = 86400,
) -> None:
"""Delete platform message history records older than the specified offset."""
"""Delete platform message history records newer than the specified offset."""
...
@abc.abstractmethod
@@ -313,3 +314,51 @@ class BaseDatabase(abc.ABC):
) -> tuple[list[dict], int]:
"""Get paginated session conversations with joined conversation and persona details, support search and platform filter."""
...
# ====
# Platform Session Management
# ====
@abc.abstractmethod
async def create_platform_session(
self,
creator: str,
platform_id: str = "webchat",
session_id: str | None = None,
display_name: str | None = None,
is_group: int = 0,
) -> PlatformSession:
"""Create a new Platform session."""
...
@abc.abstractmethod
async def get_platform_session_by_id(
self, session_id: str
) -> PlatformSession | None:
"""Get a Platform session by its ID."""
...
@abc.abstractmethod
async def get_platform_sessions_by_creator(
self,
creator: str,
platform_id: str | None = None,
page: int = 1,
page_size: int = 20,
) -> list[PlatformSession]:
"""Get all Platform sessions for a specific creator (username) and optionally platform."""
...
@abc.abstractmethod
async def update_platform_session(
self,
session_id: str,
display_name: str | None = None,
) -> None:
"""Update a Platform session's updated_at timestamp and optionally display_name."""
...
@abc.abstractmethod
async def delete_platform_session(self, session_id: str) -> None:
"""Delete a Platform session by its ID."""
...
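Taken together, the new abstract methods form a small CRUD surface for platform sessions. A hedged usage sketch, assuming `db` is a concrete `BaseDatabase` implementation such as `SQLiteDatabase` (only the method names and fields from the diffs above are relied on):

```python
async def demo_session_crud(db) -> None:
    # Create a webchat session; session_id is generated when omitted.
    session = await db.create_platform_session(
        creator="alice",
        platform_id="webchat",
        display_name="My first chat",
    )

    # Look it up again and rename it (this also bumps updated_at).
    found = await db.get_platform_session_by_id(session.session_id)
    assert found is not None
    await db.update_platform_session(session.session_id, display_name="Renamed")

    # Page through alice's sessions, newest first.
    page = await db.get_platform_sessions_by_creator("alice", platform_id="webchat")
    print([s.display_name for s in page])

    await db.delete_platform_session(session.session_id)
```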

View File

@@ -0,0 +1,131 @@
"""Migration script for WebChat sessions.
This migration creates PlatformSession from existing platform_message_history records.
Changes:
- Creates platform_sessions table
- Adds platform_id field (default: 'webchat')
- Adds display_name field
- Session_id format: {platform_id}_{uuid}
"""
from sqlalchemy import func, select
from sqlmodel import col
from astrbot.api import logger, sp
from astrbot.core.db import BaseDatabase
from astrbot.core.db.po import ConversationV2, PlatformMessageHistory, PlatformSession
async def migrate_webchat_session(db_helper: BaseDatabase):
"""Create PlatformSession records from platform_message_history.
This migration extracts all unique user_ids from platform_message_history
where platform_id='webchat' and creates corresponding PlatformSession records.
"""
# Check whether the migration has already been done
migration_done = await db_helper.get_preference(
"global", "global", "migration_done_webchat_session"
)
if migration_done:
return
logger.info("开始执行数据库迁移WebChat 会话迁移)...")
try:
async with db_helper.get_db() as session:
# Create PlatformSession records from platform_message_history
query = (
select(
col(PlatformMessageHistory.user_id),
col(PlatformMessageHistory.sender_name),
func.min(PlatformMessageHistory.created_at).label("earliest"),
func.max(PlatformMessageHistory.updated_at).label("latest"),
)
.where(col(PlatformMessageHistory.platform_id) == "webchat")
.where(col(PlatformMessageHistory.sender_id) == "astrbot")
.group_by(col(PlatformMessageHistory.user_id))
)
result = await session.execute(query)
webchat_users = result.all()
if not webchat_users:
logger.info("没有找到需要迁移的 WebChat 数据")
await sp.put_async(
"global", "global", "migration_done_webchat_session", True
)
return
logger.info(f"找到 {len(webchat_users)} 个 WebChat 会话需要迁移")
# Check for already-existing sessions
existing_query = select(col(PlatformSession.session_id))
existing_result = await session.execute(existing_query)
existing_session_ids = {row[0] for row in existing_result.fetchall()}
# Look up titles in the Conversations table (used to set display_name)
# For each user_id, the corresponding conversation user_id has the form: webchat:FriendMessage:webchat!astrbot!{user_id}
user_ids_to_query = [
f"webchat:FriendMessage:webchat!astrbot!{user_id}"
for user_id, _, _, _ in webchat_users
]
conv_query = select(
col(ConversationV2.user_id), col(ConversationV2.title)
).where(col(ConversationV2.user_id).in_(user_ids_to_query))
conv_result = await session.execute(conv_query)
# Build a user_id -> title mapping
title_map = {
user_id.replace("webchat:FriendMessage:webchat!astrbot!", ""): title
for user_id, title in conv_result.fetchall()
}
# Create PlatformSession records in bulk
sessions_to_add = []
skipped_count = 0
for user_id, sender_name, created_at, updated_at in webchat_users:
# user_id is the webchat_conv_id (session_id)
session_id = user_id
# sender_name is usually the username, but may be None
creator = sender_name if sender_name else "guest"
# Skip if this session already exists
if session_id in existing_session_ids:
logger.debug(f"会话 {session_id} 已存在,跳过")
skipped_count += 1
continue
# Get display_name from the Conversations table
display_name = title_map.get(user_id)
# Create a new PlatformSession, preserving the original timestamps
new_session = PlatformSession(
session_id=session_id,
platform_id="webchat",
creator=creator,
is_group=0,
created_at=created_at,
updated_at=updated_at,
display_name=display_name,
)
sessions_to_add.append(new_session)
# Bulk insert
if sessions_to_add:
session.add_all(sessions_to_add)
await session.commit()
logger.info(
f"WebChat 会话迁移完成!成功迁移: {len(sessions_to_add)}, 跳过: {skipped_count}",
)
else:
logger.info("没有新会话需要迁移")
# Mark the migration as done
await sp.put_async("global", "global", "migration_done_webchat_session", True)
except Exception as e:
logger.error(f"迁移过程中发生错误: {e}", exc_info=True)
raise

View File

@@ -3,13 +3,7 @@ from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import TypedDict
from sqlmodel import (
JSON,
Field,
SQLModel,
Text,
UniqueConstraint,
)
from sqlmodel import JSON, Field, SQLModel, Text, UniqueConstraint
class PlatformStat(SQLModel, table=True):
@@ -18,7 +12,7 @@ class PlatformStat(SQLModel, table=True):
Note: In astrbot v4, we moved `platform` table to here.
"""
__tablename__ = "platform_stats"
__tablename__ = "platform_stats" # type: ignore
id: int = Field(primary_key=True, sa_column_kwargs={"autoincrement": True})
timestamp: datetime = Field(nullable=False)
@@ -37,7 +31,7 @@ class PlatformStat(SQLModel, table=True):
class ConversationV2(SQLModel, table=True):
__tablename__ = "conversations"
__tablename__ = "conversations" # type: ignore
inner_conversation_id: int = Field(
primary_key=True,
@@ -74,7 +68,7 @@ class Persona(SQLModel, table=True):
It can be used to customize the behavior of LLMs.
"""
__tablename__ = "personas"
__tablename__ = "personas" # type: ignore
id: int | None = Field(
primary_key=True,
@@ -104,7 +98,7 @@ class Persona(SQLModel, table=True):
class Preference(SQLModel, table=True):
"""This class represents preferences for bots."""
__tablename__ = "preferences"
__tablename__ = "preferences" # type: ignore
id: int | None = Field(
default=None,
@@ -140,7 +134,7 @@ class PlatformMessageHistory(SQLModel, table=True):
or platform-specific messages.
"""
__tablename__ = "platform_message_history"
__tablename__ = "platform_message_history" # type: ignore
id: int | None = Field(
primary_key=True,
@@ -161,13 +155,55 @@ class PlatformMessageHistory(SQLModel, table=True):
)
class PlatformSession(SQLModel, table=True):
"""Platform session table for managing user sessions across different platforms.
A session represents a chat window for a specific user on a specific platform.
Each session can have multiple conversations (对话) associated with it.
"""
__tablename__ = "platform_sessions" # type: ignore
inner_id: int | None = Field(
primary_key=True,
sa_column_kwargs={"autoincrement": True},
default=None,
)
session_id: str = Field(
max_length=100,
nullable=False,
unique=True,
default_factory=lambda: f"webchat_{uuid.uuid4()}",
)
platform_id: str = Field(default="webchat", nullable=False)
"""Platform identifier (e.g., 'webchat', 'qq', 'discord')"""
creator: str = Field(nullable=False)
"""Username of the session creator"""
display_name: str | None = Field(default=None, max_length=255)
"""Display name for the session"""
is_group: int = Field(default=0, nullable=False)
"""0 for private chat, 1 for group chat (not implemented yet)"""
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
updated_at: datetime = Field(
default_factory=lambda: datetime.now(timezone.utc),
sa_column_kwargs={"onupdate": datetime.now(timezone.utc)},
)
__table_args__ = (
UniqueConstraint(
"session_id",
name="uix_platform_session_id",
),
)
class Attachment(SQLModel, table=True):
"""This class represents attachments for messages in AstrBot.
Attachments can be images, files, or other media types.
"""
__tablename__ = "attachments"
__tablename__ = "attachments" # type: ignore
inner_attachment_id: int | None = Field(
primary_key=True,

View File

@@ -1,7 +1,7 @@
import asyncio
import threading
import typing as T
from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel import col, delete, desc, func, or_, select, text, update
@@ -12,6 +12,7 @@ from astrbot.core.db.po import (
ConversationV2,
Persona,
PlatformMessageHistory,
PlatformSession,
PlatformStat,
Preference,
SQLModel,
@@ -412,7 +413,7 @@ class SQLiteDatabase(BaseDatabase):
user_id,
offset_sec=86400,
):
"""Delete platform message history records older than the specified offset."""
"""Delete platform message history records newer than the specified offset."""
async with self.get_db() as session:
session: AsyncSession
async with session.begin():
@@ -422,7 +423,7 @@ class SQLiteDatabase(BaseDatabase):
delete(PlatformMessageHistory).where(
col(PlatformMessageHistory.platform_id) == platform_id,
col(PlatformMessageHistory.user_id) == user_id,
col(PlatformMessageHistory.created_at) < cutoff_time,
col(PlatformMessageHistory.created_at) >= cutoff_time,
),
)
@@ -709,3 +710,101 @@ class SQLiteDatabase(BaseDatabase):
t.start()
t.join()
return result
# ====
# Platform Session Management
# ====
async def create_platform_session(
self,
creator: str,
platform_id: str = "webchat",
session_id: str | None = None,
display_name: str | None = None,
is_group: int = 0,
) -> PlatformSession:
"""Create a new Platform session."""
kwargs = {}
if session_id:
kwargs["session_id"] = session_id
async with self.get_db() as session:
session: AsyncSession
async with session.begin():
new_session = PlatformSession(
creator=creator,
platform_id=platform_id,
display_name=display_name,
is_group=is_group,
**kwargs,
)
session.add(new_session)
await session.flush()
await session.refresh(new_session)
return new_session
async def get_platform_session_by_id(
self, session_id: str
) -> PlatformSession | None:
"""Get a Platform session by its ID."""
async with self.get_db() as session:
session: AsyncSession
query = select(PlatformSession).where(
PlatformSession.session_id == session_id,
)
result = await session.execute(query)
return result.scalar_one_or_none()
async def get_platform_sessions_by_creator(
self,
creator: str,
platform_id: str | None = None,
page: int = 1,
page_size: int = 20,
) -> list[PlatformSession]:
"""Get all Platform sessions for a specific creator (username) and optionally platform."""
async with self.get_db() as session:
session: AsyncSession
offset = (page - 1) * page_size
query = select(PlatformSession).where(PlatformSession.creator == creator)
if platform_id:
query = query.where(PlatformSession.platform_id == platform_id)
query = (
query.order_by(desc(PlatformSession.updated_at))
.offset(offset)
.limit(page_size)
)
result = await session.execute(query)
return list(result.scalars().all())
async def update_platform_session(
self,
session_id: str,
display_name: str | None = None,
) -> None:
"""Update a Platform session's updated_at timestamp and optionally display_name."""
async with self.get_db() as session:
session: AsyncSession
async with session.begin():
values: dict[str, T.Any] = {"updated_at": datetime.now(timezone.utc)}
if display_name is not None:
values["display_name"] = display_name
await session.execute(
update(PlatformSession)
.where(col(PlatformSession.session_id) == session_id)
.values(**values),
)
async def delete_platform_session(self, session_id: str) -> None:
"""Delete a Platform session by its ID."""
async with self.get_db() as session:
session: AsyncSession
async with session.begin():
await session.execute(
delete(PlatformSession).where(
col(PlatformSession.session_id == session_id),
),
)
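One subtlety with sqlmodel's `col()` (fixed above in `update_platform_session`): it must wrap the column itself, not a finished comparison, since `col()` validates that its argument is a column and, to my understanding, rejects anything else at runtime. A minimal sketch of the correct form:

```python
from sqlmodel import Field, SQLModel, col, select


class Session(SQLModel, table=True):
    id: int | None = Field(default=None, primary_key=True)
    session_id: str


# Correct: wrap the column, then compare. Passing a comparison such as
# col(Session.session_id == "x") fails col()'s column check instead.
stmt = select(Session).where(col(Session.session_id) == "webchat_123")
print(stmt)
```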

View File

@@ -0,0 +1,9 @@
from __future__ import annotations
class AstrBotError(Exception):
"""Base exception for all AstrBot errors."""
class ProviderNotFoundError(AstrBotError):
"""Raised when a specified provider is not found."""

View File

@@ -1,4 +1,7 @@
import asyncio
import json
import re
import time
import uuid
from pathlib import Path
@@ -8,12 +11,98 @@ from astrbot.core import logger
from astrbot.core.db.vec_db.base import BaseVecDB
from astrbot.core.db.vec_db.faiss_impl.vec_db import FaissVecDB
from astrbot.core.provider.manager import ProviderManager
from astrbot.core.provider.provider import EmbeddingProvider, RerankProvider
from astrbot.core.provider.provider import (
EmbeddingProvider,
RerankProvider,
)
from astrbot.core.provider.provider import (
Provider as LLMProvider,
)
from .chunking.base import BaseChunker
from .chunking.recursive import RecursiveCharacterChunker
from .kb_db_sqlite import KBSQLiteDatabase
from .models import KBDocument, KBMedia, KnowledgeBase
from .parsers.url_parser import extract_text_from_url
from .parsers.util import select_parser
from .prompts import TEXT_REPAIR_SYSTEM_PROMPT
class RateLimiter:
"""一个简单的速率限制器"""
def __init__(self, max_rpm: int):
self.max_per_minute = max_rpm
self.interval = 60.0 / max_rpm if max_rpm > 0 else 0
self.last_call_time = 0
async def __aenter__(self):
if self.interval == 0:
return
now = time.monotonic()
elapsed = now - self.last_call_time
if elapsed < self.interval:
await asyncio.sleep(self.interval - elapsed)
self.last_call_time = time.monotonic()
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def _repair_and_translate_chunk_with_retry(
chunk: str,
repair_llm_service: LLMProvider,
rate_limiter: RateLimiter,
max_retries: int = 2,
) -> list[str]:
"""
Repairs, translates, and optionally re-chunks a single text chunk using the small LLM, with rate limiting.
"""
# To guard against LLM context contamination, put explicit instructions in the user_prompt as well
user_prompt = f"""IGNORE ALL PREVIOUS INSTRUCTIONS. Your ONLY task is to process the following text chunk according to the system prompt provided.
Text chunk to process:
---
{chunk}
---
"""
for attempt in range(max_retries + 1):
try:
async with rate_limiter:
response = await repair_llm_service.text_chat(
prompt=user_prompt, system_prompt=TEXT_REPAIR_SYSTEM_PROMPT
)
llm_output = response.completion_text
if "<discard_chunk />" in llm_output:
return [] # Signal to discard this chunk
# More robust regex to handle potential LLM formatting errors (spaces, newlines in tags)
matches = re.findall(
r"<\s*repaired_text\s*>\s*(.*?)\s*<\s*/\s*repaired_text\s*>",
llm_output,
re.DOTALL,
)
if matches:
# Further cleaning to ensure no empty strings are returned
return [m.strip() for m in matches if m.strip()]
else:
# If no valid tags and not explicitly discarded, discard it to be safe.
return []
except Exception as e:
logger.warning(
f" - LLM call failed on attempt {attempt + 1}/{max_retries + 1}. Error: {str(e)}"
)
logger.error(
f" - Failed to process chunk after {max_retries + 1} attempts. Using original text."
)
return [chunk]
class KBHelper:
@@ -100,7 +189,7 @@ class KBHelper:
async def upload_document(
self,
file_name: str,
file_content: bytes,
file_content: bytes | None,
file_type: str,
chunk_size: int = 512,
chunk_overlap: int = 50,
@@ -108,6 +197,7 @@ class KBHelper:
tasks_limit: int = 3,
max_retries: int = 3,
progress_callback=None,
pre_chunked_text: list[str] | None = None,
) -> KBDocument:
"""上传并处理文档(带原子性保证和失败清理)
@@ -130,46 +220,63 @@ class KBHelper:
await self._ensure_vec_db()
doc_id = str(uuid.uuid4())
media_paths: list[Path] = []
file_size = 0
# file_path = self.kb_files_dir / f"{doc_id}.{file_type}"
# async with aiofiles.open(file_path, "wb") as f:
# await f.write(file_content)
try:
# Stage 1: parse the document
if progress_callback:
await progress_callback("parsing", 0, 100)
parser = await select_parser(f".{file_type}")
parse_result = await parser.parse(file_content, file_name)
text_content = parse_result.text
media_items = parse_result.media
if progress_callback:
await progress_callback("parsing", 100, 100)
# Save media files
chunks_text = []
saved_media = []
for media_item in media_items:
media = await self._save_media(
doc_id=doc_id,
media_type=media_item.media_type,
file_name=media_item.file_name,
content=media_item.content,
mime_type=media_item.mime_type,
if pre_chunked_text is not None:
# If pre-chunked text was provided, use it directly
chunks_text = pre_chunked_text
file_size = sum(len(chunk) for chunk in chunks_text)
logger.info(f"使用预分块文本进行上传,共 {len(chunks_text)} 个块。")
else:
# Otherwise, run the standard parsing and chunking flow
if file_content is None:
raise ValueError(
"当未提供 pre_chunked_text 时file_content 不能为空。"
)
file_size = len(file_content)
# Stage 1: parse the document
if progress_callback:
await progress_callback("parsing", 0, 100)
parser = await select_parser(f".{file_type}")
parse_result = await parser.parse(file_content, file_name)
text_content = parse_result.text
media_items = parse_result.media
if progress_callback:
await progress_callback("parsing", 100, 100)
# Save media files
for media_item in media_items:
media = await self._save_media(
doc_id=doc_id,
media_type=media_item.media_type,
file_name=media_item.file_name,
content=media_item.content,
mime_type=media_item.mime_type,
)
saved_media.append(media)
media_paths.append(Path(media.file_path))
# Stage 2: chunking
if progress_callback:
await progress_callback("chunking", 0, 100)
chunks_text = await self.chunker.chunk(
text_content,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
saved_media.append(media)
media_paths.append(Path(media.file_path))
# Stage 2: chunking
if progress_callback:
await progress_callback("chunking", 0, 100)
chunks_text = await self.chunker.chunk(
text_content,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
contents = []
metadatas = []
for idx, chunk_text in enumerate(chunks_text):
@@ -205,7 +312,7 @@ class KBHelper:
kb_id=self.kb.kb_id,
doc_name=file_name,
file_type=file_type,
file_size=len(file_content),
file_size=file_size,
# file_path=str(file_path),
file_path="",
chunk_count=len(chunks_text),
@@ -359,3 +466,177 @@ class KBHelper:
)
return media
async def upload_from_url(
self,
url: str,
chunk_size: int = 512,
chunk_overlap: int = 50,
batch_size: int = 32,
tasks_limit: int = 3,
max_retries: int = 3,
progress_callback=None,
enable_cleaning: bool = False,
cleaning_provider_id: str | None = None,
) -> KBDocument:
"""从 URL 上传并处理文档(带原子性保证和失败清理)
Args:
url: 要提取内容的网页 URL
chunk_size: 文本块大小
chunk_overlap: 文本块重叠大小
batch_size: 批处理大小
tasks_limit: 并发任务限制
max_retries: 最大重试次数
progress_callback: 进度回调函数,接收参数 (stage, current, total)
- stage: 当前阶段 ('extracting', 'cleaning', 'parsing', 'chunking', 'embedding')
- current: 当前进度
- total: 总数
Returns:
KBDocument: 上传的文档对象
Raises:
ValueError: 如果 URL 为空或无法提取内容
IOError: 如果网络请求失败
"""
# Get the Tavily API keys
config = self.prov_mgr.acm.default_conf
tavily_keys = config.get("provider_settings", {}).get(
"websearch_tavily_key", []
)
if not tavily_keys:
raise ValueError(
"Error: Tavily API key is not configured in provider_settings."
)
# Stage 1: extract content from the URL
if progress_callback:
await progress_callback("extracting", 0, 100)
try:
text_content = await extract_text_from_url(url, tavily_keys)
except Exception as e:
logger.error(f"Failed to extract content from URL {url}: {e}")
raise OSError(f"Failed to extract content from URL {url}: {e}") from e
if not text_content:
raise ValueError(f"No content extracted from URL: {url}")
if progress_callback:
await progress_callback("extracting", 100, 100)
# Stage 2: (optionally) clean the content and chunk it
final_chunks = await self._clean_and_rechunk_content(
content=text_content,
url=url,
progress_callback=progress_callback,
enable_cleaning=enable_cleaning,
cleaning_provider_id=cleaning_provider_id,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
if enable_cleaning and not final_chunks:
raise ValueError(
"内容清洗后未提取到有效文本。请尝试关闭内容清洗功能或更换更高性能的LLM模型后重试。"
)
# Build a synthetic file name
file_name = url.split("/")[-1] or f"document_from_{url}"
if not Path(file_name).suffix:
file_name += ".url"
# Reuse the existing upload_document method, passing the pre-chunked text
return await self.upload_document(
file_name=file_name,
file_content=None,
file_type="url", # 使用 'url' 作为特殊文件类型
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
batch_size=batch_size,
tasks_limit=tasks_limit,
max_retries=max_retries,
progress_callback=progress_callback,
pre_chunked_text=final_chunks,
)
async def _clean_and_rechunk_content(
self,
content: str,
url: str,
progress_callback=None,
enable_cleaning: bool = False,
cleaning_provider_id: str | None = None,
repair_max_rpm: int = 60,
chunk_size: int = 512,
chunk_overlap: int = 50,
) -> list[str]:
"""
Clean, repair, translate, and re-chunk content fetched from a URL.
"""
if not enable_cleaning:
# Cleaning disabled: chunk with the parameters passed in from the frontend
logger.info(
f"Content cleaning is disabled; chunking with the given parameters: chunk_size={chunk_size}, chunk_overlap={chunk_overlap}"
)
return await self.chunker.chunk(
content, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
if not cleaning_provider_id:
logger.warning(
"启用了内容清洗,但未提供 cleaning_provider_id跳过清洗并使用默认分块。"
)
return await self.chunker.chunk(content)
if progress_callback:
await progress_callback("cleaning", 0, 100)
try:
# Resolve the requested LLM provider
llm_provider = await self.prov_mgr.get_provider_by_id(cleaning_provider_id)
if not llm_provider or not isinstance(llm_provider, LLMProvider):
raise ValueError(
f"无法找到 ID 为 {cleaning_provider_id} 的 LLM Provider 或类型不正确"
)
# Initial chunking
# Tuned separators: split on paragraphs first for higher-quality chunks
text_splitter = RecursiveCharacterChunker(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=["\n\n", "\n", " "], # 优先使用段落分隔符
)
initial_chunks = await text_splitter.chunk(content)
logger.info(f"初步分块完成,生成 {len(initial_chunks)} 个块用于修复。")
# 并发处理所有块
rate_limiter = RateLimiter(repair_max_rpm)
tasks = [
_repair_and_translate_chunk_with_retry(
chunk, llm_provider, rate_limiter
)
for chunk in initial_chunks
]
repaired_results = await asyncio.gather(*tasks, return_exceptions=True)
final_chunks = []
for i, result in enumerate(repaired_results):
if isinstance(result, Exception):
logger.warning(f"{i} 处理异常: {str(result)}. 回退到原始块。")
final_chunks.append(initial_chunks[i])
elif isinstance(result, list):
final_chunks.extend(result)
logger.info(
f"文本修复完成: {len(initial_chunks)} 个原始块 -> {len(final_chunks)} 个最终块。"
)
if progress_callback:
await progress_callback("cleaning", 100, 100)
return final_chunks
except Exception as e:
logger.error(f"使用 Provider '{cleaning_provider_id}' 清洗内容失败: {e}")
# 清洗失败,返回默认分块结果,保证流程不中断
return await self.chunker.chunk(content)
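The try branch above leans on a salvage pattern: gather all repair tasks with return_exceptions=True, then fall back to the original chunk whenever a task failed. A minimal sketch of that pattern in isolation, with process_one standing in for _repair_and_translate_chunk_with_retry (an assumption; the real helper also rate-limits and retries):

import asyncio

async def repair_all_with_fallback(chunks: list[str], process_one) -> list[str]:
    results = await asyncio.gather(
        *(process_one(c) for c in chunks), return_exceptions=True
    )
    final: list[str] = []
    for original, result in zip(chunks, results):
        if isinstance(result, Exception):
            final.append(original)  # keep the raw chunk when repair fails
        elif isinstance(result, list):
            final.extend(result)  # one input chunk may split into several
    return final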

View File

@@ -8,7 +8,7 @@ from astrbot.core.provider.manager import ProviderManager
from .chunking.recursive import RecursiveCharacterChunker
from .kb_db_sqlite import KBSQLiteDatabase
from .kb_helper import KBHelper
from .models import KnowledgeBase
from .models import KBDocument, KnowledgeBase
from .retrieval.manager import RetrievalManager, RetrievalResult
from .retrieval.rank_fusion import RankFusion
from .retrieval.sparse_retriever import SparseRetriever
@@ -284,3 +284,47 @@ class KnowledgeBaseManager:
await self.kb_db.close()
except Exception as e:
logger.error(f"关闭知识库元数据数据库失败: {e}")
async def upload_from_url(
self,
kb_id: str,
url: str,
chunk_size: int = 512,
chunk_overlap: int = 50,
batch_size: int = 32,
tasks_limit: int = 3,
max_retries: int = 3,
progress_callback=None,
) -> KBDocument:
"""从 URL 上传文档到指定的知识库
Args:
kb_id: 知识库 ID
url: 要提取内容的网页 URL
chunk_size: 文本块大小
chunk_overlap: 文本块重叠大小
batch_size: 批处理大小
tasks_limit: 并发任务限制
max_retries: 最大重试次数
progress_callback: 进度回调函数
Returns:
KBDocument: 上传的文档对象
Raises:
ValueError: 如果知识库不存在或 URL 为空
IOError: 如果网络请求失败
"""
kb_helper = await self.get_kb(kb_id)
if not kb_helper:
raise ValueError(f"Knowledge base with id {kb_id} not found.")
return await kb_helper.upload_from_url(
url=url,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
batch_size=batch_size,
tasks_limit=tasks_limit,
max_retries=max_retries,
progress_callback=progress_callback,
)
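A progress_callback is any coroutine accepting (stage, current, total). A usage sketch, assuming kb_manager is an initialized KnowledgeBaseManager and "my-kb" is a placeholder ID; the printed KBDocument fields follow the model shown earlier in this diff:

async def print_progress(stage: str, current: int, total: int) -> None:
    print(f"[{stage}] {current}/{total}")

async def demo(kb_manager) -> None:
    doc = await kb_manager.upload_from_url(
        kb_id="my-kb",
        url="https://example.com/article",
        progress_callback=print_progress,
    )
    print(doc.doc_name, doc.chunk_count)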

View File

@@ -0,0 +1,103 @@
import asyncio
import aiohttp
class URLExtractor:
"""URL 内容提取器,封装了 Tavily API 调用和密钥管理"""
def __init__(self, tavily_keys: list[str]):
"""
Initialize the URL extractor
Args:
tavily_keys: list of Tavily API keys
"""
if not tavily_keys:
raise ValueError("Error: Tavily API keys are not configured.")
self.tavily_keys = tavily_keys
self.tavily_key_index = 0
self.tavily_key_lock = asyncio.Lock()
async def _get_tavily_key(self) -> str:
"""并发安全的从列表中获取并轮换Tavily API密钥。"""
async with self.tavily_key_lock:
key = self.tavily_keys[self.tavily_key_index]
self.tavily_key_index = (self.tavily_key_index + 1) % len(self.tavily_keys)
return key
async def extract_text_from_url(self, url: str) -> str:
"""
Extract the main text content from a URL using the Tavily API.
This is a simplified version of the tavily_extract_web_page method in the
web_searcher plugin, designed for the knowledge base module without depending on AstrMessageEvent.
Args:
url: URL of the web page to extract content from
Returns:
The extracted text content
Raises:
ValueError: if the URL is empty or the API key is not configured
IOError: if the request fails or returns an error
"""
if not url:
raise ValueError("Error: url must be a non-empty string.")
tavily_key = await self._get_tavily_key()
api_url = "https://api.tavily.com/extract"
headers = {
"Authorization": f"Bearer {tavily_key}",
"Content-Type": "application/json",
}
payload = {
"urls": [url],
"extract_depth": "basic", # 使用基础提取深度
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.post(
api_url,
json=payload,
headers=headers,
timeout=30.0,  # longer timeout, since content extraction can take a while
) as response:
if response.status != 200:
reason = await response.text()
raise OSError(
f"Tavily web extraction failed: {reason}, status: {response.status}"
)
data = await response.json()
results = data.get("results", [])
if not results:
raise ValueError(f"No content extracted from URL: {url}")
# Return the content of the first result
return results[0].get("raw_content", "")
except aiohttp.ClientError as e:
raise OSError(f"Failed to fetch URL {url}: {e}") from e
except Exception as e:
raise OSError(f"Failed to extract content from URL {url}: {e}") from e
# A simple function interface, kept for backward compatibility
async def extract_text_from_url(url: str, tavily_keys: list[str]) -> str:
"""
A simple function interface for extracting text content from a URL
Args:
url: URL of the web page to extract content from
tavily_keys: list of Tavily API keys
Returns:
The extracted text content
"""
extractor = URLExtractor(tavily_keys)
return await extractor.extract_text_from_url(url)
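Usage sketch for the extractor; the key below is a placeholder, and with several keys _get_tavily_key rotates them round-robin across calls:

import asyncio

async def main() -> None:
    extractor = URLExtractor(tavily_keys=["tvly-PLACEHOLDER"])
    text = await extractor.extract_text_from_url("https://example.com/article")
    print(text[:200])

asyncio.run(main())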

View File

@@ -0,0 +1,65 @@
TEXT_REPAIR_SYSTEM_PROMPT = """You are a meticulous digital archivist. Your mission is to reconstruct a clean, readable article from raw, noisy text chunks.
**Core Task:**
1. **Analyze:** Examine the text chunk to separate "signal" (substantive information) from "noise" (UI elements, ads, navigation, footers).
2. **Process:** Clean and repair the signal. **Do not translate it.** Keep the original language.
**Crucial Rules:**
- **NEVER discard a chunk if it contains ANY valuable information.** Your primary duty is to salvage content.
- **If a chunk contains multiple distinct topics, split them.** Enclose each topic in its own `<repaired_text>` tag.
- Your output MUST be ONLY `<repaired_text>...</repaired_text>` tags or a single `<discard_chunk />` tag.
---
**Example 1: Chunk with Noise and Signal**
*Input Chunk:*
"Home | About | Products | **The Llama is a domesticated South American camelid.** | © 2025 ACME Corp."
*Your Thought Process:*
1. "Home | About | Products..." and "© 2025 ACME Corp." are noise.
2. "The Llama is a domesticated..." is the signal.
3. I must extract the signal and wrap it.
*Your Output:*
<repaired_text>
The Llama is a domesticated South American camelid.
</repaired_text>
---
**Example 2: Chunk with ONLY Noise**
*Input Chunk:*
"Next Page > | Subscribe to our newsletter | Follow us on X"
*Your Thought Process:*
1. This entire chunk is noise. There is no signal.
2. I must discard this.
*Your Output:*
<discard_chunk />
---
**Example 3: Chunk with Multiple Topics (Requires Splitting)**
*Input Chunk:*
"## Chapter 1: The Sun
The Sun is the star at the center of the Solar System.
## Chapter 2: The Moon
The Moon is Earth's only natural satellite."
*Your Thought Process:*
1. This chunk contains two distinct topics.
2. I must process them separately to maintain semantic integrity.
3. I will create two `<repaired_text>` blocks.
*Your Output:*
<repaired_text>
## Chapter 1: The Sun
The Sun is the star at the center of the Solar System.
</repaired_text>
<repaired_text>
## Chapter 2: The Moon
The Moon is Earth's only natural satellite.
</repaired_text>
"""

View File

@@ -3,7 +3,7 @@ from dataclasses import dataclass
from astrbot.core.config import AstrBotConfig
from astrbot.core.star import PluginManager
from .context_utils import call_event_hook, call_handler, call_local_llm_tool
from .context_utils import call_event_hook, call_handler
@dataclass
@@ -15,4 +15,3 @@ class PipelineContext:
astrbot_config_id: str
call_handler = call_handler
call_event_hook = call_event_hook
call_local_llm_tool = call_local_llm_tool

View File

@@ -3,8 +3,6 @@ import traceback
import typing as T
from astrbot import logger
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.message.message_event_result import CommandResult, MessageEventResult
from astrbot.core.platform.astr_message_event import AstrMessageEvent
from astrbot.core.star.star import star_map
@@ -107,66 +105,3 @@ async def call_event_hook(
return True
return event.is_stopped()
async def call_local_llm_tool(
context: ContextWrapper[AstrAgentContext],
handler: T.Callable[..., T.Awaitable[T.Any]],
method_name: str,
*args,
**kwargs,
) -> T.AsyncGenerator[T.Any, None]:
"""执行本地 LLM 工具的处理函数并处理其返回结果"""
ready_to_call = None # 一个协程或者异步生成器
trace_ = None
event = context.context.event
try:
if method_name == "run" or method_name == "decorator_handler":
ready_to_call = handler(event, *args, **kwargs)
elif method_name == "call":
ready_to_call = handler(context, *args, **kwargs)
else:
raise ValueError(f"未知的方法名: {method_name}")
except ValueError as e:
logger.error(f"调用本地 LLM 工具时出错: {e}", exc_info=True)
except TypeError:
logger.error("处理函数参数不匹配,请检查 handler 的定义。", exc_info=True)
except Exception as e:
trace_ = traceback.format_exc()
logger.error(f"调用本地 LLM 工具时出错: {e}\n{trace_}")
if not ready_to_call:
return
if inspect.isasyncgen(ready_to_call):
_has_yielded = False
try:
async for ret in ready_to_call:
# Step through the async generator; for each yielded ret, run the code below
# The return value may only be a MessageEventResult or None (no return value)
_has_yielded = True
if isinstance(ret, (MessageEventResult, CommandResult)):
# If the value is a MessageEventResult, set the result and continue
event.set_result(ret)
yield
else:
# If the value is None, continue without setting a result
# and proceed to the subsequent stages
yield ret
if not _has_yielded:
# The async generator never reached a yield branch
yield
except Exception as e:
logger.error(f"Previous Error: {trace_}")
raise e
elif inspect.iscoroutine(ready_to_call):
# A plain coroutine: just await it
ret = await ready_to_call
if isinstance(ret, (MessageEventResult, CommandResult)):
event.set_result(ret)
yield
else:
yield ret

View File

@@ -3,20 +3,10 @@
import asyncio
import copy
import json
import traceback
from collections.abc import AsyncGenerator
from typing import Any
from mcp.types import CallToolResult
from astrbot.core import logger
from astrbot.core.agent.handoff import HandoffTool
from astrbot.core.agent.hooks import BaseAgentRunHooks
from astrbot.core.agent.mcp_client import MCPTool
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
from astrbot.core.agent.tool import FunctionTool, ToolSet
from astrbot.core.agent.tool_executor import BaseFunctionToolExecutor
from astrbot.core.agent.tool import ToolSet
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.conversation_mgr import Conversation
from astrbot.core.message.components import Image
@@ -31,323 +21,18 @@ from astrbot.core.provider.entities import (
LLMResponse,
ProviderRequest,
)
from astrbot.core.provider.register import llm_tools
from astrbot.core.star.session_llm_manager import SessionServiceManager
from astrbot.core.star.star_handler import EventType, star_map
from astrbot.core.utils.metrics import Metric
from astrbot.core.utils.session_lock import session_lock_manager
from ...context import PipelineContext, call_event_hook, call_local_llm_tool
from ....astr_agent_context import AgentContextWrapper
from ....astr_agent_hooks import MAIN_AGENT_HOOKS
from ....astr_agent_run_util import AgentRunner, run_agent
from ....astr_agent_tool_exec import FunctionToolExecutor
from ...context import PipelineContext, call_event_hook
from ..stage import Stage
from ..utils import inject_kb_context
try:
import mcp
except (ModuleNotFoundError, ImportError):
logger.warning("警告: 缺少依赖库 'mcp',将无法使用 MCP 服务。")
AgentContextWrapper = ContextWrapper[AstrAgentContext]
AgentRunner = ToolLoopAgentRunner[AstrAgentContext]
class FunctionToolExecutor(BaseFunctionToolExecutor[AstrAgentContext]):
@classmethod
async def execute(cls, tool, run_context, **tool_args):
"""执行函数调用。
Args:
event (AstrMessageEvent): 事件对象, 当 origin 为 local 时必须提供。
**kwargs: 函数调用的参数。
Returns:
AsyncGenerator[None | mcp.types.CallToolResult, None]
"""
if isinstance(tool, HandoffTool):
async for r in cls._execute_handoff(tool, run_context, **tool_args):
yield r
return
elif isinstance(tool, MCPTool):
async for r in cls._execute_mcp(tool, run_context, **tool_args):
yield r
return
else:
async for r in cls._execute_local(tool, run_context, **tool_args):
yield r
return
@classmethod
async def _execute_handoff(
cls,
tool: HandoffTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
input_ = tool_args.get("input", "agent")
agent_runner = AgentRunner()
# make toolset for the agent
tools = tool.agent.tools
if tools:
toolset = ToolSet()
for t in tools:
if isinstance(t, str):
_t = llm_tools.get_func(t)
if _t:
toolset.add_tool(_t)
elif isinstance(t, FunctionTool):
toolset.add_tool(t)
else:
toolset = None
request = ProviderRequest(
prompt=input_,
system_prompt=tool.description or "",
image_urls=[],  # do not pass the original agent's context for now
contexts=[],  # do not pass the original agent's context for now
func_tool=toolset,
)
astr_agent_ctx = AstrAgentContext(
provider=run_context.context.provider,
first_provider_request=run_context.context.first_provider_request,
curr_provider_request=request,
streaming=run_context.context.streaming,
event=run_context.context.event,
)
event = run_context.context.event
logger.debug(f"正在将任务委托给 Agent: {tool.agent.name}, input: {input_}")
await event.send(
MessageChain().message("✨ 正在将任务委托给 Agent: " + tool.agent.name),
)
await agent_runner.reset(
provider=run_context.context.provider,
request=request,
run_context=AgentContextWrapper(
context=astr_agent_ctx,
tool_call_timeout=run_context.tool_call_timeout,
),
tool_executor=FunctionToolExecutor(),
agent_hooks=tool.agent.run_hooks or BaseAgentRunHooks[AstrAgentContext](),
streaming=run_context.context.streaming,
)
async for _ in run_agent(agent_runner, 15, True):
pass
if agent_runner.done():
llm_response = agent_runner.get_final_llm_resp()
if not llm_response:
text_content = mcp.types.TextContent(
type="text",
text=f"error when deligate task to {tool.agent.name}",
)
yield mcp.types.CallToolResult(content=[text_content])
return
logger.debug(
f"Agent {tool.agent.name} 任务完成, response: {llm_response.completion_text}",
)
result = (
f"Agent {tool.agent.name} respond with: {llm_response.completion_text}\n\n"
"Note: If the result is error or need user provide more information, please provide more information to the agent(you can ask user for more information first)."
)
text_content = mcp.types.TextContent(
type="text",
text=result,
)
yield mcp.types.CallToolResult(content=[text_content])
else:
text_content = mcp.types.TextContent(
type="text",
text=f"error when deligate task to {tool.agent.name}",
)
yield mcp.types.CallToolResult(content=[text_content])
return
@classmethod
async def _execute_local(
cls,
tool: FunctionTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
event = run_context.context.event
if not event:
raise ValueError("Event must be provided for local function tools.")
is_override_call = False
for ty in type(tool).mro():
if "call" in ty.__dict__ and ty.__dict__["call"] is not FunctionTool.call:
logger.debug(f"Found call in: {ty}")
is_override_call = True
break
# Check whether the tool defines a run method
if not tool.handler and not hasattr(tool, "run") and not is_override_call:
raise ValueError("Tool must have a valid handler or override 'run' method.")
awaitable = None
method_name = ""
if tool.handler:
awaitable = tool.handler
method_name = "decorator_handler"
elif is_override_call:
awaitable = tool.call
method_name = "call"
elif hasattr(tool, "run"):
awaitable = getattr(tool, "run")
method_name = "run"
if awaitable is None:
raise ValueError("Tool must have a valid handler or override 'run' method.")
wrapper = call_local_llm_tool(
context=run_context,
handler=awaitable,
method_name=method_name,
**tool_args,
)
while True:
try:
resp = await asyncio.wait_for(
anext(wrapper),
timeout=run_context.tool_call_timeout,
)
if resp is not None:
if isinstance(resp, mcp.types.CallToolResult):
yield resp
else:
text_content = mcp.types.TextContent(
type="text",
text=str(resp),
)
yield mcp.types.CallToolResult(content=[text_content])
else:
# NOTE: the tool requested to send a message directly to the user here
# TODO: should we check whether event.get_result() is empty?
# If it is empty, no message was sent to the user and the return value is empty, so return a special TextContent such as "the tool returned no content"
if res := run_context.context.event.get_result():
if res.chain:
try:
await event.send(
MessageChain(
chain=res.chain,
type="tool_direct_result",
)
)
except Exception as e:
logger.error(
f"Tool 直接发送消息失败: {e}",
exc_info=True,
)
yield None
except asyncio.TimeoutError:
raise Exception(
f"tool {tool.name} execution timeout after {run_context.tool_call_timeout} seconds.",
)
except StopAsyncIteration:
break
@classmethod
async def _execute_mcp(
cls,
tool: FunctionTool,
run_context: ContextWrapper[AstrAgentContext],
**tool_args,
):
res = await tool.call(run_context, **tool_args)
if not res:
return
yield res
class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
async def on_agent_done(self, run_context, llm_response):
# Run the event hooks
await call_event_hook(
run_context.context.event,
EventType.OnLLMResponseEvent,
llm_response,
)
async def on_tool_end(
self,
run_context: ContextWrapper[AstrAgentContext],
tool: FunctionTool[Any],
tool_args: dict | None,
tool_result: CallToolResult | None,
):
run_context.context.event.clear_result()
MAIN_AGENT_HOOKS = MainAgentHooks()
async def run_agent(
agent_runner: AgentRunner,
max_step: int = 30,
show_tool_use: bool = True,
) -> AsyncGenerator[MessageChain, None]:
step_idx = 0
astr_event = agent_runner.run_context.context.event
while step_idx < max_step:
step_idx += 1
try:
async for resp in agent_runner.step():
if astr_event.is_stopped():
return
if resp.type == "tool_call_result":
msg_chain = resp.data["chain"]
if msg_chain.type == "tool_direct_result":
# tool_direct_result marks content an LLM tool wants sent directly to the user
resp.data["chain"].type = "tool_call_result"
await astr_event.send(resp.data["chain"])
continue
# Other cases are left unhandled for now
continue
elif resp.type == "tool_call":
if agent_runner.streaming:
# Marks a section break in the streaming response
yield MessageChain(chain=[], type="break")
if show_tool_use or astr_event.get_platform_name() == "webchat":
resp.data["chain"].type = "tool_call"
await astr_event.send(resp.data["chain"])
continue
if not agent_runner.streaming:
content_typ = (
ResultContentType.LLM_RESULT
if resp.type == "llm_result"
else ResultContentType.GENERAL_RESULT
)
astr_event.set_result(
MessageEventResult(
chain=resp.data["chain"].chain,
result_content_type=content_typ,
),
)
yield
astr_event.clear_result()
elif resp.type == "streaming_delta":
yield resp.data["chain"] # MessageChain
if agent_runner.done():
break
except Exception as e:
logger.error(traceback.format_exc())
err_msg = f"\n\nAstrBot 请求失败。\n错误类型: {type(e).__name__}\n错误信息: {e!s}\n\n请在控制台查看和分享错误详情。\n"
if agent_runner.streaming:
yield MessageChain().message(err_msg)
else:
astr_event.set_result(MessageEventResult().message(err_msg))
return
from ..utils import KNOWLEDGE_BASE_QUERY_TOOL, retrieve_knowledge_base
class LLMRequestSubStage(Stage):
@@ -363,11 +48,16 @@ class LLMRequestSubStage(Stage):
self.max_context_length - 1,
)
self.streaming_response: bool = settings["streaming_response"]
self.unsupported_streaming_strategy: str = settings[
"unsupported_streaming_strategy"
]
self.max_step: int = settings.get("max_agent_step", 30)
self.tool_call_timeout: int = settings.get("tool_call_timeout", 60)
if isinstance(self.max_step, bool): # workaround: #2622
self.max_step = 30
self.show_tool_use: bool = settings.get("show_tool_use_status", True)
self.show_reasoning = settings.get("display_reasoning_text", False)
self.kb_agentic_mode: bool = conf.get("kb_agentic_mode", False)
for bwp in self.bot_wake_prefixs:
if self.provider_wake_prefix.startswith(bwp):
@@ -406,109 +96,68 @@ class LLMRequestSubStage(Stage):
raise RuntimeError("无法创建新的对话。")
return conversation
async def process(
async def _apply_kb(
self,
event: AstrMessageEvent,
_nested: bool = False,
) -> None | AsyncGenerator[None, None]:
req: ProviderRequest | None = None
if not self.ctx.astrbot_config["provider_settings"]["enable"]:
logger.debug("未启用 LLM 能力,跳过处理。")
return
# Check the session-level LLM on/off state
if not SessionServiceManager.should_process_llm_request(event):
logger.debug(f"会话 {event.unified_msg_origin} 禁用了 LLM跳过处理。")
return
provider = self._select_provider(event)
if provider is None:
return
if not isinstance(provider, Provider):
logger.error(f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。")
return
if event.get_extra("provider_request"):
req = event.get_extra("provider_request")
assert isinstance(req, ProviderRequest), (
"provider_request 必须是 ProviderRequest 类型。"
)
if req.conversation:
req.contexts = json.loads(req.conversation.history)
else:
req = ProviderRequest(prompt="", image_urls=[])
if sel_model := event.get_extra("selected_model"):
req.model = sel_model
if self.provider_wake_prefix:
if not event.message_str.startswith(self.provider_wake_prefix):
req: ProviderRequest,
):
"""Apply knowledge base context to the provider request"""
if not self.kb_agentic_mode:
if req.prompt is None:
return
try:
kb_result = await retrieve_knowledge_base(
query=req.prompt,
umo=event.unified_msg_origin,
context=self.ctx.plugin_manager.context,
)
if not kb_result:
return
req.prompt = event.message_str[len(self.provider_wake_prefix) :]
# func_tool selection has been moved into the packages/astrbot plugin.
# req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
for comp in event.message_obj.message:
if isinstance(comp, Image):
image_path = await comp.convert_to_file_path()
req.image_urls.append(image_path)
if req.system_prompt is not None:
req.system_prompt += (
f"\n\n[Related Knowledge Base Results]:\n{kb_result}"
)
except Exception as e:
logger.error(f"Error occurred while retrieving knowledge base: {e}")
else:
if req.func_tool is None:
req.func_tool = ToolSet()
req.func_tool.add_tool(KNOWLEDGE_BASE_QUERY_TOOL)
conversation = await self._get_session_conv(event)
req.conversation = conversation
req.contexts = json.loads(conversation.history)
def _truncate_contexts(
self,
contexts: list[dict],
) -> list[dict]:
"""截断上下文列表,确保不超过最大长度"""
if self.max_context_length == -1:
return contexts
event.set_extra("provider_request", req)
if len(contexts) // 2 <= self.max_context_length:
return contexts
if not req.prompt and not req.image_urls:
return
truncated_contexts = contexts[
-(self.max_context_length - self.dequeue_context_length + 1) * 2 :
]
# Find the first index whose role is user, to keep the context well-formed
index = next(
(
i
for i, item in enumerate(truncated_contexts)
if item.get("role") == "user"
),
None,
)
if index is not None and index > 0:
truncated_contexts = truncated_contexts[index:]
# Apply the knowledge base
try:
await inject_kb_context(
umo=event.unified_msg_origin,
p_ctx=self.ctx,
req=req,
)
except Exception as e:
logger.error(f"调用知识库时遇到问题: {e}")
return truncated_contexts
# Run the pre-LLM-request event hooks.
if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
return
if isinstance(req.contexts, str):
req.contexts = json.loads(req.contexts)
# max context length
if (
self.max_context_length != -1  # -1 means unlimited
and len(req.contexts) // 2 > self.max_context_length
):
logger.debug("上下文长度超过限制,将截断。")
req.contexts = req.contexts[
-(self.max_context_length - self.dequeue_context_length + 1) * 2 :
]
# Find the first index whose role is user, to keep the context well-formed
index = next(
(
i
for i, item in enumerate(req.contexts)
if item.get("role") == "user"
),
None,
)
if index is not None and index > 0:
req.contexts = req.contexts[index:]
# session_id
if not req.session_id:
req.session_id = event.unified_msg_origin
# fix messages
req.contexts = self.fix_messages(req.contexts)
# check provider modalities
# If the provider does not support images/tool use but the request includes them, clear them. Image captioning and similar checks run before this point, so this is safe here.
def _modalities_fix(
self,
provider: Provider,
req: ProviderRequest,
):
"""检查提供商的模态能力,清理请求中的不支持内容"""
if req.image_urls:
provider_cfg = provider.provider_config.get("modalities", ["image"])
if "image" not in provider_cfg:
@@ -522,7 +171,13 @@ class LLMRequestSubStage(Stage):
f"用户设置提供商 {provider} 不支持工具使用,清空工具列表。",
)
req.func_tool = None
# Plugin availability settings
def _plugin_tool_fix(
self,
event: AstrMessageEvent,
req: ProviderRequest,
):
"""根据事件中的插件设置,过滤请求中的工具列表"""
if event.plugins_name is not None and req.func_tool:
new_tool_set = ToolSet()
for tool in req.func_tool.tools:
@@ -536,80 +191,6 @@ class LLMRequestSubStage(Stage):
new_tool_set.add_tool(tool)
req.func_tool = new_tool_set
# Back up req.contexts
backup_contexts = copy.deepcopy(req.contexts)
# run agent
agent_runner = AgentRunner()
logger.debug(
f"handle provider[id: {provider.provider_config['id']}] request: {req}",
)
astr_agent_ctx = AstrAgentContext(
provider=provider,
first_provider_request=req,
curr_provider_request=req,
streaming=self.streaming_response,
event=event,
)
await agent_runner.reset(
provider=provider,
request=req,
run_context=AgentContextWrapper(
context=astr_agent_ctx,
tool_call_timeout=self.tool_call_timeout,
),
tool_executor=FunctionToolExecutor(),
agent_hooks=MAIN_AGENT_HOOKS,
streaming=self.streaming_response,
)
if self.streaming_response:
# Streaming response
event.set_result(
MessageEventResult()
.set_result_content_type(ResultContentType.STREAMING_RESULT)
.set_async_stream(
run_agent(agent_runner, self.max_step, self.show_tool_use),
),
)
yield
if agent_runner.done():
if final_llm_resp := agent_runner.get_final_llm_resp():
if final_llm_resp.completion_text:
chain = (
MessageChain().message(final_llm_resp.completion_text).chain
)
elif final_llm_resp.result_chain:
chain = final_llm_resp.result_chain.chain
else:
chain = MessageChain().chain
event.set_result(
MessageEventResult(
chain=chain,
result_content_type=ResultContentType.STREAMING_FINISH,
),
)
else:
async for _ in run_agent(agent_runner, self.max_step, self.show_tool_use):
yield
# Restore the backed-up contexts
req.contexts = backup_contexts
await self._save_to_history(event, req, agent_runner.get_final_llm_resp())
# Handle the WebChat special case asynchronously
if event.get_platform_name() == "webchat":
asyncio.create_task(self._handle_webchat(event, req, provider))
asyncio.create_task(
Metric.upload(
llm_tick=1,
model_name=agent_runner.provider.get_model(),
provider_type=agent_runner.provider.meta().type,
),
)
async def _handle_webchat(
self,
event: AstrMessageEvent,
@@ -657,9 +238,6 @@ class LLMRequestSubStage(Stage):
),
)
if llm_resp and llm_resp.completion_text:
logger.debug(
f"WebChat 对话标题生成响应: {llm_resp.completion_text.strip()}",
)
title = llm_resp.completion_text.strip()
if not title or "<None>" in title:
return
@@ -687,6 +265,9 @@ class LLMRequestSubStage(Stage):
logger.debug("LLM 响应为空,不保存记录。")
return
if req.contexts is None:
req.contexts = []
# Conversation history
messages = copy.deepcopy(req.contexts)
# The user input for this round of conversation
@@ -706,7 +287,7 @@ class LLMRequestSubStage(Stage):
history=messages,
)
def fix_messages(self, messages: list[dict]) -> list[dict]:
def _fix_messages(self, messages: list[dict]) -> list[dict]:
"""验证并且修复上下文"""
fixed_messages = []
for message in messages:
@@ -721,3 +302,184 @@ class LLMRequestSubStage(Stage):
else:
fixed_messages.append(message)
return fixed_messages
async def process(
self,
event: AstrMessageEvent,
_nested: bool = False,
) -> None | AsyncGenerator[None, None]:
req: ProviderRequest | None = None
if not self.ctx.astrbot_config["provider_settings"]["enable"]:
logger.debug("未启用 LLM 能力,跳过处理。")
return
# Check the session-level LLM on/off state
if not SessionServiceManager.should_process_llm_request(event):
logger.debug(f"会话 {event.unified_msg_origin} 禁用了 LLM跳过处理。")
return
provider = self._select_provider(event)
if provider is None:
return
if not isinstance(provider, Provider):
logger.error(f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。")
return
streaming_response = self.streaming_response
if (enable_streaming := event.get_extra("enable_streaming")) is not None:
streaming_response = bool(enable_streaming)
logger.debug("ready to request llm provider")
async with session_lock_manager.acquire_lock(event.unified_msg_origin):
logger.debug("acquired session lock for llm request")
if event.get_extra("provider_request"):
req = event.get_extra("provider_request")
assert isinstance(req, ProviderRequest), (
"provider_request 必须是 ProviderRequest 类型。"
)
if req.conversation:
req.contexts = json.loads(req.conversation.history)
else:
req = ProviderRequest()
req.prompt = ""
req.image_urls = []
if sel_model := event.get_extra("selected_model"):
req.model = sel_model
if self.provider_wake_prefix and not event.message_str.startswith(
self.provider_wake_prefix
):
return
req.prompt = event.message_str[len(self.provider_wake_prefix) :]
# func_tool selection has been moved into the packages/astrbot plugin.
# req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
for comp in event.message_obj.message:
if isinstance(comp, Image):
image_path = await comp.convert_to_file_path()
req.image_urls.append(image_path)
conversation = await self._get_session_conv(event)
req.conversation = conversation
req.contexts = json.loads(conversation.history)
event.set_extra("provider_request", req)
if not req.prompt and not req.image_urls:
return
# call event hook
if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
return
# apply knowledge base feature
await self._apply_kb(event, req)
# fix contexts json str
if isinstance(req.contexts, str):
req.contexts = json.loads(req.contexts)
# truncate contexts to fit max length
if req.contexts:
req.contexts = self._truncate_contexts(req.contexts)
self._fix_messages(req.contexts)
# session_id
if not req.session_id:
req.session_id = event.unified_msg_origin
# check provider modalities, if provider does not support image/tool_use, clear them in request.
self._modalities_fix(provider, req)
# filter tools, only keep tools from this pipeline's selected plugins
self._plugin_tool_fix(event, req)
stream_to_general = (
self.unsupported_streaming_strategy == "turn_off"
and not event.platform_meta.support_streaming_message
)
# Back up req.contexts
backup_contexts = copy.deepcopy(req.contexts)
# run agent
agent_runner = AgentRunner()
logger.debug(
f"handle provider[id: {provider.provider_config['id']}] request: {req}",
)
astr_agent_ctx = AstrAgentContext(
context=self.ctx.plugin_manager.context,
event=event,
)
await agent_runner.reset(
provider=provider,
request=req,
run_context=AgentContextWrapper(
context=astr_agent_ctx,
tool_call_timeout=self.tool_call_timeout,
),
tool_executor=FunctionToolExecutor(),
agent_hooks=MAIN_AGENT_HOOKS,
streaming=streaming_response,
)
if streaming_response and not stream_to_general:
# Streaming response
event.set_result(
MessageEventResult()
.set_result_content_type(ResultContentType.STREAMING_RESULT)
.set_async_stream(
run_agent(
agent_runner,
self.max_step,
self.show_tool_use,
show_reasoning=self.show_reasoning,
),
),
)
yield
if agent_runner.done():
if final_llm_resp := agent_runner.get_final_llm_resp():
if final_llm_resp.completion_text:
chain = (
MessageChain()
.message(final_llm_resp.completion_text)
.chain
)
elif final_llm_resp.result_chain:
chain = final_llm_resp.result_chain.chain
else:
chain = MessageChain().chain
event.set_result(
MessageEventResult(
chain=chain,
result_content_type=ResultContentType.STREAMING_FINISH,
),
)
else:
async for _ in run_agent(
agent_runner,
self.max_step,
self.show_tool_use,
stream_to_general,
show_reasoning=self.show_reasoning,
):
yield
# Restore the backed-up contexts
req.contexts = backup_contexts
await self._save_to_history(event, req, agent_runner.get_final_llm_resp())
# Handle the WebChat special case asynchronously
if event.get_platform_name() == "webchat":
asyncio.create_task(self._handle_webchat(event, req, provider))
asyncio.create_task(
Metric.upload(
llm_tick=1,
model_name=agent_runner.provider.get_model(),
provider_type=agent_runner.provider.meta().type,
),
)
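For reference, _truncate_contexts above keeps the newest (max_context_length - dequeue_context_length + 1) * 2 entries and then drops leading non-user messages so the history starts on a user turn. A standalone sketch of the same rule (names are illustrative):

def truncate_to_user_turn(contexts: list[dict], max_len: int, dequeue_len: int) -> list[dict]:
    if max_len == -1 or len(contexts) // 2 <= max_len:
        return contexts
    kept = contexts[-(max_len - dequeue_len + 1) * 2 :]
    for i, msg in enumerate(kept):
        if msg.get("role") == "user":
            return kept[i:]  # start the window on a user turn
    return kept

# e.g. max_len=3, dequeue_len=2 keeps the last 4 entries, then trims any
# leading assistant/tool messages.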

View File

@@ -1,23 +1,64 @@
from pydantic import Field
from pydantic.dataclasses import dataclass
from astrbot.api import logger, sp
from astrbot.core.provider.entities import ProviderRequest
from ..context import PipelineContext
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.tool import FunctionTool, ToolExecResult
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.star.context import Context
async def inject_kb_context(
@dataclass
class KnowledgeBaseQueryTool(FunctionTool[AstrAgentContext]):
name: str = "astr_kb_search"
description: str = (
"Query the knowledge base for facts or relevant context. "
"Use this tool when the user's question requires factual information, "
"definitions, background knowledge, or previously indexed content. "
"Only send short keywords or a concise question as the query."
)
parameters: dict = Field(
default_factory=lambda: {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "A concise keyword query for the knowledge base.",
},
},
"required": ["query"],
}
)
async def call(
self, context: ContextWrapper[AstrAgentContext], **kwargs
) -> ToolExecResult:
query = kwargs.get("query", "")
if not query:
return "error: Query parameter is empty."
result = await retrieve_knowledge_base(
query=kwargs.get("query", ""),
umo=context.context.event.unified_msg_origin,
context=context.context.context,
)
if not result:
return "No relevant knowledge found."
return result
async def retrieve_knowledge_base(
query: str,
umo: str,
p_ctx: PipelineContext,
req: ProviderRequest,
) -> None:
context: Context,
) -> str | None:
"""Inject knowledge base context into the provider request
Args:
umo: Unique message object (session ID)
p_ctx: Pipeline context
req: Provider request
"""
kb_mgr = p_ctx.plugin_manager.context.kb_manager
kb_mgr = context.kb_manager
config = context.get_config(umo=umo)
# 1. Prefer session-level configuration
session_config = await sp.session_get(umo, "kb_config", default={})
@@ -54,18 +95,18 @@ async def inject_kb_context(
logger.debug(f"[知识库] 使用会话级配置,知识库数量: {len(kb_names)}")
else:
kb_names = p_ctx.astrbot_config.get("kb_names", [])
top_k = p_ctx.astrbot_config.get("kb_final_top_k", 5)
kb_names = config.get("kb_names", [])
top_k = config.get("kb_final_top_k", 5)
logger.debug(f"[知识库] 使用全局配置,知识库数量: {len(kb_names)}")
top_k_fusion = p_ctx.astrbot_config.get("kb_fusion_top_k", 20)
top_k_fusion = config.get("kb_fusion_top_k", 20)
if not kb_names:
return
logger.debug(f"[知识库] 开始检索知识库,数量: {len(kb_names)}, top_k={top_k}")
kb_context = await kb_mgr.retrieve(
query=req.prompt,
query=query,
kb_names=kb_names,
top_k_fusion=top_k_fusion,
top_m_final=top_k,
@@ -78,4 +119,7 @@ async def inject_kb_context(
if formatted:
results = kb_context.get("results", [])
logger.debug(f"[知识库] 为会话 {umo} 注入了 {len(results)} 条相关知识块")
req.system_prompt = f"{formatted}\n\n{req.system_prompt or ''}"
return formatted
KNOWLEDGE_BASE_QUERY_TOOL = KnowledgeBaseQueryTool()
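In the non-agentic path, the llm_request stage calls retrieve_knowledge_base itself and appends the result to the system prompt, roughly as below (event, plugin_context, and req are assumed to be in scope):

kb_text = await retrieve_knowledge_base(
    query=req.prompt,
    umo=event.unified_msg_origin,
    context=plugin_context,  # astrbot.core.star.context.Context
)
if kb_text:
    req.system_prompt = (req.system_prompt or "") + f"\n\n[Related Knowledge Base Results]:\n{kb_text}"

In agentic mode the stage instead adds KNOWLEDGE_BASE_QUERY_TOOL to req.func_tool and lets the model decide when to query.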

View File

@@ -10,7 +10,6 @@ from astrbot.core.message.message_event_result import MessageChain, ResultConten
from astrbot.core.platform.astr_message_event import AstrMessageEvent
from astrbot.core.star.star_handler import EventType
from astrbot.core.utils.path_util import path_Mapping
from astrbot.core.utils.session_lock import session_lock_manager
from ..context import PipelineContext, call_event_hook
from ..stage import Stage, register_stage
@@ -169,12 +168,15 @@ class RespondStage(Stage):
logger.warning("async_stream 为空,跳过发送。")
return
# 流式结果直接交付平台适配器处理
use_fallback = self.config.get("provider_settings", {}).get(
"streaming_segmented",
False,
realtime_segmenting = (
self.config.get("provider_settings", {}).get(
"unsupported_streaming_strategy",
"realtime_segmenting",
)
== "realtime_segmenting"
)
logger.info(f"应用流式输出({event.get_platform_id()})")
await event.send_streaming(result.async_stream, use_fallback)
await event.send_streaming(result.async_stream, realtime_segmenting)
return
if len(result.chain) > 0:
# 检查路径映射
@@ -218,21 +220,20 @@ class RespondStage(Stage):
f"实际消息链为空, 跳过发送阶段。header_chain: {header_comps}, actual_chain: {result.chain}",
)
return
async with session_lock_manager.acquire_lock(event.unified_msg_origin):
for comp in result.chain:
i = await self._calc_comp_interval(comp)
await asyncio.sleep(i)
try:
if comp.type in need_separately:
await event.send(MessageChain([comp]))
else:
await event.send(MessageChain([*header_comps, comp]))
header_comps.clear()
except Exception as e:
logger.error(
f"发送消息链失败: chain = {MessageChain([comp])}, error = {e}",
exc_info=True,
)
for comp in result.chain:
i = await self._calc_comp_interval(comp)
await asyncio.sleep(i)
try:
if comp.type in need_separately:
await event.send(MessageChain([comp]))
else:
await event.send(MessageChain([*header_comps, comp]))
header_comps.clear()
except Exception as e:
logger.error(
f"发送消息链失败: chain = {MessageChain([comp])}, error = {e}",
exc_info=True,
)
else:
if all(
comp.type in {ComponentType.Reply, ComponentType.At}

View File

@@ -16,3 +16,6 @@ class PlatformMetadata:
"""显示在 WebUI 配置页中的平台名称,如空则是 name"""
logo_path: str | None = None
"""平台适配器的 logo 文件路径(相对于插件目录)"""
support_streaming_message: bool = True
"""平台是否支持真实流式传输"""

View File

@@ -14,6 +14,7 @@ def register_platform_adapter(
default_config_tmpl: dict | None = None,
adapter_display_name: str | None = None,
logo_path: str | None = None,
support_streaming_message: bool = True,
):
"""用于注册平台适配器的带参装饰器。
@@ -42,6 +43,7 @@ def register_platform_adapter(
default_config_tmpl=default_config_tmpl,
adapter_display_name=adapter_display_name,
logo_path=logo_path,
support_streaming_message=support_streaming_message,
)
platform_registry.append(pm)
platform_cls_map[adapter_name] = cls

View File

@@ -29,6 +29,7 @@ from .aiocqhttp_message_event import AiocqhttpMessageEvent
@register_platform_adapter(
"aiocqhttp",
"适用于 OneBot V11 标准的消息平台适配器,支持反向 WebSockets。",
support_streaming_message=False,
)
class AiocqhttpAdapter(Platform):
def __init__(
@@ -49,6 +50,7 @@ class AiocqhttpAdapter(Platform):
name="aiocqhttp",
description="适用于 OneBot 标准的消息平台适配器,支持反向 WebSockets。",
id=self.config.get("id"),
support_streaming_message=False,
)
self.bot = CQHttp(

View File

@@ -37,7 +37,9 @@ class MyEventHandler(dingtalk_stream.EventHandler):
return AckMessage.STATUS_OK, "OK"
@register_platform_adapter("dingtalk", "钉钉机器人官方 API 适配器")
@register_platform_adapter(
"dingtalk", "钉钉机器人官方 API 适配器", support_streaming_message=False
)
class DingtalkPlatformAdapter(Platform):
def __init__(
self,
@@ -74,6 +76,14 @@ class DingtalkPlatformAdapter(Platform):
)
self.client_ = client # 用于 websockets 的 client
def _id_to_sid(self, dingtalk_id: str | None) -> str | None:
if not dingtalk_id:
return dingtalk_id
prefix = "$:LWCP_v1:$"
if dingtalk_id.startswith(prefix):
return dingtalk_id[len(prefix) :]
return dingtalk_id
async def send_by_session(
self,
session: MessageSesion,
@@ -86,6 +96,7 @@ class DingtalkPlatformAdapter(Platform):
name="dingtalk",
description="钉钉机器人官方 API 适配器",
id=self.config.get("id"),
support_streaming_message=False,
)
async def convert_msg(
@@ -102,10 +113,10 @@ class DingtalkPlatformAdapter(Platform):
else MessageType.FRIEND_MESSAGE
)
abm.sender = MessageMember(
user_id=message.sender_id,
user_id=self._id_to_sid(message.sender_id),
nickname=message.sender_nick,
)
abm.self_id = message.chatbot_user_id
abm.self_id = self._id_to_sid(message.chatbot_user_id)
abm.message_id = message.message_id
abm.raw_message = message
@@ -113,8 +124,8 @@ class DingtalkPlatformAdapter(Platform):
# Handle every @-mentioned user (including the bot itself, since at_users already contains it)
if message.at_users:
for user in message.at_users:
if user.dingtalk_id:
abm.message.append(At(qq=user.dingtalk_id))
if id := self._id_to_sid(user.dingtalk_id):
abm.message.append(At(qq=id))
abm.group_id = message.conversation_id
if self.unique_session:
abm.session_id = abm.sender.user_id

View File

@@ -34,7 +34,9 @@ else:
# Register the platform adapter
@register_platform_adapter("discord", "Discord 适配器 (基于 Pycord)")
@register_platform_adapter(
"discord", "Discord 适配器 (基于 Pycord)", support_streaming_message=False
)
class DiscordPlatformAdapter(Platform):
def __init__(
self,
@@ -111,6 +113,7 @@ class DiscordPlatformAdapter(Platform):
"Discord 适配器",
id=self.config.get("id"),
default_config_tmpl=self.config,
support_streaming_message=False,
)
@override

View File

@@ -1,7 +1,7 @@
import asyncio
import base64
import binascii
import sys
from collections.abc import AsyncGenerator
from io import BytesIO
from pathlib import Path
@@ -21,11 +21,6 @@ from astrbot.api.platform import AstrBotMessage, At, PlatformMetadata
from .client import DiscordBotClient
from .components import DiscordEmbed, DiscordView
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
# Custom Discord view component, for compatibility with older versions
class DiscordViewComponent(BaseMessageComponent):
@@ -49,7 +44,6 @@ class DiscordPlatformEvent(AstrMessageEvent):
self.client = client
self.interaction_followup_webhook = interaction_followup_webhook
@override
async def send(self, message: MessageChain):
"""发送消息到Discord平台"""
# 解析消息链为 Discord 所需的对象
@@ -98,6 +92,21 @@ class DiscordPlatformEvent(AstrMessageEvent):
await super().send(message)
async def send_streaming(
self, generator: AsyncGenerator[MessageChain, None], use_fallback: bool = False
):
buffer = None
async for chain in generator:
if not buffer:
buffer = chain
else:
buffer.chain.extend(chain.chain)
if not buffer:
return None
buffer.squash_plain()
await self.send(buffer)
return await super().send_streaming(generator, use_fallback)
async def _get_channel(self) -> discord.abc.Messageable | None:
"""获取当前事件对应的频道对象"""
try:

View File

@@ -23,7 +23,9 @@ from ...register import register_platform_adapter
from .lark_event import LarkMessageEvent
@register_platform_adapter("lark", "飞书机器人官方 API 适配器")
@register_platform_adapter(
"lark", "飞书机器人官方 API 适配器", support_streaming_message=False
)
class LarkPlatformAdapter(Platform):
def __init__(
self,
@@ -115,6 +117,7 @@ class LarkPlatformAdapter(Platform):
name="lark",
description="飞书机器人官方 API 适配器",
id=self.config.get("id"),
support_streaming_message=False,
)
async def convert_msg(self, event: lark.im.v1.P2ImMessageReceiveV1):

View File

@@ -45,7 +45,9 @@ MAX_FILE_UPLOAD_COUNT = 16
DEFAULT_UPLOAD_CONCURRENCY = 3
@register_platform_adapter("misskey", "Misskey 平台适配器")
@register_platform_adapter(
"misskey", "Misskey 平台适配器", support_streaming_message=False
)
class MisskeyPlatformAdapter(Platform):
def __init__(
self,
@@ -120,6 +122,7 @@ class MisskeyPlatformAdapter(Platform):
description="Misskey 平台适配器",
id=self.config.get("id", "misskey"),
default_config_tmpl=default_config,
support_streaming_message=False,
)
async def run(self):

View File

@@ -29,8 +29,7 @@ from astrbot.core.platform.astr_message_event import MessageSession
@register_platform_adapter(
"satori",
"Satori 协议适配器",
"satori", "Satori 协议适配器", support_streaming_message=False
)
class SatoriPlatformAdapter(Platform):
def __init__(
@@ -60,6 +59,7 @@ class SatoriPlatformAdapter(Platform):
name="satori",
description="Satori 通用协议适配器",
id=self.config["id"],
support_streaming_message=False,
)
self.ws: ClientConnection | None = None

View File

@@ -30,6 +30,7 @@ from .slack_event import SlackMessageEvent
@register_platform_adapter(
"slack",
"适用于 Slack 的消息平台适配器,支持 Socket Mode 和 Webhook Mode。",
support_streaming_message=False,
)
class SlackAdapter(Platform):
def __init__(
@@ -68,6 +69,7 @@ class SlackAdapter(Platform):
name="slack",
description="适用于 Slack 的消息平台适配器,支持 Socket Mode 和 Webhook Mode。",
id=self.config.get("id"),
support_streaming_message=False,
)
# Initialize the Slack Web Client

View File

@@ -163,6 +163,9 @@ class WebChatAdapter(Platform):
_, _, payload = message.raw_message # type: ignore
message_event.set_extra("selected_provider", payload.get("selected_provider"))
message_event.set_extra("selected_model", payload.get("selected_model"))
message_event.set_extra(
"enable_streaming", payload.get("enable_streaming", True)
)
self.commit_event(message_event)

View File

@@ -109,6 +109,7 @@ class WebChatMessageEvent(AstrMessageEvent):
async def send_streaming(self, generator, use_fallback: bool = False):
final_data = ""
reasoning_content = ""
cid = self.session_id.split("!")[-1]
web_chat_back_queue = webchat_queue_mgr.get_or_create_back_queue(cid)
async for chain in generator:
@@ -124,16 +125,22 @@ class WebChatMessageEvent(AstrMessageEvent):
)
final_data = ""
continue
final_data += await WebChatMessageEvent._send(
r = await WebChatMessageEvent._send(
chain,
session_id=self.session_id,
streaming=True,
)
if chain.type == "reasoning":
reasoning_content += chain.get_plain_text()
else:
final_data += r
await web_chat_back_queue.put(
{
"type": "complete", # complete means we return the final result
"data": final_data,
"reasoning": reasoning_content,
"streaming": True,
"cid": cid,
},

View File

@@ -32,7 +32,9 @@ except ImportError as e:
)
@register_platform_adapter("wechatpadpro", "WeChatPadPro 消息平台适配器")
@register_platform_adapter(
"wechatpadpro", "WeChatPadPro 消息平台适配器", support_streaming_message=False
)
class WeChatPadProAdapter(Platform):
def __init__(
self,
@@ -51,6 +53,7 @@ class WeChatPadProAdapter(Platform):
name="wechatpadpro",
description="WeChatPadPro 消息平台适配器",
id=self.config.get("id", "wechatpadpro"),
support_streaming_message=False,
)
# Store the configuration

View File

@@ -1,6 +1,7 @@
import asyncio
import base64
import io
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING
import aiohttp
@@ -50,6 +51,21 @@ class WeChatPadProMessageEvent(AstrMessageEvent):
await self._send_voice(session, comp)
await super().send(message)
async def send_streaming(
self, generator: AsyncGenerator[MessageChain, None], use_fallback: bool = False
):
buffer = None
async for chain in generator:
if not buffer:
buffer = chain
else:
buffer.chain.extend(chain.chain)
if not buffer:
return None
buffer.squash_plain()
await self.send(buffer)
return await super().send_streaming(generator, use_fallback)
async def _send_image(self, session: aiohttp.ClientSession, comp: Image):
b64 = await comp.convert_to_base64()
raw = self._validate_base64(b64)

View File

@@ -110,7 +110,7 @@ class WecomServer:
await self.shutdown_event.wait()
@register_platform_adapter("wecom", "wecom 适配器")
@register_platform_adapter("wecom", "wecom 适配器", support_streaming_message=False)
class WecomPlatformAdapter(Platform):
def __init__(
self,
@@ -196,6 +196,7 @@ class WecomPlatformAdapter(Platform):
"wecom",
"wecom 适配器",
id=self.config.get("id", "wecom"),
support_streaming_message=False,
)
@override

View File

@@ -30,7 +30,7 @@ from .wecomai_api import (
WecomAIBotStreamMessageBuilder,
)
from .wecomai_event import WecomAIBotMessageEvent
from .wecomai_queue_mgr import WecomAIQueueMgr, wecomai_queue_mgr
from .wecomai_queue_mgr import WecomAIQueueMgr
from .wecomai_server import WecomAIBotServer
from .wecomai_utils import (
WecomAIBotConstants,
@@ -144,9 +144,12 @@ class WecomAIBotAdapter(Platform):
# Event loop and shutdown signal
self.shutdown_event = asyncio.Event()
# Queue manager
self.queue_mgr = WecomAIQueueMgr()
# Queue listener
self.queue_listener = WecomAIQueueListener(
wecomai_queue_mgr,
self.queue_mgr,
self._handle_queued_message,
)
@@ -189,7 +192,7 @@ class WecomAIBotAdapter(Platform):
stream_id,
session_id,
)
wecomai_queue_mgr.set_pending_response(stream_id, callback_params)
self.queue_mgr.set_pending_response(stream_id, callback_params)
resp = WecomAIBotStreamMessageBuilder.make_text_stream(
stream_id,
@@ -207,7 +210,7 @@ class WecomAIBotAdapter(Platform):
elif msgtype == "stream":
# wechat server is requesting for updates of a stream
stream_id = message_data["stream"]["id"]
if not wecomai_queue_mgr.has_back_queue(stream_id):
if not self.queue_mgr.has_back_queue(stream_id):
logger.error(f"Cannot find back queue for stream_id: {stream_id}")
# Return the finish flag to tell the WeChat server the stream has ended
@@ -222,7 +225,7 @@ class WecomAIBotAdapter(Platform):
callback_params["timestamp"],
)
return resp
queue = wecomai_queue_mgr.get_or_create_back_queue(stream_id)
queue = self.queue_mgr.get_or_create_back_queue(stream_id)
if queue.empty():
logger.debug(
f"No new messages in back queue for stream_id: {stream_id}",
@@ -242,10 +245,9 @@ class WecomAIBotAdapter(Platform):
elif msg["type"] == "end":
# stream end
finish = True
wecomai_queue_mgr.remove_queues(stream_id)
self.queue_mgr.remove_queues(stream_id)
break
else:
pass
logger.debug(
f"Aggregated content: {latest_plain_content}, image: {len(image_base64)}, finish: {finish}",
)
@@ -313,8 +315,8 @@ class WecomAIBotAdapter(Platform):
session_id: str,
):
"""将消息放入队列进行异步处理"""
input_queue = wecomai_queue_mgr.get_or_create_queue(stream_id)
_ = wecomai_queue_mgr.get_or_create_back_queue(stream_id)
input_queue = self.queue_mgr.get_or_create_queue(stream_id)
_ = self.queue_mgr.get_or_create_back_queue(stream_id)
message_payload = {
"message_data": message_data,
"callback_params": callback_params,
@@ -453,6 +455,7 @@ class WecomAIBotAdapter(Platform):
platform_meta=self.meta(),
session_id=message.session_id,
api_client=self.api_client,
queue_mgr=self.queue_mgr,
)
self.commit_event(message_event)

View File

@@ -8,7 +8,7 @@ from astrbot.api.message_components import (
)
from .wecomai_api import WecomAIBotAPIClient
from .wecomai_queue_mgr import wecomai_queue_mgr
from .wecomai_queue_mgr import WecomAIQueueMgr
class WecomAIBotMessageEvent(AstrMessageEvent):
@@ -21,6 +21,7 @@ class WecomAIBotMessageEvent(AstrMessageEvent):
platform_meta,
session_id: str,
api_client: WecomAIBotAPIClient,
queue_mgr: WecomAIQueueMgr,
):
"""初始化消息事件
@@ -34,14 +35,16 @@ class WecomAIBotMessageEvent(AstrMessageEvent):
"""
super().__init__(message_str, message_obj, platform_meta, session_id)
self.api_client = api_client
self.queue_mgr = queue_mgr
@staticmethod
async def _send(
message_chain: MessageChain,
stream_id: str,
queue_mgr: WecomAIQueueMgr,
streaming: bool = False,
):
back_queue = wecomai_queue_mgr.get_or_create_back_queue(stream_id)
back_queue = queue_mgr.get_or_create_back_queue(stream_id)
if not message_chain:
await back_queue.put(
@@ -94,7 +97,7 @@ class WecomAIBotMessageEvent(AstrMessageEvent):
"wecom_ai_bot platform event raw_message should be a dict"
)
stream_id = raw.get("stream_id", self.session_id)
await WecomAIBotMessageEvent._send(message, stream_id)
await WecomAIBotMessageEvent._send(message, stream_id, self.queue_mgr)
await super().send(message)
async def send_streaming(self, generator, use_fallback=False):
@@ -105,7 +108,7 @@ class WecomAIBotMessageEvent(AstrMessageEvent):
"wecom_ai_bot platform event raw_message should be a dict"
)
stream_id = raw.get("stream_id", self.session_id)
back_queue = wecomai_queue_mgr.get_or_create_back_queue(stream_id)
back_queue = self.queue_mgr.get_or_create_back_queue(stream_id)
# The WeCom AI bot does not support incremental delivery, so we accumulate the increments here and send them in batches
increment_plain = ""
@@ -134,6 +137,7 @@ class WecomAIBotMessageEvent(AstrMessageEvent):
final_data += await WecomAIBotMessageEvent._send(
chain,
stream_id=stream_id,
queue_mgr=self.queue_mgr,
streaming=True,
)

View File

@@ -151,7 +151,3 @@ class WecomAIQueueMgr:
"output_queues": len(self.back_queues),
"pending_responses": len(self.pending_responses),
}
# Global queue manager instance
wecomai_queue_mgr = WecomAIQueueMgr()

View File

@@ -113,7 +113,9 @@ class WecomServer:
await self.shutdown_event.wait()
@register_platform_adapter("weixin_official_account", "微信公众平台 适配器")
@register_platform_adapter(
"weixin_official_account", "微信公众平台 适配器", support_streaming_message=False
)
class WeixinOfficialAccountPlatformAdapter(Platform):
def __init__(
self,
@@ -195,6 +197,7 @@ class WeixinOfficialAccountPlatformAdapter(Platform):
"weixin_official_account",
"微信公众平台 适配器",
id=self.config.get("id", "weixin_official_account"),
support_streaming_message=False,
)
@override

View File

@@ -1,4 +1,4 @@
from .entities import ProviderMetaData
from .provider import Personality, Provider, STTProvider
from .provider import Provider, STTProvider
__all__ = ["Personality", "Provider", "ProviderMetaData", "STTProvider"]
__all__ = ["Provider", "ProviderMetaData", "STTProvider"]

View File

@@ -30,18 +30,31 @@ class ProviderType(enum.Enum):
@dataclass
class ProviderMetaData:
type: str
"""提供商适配器名称,如 openai, ollama"""
desc: str = ""
"""提供商适配器描述"""
provider_type: ProviderType = ProviderType.CHAT_COMPLETION
cls_type: Any = None
class ProviderMeta:
"""The basic metadata of a provider instance."""
id: str
"""the unique id of the provider instance that user configured"""
model: str | None
"""the model name of the provider instance currently used"""
type: str
"""the name of the provider adapter, such as openai, ollama"""
provider_type: ProviderType = ProviderType.CHAT_COMPLETION
"""the capability type of the provider adapter"""
@dataclass
class ProviderMetaData(ProviderMeta):
"""The metadata of a provider adapter for registration."""
desc: str = ""
"""the short description of the provider adapter"""
cls_type: Any = None
"""the class type of the provider adapter"""
default_config_tmpl: dict | None = None
"""平台的默认配置模板"""
"""the default configuration template of the provider adapter"""
provider_display_name: str | None = None
"""显示在 WebUI 配置页中的提供商名称,如空则是 type"""
"""the display name of the provider shown in the WebUI configuration page; if empty, the type is used"""
@dataclass
@@ -60,12 +73,20 @@ class ToolCallsResult:
]
return ret
def to_openai_messages_model(
self,
) -> list[AssistantMessageSegment | ToolCallMessageSegment]:
return [
self.tool_calls_info,
*self.tool_calls_result,
]
@dataclass
class ProviderRequest:
prompt: str
prompt: str | None = None
"""提示词"""
session_id: str = ""
session_id: str | None = ""
"""会话 ID"""
image_urls: list[str] = field(default_factory=list)
"""图片 URL 列表"""
@@ -181,25 +202,30 @@ class ProviderRequest:
@dataclass
class LLMResponse:
role: str
"""角色, assistant, tool, err"""
"""The role of the message, e.g., assistant, tool, err"""
result_chain: MessageChain | None = None
"""返回的消息链"""
"""A chain of message components representing the text completion from LLM."""
tools_call_args: list[dict[str, Any]] = field(default_factory=list)
"""工具调用参数"""
"""Tool call arguments."""
tools_call_name: list[str] = field(default_factory=list)
"""工具调用名称"""
"""Tool call names."""
tools_call_ids: list[str] = field(default_factory=list)
"""工具调用 ID"""
"""Tool call IDs."""
tools_call_extra_content: dict[str, dict[str, Any]] = field(default_factory=dict)
"""Tool call extra content. tool_call_id -> extra_content dict"""
reasoning_content: str = ""
"""The reasoning content extracted from the LLM, if any."""
raw_completion: (
ChatCompletion | GenerateContentResponse | AnthropicMessage | None
) = None
_new_record: dict[str, Any] | None = None
"""The raw completion response from the LLM provider."""
_completion_text: str = ""
"""The plain text of the completion."""
is_chunk: bool = False
"""是否是流式输出的单个 Chunk"""
"""Indicates if the response is a chunked response."""
def __init__(
self,
@@ -209,11 +235,11 @@ class LLMResponse:
tools_call_args: list[dict[str, Any]] | None = None,
tools_call_name: list[str] | None = None,
tools_call_ids: list[str] | None = None,
tools_call_extra_content: dict[str, dict[str, Any]] | None = None,
raw_completion: ChatCompletion
| GenerateContentResponse
| AnthropicMessage
| None = None,
_new_record: dict[str, Any] | None = None,
is_chunk: bool = False,
):
"""初始化 LLMResponse
@@ -233,6 +259,8 @@ class LLMResponse:
tools_call_name = []
if tools_call_ids is None:
tools_call_ids = []
if tools_call_extra_content is None:
tools_call_extra_content = {}
self.role = role
self.completion_text = completion_text
@@ -240,8 +268,8 @@ class LLMResponse:
self.tools_call_args = tools_call_args
self.tools_call_name = tools_call_name
self.tools_call_ids = tools_call_ids
self.tools_call_extra_content = tools_call_extra_content
self.raw_completion = raw_completion
self._new_record = _new_record
self.is_chunk = is_chunk
@property
@@ -266,16 +294,19 @@ class LLMResponse:
"""Convert to OpenAI tool calls format. Deprecated, use to_openai_to_calls_model instead."""
ret = []
for idx, tool_call_arg in enumerate(self.tools_call_args):
ret.append(
{
"id": self.tools_call_ids[idx],
"function": {
"name": self.tools_call_name[idx],
"arguments": json.dumps(tool_call_arg),
},
"type": "function",
payload = {
"id": self.tools_call_ids[idx],
"function": {
"name": self.tools_call_name[idx],
"arguments": json.dumps(tool_call_arg),
},
)
"type": "function",
}
if self.tools_call_extra_content.get(self.tools_call_ids[idx]):
payload["extra_content"] = self.tools_call_extra_content[
self.tools_call_ids[idx]
]
ret.append(payload)
return ret
def to_openai_to_calls_model(self) -> list[ToolCall]:
@@ -289,6 +320,10 @@ class LLMResponse:
name=self.tools_call_name[idx],
arguments=json.dumps(tool_call_arg),
),
# extra_content is not serialized when it is None in ToolCall.model_dump()
extra_content=self.tools_call_extra_content.get(
self.tools_call_ids[idx]
),
),
)
return ret
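To make the change concrete: a call whose ID has an entry in tools_call_extra_content now serializes with one additional key, and every other call is byte-for-byte what the old code produced. Schematically (IDs and values invented):

# With tools_call_extra_content = {"call_2": {"google": {"thought_signature": "c2ln"}}}:
[
    {"id": "call_1", "type": "function",
     "function": {"name": "lookup", "arguments": '{"q": "a"}'}},
    {"id": "call_2", "type": "function",
     "function": {"name": "lookup", "arguments": '{"q": "b"}'},
     "extra_content": {"google": {"thought_signature": "c2ln"}}},
]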

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
import asyncio
import copy
import json
import os
from collections.abc import Awaitable, Callable
@@ -24,7 +25,16 @@ SUPPORTED_TYPES = [
"boolean",
] # data types supported by JSON Schema
PY_TO_JSON_TYPE = {
"int": "number",
"float": "number",
"bool": "boolean",
"str": "string",
"dict": "object",
"list": "array",
"tuple": "array",
"set": "array",
}
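A quick sanity check of the mapping (runnable right after the definition above; names already in JSON-schema form fall through dict.get unchanged):

for annotation in ("int", "list", "string", "object"):
    print(annotation, "->", PY_TO_JSON_TYPE.get(annotation, annotation))
# int -> number
# list -> array
# string -> string   (already a JSON-schema type, passes through)
# object -> object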
# alias
FuncTool = FunctionTool
@@ -106,7 +116,7 @@ class FunctionToolManager:
def spec_to_func(
self,
name: str,
func_args: list,
func_args: list[dict],
desc: str,
handler: Callable[..., Awaitable[Any]],
) -> FuncTool:
@@ -115,10 +125,9 @@ class FunctionToolManager:
"properties": {},
}
for param in func_args:
params["properties"][param["name"]] = {
"type": param["type"],
"description": param["description"],
}
p = copy.deepcopy(param)
p.pop("name", None)
params["properties"][param["name"]] = p
return FuncTool(
name=name,
parameters=params,
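The practical effect of the deep-copy rewrite in spec_to_func is that richer JSON-schema keys such as items or enum now survive into parameters, where the old whitelist dropped everything but type and description. A standalone sketch of the loop (the sample arg spec is invented):

import copy

func_args = [
    {"name": "tags", "type": "array",
     "description": "tag list", "items": {"type": "string"}},
]

params = {"type": "object", "properties": {}}
for param in func_args:
    p = copy.deepcopy(param)       # keep every schema key the caller supplied
    p.pop("name", None)            # the name becomes the property key instead
    params["properties"][param["name"]] = p

assert params["properties"]["tags"]["items"] == {"type": "string"}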
@@ -271,19 +280,22 @@ class FunctionToolManager:
async def _terminate_mcp_client(self, name: str) -> None:
"""关闭并清理MCP客户端"""
if name in self.mcp_client_dict:
client = self.mcp_client_dict[name]
try:
# Close the MCP connection
await self.mcp_client_dict[name].cleanup()
self.mcp_client_dict.pop(name)
await client.cleanup()
except Exception as e:
logger.error(f"清空 MCP 客户端资源 {name}: {e}")
# Remove the associated FuncTool entries
self.func_list = [
f
for f in self.func_list
if not (isinstance(f, MCPTool) and f.mcp_server_name == name)
]
logger.info(f"已关闭 MCP 服务 {name}")
finally:
# Remove client from dict after cleanup attempt (successful or not)
self.mcp_client_dict.pop(name, None)
# Remove the associated FuncTool entries
self.func_list = [
f
for f in self.func_list
if not (isinstance(f, MCPTool) and f.mcp_server_name == name)
]
logger.info(f"已关闭 MCP 服务 {name}")
@staticmethod
async def test_mcp_server_connection(config: dict) -> list[str]:

View File

@@ -241,6 +241,8 @@ class ProviderManager:
)
case "zhipu_chat_completion":
from .sources.zhipu_source import ProviderZhipu as ProviderZhipu
case "groq_chat_completion":
from .sources.groq_source import ProviderGroq as ProviderGroq
case "anthropic_chat_completion":
from .sources.anthropic_source import (
ProviderAnthropic as ProviderAnthropic,
@@ -354,6 +356,8 @@ class ProviderManager:
logger.error(f"无法找到 {provider_metadata.type} 的类")
return
provider_metadata.id = provider_config["id"]
if provider_metadata.provider_type == ProviderType.SPEECH_TO_TEXT:
# STT task
inst = cls_type(provider_config, self.provider_settings)
@@ -394,7 +398,6 @@ class ProviderManager:
inst = cls_type(
provider_config,
self.provider_settings,
self.selected_default_persona,
)
if getattr(inst, "initialize", None):

View File

@@ -1,28 +1,18 @@
import abc
import asyncio
from collections.abc import AsyncGenerator
from dataclasses import dataclass
from astrbot.core.agent.message import Message
from astrbot.core.agent.tool import ToolSet
from astrbot.core.db.po import Personality
from astrbot.core.provider.entities import (
LLMResponse,
ProviderType,
ProviderMeta,
RerankResult,
ToolCallsResult,
)
from astrbot.core.provider.register import provider_cls_map
@dataclass
class ProviderMeta:
id: str
model: str
type: str
provider_type: ProviderType
class AbstractProvider(abc.ABC):
"""Provider Abstract Class"""
@@ -43,15 +33,15 @@ class AbstractProvider(abc.ABC):
"""Get the provider metadata"""
provider_type_name = self.provider_config["type"]
meta_data = provider_cls_map.get(provider_type_name)
provider_type = meta_data.provider_type if meta_data else None
if provider_type is None:
raise ValueError(f"Cannot find provider type: {provider_type_name}")
return ProviderMeta(
id=self.provider_config["id"],
if not meta_data:
raise ValueError(f"Provider type {provider_type_name} not registered")
meta = ProviderMeta(
id=self.provider_config.get("id", "default"),
model=self.get_model(),
type=provider_type_name,
provider_type=provider_type,
provider_type=meta_data.provider_type,
)
return meta
class Provider(AbstractProvider):
@@ -61,15 +51,10 @@ class Provider(AbstractProvider):
self,
provider_config: dict,
provider_settings: dict,
default_persona: Personality | None = None,
) -> None:
super().__init__(provider_config)
self.provider_settings = provider_settings
self.curr_personality = default_persona
"""维护了当前的使用的 persona即人格。可能为 None"""
@abc.abstractmethod
def get_current_key(self) -> str:
raise NotImplementedError

View File

@@ -36,6 +36,8 @@ def register_provider_adapter(
default_config_tmpl["id"] = provider_type_name
pm = ProviderMetaData(
id="default", # will be replaced when instantiated
model=None,
type=provider_type_name,
desc=desc,
provider_type=provider_type,

View File

@@ -25,12 +25,10 @@ class ProviderAnthropic(Provider):
self,
provider_config,
provider_settings,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
self.chosen_api_key: str = ""

View File

@@ -20,12 +20,10 @@ class ProviderCoze(Provider):
self,
provider_config,
provider_settings,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
self.api_key = provider_config.get("coze_api_key", "")
if not self.api_key:

View File

@@ -8,7 +8,7 @@ from dashscope.app.application_response import ApplicationResponse
from astrbot.core import logger, sp
from astrbot.core.message.message_event_result import MessageChain
from .. import Personality, Provider
from .. import Provider
from ..entities import LLMResponse
from ..register import register_provider_adapter
from .openai_source import ProviderOpenAIOfficial
@@ -20,13 +20,11 @@ class ProviderDashscope(ProviderOpenAIOfficial):
self,
provider_config: dict,
provider_settings: dict,
default_persona: Personality | None = None,
) -> None:
Provider.__init__(
self,
provider_config,
provider_settings,
default_persona,
)
self.api_key = provider_config.get("dashscope_api_key", "")
if not self.api_key:

View File

@@ -18,12 +18,10 @@ class ProviderDify(Provider):
self,
provider_config,
provider_settings,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
self.api_key = provider_config.get("dify_api_key", "")
if not self.api_key:

View File

@@ -53,12 +53,10 @@ class ProviderGoogleGenAI(Provider):
self,
provider_config,
provider_settings,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
self.api_keys: list = super().get_keys()
self.chosen_api_key: str = self.api_keys[0] if len(self.api_keys) > 0 else ""
@@ -292,13 +290,24 @@ class ProviderGoogleGenAI(Provider):
parts = [types.Part.from_text(text=content)]
append_or_extend(gemini_contents, parts, types.ModelContent)
elif not native_tool_enabled and "tool_calls" in message:
parts = [
types.Part.from_function_call(
parts = []
for tool in message["tool_calls"]:
part = types.Part.from_function_call(
name=tool["function"]["name"],
args=json.loads(tool["function"]["arguments"]),
)
for tool in message["tool_calls"]
]
# we should set thought_signature back to part if exists
# for more info about thought_signature, see:
# https://ai.google.dev/gemini-api/docs/thought-signatures
if "extra_content" in tool and tool["extra_content"]:
ts_bs64 = (
tool["extra_content"]
.get("google", {})
.get("thought_signature")
)
if ts_bs64:
part.thought_signature = base64.b64decode(ts_bs64)
parts.append(part)
append_or_extend(gemini_contents, parts, types.ModelContent)
else:
logger.warning("assistant 角色的消息内容为空,已添加空格占位")
@@ -326,8 +335,18 @@ class ProviderGoogleGenAI(Provider):
return gemini_contents
def _extract_reasoning_content(self, candidate: types.Candidate) -> str:
"""Extract reasoning content from candidate parts"""
if not candidate.content or not candidate.content.parts:
return ""
thought_buf: list[str] = [
(p.text or "") for p in candidate.content.parts if p.thought
]
return "".join(thought_buf).strip()
def _process_content_parts(
self,
candidate: types.Candidate,
llm_response: LLMResponse,
) -> MessageChain:
@@ -358,6 +377,11 @@ class ProviderGoogleGenAI(Provider):
logger.warning(f"收到的 candidate.content.parts 为空: {candidate}")
raise Exception("API 返回的 candidate.content.parts 为空。")
# Extract the reasoning content
reasoning = self._extract_reasoning_content(candidate)
if reasoning:
llm_response.reasoning_content = reasoning
chain = []
part: types.Part
@@ -380,10 +404,15 @@ class ProviderGoogleGenAI(Provider):
llm_response.role = "tool"
llm_response.tools_call_name.append(part.function_call.name)
llm_response.tools_call_args.append(part.function_call.args)
# gemini may return function_call.id as None
llm_response.tools_call_ids.append(
part.function_call.id or part.function_call.name,
)
# function_call.id might be None, use name as fallback
tool_call_id = part.function_call.id or part.function_call.name
llm_response.tools_call_ids.append(tool_call_id)
# extra_content
if part.thought_signature:
ts_bs64 = base64.b64encode(part.thought_signature).decode("utf-8")
llm_response.tools_call_extra_content[tool_call_id] = {
"google": {"thought_signature": ts_bs64}
}
elif (
part.inline_data
and part.inline_data.mime_type
@@ -422,6 +451,7 @@ class ProviderGoogleGenAI(Provider):
contents=conversation,
config=config,
)
logger.debug(f"genai result: {result}")
if not result.candidates:
logger.error(f"请求失败, 返回的 candidates 为空: {result}")
@@ -515,6 +545,7 @@ class ProviderGoogleGenAI(Provider):
# Accumulate the complete response text for the final response
accumulated_text = ""
accumulated_reasoning = ""
final_response = None
async for chunk in result:
@@ -539,9 +570,19 @@ class ProviderGoogleGenAI(Provider):
yield llm_response
return
_f = False
# Extract the reasoning content
reasoning = self._extract_reasoning_content(chunk.candidates[0])
if reasoning:
_f = True
accumulated_reasoning += reasoning
llm_response.reasoning_content = reasoning
if chunk.text:
_f = True
accumulated_text += chunk.text
llm_response.result_chain = MessageChain(chain=[Comp.Plain(chunk.text)])
if _f:
yield llm_response
if chunk.candidates[0].finish_reason:
@@ -559,6 +600,10 @@ class ProviderGoogleGenAI(Provider):
if not final_response:
final_response = LLMResponse("assistant", is_chunk=False)
# Set the complete accumulated reasoning in the final response
if accumulated_reasoning:
final_response.reasoning_content = accumulated_reasoning
# Set the complete accumulated text in the final response
if accumulated_text:
final_response.result_chain = MessageChain(

View File

@@ -0,0 +1,15 @@
from ..register import register_provider_adapter
from .openai_source import ProviderOpenAIOfficial
@register_provider_adapter(
"groq_chat_completion", "Groq Chat Completion Provider Adapter"
)
class ProviderGroq(ProviderOpenAIOfficial):
def __init__(
self,
provider_config: dict,
provider_settings: dict,
) -> None:
super().__init__(provider_config, provider_settings)
self.reasoning_key = "reasoning"

View File

@@ -4,12 +4,14 @@ import inspect
import json
import os
import random
import re
from collections.abc import AsyncGenerator
from openai import AsyncAzureOpenAI, AsyncOpenAI
from openai._exceptions import NotFoundError, UnprocessableEntityError
from openai._exceptions import NotFoundError
from openai.lib.streaming.chat._completions import ChatCompletionStreamState
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
import astrbot.core.message.components as Comp
from astrbot import logger
@@ -28,37 +30,37 @@ from ..register import register_provider_adapter
"OpenAI API Chat Completion 提供商适配器",
)
class ProviderOpenAIOfficial(Provider):
def __init__(
self,
provider_config,
provider_settings,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
def __init__(self, provider_config, provider_settings) -> None:
super().__init__(provider_config, provider_settings)
self.chosen_api_key = None
self.api_keys: list = super().get_keys()
self.chosen_api_key = self.api_keys[0] if len(self.api_keys) > 0 else None
self.timeout = provider_config.get("timeout", 120)
self.custom_headers = provider_config.get("custom_headers", {})
if isinstance(self.timeout, str):
self.timeout = int(self.timeout)
# Azure OpenAI compatibility (#332)
if not isinstance(self.custom_headers, dict) or not self.custom_headers:
self.custom_headers = None
else:
for key in self.custom_headers:
self.custom_headers[key] = str(self.custom_headers[key])
if "api_version" in provider_config:
# Using Azure OpenAI API
self.client = AsyncAzureOpenAI(
api_key=self.chosen_api_key,
api_version=provider_config.get("api_version", None),
default_headers=self.custom_headers,
base_url=provider_config.get("api_base", ""),
timeout=self.timeout,
)
else:
# Using OpenAI Official API
self.client = AsyncOpenAI(
api_key=self.chosen_api_key,
base_url=provider_config.get("api_base", None),
default_headers=self.custom_headers,
timeout=self.timeout,
)
@@ -70,6 +72,8 @@ class ProviderOpenAIOfficial(Provider):
model = model_config.get("model", "unknown")
self.set_model(model)
self.reasoning_key = "reasoning_content"
def _maybe_inject_xai_search(self, payloads: dict, **kwargs):
"""当开启 xAI 原生搜索时,向请求体注入 Live Search 参数。
@@ -147,7 +151,7 @@ class ProviderOpenAIOfficial(Provider):
logger.debug(f"completion: {completion}")
llm_response = await self.parse_openai_completion(completion, tools)
llm_response = await self._parse_openai_completion(completion, tools)
return llm_response
@@ -200,39 +204,82 @@ class ProviderOpenAIOfficial(Provider):
if len(chunk.choices) == 0:
continue
delta = chunk.choices[0].delta
# logger.debug(f"chunk delta: {delta}")
# handle the content delta
reasoning = self._extract_reasoning_content(chunk)
_y = False
if reasoning:
llm_response.reasoning_content = reasoning
_y = True
if delta.content:
completion_text = delta.content
llm_response.result_chain = MessageChain(
chain=[Comp.Plain(completion_text)],
)
_y = True
if _y:
yield llm_response
final_completion = state.get_final_completion()
llm_response = await self.parse_openai_completion(final_completion, tools)
llm_response = await self._parse_openai_completion(final_completion, tools)
yield llm_response
async def parse_openai_completion(
def _extract_reasoning_content(
self,
completion: ChatCompletion | ChatCompletionChunk,
) -> str:
"""Extract reasoning content from OpenAI ChatCompletion if available."""
reasoning_text = ""
if len(completion.choices) == 0:
return reasoning_text
if isinstance(completion, ChatCompletion):
choice = completion.choices[0]
reasoning_attr = getattr(choice.message, self.reasoning_key, None)
if reasoning_attr:
reasoning_text = str(reasoning_attr)
elif isinstance(completion, ChatCompletionChunk):
delta = completion.choices[0].delta
reasoning_attr = getattr(delta, self.reasoning_key, None)
if reasoning_attr:
reasoning_text = str(reasoning_attr)
return reasoning_text
async def _parse_openai_completion(
self, completion: ChatCompletion, tools: ToolSet | None
) -> LLMResponse:
"""解析 OpenAI ChatCompletion 响应"""
"""Parse OpenAI ChatCompletion into LLMResponse"""
llm_response = LLMResponse("assistant")
if len(completion.choices) == 0:
raise Exception("API 返回的 completion 为空。")
choice = completion.choices[0]
# parse the text completion
if choice.message.content is not None:
# text completion
completion_text = str(choice.message.content).strip()
# specially, some providers may wrap reasoning content in <think> tags inside the completion text;
# we use a regex to strip them and store the extracted text in the reasoning_content field
reasoning_pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL)
matches = reasoning_pattern.findall(completion_text)
if matches:
llm_response.reasoning_content = "\n".join(
[match.strip() for match in matches],
)
completion_text = reasoning_pattern.sub("", completion_text).strip()
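# Example: "<think>user wants the capital</think>Paris." yields
# reasoning_content == "user wants the capital" and completion_text == "Paris."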
llm_response.result_chain = MessageChain().message(completion_text)
# parse the reasoning content if any;
# it takes priority over the <think> tag extraction, but must not
# clobber already-extracted reasoning with an empty string
if reasoning := self._extract_reasoning_content(completion):
    llm_response.reasoning_content = reasoning
# parse tool calls if any
if choice.message.tool_calls and tools is not None:
# tools call (function calling)
args_ls = []
func_name_ls = []
tool_call_ids = []
tool_call_extra_content_dict = {}
for tool_call in choice.message.tool_calls:
if isinstance(tool_call, str):
# workaround for #1359
@@ -250,16 +297,21 @@ class ProviderOpenAIOfficial(Provider):
args_ls.append(args)
func_name_ls.append(tool_call.function.name)
tool_call_ids.append(tool_call.id)
# gemini-2.5 / gemini-3 series extra_content handling
extra_content = getattr(tool_call, "extra_content", None)
if extra_content is not None:
tool_call_extra_content_dict[tool_call.id] = extra_content
llm_response.role = "tool"
llm_response.tools_call_args = args_ls
llm_response.tools_call_name = func_name_ls
llm_response.tools_call_ids = tool_call_ids
llm_response.tools_call_extra_content = tool_call_extra_content_dict
# specially handle finish reason
if choice.finish_reason == "content_filter":
raise Exception(
"API 返回的 completion 由于内容安全过滤被拒绝(非 AstrBot)。",
)
if llm_response.completion_text is None and not llm_response.tools_call_args:
logger.error(f"API 返回的 completion 无法解析:{completion}")
raise Exception(f"API 返回的 completion 无法解析:{completion}")
@@ -307,7 +359,7 @@ class ProviderOpenAIOfficial(Provider):
payloads = {"messages": context_query, **model_config}
# Inject xAI native Live Search parameters here (minimally invasive)
self._maybe_inject_xai_search(payloads, **kwargs)
return payloads, context_query
@@ -429,12 +481,6 @@ class ProviderOpenAIOfficial(Provider):
self.client.api_key = chosen_key
llm_response = await self._query(payloads, func_tool)
break
except UnprocessableEntityError as e:
logger.warning(f"不可处理的实体错误:{e},尝试删除图片。")
# Try removing all images
new_contexts = await self._remove_image_from_context(context_query)
payloads["messages"] = new_contexts
context_query = new_contexts
except Exception as e:
last_exception = e
(
@@ -499,12 +545,6 @@ class ProviderOpenAIOfficial(Provider):
async for response in self._query_stream(payloads, func_tool):
yield response
break
except UnprocessableEntityError as e:
logger.warning(f"不可处理的实体错误:{e},尝试删除图片。")
# Try removing all images
new_contexts = await self._remove_image_from_context(context_query)
payloads["messages"] = new_contexts
context_query = new_contexts
except Exception as e:
last_exception = e
(
@@ -600,4 +640,3 @@ class ProviderOpenAIOfficial(Provider):
with open(image_url, "rb") as f:
image_bs64 = base64.b64encode(f.read()).decode("utf-8")
return "data:image/jpeg;base64," + image_bs64
return ""

View File

@@ -12,10 +12,5 @@ class ProviderZhipu(ProviderOpenAIOfficial):
self,
provider_config: dict,
provider_settings: dict,
default_persona=None,
) -> None:
super().__init__(
provider_config,
provider_settings,
default_persona,
)
super().__init__(provider_config, provider_settings)

View File

@@ -5,6 +5,10 @@ from typing import Any
from deprecated import deprecated
from astrbot.core.agent.hooks import BaseAgentRunHooks
from astrbot.core.agent.message import Message
from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
from astrbot.core.agent.tool import ToolSet
from astrbot.core.astrbot_config_mgr import AstrBotConfigManager
from astrbot.core.config.astrbot_config import AstrBotConfig
from astrbot.core.conversation_mgr import ConversationManager
@@ -13,10 +17,10 @@ from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager
from astrbot.core.message.message_event_result import MessageChain
from astrbot.core.persona_mgr import PersonaManager
from astrbot.core.platform import Platform
from astrbot.core.platform.astr_message_event import MessageSesion
from astrbot.core.platform.astr_message_event import AstrMessageEvent, MessageSesion
from astrbot.core.platform.manager import PlatformManager
from astrbot.core.platform_message_history_mgr import PlatformMessageHistoryManager
from astrbot.core.provider.entities import ProviderType
from astrbot.core.provider.entities import LLMResponse, ProviderRequest, ProviderType
from astrbot.core.provider.func_tool_manager import FunctionTool, FunctionToolManager
from astrbot.core.provider.manager import ProviderManager
from astrbot.core.provider.provider import (
@@ -31,6 +35,7 @@ from astrbot.core.star.filter.platform_adapter_type import (
PlatformAdapterType,
)
from ..exceptions import ProviderNotFoundError
from .filter.command import CommandFilter
from .filter.regex import RegexFilter
from .star import StarMetadata, star_map, star_registry
@@ -75,6 +80,153 @@ class Context:
self.astrbot_config_mgr = astrbot_config_mgr
self.kb_manager = knowledge_base_manager
async def llm_generate(
self,
*,
chat_provider_id: str,
prompt: str | None = None,
image_urls: list[str] | None = None,
tools: ToolSet | None = None,
system_prompt: str | None = None,
contexts: list[Message] | None = None,
**kwargs: Any,
) -> LLMResponse:
"""Call the LLM to generate a response. The method will not automatically execute tool calls. If you want to use tool calls, please use `tool_loop_agent()`.
.. versionadded:: 4.5.7 (sdk)
Args:
chat_provider_id: The chat provider ID to use.
prompt: The prompt to send to the LLM, if `contexts` and `prompt` are both provided, `prompt` will be appended as the last user message
image_urls: List of image URLs to include in the prompt, if `contexts` and `prompt` are both provided, `image_urls` will be appended to the last user message
tools: ToolSet of tools available to the LLM
system_prompt: System prompt to guide the LLM's behavior; if provided, it is always inserted as the first system message in the context
contexts: context messages for the LLM
**kwargs: Additional keyword arguments for LLM generation, OpenAI compatible
Raises:
ProviderNotFoundError: If the specified chat provider ID is not found
Exception: For other errors during LLM generation
"""
prov = await self.provider_manager.get_provider_by_id(chat_provider_id)
if not prov or not isinstance(prov, Provider):
raise ProviderNotFoundError(f"Provider {chat_provider_id} not found")
llm_resp = await prov.text_chat(
prompt=prompt,
image_urls=image_urls,
func_tool=tools,
contexts=contexts,
system_prompt=system_prompt,
**kwargs,
)
return llm_resp
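A hedged usage sketch from inside a plugin handler (the provider ID and the surrounding self.context handle are assumptions; the keyword names come from the signature above):

# Sketch: `self.context` is the Context a Star receives; "my-openai" is hypothetical.
llm_resp = await self.context.llm_generate(
    chat_provider_id="my-openai",
    prompt="Summarize this paragraph in one sentence.",
    system_prompt="You are terse.",
)
print(llm_resp.completion_text)   # plain text of the completion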
async def tool_loop_agent(
self,
*,
event: AstrMessageEvent,
chat_provider_id: str,
prompt: str | None = None,
image_urls: list[str] | None = None,
tools: ToolSet | None = None,
system_prompt: str | None = None,
contexts: list[Message] | None = None,
max_steps: int = 30,
tool_call_timeout: int = 60,
**kwargs: Any,
) -> LLMResponse:
"""Run an agent loop that allows the LLM to call tools iteratively until a final answer is produced.
If you do not pass the agent_context parameter, the method will create a new agent context.
.. versionadded:: 4.5.7 (sdk)
Args:
chat_provider_id: The chat provider ID to use.
prompt: The prompt to send to the LLM, if `contexts` and `prompt` are both provided, `prompt` will be appended as the last user message
image_urls: List of image URLs to include in the prompt, if `contexts` and `prompt` are both provided, `image_urls` will be appended to the last user message
tools: ToolSet of tools available to the LLM
system_prompt: System prompt to guide the LLM's behavior; if provided, it is always inserted as the first system message in the context
contexts: context messages for the LLM
max_steps: Maximum number of tool calls before stopping the loop
**kwargs: Additional keyword arguments. The kwargs will not be passed to the LLM directly for now, but can include:
agent_hooks: BaseAgentRunHooks[AstrAgentContext] - hooks to run during agent execution
agent_context: AstrAgentContext - context to use for the agent
Returns:
The final LLMResponse after tool calls are completed.
Raises:
ProviderNotFoundError: If the specified chat provider ID is not found
Exception: For other errors during LLM generation
"""
# Import here to avoid circular imports
from astrbot.core.astr_agent_context import (
AgentContextWrapper,
AstrAgentContext,
)
from astrbot.core.astr_agent_tool_exec import FunctionToolExecutor
prov = await self.provider_manager.get_provider_by_id(chat_provider_id)
if not prov or not isinstance(prov, Provider):
raise ProviderNotFoundError(f"Provider {chat_provider_id} not found")
agent_hooks = kwargs.get("agent_hooks") or BaseAgentRunHooks[AstrAgentContext]()
agent_context = kwargs.get("agent_context")
context_ = []
for msg in contexts or []:
if isinstance(msg, Message):
context_.append(msg.model_dump())
else:
context_.append(msg)
request = ProviderRequest(
prompt=prompt,
image_urls=image_urls or [],
func_tool=tools,
contexts=context_,
system_prompt=system_prompt or "",
)
if agent_context is None:
agent_context = AstrAgentContext(
context=self,
event=event,
)
agent_runner = ToolLoopAgentRunner()
tool_executor = FunctionToolExecutor()
await agent_runner.reset(
provider=prov,
request=request,
run_context=AgentContextWrapper(
context=agent_context,
tool_call_timeout=tool_call_timeout,
),
tool_executor=tool_executor,
agent_hooks=agent_hooks,
streaming=kwargs.get("stream", False),
)
async for _ in agent_runner.step_until_done(max_steps):
pass
llm_resp = agent_runner.get_final_llm_resp()
if not llm_resp:
raise Exception("Agent did not produce a final LLM response")
return llm_resp
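The agent variant is invoked the same way but additionally needs the triggering event so tools can act on the session. Again a sketch under the same assumptions:

# Sketch: inside an event handler; `event` is the incoming AstrMessageEvent.
llm_resp = await self.context.tool_loop_agent(
    event=event,
    chat_provider_id="my-openai",            # hypothetical provider ID
    prompt="What's the weather in Tokyo? Use tools if needed.",
    max_steps=10,                             # cap the tool loop early
)
print(llm_resp.completion_text)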
async def get_current_chat_provider_id(self, umo: str) -> str:
"""Get the ID of the currently used chat provider.
Args:
umo(str): unified_message_origin value; if provided and the user has enabled provider session isolation, the provider preferred by that session is used.
Raises:
ProviderNotFoundError: If the specified chat provider is not found
"""
prov = self.get_using_provider(umo)
if not prov:
raise ProviderNotFoundError("Provider not found")
return prov.meta().id
def get_registered_star(self, star_name: str) -> StarMetadata | None:
"""根据插件名获取插件的 Metadata"""
for star in star_registry:
@@ -107,10 +259,6 @@ class Context:
"""
return self.provider_manager.llm_tools.deactivate_llm_tool(name)
def register_provider(self, provider: Provider):
"""注册一个 LLM Provider(Chat_Completion 类型)。"""
self.provider_manager.provider_insts.append(provider)
def get_provider_by_id(
self,
provider_id: str,
@@ -189,45 +337,6 @@ class Context:
return self._config
return self.astrbot_config_mgr.get_conf(umo)
def get_db(self) -> BaseDatabase:
"""获取 AstrBot 数据库。"""
return self._db
def get_event_queue(self) -> Queue:
"""获取事件队列。"""
return self._event_queue
@deprecated(version="4.0.0", reason="Use get_platform_inst instead")
def get_platform(self, platform_type: PlatformAdapterType | str) -> Platform | None:
"""获取指定类型的平台适配器。
该方法已经过时,请使用 get_platform_inst 方法。(>= AstrBot v4.0.0)
"""
for platform in self.platform_manager.platform_insts:
name = platform.meta().name
if isinstance(platform_type, str):
if name == platform_type:
return platform
elif (
name in ADAPTER_NAME_2_TYPE
and ADAPTER_NAME_2_TYPE[name] & platform_type
):
return platform
def get_platform_inst(self, platform_id: str) -> Platform | None:
"""获取指定 ID 的平台适配器实例。
Args:
platform_id (str): 平台适配器的唯一标识符。你可以通过 event.get_platform_id() 获取。
Returns:
Platform: 平台适配器实例,如果未找到则返回 None。
"""
for platform in self.platform_manager.platform_insts:
if platform.meta().id == platform_id:
return platform
async def send_message(
self,
session: str | MessageSesion,
@@ -300,6 +409,49 @@ class Context:
以下的方法已经不推荐使用。请从 AstrBot 文档查看更好的注册方式。
"""
def get_event_queue(self) -> Queue:
"""获取事件队列。"""
return self._event_queue
@deprecated(version="4.0.0", reason="Use get_platform_inst instead")
def get_platform(self, platform_type: PlatformAdapterType | str) -> Platform | None:
"""获取指定类型的平台适配器。
该方法已经过时,请使用 get_platform_inst 方法。(>= AstrBot v4.0.0)
"""
for platform in self.platform_manager.platform_insts:
name = platform.meta().name
if isinstance(platform_type, str):
if name == platform_type:
return platform
elif (
name in ADAPTER_NAME_2_TYPE
and ADAPTER_NAME_2_TYPE[name] & platform_type
):
return platform
def get_platform_inst(self, platform_id: str) -> Platform | None:
"""获取指定 ID 的平台适配器实例。
Args:
platform_id (str): 平台适配器的唯一标识符。你可以通过 event.get_platform_id() 获取。
Returns:
Platform: 平台适配器实例,如果未找到则返回 None。
"""
for platform in self.platform_manager.platform_insts:
if platform.meta().id == platform_id:
return platform
def get_db(self) -> BaseDatabase:
"""获取 AstrBot 数据库。"""
return self._db
def register_provider(self, provider: Provider):
"""注册一个 LLM Provider(Chat_Completion 类型)。"""
self.provider_manager.provider_insts.append(provider)
def register_llm_tool(
self,
name: str,

View File

@@ -1,5 +1,6 @@
from __future__ import annotations
import re
from collections.abc import Awaitable, Callable
from typing import Any
@@ -11,7 +12,7 @@ from astrbot.core.agent.handoff import HandoffTool
from astrbot.core.agent.hooks import BaseAgentRunHooks
from astrbot.core.agent.tool import FunctionTool
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.provider.func_tool_manager import SUPPORTED_TYPES
from astrbot.core.provider.func_tool_manager import PY_TO_JSON_TYPE, SUPPORTED_TYPES
from astrbot.core.provider.register import llm_tools
from ..filter.command import CommandFilter
@@ -417,18 +418,37 @@ def register_llm_tool(name: str | None = None, **kwargs):
docstring = docstring_parser.parse(func_doc)
args = []
for arg in docstring.params:
if arg.type_name not in SUPPORTED_TYPES:
sub_type_name = None
type_name = arg.type_name
if not type_name:
raise ValueError(
f"LLM 函数工具 {awaitable.__module__}_{llm_tool_name} 的参数 {arg.arg_name} 缺少类型注释。",
)
# parse type_name to handle cases like "list[string]"
match = re.match(r"(\w+)\[(\w+)\]", type_name)
if match:
type_name = match.group(1)
sub_type_name = match.group(2)
type_name = PY_TO_JSON_TYPE.get(type_name, type_name)
if sub_type_name:
sub_type_name = PY_TO_JSON_TYPE.get(sub_type_name, sub_type_name)
if type_name not in SUPPORTED_TYPES or (
sub_type_name and sub_type_name not in SUPPORTED_TYPES
):
raise ValueError(
f"LLM 函数工具 {awaitable.__module__}_{llm_tool_name} 不支持的参数类型:{arg.type_name}",
)
args.append(
{
"type": arg.type_name,
"name": arg.arg_name,
"description": arg.description,
},
)
# print(llm_tool_name, registering_agent)
arg_json_schema = {
"type": type_name,
"name": arg.arg_name,
"description": arg.description,
}
if sub_type_name:
if type_name == "array":
arg_json_schema["items"] = {"type": sub_type_name}
args.append(arg_json_schema)
if not registering_agent:
doc_desc = docstring.description.strip() if docstring.description else ""
md = get_handler_or_create(awaitable, EventType.OnCallingFuncToolEvent)
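End to end, the new parsing turns a docstring annotation such as `list[string]` into a JSON-schema property. A standalone rendition of just the type handling (the docstring-parser plumbing is omitted; the constants are copied from func_tool_manager):

import re

PY_TO_JSON_TYPE = {"int": "number", "float": "number", "bool": "boolean",
                   "str": "string", "dict": "object", "list": "array",
                   "tuple": "array", "set": "array"}
SUPPORTED_TYPES = ["string", "number", "object", "array", "boolean"]

def to_schema(type_name: str) -> dict:
    sub = None
    m = re.match(r"(\w+)\[(\w+)\]", type_name)   # e.g. "list[string]"
    if m:
        type_name, sub = m.group(1), m.group(2)
    type_name = PY_TO_JSON_TYPE.get(type_name, type_name)
    if sub:
        sub = PY_TO_JSON_TYPE.get(sub, sub)
    if type_name not in SUPPORTED_TYPES or (sub and sub not in SUPPORTED_TYPES):
        raise ValueError(f"unsupported type: {type_name}")
    schema = {"type": type_name}
    if sub and type_name == "array":
        schema["items"] = {"type": sub}   # element type of the array
    return schema

assert to_schema("list[string]") == {"type": "array", "items": {"type": "string"}}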

View File

@@ -10,7 +10,6 @@ from quart import g, make_response, request
from astrbot.core import logger
from astrbot.core.core_lifecycle import AstrBotCoreLifecycle
from astrbot.core.db import BaseDatabase
from astrbot.core.platform.astr_message_event import MessageSession
from astrbot.core.platform.sources.webchat.webchat_queue_mgr import webchat_queue_mgr
from astrbot.core.utils.astrbot_path import get_astrbot_data_path
@@ -36,11 +35,14 @@ class ChatRoute(Route):
super().__init__(context)
self.routes = {
"/chat/send": ("POST", self.chat),
"/chat/new_conversation": ("GET", self.new_conversation),
"/chat/conversations": ("GET", self.get_conversations),
"/chat/get_conversation": ("GET", self.get_conversation),
"/chat/delete_conversation": ("GET", self.delete_conversation),
"/chat/rename_conversation": ("POST", self.rename_conversation),
"/chat/new_session": ("GET", self.new_session),
"/chat/sessions": ("GET", self.get_sessions),
"/chat/get_session": ("GET", self.get_session),
"/chat/delete_session": ("GET", self.delete_webchat_session),
"/chat/update_session_display_name": (
"POST",
self.update_session_display_name,
),
"/chat/get_file": ("GET", self.get_file),
"/chat/post_image": ("POST", self.post_image),
"/chat/post_file": ("POST", self.post_file),
@@ -53,6 +55,7 @@ class ChatRoute(Route):
self.supported_imgs = ["jpg", "jpeg", "png", "gif", "webp"]
self.conv_mgr = core_lifecycle.conversation_manager
self.platform_history_mgr = core_lifecycle.platform_message_history_manager
self.db = db
self.running_convs: dict[str, bool] = {}
@@ -116,26 +119,31 @@ class ChatRoute(Route):
if "message" not in post_data and "image_url" not in post_data:
return Response().error("Missing key: message or image_url").__dict__
if "conversation_id" not in post_data:
return Response().error("Missing key: conversation_id").__dict__
if "session_id" not in post_data and "conversation_id" not in post_data:
return (
Response().error("Missing key: session_id or conversation_id").__dict__
)
message = post_data["message"]
conversation_id = post_data["conversation_id"]
# conversation_id = post_data["conversation_id"]
session_id = post_data.get("session_id", post_data.get("conversation_id"))
image_url = post_data.get("image_url")
audio_url = post_data.get("audio_url")
selected_provider = post_data.get("selected_provider")
selected_model = post_data.get("selected_model")
enable_streaming = post_data.get("enable_streaming", True)  # defaults to True
if not message and not image_url and not audio_url:
return (
Response()
.error("Message and image_url and audio_url are empty")
.__dict__
)
if not conversation_id:
return Response().error("conversation_id is empty").__dict__
if not session_id:
return Response().error("session_id is empty").__dict__
# Append the user message
webchat_conv_id = await self._get_webchat_conv_id_from_conv_id(conversation_id)
webchat_conv_id = session_id
# Get the session-specific queue
back_queue = webchat_queue_mgr.get_or_create_back_queue(webchat_conv_id)
@@ -202,6 +210,8 @@ class ChatRoute(Route):
):
# Append the bot message
new_his = {"type": "bot", "message": result_text}
if "reasoning" in result:
new_his["reasoning"] = result["reasoning"]
await self.platform_history_mgr.insert(
platform_id="webchat",
user_id=webchat_conv_id,
@@ -224,6 +234,7 @@ class ChatRoute(Route):
"audio_url": audio_url,
"selected_provider": selected_provider,
"selected_model": selected_model,
"enable_streaming": enable_streaming,
},
),
)
@@ -240,88 +251,110 @@ class ChatRoute(Route):
response.timeout = None # fix SSE auto disconnect issue
return response
async def _get_webchat_conv_id_from_conv_id(self, conversation_id: str) -> str:
"""从对话 ID 中提取 WebChat 会话 ID
NOTE: 关于这里为什么要单独做一个 WebChat 的 Conversation ID 出来,这个是为了向前兼容。
"""
conversation = await self.conv_mgr.get_conversation(
unified_msg_origin="webchat",
conversation_id=conversation_id,
)
if not conversation:
raise ValueError(f"Conversation with ID {conversation_id} not found.")
conv_user_id = conversation.user_id
webchat_session_id = MessageSession.from_str(conv_user_id).session_id
if "!" not in webchat_session_id:
raise ValueError(f"Invalid conv user ID: {conv_user_id}")
return webchat_session_id.split("!")[-1]
async def delete_conversation(self):
conversation_id = request.args.get("conversation_id")
if not conversation_id:
return Response().error("Missing key: conversation_id").__dict__
async def delete_webchat_session(self):
"""Delete a Platform session and all its related data."""
session_id = request.args.get("session_id")
if not session_id:
return Response().error("Missing key: session_id").__dict__
username = g.get("username", "guest")
# Clean up queues when deleting conversation
webchat_queue_mgr.remove_queues(conversation_id)
webchat_conv_id = await self._get_webchat_conv_id_from_conv_id(conversation_id)
await self.conv_mgr.delete_conversation(
unified_msg_origin=f"webchat:FriendMessage:webchat!{username}!{webchat_conv_id}",
conversation_id=conversation_id,
)
# Verify the session exists and belongs to the current user
session = await self.db.get_platform_session_by_id(session_id)
if not session:
return Response().error(f"Session {session_id} not found").__dict__
if session.creator != username:
return Response().error("Permission denied").__dict__
# Delete all conversations under this session
unified_msg_origin = f"{session.platform_id}:FriendMessage:{session.platform_id}!{username}!{session_id}"
await self.conv_mgr.delete_conversations_by_user_id(unified_msg_origin)
# Delete the message history
await self.platform_history_mgr.delete(
platform_id="webchat",
user_id=webchat_conv_id,
platform_id=session.platform_id,
user_id=session_id,
offset_sec=99999999,
)
# Clean up queues (webchat only)
if session.platform_id == "webchat":
webchat_queue_mgr.remove_queues(session_id)
# Delete the session
await self.db.delete_platform_session(session_id)
return Response().ok().__dict__
async def new_conversation(self):
async def new_session(self):
"""Create a new Platform session (default: webchat)."""
username = g.get("username", "guest")
webchat_conv_id = str(uuid.uuid4())
conv_id = await self.conv_mgr.new_conversation(
unified_msg_origin=f"webchat:FriendMessage:webchat!{username}!{webchat_conv_id}",
platform_id="webchat",
content=[],
# Optional platform_id parameter; defaults to webchat
platform_id = request.args.get("platform_id", "webchat")
# Create the new session
session = await self.db.create_platform_session(
creator=username,
platform_id=platform_id,
is_group=0,
)
return Response().ok(data={"conversation_id": conv_id}).__dict__
async def rename_conversation(self):
post_data = await request.json
if "conversation_id" not in post_data or "title" not in post_data:
return Response().error("Missing key: conversation_id or title").__dict__
conversation_id = post_data["conversation_id"]
title = post_data["title"]
await self.conv_mgr.update_conversation(
unified_msg_origin="webchat", # fake
conversation_id=conversation_id,
title=title,
return (
Response()
.ok(
data={
"session_id": session.session_id,
"platform_id": session.platform_id,
}
)
.__dict__
)
return Response().ok(message="重命名成功!").__dict__
async def get_conversations(self):
conversations = await self.conv_mgr.get_conversations(platform_id="webchat")
# remove content
conversations_ = []
for conv in conversations:
conv.history = None
conversations_.append(conv)
return Response().ok(data=conversations_).__dict__
async def get_sessions(self):
"""Get all Platform sessions for the current user."""
username = g.get("username", "guest")
async def get_conversation(self):
conversation_id = request.args.get("conversation_id")
if not conversation_id:
return Response().error("Missing key: conversation_id").__dict__
# Optional platform_id filter
platform_id = request.args.get("platform_id")
webchat_conv_id = await self._get_webchat_conv_id_from_conv_id(conversation_id)
sessions = await self.db.get_platform_sessions_by_creator(
creator=username,
platform_id=platform_id,
page=1,
page_size=100,  # return only the first 100 for now
)
# Get platform message history
# Convert to dict form and attach extra info
sessions_data = []
for session in sessions:
sessions_data.append(
{
"session_id": session.session_id,
"platform_id": session.platform_id,
"creator": session.creator,
"display_name": session.display_name,
"is_group": session.is_group,
"created_at": session.created_at.astimezone().isoformat(),
"updated_at": session.updated_at.astimezone().isoformat(),
}
)
return Response().ok(data=sessions_data).__dict__
async def get_session(self):
"""Get session information and message history by session_id."""
session_id = request.args.get("session_id")
if not session_id:
return Response().error("Missing key: session_id").__dict__
# Look up the session to determine its platform_id
session = await self.db.get_platform_session_by_id(session_id)
platform_id = session.platform_id if session else "webchat"
# Get platform message history using session_id
history_ls = await self.platform_history_mgr.get(
platform_id="webchat",
user_id=webchat_conv_id,
platform_id=platform_id,
user_id=session_id,
page=1,
page_size=1000,
)
@@ -333,8 +366,37 @@ class ChatRoute(Route):
.ok(
data={
"history": history_res,
"is_running": self.running_convs.get(webchat_conv_id, False),
"is_running": self.running_convs.get(session_id, False),
},
)
.__dict__
)
async def update_session_display_name(self):
"""Update a Platform session's display name."""
post_data = await request.json
session_id = post_data.get("session_id")
display_name = post_data.get("display_name")
if not session_id:
return Response().error("Missing key: session_id").__dict__
if display_name is None:
return Response().error("Missing key: display_name").__dict__
username = g.get("username", "guest")
# Verify the session exists and belongs to the current user
session = await self.db.get_platform_session_by_id(session_id)
if not session:
return Response().error(f"Session {session_id} not found").__dict__
if session.creator != username:
return Response().error("Permission denied").__dict__
# Update the display_name
await self.db.update_platform_session(
session_id=session_id,
display_name=display_name,
)
return Response().ok().__dict__
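Taken together, the session routes replace the old conversation-centric ones. A hedged client-side walkthrough (the paths come from the routes table above; the host, port, /api prefix, and authentication are assumptions and will differ per deployment):

import requests   # assumption: a plain HTTP client against a local dashboard

BASE = "http://localhost:6185/api"   # hypothetical dashboard address

# 1. Create a webchat session.
sid = requests.get(f"{BASE}/chat/new_session").json()["data"]["session_id"]

# 2. Send a message into it (the real UI consumes the reply via SSE).
requests.post(f"{BASE}/chat/send", json={"session_id": sid, "message": "hello"})

# 3. Rename, then delete, the session.
requests.post(f"{BASE}/chat/update_session_display_name",
              json={"session_id": sid, "display_name": "Scratchpad"})
requests.get(f"{BASE}/chat/delete_session", params={"session_id": sid})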

View File

@@ -48,6 +48,7 @@ class KnowledgeBaseRoute(Route):
# Document management
"/kb/document/list": ("GET", self.list_documents),
"/kb/document/upload": ("POST", self.upload_document),
"/kb/document/upload/url": ("POST", self.upload_document_from_url),
"/kb/document/upload/progress": ("GET", self.get_upload_progress),
"/kb/document/get": ("GET", self.get_document),
"/kb/document/delete": ("POST", self.delete_document),
@@ -1070,3 +1071,174 @@ class KnowledgeBaseRoute(Route):
logger.error(f"删除会话知识库配置失败: {e}")
logger.error(traceback.format_exc())
return Response().error(f"删除会话知识库配置失败: {e!s}").__dict__
async def upload_document_from_url(self):
"""从 URL 上传文档
Body:
- kb_id: 知识库 ID (必填)
- url: 要提取内容的网页 URL (必填)
- chunk_size: 分块大小 (可选, 默认512)
- chunk_overlap: 块重叠大小 (可选, 默认50)
- batch_size: 批处理大小 (可选, 默认32)
- tasks_limit: 并发任务限制 (可选, 默认3)
- max_retries: 最大重试次数 (可选, 默认3)
返回:
- task_id: 任务ID用于查询上传进度和结果
"""
try:
kb_manager = self._get_kb_manager()
data = await request.json
kb_id = data.get("kb_id")
if not kb_id:
return Response().error("缺少参数 kb_id").__dict__
url = data.get("url")
if not url:
return Response().error("缺少参数 url").__dict__
chunk_size = data.get("chunk_size", 512)
chunk_overlap = data.get("chunk_overlap", 50)
batch_size = data.get("batch_size", 32)
tasks_limit = data.get("tasks_limit", 3)
max_retries = data.get("max_retries", 3)
enable_cleaning = data.get("enable_cleaning", False)
cleaning_provider_id = data.get("cleaning_provider_id")
# Fetch the knowledge base
kb_helper = await kb_manager.get_kb(kb_id)
if not kb_helper:
return Response().error("知识库不存在").__dict__
# Generate a task ID
task_id = str(uuid.uuid4())
# Initialize the task state
self.upload_tasks[task_id] = {
"status": "pending",
"result": None,
"error": None,
}
# Start the background task
asyncio.create_task(
self._background_upload_from_url_task(
task_id=task_id,
kb_helper=kb_helper,
url=url,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
batch_size=batch_size,
tasks_limit=tasks_limit,
max_retries=max_retries,
enable_cleaning=enable_cleaning,
cleaning_provider_id=cleaning_provider_id,
),
)
return (
Response()
.ok(
{
"task_id": task_id,
"url": url,
"message": "URL upload task created, processing in background",
},
)
.__dict__
)
except ValueError as e:
return Response().error(str(e)).__dict__
except Exception as e:
logger.error(f"从URL上传文档失败: {e}")
logger.error(traceback.format_exc())
return Response().error(f"从URL上传文档失败: {e!s}").__dict__
async def _background_upload_from_url_task(
self,
task_id: str,
kb_helper,
url: str,
chunk_size: int,
chunk_overlap: int,
batch_size: int,
tasks_limit: int,
max_retries: int,
enable_cleaning: bool,
cleaning_provider_id: str | None,
):
"""后台上传URL任务"""
try:
# Initialize the task state
self.upload_tasks[task_id] = {
"status": "processing",
"result": None,
"error": None,
}
self.upload_progress[task_id] = {
"status": "processing",
"file_index": 0,
"file_total": 1,
"file_name": f"URL: {url}",
"stage": "extracting",
"current": 0,
"total": 100,
}
# Create a progress callback
async def progress_callback(stage, current, total):
if task_id in self.upload_progress:
self.upload_progress[task_id].update(
{
"status": "processing",
"file_index": 0,
"file_name": f"URL: {url}",
"stage": stage,
"current": current,
"total": total,
},
)
# Upload the document
doc = await kb_helper.upload_from_url(
url=url,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
batch_size=batch_size,
tasks_limit=tasks_limit,
max_retries=max_retries,
progress_callback=progress_callback,
enable_cleaning=enable_cleaning,
cleaning_provider_id=cleaning_provider_id,
)
# Mark the task as completed
result = {
"task_id": task_id,
"uploaded": [doc.model_dump()],
"failed": [],
"total": 1,
"success_count": 1,
"failed_count": 0,
}
self.upload_tasks[task_id] = {
"status": "completed",
"result": result,
"error": None,
}
self.upload_progress[task_id]["status"] = "completed"
except Exception as e:
logger.error(f"后台上传URL任务 {task_id} 失败: {e}")
logger.error(traceback.format_exc())
self.upload_tasks[task_id] = {
"status": "failed",
"result": None,
"error": str(e),
}
if task_id in self.upload_progress:
self.upload_progress[task_id]["status"] = "failed"

changelogs/v4.5.7.md Normal file
View File

@@ -0,0 +1,12 @@
## What's Changed
1. New: support custom request headers for OpenAI API providers ([#3581](https://github.com/AstrBotDevs/AstrBot/issues/3581))
2. New: show the thinking process of Thinking models in WebChat; support quickly toggling between streaming and non-streaming output ([#3632](https://github.com/AstrBotDevs/AstrBot/issues/3632))
3. New: streamline how plugins call LLMs and Agents by adding several convenience methods to the Context class ([#3636](https://github.com/AstrBotDevs/AstrBot/issues/3636))
4. Improvement: better fallback strategy for messaging platforms that do not support streaming output ([#3547](https://github.com/AstrBotDevs/AstrBot/issues/3547))
5. Improvement: queue concurrent requests within the same session (umo) to avoid context mix-ups caused by concurrency ([#3607](https://github.com/AstrBotDevs/AstrBot/issues/3607))
6. Improvement: polish the WebUI login page and the Changelog page
7. Fix: the "select knowledge base" button rendered incorrectly when the knowledge base name was too long ([#3582](https://github.com/AstrBotDevs/AstrBot/issues/3582))
8. Fix: a deadlock that could occur when sending segmented messages (introduced by PR #3607)
9. Fix: some commands did not take effect on the DingTalk adapter ([#3634](https://github.com/AstrBotDevs/AstrBot/issues/3634))
10. Misc: add the missing send_streaming method to several adapters ([#3545](https://github.com/AstrBotDevs/AstrBot/issues/3545))

changelogs/v4.5.8.md Normal file
View File

@@ -0,0 +1,5 @@
## What's Changed
Hotfix release for 4.5.7.
fix: images could not be sent, raising `pydantic_core._pydantic_core.ValidationError`

changelogs/v4.6.0.md Normal file
View File

@@ -0,0 +1,23 @@
## What's Changed
1. feat: add supports for gemini-3 series thought signature ([#3698](https://github.com/AstrBotDevs/AstrBot/issues/3698))
2. feat: supports knowledge base agentic search ([#3667](https://github.com/AstrBotDevs/AstrBot/issues/3667))
3. feat: Add URL document parser for knowledge base ([#3622](https://github.com/AstrBotDevs/AstrBot/issues/3622))
4. fix(core.platform): fix message mix-up issue when enabling multiple WeCom AI Bot adapters ([#3693](https://github.com/AstrBotDevs/AstrBot/issues/3693))
5. fix: fix `anyio.ClosedResourceError` that may occur when calling mcp tools after a period of successful connection to MCP Server ([#3700](https://github.com/AstrBotDevs/AstrBot/issues/3700))
6. feat(chat): refactor chat component structure and add new features ([#3701](https://github.com/AstrBotDevs/AstrBot/issues/3701))
7. fix(dashboard.i18n): complete the missing i18n keys for en ([#3699](https://github.com/AstrBotDevs/AstrBot/issues/3699))
8. refactor: Implement WebChat session management and migration from version 4.6 to 4.7
9. ci(docker-build): build nightly image everyday ([#3120](https://github.com/AstrBotDevs/AstrBot/issues/3120))

File diff suppressed because one or more lines are too long

(Binary image file changed; new size: 58 KiB)

File diff suppressed because it is too large

View File

@@ -0,0 +1,283 @@
<template>
<div class="input-area fade-in">
<div class="input-container"
style="width: 85%; max-width: 900px; margin: 0 auto; border: 1px solid #e0e0e0; border-radius: 24px;">
<textarea
ref="inputField"
v-model="localPrompt"
@keydown="handleKeyDown"
:disabled="disabled"
placeholder="Ask AstrBot..."
style="width: 100%; resize: none; outline: none; border: 1px solid var(--v-theme-border); border-radius: 12px; padding: 8px 16px; min-height: 40px; font-family: inherit; font-size: 16px; background-color: var(--v-theme-surface);"></textarea>
<div style="display: flex; justify-content: space-between; align-items: center; padding: 0px 12px;">
<div style="display: flex; justify-content: flex-start; margin-top: 4px; align-items: center; gap: 8px;">
<ProviderModelSelector ref="providerModelSelectorRef" />
<v-tooltip :text="enableStreaming ? tm('streaming.enabled') : tm('streaming.disabled')" location="top">
<template v-slot:activator="{ props }">
<v-chip v-bind="props" @click="$emit('toggleStreaming')" size="x-small" class="streaming-toggle-chip">
<v-icon start :icon="enableStreaming ? 'mdi-flash' : 'mdi-flash-off'" size="small"></v-icon>
{{ enableStreaming ? tm('streaming.on') : tm('streaming.off') }}
</v-chip>
</template>
</v-tooltip>
</div>
<div style="display: flex; justify-content: flex-end; margin-top: 8px; align-items: center;">
<input type="file" ref="imageInputRef" @change="handleFileSelect" accept="image/*"
style="display: none" multiple />
<v-progress-circular v-if="disabled" indeterminate size="16" class="mr-1" width="1.5" />
<v-btn @click="triggerImageInput" icon="mdi-plus" variant="text" color="deep-purple"
class="add-btn" size="small" />
<v-btn @click="handleRecordClick"
:icon="isRecording ? 'mdi-stop-circle' : 'mdi-microphone'" variant="text"
:color="isRecording ? 'error' : 'deep-purple'" class="record-btn" size="small" />
<v-btn @click="$emit('send')" icon="mdi-send" variant="text" color="deep-purple"
:disabled="!canSend" class="send-btn" size="small" />
</div>
</div>
</div>
<!-- 附件预览区 -->
<div class="attachments-preview" v-if="stagedImagesUrl.length > 0 || stagedAudioUrl">
<div v-for="(img, index) in stagedImagesUrl" :key="index" class="image-preview">
<img :src="img" class="preview-image" />
<v-btn @click="$emit('removeImage', index)" class="remove-attachment-btn" icon="mdi-close"
size="small" color="error" variant="text" />
</div>
<div v-if="stagedAudioUrl" class="audio-preview">
<v-chip color="deep-purple-lighten-4" class="audio-chip">
<v-icon start icon="mdi-microphone" size="small"></v-icon>
{{ tm('voice.recording') }}
</v-chip>
<v-btn @click="$emit('removeAudio')" class="remove-attachment-btn" icon="mdi-close" size="small"
color="error" variant="text" />
</div>
</div>
</div>
</template>
<script setup lang="ts">
import { ref, computed, onMounted, onBeforeUnmount, watch } from 'vue';
import { useModuleI18n } from '@/i18n/composables';
import ProviderModelSelector from './ProviderModelSelector.vue';
interface Props {
prompt: string;
stagedImagesUrl: string[];
stagedAudioUrl: string;
disabled: boolean;
enableStreaming: boolean;
isRecording: boolean;
}
const props = defineProps<Props>();
const emit = defineEmits<{
'update:prompt': [value: string];
send: [];
toggleStreaming: [];
removeImage: [index: number];
removeAudio: [];
startRecording: [];
stopRecording: [];
pasteImage: [event: ClipboardEvent];
fileSelect: [files: FileList];
}>();
const { tm } = useModuleI18n('features/chat');
const inputField = ref<HTMLTextAreaElement | null>(null);
const imageInputRef = ref<HTMLInputElement | null>(null);
const providerModelSelectorRef = ref<InstanceType<typeof ProviderModelSelector> | null>(null);
const localPrompt = computed({
get: () => props.prompt,
set: (value) => emit('update:prompt', value)
});
const canSend = computed(() => {
return (props.prompt && props.prompt.trim()) || props.stagedImagesUrl.length > 0 || props.stagedAudioUrl;
});
// Ctrl+B long-press recording state
const ctrlKeyDown = ref(false);
const ctrlKeyTimer = ref<number | null>(null);
const ctrlKeyLongPressThreshold = 300;
function handleKeyDown(e: KeyboardEvent) {
// Enter sends the message
if (e.keyCode === 13 && !e.shiftKey) {
e.preventDefault();
if (canSend.value) {
emit('send');
}
}
// Ctrl+B to record
if (e.ctrlKey && e.keyCode === 66) {
e.preventDefault();
if (ctrlKeyDown.value) return;
ctrlKeyDown.value = true;
ctrlKeyTimer.value = window.setTimeout(() => {
if (ctrlKeyDown.value && !props.isRecording) {
emit('startRecording');
}
}, ctrlKeyLongPressThreshold);
}
}
function handleKeyUp(e: KeyboardEvent) {
if (e.keyCode === 66) {
ctrlKeyDown.value = false;
if (ctrlKeyTimer.value) {
clearTimeout(ctrlKeyTimer.value);
ctrlKeyTimer.value = null;
}
if (props.isRecording) {
emit('stopRecording');
}
}
}
function handlePaste(e: ClipboardEvent) {
emit('pasteImage', e);
}
function triggerImageInput() {
imageInputRef.value?.click();
}
function handleFileSelect(event: Event) {
const target = event.target as HTMLInputElement;
const files = target.files;
if (files) {
emit('fileSelect', files);
}
target.value = '';
}
function handleRecordClick() {
if (props.isRecording) {
emit('stopRecording');
} else {
emit('startRecording');
}
}
function getCurrentSelection() {
return providerModelSelectorRef.value?.getCurrentSelection();
}
onMounted(() => {
if (inputField.value) {
inputField.value.addEventListener('paste', handlePaste);
}
document.addEventListener('keyup', handleKeyUp);
});
onBeforeUnmount(() => {
if (inputField.value) {
inputField.value.removeEventListener('paste', handlePaste);
}
document.removeEventListener('keyup', handleKeyUp);
});
defineExpose({
getCurrentSelection
});
</script>
<style scoped>
.input-area {
padding: 16px;
background-color: var(--v-theme-surface);
position: relative;
border-top: 1px solid var(--v-theme-border);
flex-shrink: 0;
}
.attachments-preview {
display: flex;
gap: 8px;
max-width: 900px;
margin: 8px auto 0;
flex-wrap: wrap;
}
.image-preview,
.audio-preview {
position: relative;
display: inline-flex;
}
.preview-image {
width: 60px;
height: 60px;
object-fit: cover;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.audio-chip {
height: 36px;
border-radius: 18px;
}
.remove-attachment-btn {
position: absolute;
top: -8px;
right: -8px;
opacity: 0.8;
transition: opacity 0.2s;
}
.remove-attachment-btn:hover {
opacity: 1;
}
.streaming-toggle-chip {
cursor: pointer;
transition: all 0.2s ease;
user-select: none;
}
.streaming-toggle-chip:hover {
opacity: 0.8;
}
.fade-in {
animation: fadeIn 0.3s ease-in-out;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
@media (max-width: 768px) {
.input-area {
padding: 0 !important;
}
.input-container {
width: 100% !important;
max-width: 100% !important;
margin: 0 !important;
border-radius: 0 !important;
border-left: none !important;
border-right: none !important;
border-bottom: none !important;
}
}
</style>

View File

@@ -0,0 +1,296 @@
<template>
<div class="sidebar-panel"
:class="{
'sidebar-collapsed': sidebarCollapsed && !isMobile,
'mobile-sidebar-open': isMobile && mobileMenuOpen,
'mobile-sidebar': isMobile
}"
:style="{ 'background-color': isDark ? sidebarCollapsed ? '#1e1e1e' : '#2d2d2d' : sidebarCollapsed ? '#ffffff' : '#f1f4f9' }"
@mouseenter="handleSidebarMouseEnter"
@mouseleave="handleSidebarMouseLeave">
<div style="display: flex; align-items: center; justify-content: center; padding: 16px; padding-bottom: 0px;"
v-if="chatboxMode">
<img width="50" src="@/assets/images/icon-no-shadow.svg" alt="AstrBot Logo">
<span v-if="!sidebarCollapsed"
style="font-weight: 1000; font-size: 26px; margin-left: 8px;">AstrBot</span>
</div>
<div class="sidebar-collapse-btn-container" v-if="!isMobile">
<v-btn icon class="sidebar-collapse-btn" @click="toggleSidebar" variant="text" color="deep-purple">
<v-icon>{{ (sidebarCollapsed || (!sidebarCollapsed && sidebarHoverExpanded)) ?
'mdi-chevron-right' : 'mdi-chevron-left' }}</v-icon>
</v-btn>
</div>
<div class="sidebar-collapse-btn-container" v-if="isMobile">
<v-btn icon class="sidebar-collapse-btn" @click="$emit('closeMobileSidebar')" variant="text"
color="deep-purple">
<v-icon>mdi-close</v-icon>
</v-btn>
</div>
<div style="padding: 16px; padding-top: 8px;">
<v-btn block variant="text" class="new-chat-btn" @click="$emit('newChat')" :disabled="!currSessionId"
v-if="!sidebarCollapsed || isMobile" prepend-icon="mdi-plus"
style="background-color: transparent !important; border-radius: 4px;">{{ tm('actions.newChat') }}</v-btn>
<v-btn icon="mdi-plus" rounded="lg" @click="$emit('newChat')" :disabled="!currSessionId"
v-if="sidebarCollapsed && !isMobile" elevation="0"></v-btn>
</div>
<div v-if="!sidebarCollapsed || isMobile">
<v-divider class="mx-4"></v-divider>
</div>
<div style="overflow-y: auto; flex-grow: 1;" :class="{ 'fade-in': sidebarHoverExpanded }"
v-if="!sidebarCollapsed || isMobile">
<v-card v-if="sessions.length > 0" flat style="background-color: transparent;">
<v-list density="compact" nav class="conversation-list"
style="background-color: transparent;" :selected="selectedSessions"
@update:selected="$emit('selectConversation', $event)">
<v-list-item v-for="item in sessions" :key="item.session_id" :value="item.session_id"
rounded="lg" class="conversation-item" active-color="secondary">
<v-list-item-title v-if="!sidebarCollapsed || isMobile" class="conversation-title">
{{ item.display_name || tm('conversation.newConversation') }}
</v-list-item-title>
<v-list-item-subtitle v-if="!sidebarCollapsed || isMobile" class="timestamp">
{{ new Date(item.updated_at).toLocaleString() }}
</v-list-item-subtitle>
<template v-if="!sidebarCollapsed || isMobile" v-slot:append>
<div class="conversation-actions">
<v-btn icon="mdi-pencil" size="x-small" variant="text"
class="edit-title-btn"
@click.stop="$emit('editTitle', item.session_id, item.display_name)" />
<v-btn icon="mdi-delete" size="x-small" variant="text"
class="delete-conversation-btn" color="error"
@click.stop="$emit('deleteConversation', item.session_id)" />
</div>
</template>
</v-list-item>
</v-list>
</v-card>
<v-fade-transition>
<div class="no-conversations" v-if="sessions.length === 0">
<v-icon icon="mdi-message-text-outline" size="large" color="grey-lighten-1"></v-icon>
<div class="no-conversations-text" v-if="!sidebarCollapsed || sidebarHoverExpanded || isMobile">
{{ tm('conversation.noHistory') }}
</div>
</div>
</v-fade-transition>
</div>
</div>
</template>
<script setup lang="ts">
import { ref } from 'vue';
import { useModuleI18n } from '@/i18n/composables';
import type { Session } from '@/composables/useSessions';
interface Props {
sessions: Session[];
selectedSessions: string[];
currSessionId: string;
isDark: boolean;
chatboxMode: boolean;
isMobile: boolean;
mobileMenuOpen: boolean;
}
const props = defineProps<Props>();
defineEmits<{
newChat: [];
selectConversation: [sessionIds: string[]];
editTitle: [sessionId: string, title: string];
deleteConversation: [sessionId: string];
closeMobileSidebar: [];
}>();
const { tm } = useModuleI18n('features/chat');
const sidebarCollapsed = ref(true);
const sidebarHovered = ref(false);
const sidebarHoverTimer = ref<number | null>(null);
const sidebarHoverExpanded = ref(false);
const sidebarHoverDelay = 100;
// Restore the sidebar collapsed state from localStorage (defaults to collapsed)
const savedCollapsedState = localStorage.getItem('sidebarCollapsed');
if (savedCollapsedState !== null) {
sidebarCollapsed.value = JSON.parse(savedCollapsedState);
}
function toggleSidebar() {
if (sidebarHoverExpanded.value) {
sidebarHoverExpanded.value = false;
return;
}
sidebarCollapsed.value = !sidebarCollapsed.value;
localStorage.setItem('sidebarCollapsed', JSON.stringify(sidebarCollapsed.value));
}
function handleSidebarMouseEnter() {
if (!sidebarCollapsed.value || props.isMobile) return;
sidebarHovered.value = true;
sidebarHoverTimer.value = window.setTimeout(() => {
if (sidebarHovered.value) {
sidebarHoverExpanded.value = true;
sidebarCollapsed.value = false;
}
}, sidebarHoverDelay);
}
function handleSidebarMouseLeave() {
sidebarHovered.value = false;
if (sidebarHoverTimer.value) {
clearTimeout(sidebarHoverTimer.value);
sidebarHoverTimer.value = null;
}
if (sidebarHoverExpanded.value) {
sidebarCollapsed.value = true;
}
sidebarHoverExpanded.value = false;
}
</script>
<style scoped>
.sidebar-panel {
max-width: 270px;
min-width: 240px;
display: flex;
flex-direction: column;
padding: 0;
border-right: 1px solid rgba(0, 0, 0, 0.04);
height: 100%;
max-height: 100%;
position: relative;
transition: all 0.3s ease;
overflow: hidden;
}
.sidebar-collapsed {
max-width: 75px;
min-width: 75px;
transition: all 0.3s ease;
}
.mobile-sidebar {
position: fixed;
top: 0;
left: 0;
bottom: 0;
max-width: 280px !important;
min-width: 280px !important;
transform: translateX(-100%);
transition: transform 0.3s ease;
z-index: 1000;
}
.mobile-sidebar-open {
transform: translateX(0) !important;
}
.sidebar-collapse-btn-container {
margin: 16px;
margin-bottom: 0px;
z-index: 10;
}
.sidebar-collapse-btn {
opacity: 0.6;
max-height: none;
overflow-y: visible;
padding: 0;
}
.conversation-item {
margin-bottom: 4px;
border-radius: 8px !important;
transition: all 0.2s ease;
height: auto !important;
min-height: 56px;
padding: 8px 16px !important;
position: relative;
}
.conversation-item:hover {
background-color: rgba(103, 58, 183, 0.05);
}
.conversation-item:hover .conversation-actions {
opacity: 1;
visibility: visible;
}
.conversation-actions {
display: flex;
gap: 4px;
opacity: 0;
visibility: hidden;
transition: all 0.2s ease;
}
.edit-title-btn,
.delete-conversation-btn {
opacity: 0.7;
transition: opacity 0.2s ease;
}
.edit-title-btn:hover,
.delete-conversation-btn:hover {
opacity: 1;
}
.conversation-title {
font-weight: 500;
font-size: 14px;
line-height: 1.3;
margin-bottom: 2px;
transition: opacity 0.25s ease;
}
.timestamp {
font-size: 11px;
color: var(--v-theme-secondaryText);
line-height: 1;
transition: opacity 0.25s ease;
}
.no-conversations {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 150px;
opacity: 0.6;
gap: 12px;
}
.no-conversations-text {
font-size: 14px;
color: var(--v-theme-secondaryText);
transition: opacity 0.25s ease;
}
.fade-in {
animation: fadeInContent 0.3s ease;
}
@keyframes fadeInContent {
from {
opacity: 0;
}
to {
opacity: 1;
}
}
</style>
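
A sketch of how a parent view might wire ConversationSidebar's props and events; the handler names are illustrative. The component persists its own collapsed state, so the parent only supplies data and reacts to events:

<ConversationSidebar
  :sessions="sessions"
  :selected-sessions="selectedSessions"
  :curr-session-id="currSessionId"
  :is-dark="isDark"
  :chatbox-mode="false"
  :is-mobile="isMobile"
  :mobile-menu-open="mobileMenuOpen"
  @new-chat="newChat"
  @select-conversation="onSelectConversation"
  @edit-title="showEditTitleDialog"
  @delete-conversation="deleteSession"
  @close-mobile-sidebar="mobileMenuOpen = false" />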

View File

@@ -33,33 +33,53 @@
<v-avatar class="bot-avatar" size="36">
<v-progress-circular :index="index" v-if="isStreaming && index === messages.length - 1" indeterminate size="28"
width="2"></v-progress-circular>
<span v-else-if="messages[index - 1]?.content.type !== 'bot'" class="text-h2"></span>
<v-icon v-else-if="messages[index - 1]?.content.type !== 'bot'" size="64" color="#8fb6d2">mdi-star-four-points-small</v-icon>
</v-avatar>
<div class="bot-message-content">
<div class="message-bubble bot-bubble">
<!-- Text -->
<div v-if="msg.content.message && msg.content.message.trim()"
v-html="md.render(msg.content.message)" class="markdown-content"></div>
<!-- Image -->
<div class="embedded-images"
v-if="msg.content.embedded_images && msg.content.embedded_images.length > 0">
<div v-for="(img, imgIndex) in msg.content.embedded_images" :key="imgIndex"
class="embedded-image">
<img :src="img" class="bot-embedded-image"
@click="$emit('openImagePreview', img)" />
<!-- Loading state -->
<div v-if="msg.content.isLoading" class="loading-container">
<span class="loading-text">{{ tm('message.loading') }}</span>
</div>
<template v-else>
<!-- Reasoning Block (Collapsible) -->
<div v-if="msg.content.reasoning && msg.content.reasoning.trim()" class="reasoning-container">
<div class="reasoning-header" @click="toggleReasoning(index)">
<v-icon size="small" class="reasoning-icon">
{{ isReasoningExpanded(index) ? 'mdi-chevron-down' : 'mdi-chevron-right' }}
</v-icon>
<span class="reasoning-label">{{ tm('reasoning.thinking') }}</span>
</div>
<div v-if="isReasoningExpanded(index)" class="reasoning-content">
<div v-html="md.render(msg.content.reasoning)" class="markdown-content reasoning-text"></div>
</div>
</div>
</div>
<!-- Text -->
<div v-if="msg.content.message && msg.content.message.trim()"
v-html="md.render(msg.content.message)" class="markdown-content"></div>
<!-- Audio -->
<div class="embedded-audio" v-if="msg.content.embedded_audio">
<audio controls class="audio-player">
<source :src="msg.content.embedded_audio" type="audio/wav">
{{ t('messages.errors.browser.audioNotSupported') }}
</audio>
</div>
<!-- Image -->
<div class="embedded-images"
v-if="msg.content.embedded_images && msg.content.embedded_images.length > 0">
<div v-for="(img, imgIndex) in msg.content.embedded_images" :key="imgIndex"
class="embedded-image">
<img :src="img" class="bot-embedded-image"
@click="$emit('openImagePreview', img)" />
</div>
</div>
<!-- Audio -->
<div class="embedded-audio" v-if="msg.content.embedded_audio">
<audio controls class="audio-player">
<source :src="msg.content.embedded_audio" type="audio/wav">
{{ t('messages.errors.browser.audioNotSupported') }}
</audio>
</div>
</template>
</div>
<div class="message-actions">
<div class="message-actions" v-if="!msg.content.isLoading">
<v-btn :icon="getCopyIcon(index)" size="small" variant="text" class="copy-message-btn"
:class="{ 'copy-success': isCopySuccess(index) }"
@click="copyBotMessage(msg.content.message, index)" :title="t('core.common.copy')" />
@@ -125,7 +145,8 @@ export default {
copiedMessages: new Set(),
isUserNearBottom: true,
scrollThreshold: 1,
scrollTimer: null
scrollTimer: null,
expandedReasoning: new Set(), // Track which reasoning blocks are expanded
};
},
mounted() {
@@ -142,6 +163,22 @@ export default {
}
},
methods: {
// Toggle reasoning expansion state
toggleReasoning(messageIndex) {
if (this.expandedReasoning.has(messageIndex)) {
this.expandedReasoning.delete(messageIndex);
} else {
this.expandedReasoning.add(messageIndex);
}
// Force reactivity
this.expandedReasoning = new Set(this.expandedReasoning);
},
// Check if reasoning is expanded
isReasoningExpanded(messageIndex) {
return this.expandedReasoning.has(messageIndex);
},
// Copy code to clipboard
copyCodeToClipboard(code) {
navigator.clipboard.writeText(code).then(() => {
@@ -348,7 +385,7 @@ export default {
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
transform: translateY(0);
}
to {
@@ -539,6 +576,69 @@ export default {
.fade-in {
animation: fadeIn 0.3s ease-in-out;
}
/* Reasoning block styles */
.reasoning-container {
margin-bottom: 12px;
margin-top: 6px;
border: 1px solid var(--v-theme-border);
border-radius: 8px;
overflow: hidden;
width: fit-content;
}
.v-theme--dark .reasoning-container {
background-color: rgba(103, 58, 183, 0.08);
}
.reasoning-header {
display: inline-flex;
align-items: center;
padding: 8px 8px;
cursor: pointer;
user-select: none;
transition: background-color 0.2s ease;
border-radius: 8px;
}
.reasoning-header:hover {
background-color: rgba(103, 58, 183, 0.08);
}
.v-theme--dark .reasoning-header:hover {
background-color: rgba(103, 58, 183, 0.15);
}
.reasoning-icon {
margin-right: 6px;
color: var(--v-theme-secondary);
transition: transform 0.2s ease;
}
.reasoning-label {
font-size: 13px;
font-weight: 500;
color: var(--v-theme-secondary);
letter-spacing: 0.3px;
}
.reasoning-content {
padding: 0px 12px;
border-top: 1px solid var(--v-theme-border);
color: gray;
animation: fadeIn 0.2s ease-in-out;
font-style: italic;
}
.reasoning-text {
font-size: 14px;
line-height: 1.6;
color: var(--v-theme-secondaryText);
}
.v-theme--dark .reasoning-text {
opacity: 0.85;
}
</style>
<style>
@@ -748,6 +848,29 @@ export default {
margin: 10px 0;
}
.loading-container {
display: flex;
align-items: center;
gap: 12px;
padding: 8px 0;
margin-top: 2px;
}
.loading-text {
font-size: 14px;
color: var(--v-theme-secondaryText);
animation: pulse 1.5s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% {
opacity: 0.6;
}
50% {
opacity: 1;
}
}
.markdown-content blockquote {
border-left: 4px solid var(--v-theme-secondary);
padding-left: 16px;

View File

@@ -1,13 +1,13 @@
<template>
<div>
<!-- 选择提供商和模型按钮 -->
<v-btn class="text-none" variant="tonal" rounded="xl" size="small"
<v-chip class="text-none" variant="tonal" size="x-small"
v-if="selectedProviderId && selectedModelName" @click="openDialog">
{{ selectedProviderId }} / {{ selectedModelName }}
</v-btn>
<v-btn variant="tonal" rounded="xl" size="small" v-else @click="openDialog">
</v-chip>
<v-chip variant="tonal" rounded="xl" size="x-small" v-else @click="openDialog">
选择模型
</v-btn>
</v-chip>
<!-- 选择提供商和模型对话框 -->
<v-dialog v-model="showDialog" max-width="800" persistent>

View File

@@ -154,7 +154,8 @@ function hasVisibleItemsAfter(items, currentIndex) {
<div class="w-100" v-if="!itemMeta?._special">
<!-- Select input for JSON selector -->
<v-select v-if="itemMeta?.options" v-model="createSelectorModel(itemKey).value"
:items="itemMeta?.options" :disabled="itemMeta?.readonly" density="compact" variant="outlined"
:items="itemMeta?.labels ? itemMeta.options.map((value, index) => ({ title: itemMeta.labels[index] || value, value: value })) : itemMeta.options"
:disabled="itemMeta?.readonly" density="compact" variant="outlined"
class="config-field" hide-details></v-select>
<!-- Code Editor for JSON selector -->

View File

@@ -1,22 +1,25 @@
<template>
<div class="d-flex align-center justify-space-between">
<span v-if="!modelValue || (Array.isArray(modelValue) && modelValue.length === 0)"
style="color: rgb(var(--v-theme-primaryText));">
未选择
</span>
<div v-else class="d-flex flex-wrap gap-1">
<v-chip
v-for="name in modelValue"
:key="name"
size="small"
color="primary"
variant="tonal"
closable
@click:close="removeKnowledgeBase(name)">
{{ name }}
</v-chip>
<div class="d-flex align-center justify-space-between" style="gap: 8px;">
<div style="flex: 1; min-width: 0; overflow: hidden;">
<span v-if="!modelValue || (Array.isArray(modelValue) && modelValue.length === 0)"
style="color: rgb(var(--v-theme-primaryText));">
未选择
</span>
<div v-else class="d-flex flex-wrap gap-1">
<v-chip
v-for="name in modelValue"
:key="name"
size="small"
color="primary"
variant="tonal"
closable
@click:close="removeKnowledgeBase(name)"
style="max-width: 100%;">
<span class="text-truncate" style="max-width: 200px;">{{ name }}</span>
</v-chip>
</div>
</div>
<v-btn size="small" color="primary" variant="tonal" @click="openDialog">
<v-btn size="small" color="primary" variant="tonal" @click="openDialog" style="flex-shrink: 0;">
{{ buttonText }}
</v-btn>
</div>
@@ -220,4 +223,11 @@ function goToKnowledgeBasePage() {
.gap-1 {
gap: 4px;
}
.text-truncate {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
display: inline-block;
}
</style>

View File

@@ -70,10 +70,6 @@ const formatTitle = (title: string) => {
transition: transform 0.3s ease;
}
.logo-image img:hover {
transform: scale(1.05);
}
.logo-text {
display: flex;
flex-direction: column;

View File

@@ -0,0 +1,145 @@
import { ref, computed } from 'vue';
import axios from 'axios';
import { useRouter } from 'vue-router';
export interface Conversation {
cid: string;
title: string;
updated_at: number;
}
export function useConversations(chatboxMode: boolean = false) {
const router = useRouter();
const conversations = ref<Conversation[]>([]);
const selectedConversations = ref<string[]>([]);
const currCid = ref('');
const pendingCid = ref<string | null>(null);
// Title editing state
const editTitleDialog = ref(false);
const editingTitle = ref('');
const editingCid = ref('');
const getCurrentConversation = computed(() => {
if (!currCid.value) return null;
return conversations.value.find(c => c.cid === currCid.value);
});
async function getConversations() {
try {
const response = await axios.get('/api/chat/conversations');
conversations.value = response.data.data;
// Resolve a pending conversation once the list arrives
if (pendingCid.value) {
const conversation = conversations.value.find(c => c.cid === pendingCid.value);
if (conversation) {
selectedConversations.value = [pendingCid.value];
pendingCid.value = null;
}
} else if (!currCid.value && conversations.value.length > 0) {
// Select the first conversation by default
const firstConversation = conversations.value[0];
selectedConversations.value = [firstConversation.cid];
}
} catch (err: any) {
if (err.response?.status === 401) {
router.push('/auth/login?redirect=/chatbox');
}
console.error(err);
}
}
async function newConversation() {
try {
const response = await axios.get('/api/chat/new_conversation');
const cid = response.data.data.conversation_id;
currCid.value = cid;
// Update the URL
const basePath = chatboxMode ? '/chatbox' : '/chat';
router.push(`${basePath}/${cid}`);
await getConversations();
return cid;
} catch (err) {
console.error(err);
throw err;
}
}
async function deleteConversation(cid: string) {
try {
await axios.get('/api/chat/delete_conversation?conversation_id=' + cid);
await getConversations();
currCid.value = '';
selectedConversations.value = [];
} catch (err) {
console.error(err);
}
}
function showEditTitleDialog(cid: string, title: string) {
editingCid.value = cid;
editingTitle.value = title || '';
editTitleDialog.value = true;
}
async function saveTitle() {
if (!editingCid.value) return;
const trimmedTitle = editingTitle.value.trim();
try {
await axios.post('/api/chat/rename_conversation', {
conversation_id: editingCid.value,
title: trimmedTitle
});
// Update the local conversation title
const conversation = conversations.value.find(c => c.cid === editingCid.value);
if (conversation) {
conversation.title = trimmedTitle;
}
editTitleDialog.value = false;
} catch (err) {
console.error('Failed to rename conversation:', err);
}
}
function updateConversationTitle(cid: string, title: string) {
const conversation = conversations.value.find(c => c.cid === cid);
if (conversation) {
conversation.title = title;
}
}
function newChat(closeMobileSidebar?: () => void) {
currCid.value = '';
selectedConversations.value = [];
const basePath = chatboxMode ? '/chatbox' : '/chat';
router.push(basePath);
if (closeMobileSidebar) {
closeMobileSidebar();
}
}
return {
conversations,
selectedConversations,
currCid,
pendingCid,
editTitleDialog,
editingTitle,
editingCid,
getCurrentConversation,
getConversations,
newConversation,
deleteConversation,
showEditTitleDialog,
saveTitle,
updateConversationTitle,
newChat
};
}
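
A minimal consumption sketch for this composable; the component context and chatboxMode value are illustrative:

import { onMounted } from 'vue';
import { useConversations } from '@/composables/useConversations';

const {
  conversations, selectedConversations, currCid,
  getConversations, newConversation, deleteConversation,
} = useConversations(true); // chatboxMode: URLs are pushed under /chatbox

onMounted(async () => {
  // Loads the list; the composable then auto-selects either the pending
  // conversation (if pendingCid was set from the route) or the first one.
  await getConversations();
});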

View File

@@ -0,0 +1,104 @@
import { ref } from 'vue';
import axios from 'axios';
export function useMediaHandling() {
const stagedImagesName = ref<string[]>([]);
const stagedImagesUrl = ref<string[]>([]);
const stagedAudioUrl = ref<string>('');
const mediaCache = ref<Record<string, string>>({});
async function getMediaFile(filename: string): Promise<string> {
if (mediaCache.value[filename]) {
return mediaCache.value[filename];
}
try {
const response = await axios.get('/api/chat/get_file', {
params: { filename },
responseType: 'blob'
});
const blobUrl = URL.createObjectURL(response.data);
mediaCache.value[filename] = blobUrl;
return blobUrl;
} catch (error) {
console.error('Error fetching media file:', error);
return '';
}
}
async function processAndUploadImage(file: File) {
const formData = new FormData();
formData.append('file', file);
try {
const response = await axios.post('/api/chat/post_image', formData, {
headers: {
'Content-Type': 'multipart/form-data'
}
});
const img = response.data.data.filename;
stagedImagesName.value.push(img);
stagedImagesUrl.value.push(URL.createObjectURL(file));
} catch (err) {
console.error('Error uploading image:', err);
}
}
async function handlePaste(event: ClipboardEvent) {
const items = event.clipboardData?.items;
if (!items) return;
for (let i = 0; i < items.length; i++) {
if (items[i].type.startsWith('image/')) {
const file = items[i].getAsFile();
if (file) {
await processAndUploadImage(file);
}
}
}
}
function removeImage(index: number) {
const urlToRevoke = stagedImagesUrl.value[index];
if (urlToRevoke && urlToRevoke.startsWith('blob:')) {
URL.revokeObjectURL(urlToRevoke);
}
stagedImagesName.value.splice(index, 1);
stagedImagesUrl.value.splice(index, 1);
}
function removeAudio() {
stagedAudioUrl.value = '';
}
function clearStaged() {
stagedImagesName.value = [];
stagedImagesUrl.value = [];
stagedAudioUrl.value = '';
}
function cleanupMediaCache() {
Object.values(mediaCache.value).forEach(url => {
if (url.startsWith('blob:')) {
URL.revokeObjectURL(url);
}
});
mediaCache.value = {};
}
return {
stagedImagesName,
stagedImagesUrl,
stagedAudioUrl,
getMediaFile,
processAndUploadImage,
handlePaste,
removeImage,
removeAudio,
clearStaged,
cleanupMediaCache
};
}
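
The paste handler above is meant to be attached to a DOM element, mirroring the input component's onMounted/onBeforeUnmount wiring earlier in this diff. A sketch, with an illustrative element ref name:

import { ref, onMounted, onBeforeUnmount } from 'vue';
import { useMediaHandling } from '@/composables/useMediaHandling';

const inputField = ref<HTMLElement | null>(null);
const { handlePaste, clearStaged, cleanupMediaCache } = useMediaHandling();

onMounted(() => {
  inputField.value?.addEventListener('paste', handlePaste);
});

onBeforeUnmount(() => {
  inputField.value?.removeEventListener('paste', handlePaste);
  clearStaged();       // drop staged attachment state
  cleanupMediaCache(); // revoke any cached blob: URLs
});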

View File

@@ -0,0 +1,304 @@
import { ref, reactive, type Ref } from 'vue';
import axios from 'axios';
import { useToast } from '@/utils/toast';
export interface MessageContent {
type: string;
message: string;
reasoning?: string;
image_url?: string[];
audio_url?: string;
embedded_images?: string[];
embedded_audio?: string;
isLoading?: boolean;
}
export interface Message {
content: MessageContent;
}
export function useMessages(
currSessionId: Ref<string>,
getMediaFile: (filename: string) => Promise<string>,
updateSessionTitle: (sessionId: string, title: string) => void,
onSessionsUpdate: () => void
) {
const messages = ref<Message[]>([]);
const isStreaming = ref(false);
const isConvRunning = ref(false);
const isToastedRunningInfo = ref(false);
const activeSSECount = ref(0);
const enableStreaming = ref(true);
// Restore the streaming toggle state from localStorage
const savedStreamingState = localStorage.getItem('enableStreaming');
if (savedStreamingState !== null) {
enableStreaming.value = JSON.parse(savedStreamingState);
}
function toggleStreaming() {
enableStreaming.value = !enableStreaming.value;
localStorage.setItem('enableStreaming', JSON.stringify(enableStreaming.value));
}
async function getSessionMessages(sessionId: string, router: any) {
if (!sessionId) return;
try {
const response = await axios.get('/api/chat/get_session?session_id=' + sessionId);
isConvRunning.value = response.data.data.is_running || false;
let history = response.data.data.history;
if (isConvRunning.value) {
if (!isToastedRunningInfo.value) {
useToast().info("This session is still running.", { timeout: 5000 });
isToastedRunningInfo.value = true;
}
// If the session is still running, refetch messages after 3 seconds
setTimeout(() => {
getSessionMessages(currSessionId.value, router);
}, 3000);
}
// Resolve media files referenced in history messages
for (let i = 0; i < history.length; i++) {
let content = history[i].content;
if (content.message?.startsWith('[IMAGE]')) {
let img = content.message.replace('[IMAGE]', '');
const imageUrl = await getMediaFile(img);
if (!content.embedded_images) {
content.embedded_images = [];
}
content.embedded_images.push(imageUrl);
content.message = '';
}
if (content.message?.startsWith('[RECORD]')) {
let audio = content.message.replace('[RECORD]', '');
const audioUrl = await getMediaFile(audio);
content.embedded_audio = audioUrl;
content.message = '';
}
if (content.image_url && content.image_url.length > 0) {
for (let j = 0; j < content.image_url.length; j++) {
content.image_url[j] = await getMediaFile(content.image_url[j]);
}
}
if (content.audio_url) {
content.audio_url = await getMediaFile(content.audio_url);
}
}
messages.value = history;
} catch (err) {
console.error(err);
}
}
async function sendMessage(
prompt: string,
imageNames: string[],
audioName: string,
selectedProviderId: string,
selectedModelName: string
) {
// Create user message
const userMessage: MessageContent = {
type: 'user',
message: prompt,
image_url: [],
audio_url: undefined
};
// Convert image filenames to blob URLs
if (imageNames.length > 0) {
const imagePromises = imageNames.map(name => {
if (!name.startsWith('blob:')) {
return getMediaFile(name);
}
return Promise.resolve(name);
});
userMessage.image_url = await Promise.all(imagePromises);
}
// Convert audio filename to blob URL
if (audioName) {
if (!audioName.startsWith('blob:')) {
userMessage.audio_url = await getMediaFile(audioName);
} else {
userMessage.audio_url = audioName;
}
}
messages.value.push({ content: userMessage });
// Push a loading placeholder for the bot reply
const loadingMessage = reactive({
type: 'bot',
message: '',
reasoning: '',
isLoading: true
});
messages.value.push({ content: loadingMessage });
try {
activeSSECount.value++;
if (activeSSECount.value === 1) {
isConvRunning.value = true;
}
const response = await fetch('/api/chat/send', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
message: prompt,
session_id: currSessionId.value,
image_url: imageNames,
audio_url: audioName ? [audioName] : [],
selected_provider: selectedProviderId,
selected_model: selectedModelName,
enable_streaming: enableStreaming.value
})
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const reader = response.body!.getReader();
const decoder = new TextDecoder();
let buffer = '';
let in_streaming = false;
let message_obj: any = null;
isStreaming.value = true;
while (true) {
try {
const { done, value } = await reader.read();
if (done) {
console.log('SSE stream completed');
break;
}
// Buffer the stream: a single read() may end mid-event, so keep any
// trailing incomplete fragment around until the next chunk arrives
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n\n');
buffer = lines.pop() ?? '';
for (let i = 0; i < lines.length; i++) {
let line = lines[i].trim();
if (!line) continue;
let chunk_json;
try {
chunk_json = JSON.parse(line.replace('data: ', ''));
} catch (parseError) {
console.warn('Failed to parse SSE JSON:', line, parseError);
continue;
}
if (!chunk_json || typeof chunk_json !== 'object' || !chunk_json.hasOwnProperty('type')) {
console.warn('Invalid SSE payload:', chunk_json);
continue;
}
if (chunk_json.type === 'error') {
console.error('Error received:', chunk_json.data);
continue;
}
if (chunk_json.type === 'image') {
let img = chunk_json.data.replace('[IMAGE]', '');
const imageUrl = await getMediaFile(img);
let bot_resp: MessageContent = {
type: 'bot',
message: '',
embedded_images: [imageUrl]
};
messages.value.push({ content: bot_resp });
} else if (chunk_json.type === 'record') {
let audio = chunk_json.data.replace('[RECORD]', '');
const audioUrl = await getMediaFile(audio);
let bot_resp: MessageContent = {
type: 'bot',
message: '',
embedded_audio: audioUrl
};
messages.value.push({ content: bot_resp });
} else if (chunk_json.type === 'plain') {
const chain_type = chunk_json.chain_type || 'normal';
if (!in_streaming) {
// Remove the loading placeholder
const lastMsg = messages.value[messages.value.length - 1];
if (lastMsg?.content?.isLoading) {
messages.value.pop();
}
message_obj = reactive({
type: 'bot',
message: chain_type === 'reasoning' ? '' : chunk_json.data,
reasoning: chain_type === 'reasoning' ? chunk_json.data : '',
});
messages.value.push({ content: message_obj });
in_streaming = true;
} else {
if (chain_type === 'reasoning') {
// message_obj is reactive, so mutating its properties triggers UI updates
message_obj.reasoning = (message_obj.reasoning || '') + chunk_json.data;
} else {
message_obj.message = (message_obj.message || '') + chunk_json.data;
}
}
} else if (chunk_json.type === 'update_title') {
updateSessionTitle(chunk_json.session_id, chunk_json.data);
}
if ((chunk_json.type === 'break' && chunk_json.streaming) || !chunk_json.streaming) {
in_streaming = false;
if (!chunk_json.streaming) {
isStreaming.value = false;
}
}
}
} catch (readError) {
console.error('SSE read error:', readError);
break;
}
}
// Refresh the session list
onSessionsUpdate();
} catch (err) {
console.error('Failed to send message:', err);
// Remove the loading placeholder
const lastMsg = messages.value[messages.value.length - 1];
if (lastMsg?.content?.isLoading) {
messages.value.pop();
}
} finally {
isStreaming.value = false;
activeSSECount.value--;
if (activeSSECount.value === 0) {
isConvRunning.value = false;
}
}
}
return {
messages,
isStreaming,
isConvRunning,
enableStreaming,
getSessionMessages,
sendMessage,
toggleStreaming
};
}
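
For reference, the SSE wire format this parser expects, as inferred from the client code above (payload values are illustrative): the endpoint streams `data: <json>` events separated by blank lines, e.g.

data: {"type": "plain", "chain_type": "reasoning", "data": "Considering the question", "streaming": true}

data: {"type": "plain", "chain_type": "normal", "data": "Hello", "streaming": true}

data: {"type": "break", "streaming": true}

data: {"type": "update_title", "session_id": "<session id>", "data": "New title"}

`image` and `record` events carry "[IMAGE]<filename>" / "[RECORD]<filename>" payloads, which are resolved to blob URLs through getMediaFile().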

View File

@@ -0,0 +1,74 @@
import { ref } from 'vue';
import axios from 'axios';
export function useRecording() {
const isRecording = ref(false);
const audioChunks = ref<Blob[]>([]);
const mediaRecorder = ref<MediaRecorder | null>(null);
async function startRecording(onStart?: (label: string) => void) {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaRecorder.value = new MediaRecorder(stream);
mediaRecorder.value.ondataavailable = (event) => {
audioChunks.value.push(event.data);
};
mediaRecorder.value.start();
isRecording.value = true;
if (onStart) {
onStart('Recording...');
}
} catch (error) {
console.error('Failed to start recording:', error);
}
}
async function stopRecording(onStop?: (label: string) => void): Promise<string> {
return new Promise((resolve, reject) => {
if (!mediaRecorder.value) {
reject(new Error('No active media recorder'));
return;
}
isRecording.value = false;
if (onStop) {
onStop('Chat input');
}
// Register the onstop handler before calling stop() so the stop event
// cannot fire unhandled
mediaRecorder.value.onstop = async () => {
const audioBlob = new Blob(audioChunks.value, { type: 'audio/wav' });
audioChunks.value = [];
mediaRecorder.value?.stream.getTracks().forEach(track => track.stop());
const formData = new FormData();
formData.append('file', audioBlob);
try {
const response = await axios.post('/api/chat/post_file', formData, {
headers: {
'Content-Type': 'multipart/form-data'
}
});
const audio = response.data.data.filename;
console.log('Audio uploaded:', audio);
resolve(audio);
} catch (err) {
console.error('Error uploading audio:', err);
reject(err);
}
};
mediaRecorder.value.stop();
});
}
return {
isRecording,
startRecording,
stopRecording
};
}
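
A sketch of a record/stop toggle built on this composable; the handler and label callbacks are illustrative:

import { useRecording } from '@/composables/useRecording';

const { isRecording, startRecording, stopRecording } = useRecording();

async function toggleRecording() {
  if (!isRecording.value) {
    await startRecording(label => console.log(label));
  } else {
    // Resolves with the filename returned by POST /api/chat/post_file,
    // ready to be staged as the message's audio attachment
    const filename = await stopRecording(label => console.log(label));
    console.log('staged audio:', filename);
  }
}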

View File

@@ -0,0 +1,145 @@
import { ref, computed } from 'vue';
import axios from 'axios';
import { useRouter } from 'vue-router';
export interface Session {
session_id: string;
display_name: string;
updated_at: string;
}
export function useSessions(chatboxMode: boolean = false) {
const router = useRouter();
const sessions = ref<Session[]>([]);
const selectedSessions = ref<string[]>([]);
const currSessionId = ref('');
const pendingSessionId = ref<string | null>(null);
// Title editing state
const editTitleDialog = ref(false);
const editingTitle = ref('');
const editingSessionId = ref('');
const getCurrentSession = computed(() => {
if (!currSessionId.value) return null;
return sessions.value.find(s => s.session_id === currSessionId.value);
});
async function getSessions() {
try {
const response = await axios.get('/api/chat/sessions');
sessions.value = response.data.data;
// Resolve a pending session once the list arrives
if (pendingSessionId.value) {
const session = sessions.value.find(s => s.session_id === pendingSessionId.value);
if (session) {
selectedSessions.value = [pendingSessionId.value];
pendingSessionId.value = null;
}
} else if (!currSessionId.value && sessions.value.length > 0) {
// Select the first session by default
const firstSession = sessions.value[0];
selectedSessions.value = [firstSession.session_id];
}
} catch (err: any) {
if (err.response?.status === 401) {
router.push('/auth/login?redirect=/chatbox');
}
console.error(err);
}
}
async function newSession() {
try {
const response = await axios.get('/api/chat/new_session');
const sessionId = response.data.data.session_id;
currSessionId.value = sessionId;
// Update the URL
const basePath = chatboxMode ? '/chatbox' : '/chat';
router.push(`${basePath}/${sessionId}`);
await getSessions();
return sessionId;
} catch (err) {
console.error(err);
throw err;
}
}
async function deleteSession(sessionId: string) {
try {
await axios.get('/api/chat/delete_session?session_id=' + sessionId);
await getSessions();
currSessionId.value = '';
selectedSessions.value = [];
} catch (err) {
console.error(err);
}
}
function showEditTitleDialog(sessionId: string, title: string) {
editingSessionId.value = sessionId;
editingTitle.value = title || '';
editTitleDialog.value = true;
}
async function saveTitle() {
if (!editingSessionId.value) return;
const trimmedTitle = editingTitle.value.trim();
try {
await axios.post('/api/chat/update_session_display_name', {
session_id: editingSessionId.value,
display_name: trimmedTitle
});
// Update the local session display name
const session = sessions.value.find(s => s.session_id === editingSessionId.value);
if (session) {
session.display_name = trimmedTitle;
}
editTitleDialog.value = false;
} catch (err) {
console.error('Failed to rename session:', err);
}
}
function updateSessionTitle(sessionId: string, title: string) {
const session = sessions.value.find(s => s.session_id === sessionId);
if (session) {
session.display_name = title;
}
}
function newChat(closeMobileSidebar?: () => void) {
currSessionId.value = '';
selectedSessions.value = [];
const basePath = chatboxMode ? '/chatbox' : '/chat';
router.push(basePath);
if (closeMobileSidebar) {
closeMobileSidebar();
}
}
return {
sessions,
selectedSessions,
currSessionId,
pendingSessionId,
editTitleDialog,
editingTitle,
editingSessionId,
getCurrentSession,
getSessions,
newSession,
deleteSession,
showEditTitleDialog,
saveTitle,
updateSessionTitle,
newChat
};
}
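
These composables appear designed to be composed; a sketch matching useMessages' parameter list (currSessionId, getMediaFile, updateSessionTitle, onSessionsUpdate):

import { useSessions } from '@/composables/useSessions';
import { useMediaHandling } from '@/composables/useMediaHandling';
import { useMessages } from '@/composables/useMessages';

const { sessions, currSessionId, getSessions, updateSessionTitle } = useSessions();
const { getMediaFile } = useMediaHandling();
const { messages, isStreaming, sendMessage } = useMessages(
  currSessionId,      // Ref<string> shared with the session list
  getMediaFile,       // resolves "[IMAGE]"/"[RECORD]" payloads to blob URLs
  updateSessionTitle, // applied when an `update_title` SSE event arrives
  getSessions,        // refreshes the sidebar after each completed send
);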

View File

@@ -72,7 +72,8 @@
"enabled": "Enabled",
"disabled": "Disabled",
"delete": "Delete",
"copy": "Copy",
"edit": "Edit",
"noData": "No data available"
}
}
}

View File

@@ -32,7 +32,6 @@
"issueLink": "GitHub Issues"
},
"tip": "💡 TIP:",
"tipLink": "",
"tipContinue": "By default, the corresponding version of the WebUI files will be downloaded when switching versions. The WebUI code is located in the dashboard directory of the project, and you can use npm to build it yourself.",
"dockerTip": "When switching versions, it will try to update both the bot main program and the dashboard. If you are using Docker deployment, you can also re-pull the image or use",
"dockerTipLink": "watchtower",
@@ -56,6 +55,9 @@
"linkText": "View master branch commit history (click copy on the right to copy)",
"confirm": "Confirm Switch"
},
"releaseNotes": {
"title": "Release Notes"
},
"dashboardUpdate": {
"title": "Update Dashboard to Latest Version Only",
"currentVersion": "Current Version",
@@ -88,4 +90,4 @@
"updateFailed": "Update failed, please try again"
}
}
}
}

View File

@@ -5,8 +5,8 @@
"persona": "Persona",
"toolUse": "MCP Tools",
"config": "Config",
"extension": "Extensions",
"chat": "Chat",
"extension": "Extensions",
"conversation": "Conversations",
"sessionManagement": "Session Management",
"console": "Console",
@@ -20,4 +20,4 @@
"groups": {
"more": "More Features"
}
}
}

View File

@@ -2,6 +2,7 @@
"login": "Login",
"username": "Username",
"password": "Password",
"defaultHint": "Default username and password: astrbot",
"logo": {
"title": "AstrBot Dashboard",
"subtitle": "Welcome"

View File

@@ -47,7 +47,11 @@
"noHistory": "No conversation history",
"systemStatus": "System Status",
"llmService": "LLM Service",
"speechToText": "Speech to Text"
"speechToText": "Speech to Text",
"editDisplayName": "Edit Session Name",
"displayName": "Session Name",
"displayNameUpdated": "Session name updated",
"displayNameUpdateFailed": "Failed to update session name"
},
"modes": {
"darkMode": "Switch to Dark Mode",
@@ -57,6 +61,15 @@
"voiceRecord": "Record Voice",
"pasteImage": "Paste Image"
},
"streaming": {
"enabled": "Streaming enabled",
"disabled": "Streaming disabled",
"on": "Stream",
"off": "Normal"
},
"reasoning": {
"thinking": "Thinking Process"
},
"connection": {
"title": "Connection Status Notice",
"message": "The system detected that the chat connection needs to be re-established.",

View File

@@ -30,7 +30,11 @@
"configApplyError": "Configuration not applied, JSON format error.",
"saveSuccess": "Configuration saved successfully",
"saveError": "Failed to save configuration",
"loadError": "Failed to load configuration"
"loadError": "Failed to load configuration",
"deleteSuccess": "Deleted successfully",
"deleteError": "Failed to delete",
"updateSuccess": "Updated successfully",
"updateError": "Failed to update"
},
"sections": {
"general": "General Settings",
@@ -59,4 +63,4 @@
"rateLimit": "Rate Limit",
"encryption": "Encryption Settings"
}
}
}

View File

@@ -4,6 +4,7 @@
"tabs": {
"overview": "Overview",
"documents": "Documents",
"retrieval": "Retrieval",
"sessions": "Sessions",
"settings": "Settings"
},
@@ -49,6 +50,10 @@
"maxSize": "Max file size: 128MB",
"chunkSettings": "Chunk Settings",
"batchSettings": "Batch Settings",
"cleaningSettings": "Cleaning Settings",
"enableCleaning": "Enable Content Cleaning",
"cleaningProvider": "Cleaning Service Provider",
"cleaningProviderHint": "Select an LLM provider to clean and summarize the extracted web page content",
"chunkSize": "Chunk Size",
"chunkSizeHint": "Number of characters per chunk (default: 512)",
"chunkOverlap": "Chunk Overlap",
@@ -61,7 +66,37 @@
"maxRetriesHint": "Number of times to retry a failed upload task (default: 3)",
"cancel": "Cancel",
"submit": "Upload",
"fileRequired": "Please select a file to upload"
"fileRequired": "Please select a file to upload",
"fileUpload": "File Upload",
"fromUrl": "From URL",
"urlPlaceholder": "Enter the URL of the web page to extract content from",
"urlRequired": "Please enter a URL",
"urlHint": "The main content will be automatically extracted from the target URL as a document. Currently supports {supported} pages. Before use, please ensure that the target web page allows crawler access.",
"beta": "Beta"
},
"retrieval": {
"title": "Retrieval",
"subtitle": "Test the knowledge base using dense and sparse retrieval methods",
"query": "Query",
"queryPlaceholder": "Enter a query...",
"search": "Search",
"searching": "Searching...",
"results": "Results",
"noResults": "No results found",
"tryDifferentQuery": "Try a different query",
"settings": "Retrieval Settings",
"topK": "Number of Results",
"topKHint": "Maximum number of results to return",
"enableRerank": "Enable Rerank",
"enableRerankHint": "Use a rerank model to improve retrieval quality",
"score": "Relevance Score",
"document": "Document",
"chunk": "Chunk #{index}",
"content": "Content",
"charCount": "{count} characters",
"searchSuccess": "Search completed, found {count} results",
"searchFailed": "Search failed",
"queryRequired": "Please enter a query"
},
"settings": {
"title": "Knowledge Base Settings",

View File

@@ -22,7 +22,10 @@
"preview": "Preview",
"search": "Search Chunks",
"searchPlaceholder": "Enter keywords to search chunks...",
"showing": "Showing"
"showing": "Showing",
"deleteConfirm": "Are you sure you want to delete this chunk?",
"deleteSuccess": "Chunk deleted successfully",
"deleteFailed": "Failed to delete chunk"
},
"edit": {
"title": "Edit Chunk",

View File

@@ -96,6 +96,42 @@
},
"confirmDelete": "Are you sure you want to delete server {name}?"
},
"syncProvider": {
"title": "Sync MCP Servers",
"subtitle": "Sync MCP server configurations from providers to local",
"steps": {
"selectProvider": "Step 1: Select Provider",
"configureAuth": "Step 2: Configure Authentication",
"syncServers": "Step 3: Sync Servers"
},
"providers": {
"modelscope": "ModelScope",
"description": "ModelScope is an open model community providing MCP servers for various machine learning and AI services"
},
"fields": {
"provider": "Select Provider",
"accessToken": "Access Token",
"tokenRequired": "Access token is required",
"tokenHint": "Please enter your ModelScope access token"
},
"buttons": {
"cancel": "Cancel",
"previous": "Previous",
"next": "Next",
"sync": "Start Sync",
"getToken": "Get Token"
},
"status": {
"selectProvider": "Please select an MCP server provider",
"enterToken": "Please enter the access token to continue",
"readyToSync": "Ready to sync server configurations"
},
"messages": {
"syncSuccess": "MCP servers synced successfully!",
"syncError": "Sync failed: {error}",
"tokenHelp": "How to get a ModelScope access token? Click the button on the right for instructions"
}
},
"messages": {
"getServersError": "Failed to get MCP server list: {error}",
"getToolsError": "Failed to get function tools list: {error}",
@@ -117,4 +153,4 @@
"toggleToolError": "Failed to toggle tool status: {error}",
"testError": "Test connection failed: {error}"
}
}
}

View File

@@ -55,6 +55,9 @@
"linkText": "查看 master 分支提交记录(点击右边的 copy 即可复制)",
"confirm": "确定切换"
},
"releaseNotes": {
"title": "更新日志"
},
"dashboardUpdate": {
"title": "单独更新管理面板到最新版本",
"currentVersion": "当前版本",

View File

@@ -2,8 +2,9 @@
"login": "登录",
"username": "用户名",
"password": "密码",
"defaultHint": "默认账户和密码均为astrbot",
"logo": {
"title": "AstrBot 仪表盘",
"title": "AstrBot WebUI",
"subtitle": "欢迎使用"
},
"theme": {

Some files were not shown because too many files have changed in this diff.