Compare commits

...

23 Commits

Author SHA1 Message Date
Soulter
c76b7ec387 Merge remote-tracking branch 'origin/master' into feat/memory 2025-11-21 20:23:41 +08:00
Soulter
b7f3010d72 stage simple webui 2025-11-21 17:59:22 +08:00
Soulter
fbbaf1cd08 delete(memory): remove memory module and its components 2025-11-21 17:34:33 +08:00
Soulter
9c8025acce stage 2025-11-21 17:25:55 +08:00
Soulter
98c5466b5d feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:30:51 +08:00
Soulter
6345ac6ff8 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:29:27 +08:00
Soulter
5bcd683012 delete: remove useConversations composable 2025-11-20 17:29:27 +08:00
Soulter
eaa193c6c5 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:29:27 +08:00
Soulter
1bdcaa1318 delete: useConversations 2025-11-20 17:29:27 +08:00
Soulter
6b6c48354d feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:29:27 +08:00
Soulter
774efb2fe0 refactor: update timestamp handling in session management and chat components 2025-11-20 17:29:27 +08:00
Soulter
3ec76636f9 refactor(sqlite): remove auto-generation of session_id in insert method 2025-11-20 17:29:26 +08:00
Soulter
283810d103 feat(chat): refactor chat component structure and add new features (#3701)
- Introduced `ConversationSidebar.vue` for improved conversation management and sidebar functionality.
- Enhanced `MessageList.vue` to handle loading states and improved message rendering.
- Created new composables: `useConversations`, `useMessages`, `useMediaHandling`, `useRecording` for better code organization and reusability.
- Added loading indicators and improved user experience during message processing.
- Ensured backward compatibility and maintained existing functionalities.
2025-11-20 17:29:26 +08:00
Soulter
81a76bc8e5 fix: anyio.ClosedResourceError when calling mcp tools (#3700)
* fix: anyio.ClosedResourceError when calling mcp tools

added reconnect mechanism

fixes: 3676

* fix(mcp_client): implement thread-safe reconnection using asyncio.Lock
2025-11-20 17:29:26 +08:00
Soulter
788764be02 refactor: implement migration for WebChat sessions by creating PlatformSession records from platform_message_history 2025-11-20 17:29:26 +08:00
Soulter
802ab26934 refactor: update session handling by replacing conversation_id with session_id in chat routes and components 2025-11-20 17:29:26 +08:00
Soulter
6857c81a14 refactor: enhance PlatformSession migration by adding display_name from Conversations and improve session item styling 2025-11-20 17:29:26 +08:00
Soulter
a6ed511a30 refactor: update message history deletion logic to remove newer records based on offset 2025-11-20 17:29:26 +08:00
Soulter
44c2b58206 refactor: optimize WebChat session migration by batch inserting records 2025-11-20 17:29:26 +08:00
Soulter
0e2adab3fd refactor: change to platform session 2025-11-20 17:29:26 +08:00
Soulter
0fe87d6b98 fix: restore migration check for version 4.7 2025-11-20 17:29:26 +08:00
Soulter
31ef3d1084 refactor: Implement WebChat session management and migration from version 4.6 to 4.7
- Added WebChatSession model for managing user sessions.
- Introduced methods for creating, retrieving, updating, and deleting WebChat sessions in the database.
- Updated core lifecycle to include migration from version 4.6 to 4.7, creating WebChat sessions from existing platform message history.
- Refactored chat routes to support new session-based architecture, replacing conversation-related endpoints with session endpoints.
- Updated frontend components to handle sessions instead of conversations, including session creation and management.
2025-11-20 17:29:26 +08:00
Soulter
b984bb2513 stage 2025-11-20 13:51:53 +08:00
20 changed files with 2149 additions and 3 deletions

View File

@@ -24,6 +24,7 @@ from astrbot.core.db import BaseDatabase
from astrbot.core.db.migration.migra_45_to_46 import migrate_45_to_46 from astrbot.core.db.migration.migra_45_to_46 import migrate_45_to_46
from astrbot.core.db.migration.migra_webchat_session import migrate_webchat_session from astrbot.core.db.migration.migra_webchat_session import migrate_webchat_session
from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager
from astrbot.core.memory.memory_manager import MemoryManager
from astrbot.core.persona_mgr import PersonaManager from astrbot.core.persona_mgr import PersonaManager
from astrbot.core.pipeline.scheduler import PipelineContext, PipelineScheduler from astrbot.core.pipeline.scheduler import PipelineContext, PipelineScheduler
from astrbot.core.platform.manager import PlatformManager from astrbot.core.platform.manager import PlatformManager
@@ -136,6 +137,8 @@ class AstrBotCoreLifecycle:
# 初始化知识库管理器 # 初始化知识库管理器
self.kb_manager = KnowledgeBaseManager(self.provider_manager) self.kb_manager = KnowledgeBaseManager(self.provider_manager)
# 初始化记忆管理器
self.memory_manager = MemoryManager()
# 初始化提供给插件的上下文 # 初始化提供给插件的上下文
self.star_context = Context( self.star_context = Context(
@@ -149,6 +152,7 @@ class AstrBotCoreLifecycle:
self.persona_mgr, self.persona_mgr,
self.astrbot_config_mgr, self.astrbot_config_mgr,
self.kb_manager, self.kb_manager,
self.memory_manager,
) )
# 初始化插件管理器 # 初始化插件管理器

View File

@@ -1,11 +1,20 @@
import abc import abc
from dataclasses import dataclass from dataclasses import dataclass
from typing import TypedDict
@dataclass @dataclass
class Result: class Result:
class ResultData(TypedDict):
id: str
doc_id: str
text: str
metadata: str
created_at: int
updated_at: int
similarity: float similarity: float
data: dict data: ResultData | dict
class BaseVecDB: class BaseVecDB:

View File

@@ -0,0 +1,822 @@
{
"type": "excalidraw",
"version": 2,
"source": "https://marketplace.visualstudio.com/items?itemName=pomdtr.excalidraw-editor",
"elements": [
{
"id": "l6cYurMvF69IM4Kc33Qou",
"type": "rectangle",
"x": 173.140625,
"y": -29.0234375,
"width": 92.95703125,
"height": 77.109375,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a0",
"roundness": {
"type": 3
},
"seed": 1409469537,
"version": 91,
"versionNonce": 307958671,
"isDeleted": false,
"boundElements": [],
"updated": 1763703733605,
"link": null,
"locked": false
},
{
"id": "1ZvS6t8U6ihUjNU0dakgl",
"type": "arrow",
"x": 409.30859375,
"y": 9.6875,
"width": 118.2734375,
"height": 1.9609375,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a1",
"roundness": {
"type": 2
},
"seed": 326508865,
"version": 120,
"versionNonce": 199367023,
"isDeleted": false,
"boundElements": null,
"updated": 1763703733605,
"link": null,
"locked": false,
"points": [
[
0,
0
],
[
-118.2734375,
-1.9609375
]
],
"lastCommittedPoint": null,
"startBinding": null,
"endBinding": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"elbowed": false
},
{
"id": "tfdUGiJdcMoOHGfqFHXK6",
"type": "text",
"x": 153.46875,
"y": -70.9765625,
"width": 136.4598846435547,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a2",
"roundness": null,
"seed": 688712865,
"version": 67,
"versionNonce": 300660705,
"isDeleted": false,
"boundElements": null,
"updated": 1763703743816,
"link": null,
"locked": false,
"text": "FAISS+SQLite",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "FAISS+SQLite",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "AeL3kEB9a8_TAvAXpAbpl",
"type": "text",
"x": 438.36328125,
"y": -3.78125,
"width": 116.109375,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a3",
"roundness": null,
"seed": 788579535,
"version": 33,
"versionNonce": 946602095,
"isDeleted": false,
"boundElements": null,
"updated": 1763703932431,
"link": null,
"locked": false,
"text": "FACT",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "FACT",
"autoResize": false,
"lineHeight": 1.25
},
{
"id": "Pe3TeMZvxQ8tRTcbD5v6P",
"type": "arrow",
"x": 297.125,
"y": 40.2578125,
"width": 120.2421875,
"height": 1.421875,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a4",
"roundness": {
"type": 2
},
"seed": 1146229999,
"version": 44,
"versionNonce": 636917679,
"isDeleted": false,
"boundElements": null,
"updated": 1763703759050,
"link": null,
"locked": false,
"points": [
[
0,
0
],
[
120.2421875,
1.421875
]
],
"lastCommittedPoint": null,
"startBinding": null,
"endBinding": null,
"startArrowhead": null,
"endArrowhead": "arrow",
"elbowed": false
},
{
"id": "GhmQoadtQRK8c8aEEbYKQ",
"type": "text",
"x": 283.53515625,
"y": 64.76171875,
"width": 130.85989379882812,
"height": 50,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a5",
"roundness": null,
"seed": 1445650959,
"version": 79,
"versionNonce": 566193167,
"isDeleted": false,
"boundElements": null,
"updated": 1763703768982,
"link": null,
"locked": false,
"text": "top-n Similary\n",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "top-n Similary\n",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "uTEFJs8cNS09WFq2pi9P7",
"type": "rectangle",
"x": 528.1586158430439,
"y": -173.43472375183552,
"width": 135.7578125,
"height": 128.73828125,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a6",
"roundness": {
"type": 3
},
"seed": 223409231,
"version": 44,
"versionNonce": 1066827105,
"isDeleted": false,
"boundElements": [
{
"id": "FfWdx1_yCq6UYfXamJX9N",
"type": "arrow"
}
],
"updated": 1763704050188,
"link": null,
"locked": false
},
{
"id": "2SzqzpJ4C2ymVj8-8vN7H",
"type": "text",
"x": 548.1480270948795,
"y": -211,
"width": 86.43992614746094,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a7",
"roundness": null,
"seed": 1015608623,
"version": 23,
"versionNonce": 950374849,
"isDeleted": false,
"boundElements": null,
"updated": 1763704047884,
"link": null,
"locked": false,
"text": "Memories",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "Memories",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "CgW6Yf9v0a9q1tsjhDl7b",
"type": "text",
"x": 568.3099317299038,
"y": -154.69469411681115,
"width": 62.099945068359375,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aA",
"roundness": null,
"seed": 452254927,
"version": 10,
"versionNonce": 972895023,
"isDeleted": false,
"boundElements": null,
"updated": 1763704057762,
"link": null,
"locked": false,
"text": "chunk1",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "chunk1",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "knvlKpaFZ8lY-73Y-e9W6",
"type": "text",
"x": 569.11328125,
"y": -116.91056665512056,
"width": 67.55995178222656,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aB",
"roundness": null,
"seed": 914644015,
"version": 90,
"versionNonce": 158135631,
"isDeleted": false,
"boundElements": null,
"updated": 1763704057762,
"link": null,
"locked": false,
"text": "chunk2",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "chunk2",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "Q7URqvTSMpvj08ye-afTT",
"type": "rectangle",
"x": 444.515625,
"y": 36.7890625,
"width": 58.859375,
"height": 29.41796875,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aC",
"roundness": {
"type": 3
},
"seed": 1642537601,
"version": 19,
"versionNonce": 948406575,
"isDeleted": false,
"boundElements": null,
"updated": 1763703870173,
"link": null,
"locked": false
},
{
"id": "JjxBt9cZIZXNTd6CmwyKL",
"type": "rectangle",
"x": 452.203125,
"y": 46.064453125,
"width": 58.859375,
"height": 29.41796875,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aD",
"roundness": {
"type": 3
},
"seed": 1746916641,
"version": 40,
"versionNonce": 1650978255,
"isDeleted": false,
"boundElements": [],
"updated": 1763703871882,
"link": null,
"locked": false
},
{
"id": "XGBCPPFnjriqsL8LvLwyQ",
"type": "rectangle",
"x": 461.56640625,
"y": 56.162109375,
"width": 58.859375,
"height": 29.41796875,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aE",
"roundness": {
"type": 3
},
"seed": 529794575,
"version": 85,
"versionNonce": 2131900641,
"isDeleted": false,
"boundElements": [],
"updated": 1763703874182,
"link": null,
"locked": false
},
{
"id": "FfWdx1_yCq6UYfXamJX9N",
"type": "arrow",
"x": 537.6875,
"y": 48.203125,
"width": 6.615850226297994,
"height": 75.81335873223107,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aF",
"roundness": {
"type": 2
},
"seed": 1982870689,
"version": 90,
"versionNonce": 25307457,
"isDeleted": false,
"boundElements": null,
"updated": 1763704050188,
"link": null,
"locked": false,
"points": [
[
0,
0
],
[
6.615850226297994,
-75.81335873223107
]
],
"lastCommittedPoint": null,
"startBinding": null,
"endBinding": {
"elementId": "uTEFJs8cNS09WFq2pi9P7",
"focus": 0.6071885090336794,
"gap": 24.64453125
},
"startArrowhead": null,
"endArrowhead": "arrow",
"elbowed": false
},
{
"id": "jgJgqGMRWcaNX_28wY4CU",
"type": "text",
"x": 570,
"y": 10,
"width": 67.11994934082031,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aG",
"roundness": null,
"seed": 1065220559,
"version": 26,
"versionNonce": 2115991521,
"isDeleted": false,
"boundElements": null,
"updated": 1763703959397,
"link": null,
"locked": false,
"text": "update",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "update",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "_5pSPPOpp9h1TpFCIc055",
"type": "text",
"x": 292.36328125,
"y": -138.5703125,
"width": 122.87992858886719,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aH",
"roundness": null,
"seed": 51461025,
"version": 26,
"versionNonce": 1647492655,
"isDeleted": false,
"boundElements": null,
"updated": 1763703925147,
"link": null,
"locked": false,
"text": "ADD Memory",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "ADD Memory",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "YG6MdL14l7lk4ypQNMZ_k",
"type": "text",
"x": 296.71885397566257,
"y": 161.399157096715,
"width": 295.27984619140625,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aJ",
"roundness": null,
"seed": 1183210273,
"version": 122,
"versionNonce": 1702733281,
"isDeleted": false,
"boundElements": [],
"updated": 1763704085083,
"link": null,
"locked": false,
"text": "RETRIEVE Memory (STATIC)",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "RETRIEVE Memory (STATIC)",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "Foa3VPJYqhj1uAX5mn3n0",
"type": "rectangle",
"x": 324.7616636099071,
"y": 248.63213980937013,
"width": 135.7578125,
"height": 128.73828125,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aL",
"roundness": {
"type": 3
},
"seed": 995116257,
"version": 225,
"versionNonce": 1886900225,
"isDeleted": false,
"boundElements": [],
"updated": 1763704055846,
"link": null,
"locked": false
},
{
"id": "pe3veI_yBFKYtbaJwDKQT",
"type": "text",
"x": 344.7510748617428,
"y": 211.06686356120565,
"width": 86.43992614746094,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aM",
"roundness": null,
"seed": 26673345,
"version": 204,
"versionNonce": 1004546017,
"isDeleted": false,
"boundElements": [],
"updated": 1763704055846,
"link": null,
"locked": false,
"text": "Memories",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "Memories",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "bOlhO8AaKE86_43viu5UG",
"type": "text",
"x": 365.50408375566445,
"y": 269.24725381983865,
"width": 62.099945068359375,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aN",
"roundness": null,
"seed": 1849784033,
"version": 106,
"versionNonce": 762320737,
"isDeleted": false,
"boundElements": [],
"updated": 1763704060295,
"link": null,
"locked": false,
"text": "chunk1",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "chunk1",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "V_iDW10PKwMe7vWb5S5HF",
"type": "text",
"x": 366.3074332757606,
"y": 307.03138128152926,
"width": 67.55995178222656,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aO",
"roundness": null,
"seed": 1670509249,
"version": 186,
"versionNonce": 1964540737,
"isDeleted": false,
"boundElements": [],
"updated": 1763704060295,
"link": null,
"locked": false,
"text": "chunk2",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "chunk2",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "LHKMRdSowgcl2LsKacxTz",
"type": "text",
"x": 484.9493410573871,
"y": 292.45619471187945,
"width": 273.579833984375,
"height": 50,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aP",
"roundness": null,
"seed": 945666991,
"version": 104,
"versionNonce": 1512137505,
"isDeleted": false,
"boundElements": null,
"updated": 1763704096016,
"link": null,
"locked": false,
"text": "RANKED By DECAY SCORE,\nTOP K",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "RANKED By DECAY SCORE,\nTOP K",
"autoResize": true,
"lineHeight": 1.25
}
],
"appState": {
"gridSize": 20,
"gridStep": 5,
"gridModeEnabled": false,
"viewBackgroundColor": "#ffffff"
},
"files": {}
}

View File

@@ -0,0 +1,76 @@
## Decay Score
记忆衰减分数定义为:
\[
\text{decay\_score}
= \alpha \cdot e^{-\lambda \cdot \Delta t \cdot \beta}
+ (1-\alpha)\cdot (1 - e^{-\gamma \cdot c})
\]
其中:
+ \(\Delta t\):自上次检索以来经过的时间(天),由 `last_retrieval_at` 计算;
+ \(c\):检索次数,对应字段 `retrieval_count`
+ \(\alpha\):控制时间衰减和检索次数影响的权重;
+ \(\gamma\):控制检索次数影响的速率;
+ \(\lambda\):控制时间衰减的速率;
+ \(\beta\):时间衰减调节因子;
\[
\beta = \frac{1}{1 + a \cdot c}
\]
+ \(a\):控制检索次数对时间衰减影响的权重。
## ADD MEMORY
+ LLM 通过 `astr_add_memory` 工具调用,传入记忆内容和记忆类型。
+ 生成 `mem_id = uuid4()`
+ 从上下文中获取 `owner_id = unified_message_origin`
步骤:
1. 使用 VecDB 以新记忆内容为 query检索前 20 条相似记忆。
2. 从中取相似度最高的前 5 条:
+ 若相似度超过“合并阈值”(如 `sim >= merge_threshold`):
+ 将该条记忆视为同一记忆,使用 LLM 将旧内容与新内容合并;
+ 在同一个 `mem_id` 上更新 MemoryDB 和 VecDB(UPDATE,而非新建)
+ 否则:
+ 作为全新的记忆插入:
+ 写入 VecDB,metadata 中包含 `mem_id`, `owner_id`
+ 写入 MemoryDB 的 `memory_chunks` 表,初始化:
+ `created_at = now`
+ `last_retrieval_at = now`
+ `retrieval_count = 1` 等。
3. 对 VecDB 返回的前 20 条记忆,如果相似度高于某个“赫布阈值”(`hebb_threshold`),则:
+ `retrieval_count += 1`
+ `last_retrieval_at = now`
这一步体现了赫布学习:与新记忆共同被激活的旧记忆会获得一次强化。
## QUERY MEMORY (STATIC)
+ LLM 通过 `astr_query_memory` 工具调用,无参数。
步骤:
1. 从 MemoryDB 的 `memory_chunks` 表中查询当前用户所有活跃记忆:
+ `SELECT * FROM memory_chunks WHERE owner_id = ? AND is_active = 1`
2. 对每条记忆,根据 `last_retrieval_at` 和 `retrieval_count` 计算对应的 `decay_score`
3. 按 `decay_score` 从高到低排序,返回前 `top_k` 条记忆内容给 LLM。
4. 对返回的这 `top_k` 条记忆:
+ `retrieval_count += 1`
+ `last_retrieval_at = now`
## QUERY MEMORY (DYNAMIC)(暂不实现)
+ LLM 提供查询内容作为语义 query。
+ 使用 VecDB 检索与该 query 最相似的前 `N` 条记忆(`N > top_k`)。
+ 根据 `mem_id` 从 `memory_chunks` 中加载对应记录。
+ 对这批候选记忆计算:
+ 语义相似度(来自 VecDB)
+ `decay_score`
+ 最终排序分数(例如 `w1 * sim + w2 * decay_score`)
+ 按最终排序分数从高到低返回前 `top_k` 条记忆内容,并更新它们的 `retrieval_count``last_retrieval_at`

View File

@@ -0,0 +1,63 @@
import uuid
from datetime import datetime, timezone
import numpy as np
from sqlmodel import Field, MetaData, SQLModel
# Per-type importance multiplier applied to the decay score in
# MemoryChunk.compute_decay_score; unknown types fall back to 1.0 there.
MEMORY_TYPE_IMPORTANCE = {"persona": 1.3, "fact": 1.0, "ephemeral": 0.8}
class BaseMemoryModel(SQLModel, table=False):
    # Abstract base (table=False) that owns a dedicated MetaData instance,
    # so memory tables are created/managed separately from other app models.
    metadata = MetaData()
class MemoryChunk(BaseMemoryModel, table=True):
    """A chunk of memory stored in the system."""

    __tablename__ = "memory_chunks"  # type: ignore

    # Surrogate autoincrement primary key; `mem_id` is the stable public id.
    id: int | None = Field(
        primary_key=True,
        sa_column_kwargs={"autoincrement": True},
        default=None,
    )
    mem_id: str = Field(
        max_length=36,
        nullable=False,
        unique=True,
        default_factory=lambda: str(uuid.uuid4()),
        index=True,
    )
    fact: str = Field(nullable=False)
    """The factual content of the memory chunk."""
    owner_id: str = Field(max_length=255, nullable=False, index=True)
    """The identifier of the owner (user) of the memory chunk."""
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    """The timestamp when the memory chunk was created."""
    last_retrieval_at: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc)
    )
    """The timestamp when the memory chunk was last retrieved."""
    retrieval_count: int = Field(default=1, nullable=False)
    """The number of times the memory chunk has been retrieved."""
    memory_type: str = Field(max_length=20, nullable=False, default="fact")
    """The type of memory (e.g., 'persona', 'fact', 'ephemeral')."""
    is_active: bool = Field(default=True, nullable=False)
    """Whether the memory chunk is active."""

    def compute_decay_score(self, current_time: datetime) -> float:
        """Compute the decay score of the memory chunk based on time and retrievals.

        Implements::

            decay_score = alpha * exp(-lambda * dt * beta)
                          + (1 - alpha) * (1 - exp(-gamma * c))
            beta = 1 / (1 + a * c)

        where ``dt`` is days since the last retrieval and ``c`` is the
        retrieval count. The result is scaled by the per-type importance
        factor from ``MEMORY_TYPE_IMPORTANCE`` (unknown types use 1.0).

        Args:
            current_time: Reference "now". NOTE(review): must match the
                tz-awareness of ``last_retrieval_at`` — values loaded back
                from SQLite may be naive; confirm before subtracting.

        Returns:
            The decay score as a builtin ``float``.
        """
        # Constants for the decay formula
        alpha = 0.5  # weight between time decay and retrieval reinforcement
        gamma = 0.1  # rate at which retrieval count saturates
        lambda_ = 0.05  # time-decay rate
        a = 0.1  # how strongly retrievals damp the time decay (via beta)

        # Calculate delta_t in days
        delta_t = (current_time - self.last_retrieval_at).total_seconds() / 86400
        c = self.retrieval_count
        beta = 1 / (1 + a * c)
        decay_score = alpha * np.exp(-lambda_ * delta_t * beta) + (1 - alpha) * (
            1 - np.exp(-gamma * c)
        )
        # np.exp yields np.float64; coerce so the declared `-> float` holds
        # and callers never see a numpy scalar leak into JSON/DB layers.
        return float(decay_score * MEMORY_TYPE_IMPORTANCE.get(self.memory_type, 1.0))

View File

@@ -0,0 +1,174 @@
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from pathlib import Path
from sqlalchemy import select, text, update
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlmodel import col
from astrbot.core import logger
from .entities import BaseMemoryModel, MemoryChunk
class MemoryDatabase:
    """Async SQLite store for memory chunks.

    Thin data-access layer over SQLAlchemy's asyncio engine (aiosqlite
    driver) and the SQLModel tables declared in ``entities``. Callers must
    await :meth:`initialize` once before using any other method.
    """

    def __init__(self, db_path: str = "data/astr_memory/memory.db") -> None:
        """Initialize memory database

        Args:
            db_path: Database file path, default is data/astr_memory/memory.db
        """
        self.db_path = db_path
        self.DATABASE_URL = f"sqlite+aiosqlite:///{db_path}"
        # Set to True once initialize() has created tables and indexes.
        self.inited = False
        # Ensure directory exists
        Path(db_path).parent.mkdir(parents=True, exist_ok=True)
        # Create async engine
        self.engine = create_async_engine(
            self.DATABASE_URL,
            echo=False,
            pool_pre_ping=True,  # validate pooled connections before reuse
            pool_recycle=3600,  # recycle connections older than one hour
        )
        # Create session factory
        self.async_session = async_sessionmaker(
            self.engine,
            class_=AsyncSession,
            expire_on_commit=False,  # ORM objects stay readable after commit
        )

    @asynccontextmanager
    async def get_db(self):
        """Get database session

        Usage:
            async with mem_db.get_db() as session:
                # Perform database operations
                result = await session.execute(stmt)
        """
        async with self.async_session() as session:
            yield session

    async def initialize(self) -> None:
        """Initialize database, create tables and configure SQLite parameters"""
        async with self.engine.begin() as conn:
            # Create all memory related tables
            await conn.run_sync(BaseMemoryModel.metadata.create_all)
            # Configure SQLite performance optimization parameters
            # NOTE(review): SQLite rejects `PRAGMA journal_mode=WAL` inside an
            # open transaction; this presumably relies on the pysqlite
            # driver's lazy-BEGIN behavior — confirm WAL actually takes effect.
            await conn.execute(text("PRAGMA journal_mode=WAL"))
            await conn.execute(text("PRAGMA synchronous=NORMAL"))
            await conn.execute(text("PRAGMA cache_size=20000"))
            await conn.execute(text("PRAGMA temp_store=MEMORY"))
            await conn.execute(text("PRAGMA mmap_size=134217728"))
            await conn.execute(text("PRAGMA optimize"))
            # NOTE(review): engine.begin() already commits on scope exit, so
            # this explicit commit looks redundant — verify before removing.
            await conn.commit()
        await self._create_indexes()
        self.inited = True
        logger.info(f"Memory database initialized: {self.db_path}")

    async def _create_indexes(self) -> None:
        """Create indexes for memory_chunks table"""
        async with self.get_db() as session:
            async with session.begin():
                # Create memory chunks table indexes
                await session.execute(
                    text(
                        "CREATE INDEX IF NOT EXISTS idx_mem_mem_id "
                        "ON memory_chunks(mem_id)",
                    ),
                )
                await session.execute(
                    text(
                        "CREATE INDEX IF NOT EXISTS idx_mem_owner_id "
                        "ON memory_chunks(owner_id)",
                    ),
                )
                await session.execute(
                    text(
                        "CREATE INDEX IF NOT EXISTS idx_mem_owner_active "
                        "ON memory_chunks(owner_id, is_active)",
                    ),
                )
                # NOTE(review): session.begin() commits on exit; this explicit
                # commit appears redundant — verify before removing.
                await session.commit()

    async def close(self) -> None:
        """Close database connection"""
        await self.engine.dispose()
        logger.info(f"Memory database closed: {self.db_path}")

    async def insert_memory(self, memory: MemoryChunk) -> MemoryChunk:
        """Insert a new memory chunk"""
        async with self.get_db() as session:
            session.add(memory)
            await session.commit()
            # Refresh so DB-generated fields (e.g. autoincrement id) are populated.
            await session.refresh(memory)
            return memory

    async def get_memory_by_id(self, mem_id: str) -> MemoryChunk | None:
        """Get memory chunk by mem_id"""
        async with self.get_db() as session:
            stmt = select(MemoryChunk).where(col(MemoryChunk.mem_id) == mem_id)
            result = await session.execute(stmt)
            return result.scalar_one_or_none()

    async def update_memory(self, memory: MemoryChunk) -> MemoryChunk:
        """Update an existing memory chunk"""
        async with self.get_db() as session:
            session.add(memory)
            await session.commit()
            await session.refresh(memory)
            return memory

    async def get_active_memories(self, owner_id: str) -> list[MemoryChunk]:
        """Get all active memories for a user"""
        async with self.get_db() as session:
            stmt = select(MemoryChunk).where(
                col(MemoryChunk.owner_id) == owner_id,
                col(MemoryChunk.is_active) == True,  # noqa: E712
            )
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def update_retrieval_stats(
        self,
        mem_ids: list[str],
        current_time: datetime | None = None,
    ) -> None:
        """Update retrieval statistics for multiple memories

        Increments ``retrieval_count`` and sets ``last_retrieval_at`` for
        every id in ``mem_ids`` with a single bulk UPDATE. No-op on an
        empty id list.
        """
        if not mem_ids:
            return
        if current_time is None:
            current_time = datetime.now(timezone.utc)
        async with self.get_db() as session:
            async with session.begin():
                stmt = (
                    update(MemoryChunk)
                    .where(col(MemoryChunk.mem_id).in_(mem_ids))
                    .values(
                        # Server-side increment avoids read-modify-write races.
                        retrieval_count=MemoryChunk.retrieval_count + 1,
                        last_retrieval_at=current_time,
                    )
                )
                await session.execute(stmt)
                # NOTE(review): redundant with session.begin() — see above.
                await session.commit()

    async def deactivate_memory(self, mem_id: str) -> bool:
        """Deactivate a memory chunk"""
        async with self.get_db() as session:
            async with session.begin():
                stmt = (
                    update(MemoryChunk)
                    .where(col(MemoryChunk.mem_id) == mem_id)
                    .values(is_active=False)
                )
                result = await session.execute(stmt)
                await session.commit()
                # True iff at least one row matched; the guard covers drivers
                # where rowcount is 0/None/-1 for unsupported statements.
                return result.rowcount > 0 if result.rowcount else False  # type: ignore

View File

@@ -0,0 +1,281 @@
import json
import uuid
from datetime import datetime, timezone
from pathlib import Path
from astrbot.core import logger
from astrbot.core.db.vec_db.faiss_impl import FaissVecDB
from astrbot.core.provider.provider import EmbeddingProvider
from astrbot.core.provider.provider import Provider as LLMProvider
from .entities import MemoryChunk
from .mem_db_sqlite import MemoryDatabase
# Similarity cut-offs used by MemoryManager.add_memory.
MERGE_THRESHOLD = 0.85
"""Similarity threshold for merging memories"""
HEBB_THRESHOLD = 0.70
"""Similarity threshold for Hebbian learning reinforcement"""
# System prompt sent to the merge LLM; the user prompt supplies the
# numbered list of memory entries to consolidate.
MERGE_SYSTEM_PROMPT = """You are a memory consolidation assistant. Your task is to merge two related memory entries into a single, comprehensive memory.
Input format:
- Old memory: [existing memory content]
- New memory: [new memory content to be integrated]
Your output should be a single, concise memory that combines the essential information from both entries. Preserve specific details, update outdated information, and eliminate redundancy. Output only the merged memory content without any explanations or meta-commentary."""
class MemoryManager:
    """Manager for user long-term memory storage and retrieval.

    Pairs a relational store (MemoryDatabase) holding memory metadata with a
    FAISS vector store used for semantic similarity search. Call
    :meth:`initialize` before using any other method.
    """

    def __init__(self, memory_root_dir: str = "data/astr_memory"):
        """Create the manager and ensure the storage directory exists.

        Args:
            memory_root_dir: Directory that holds memory.db / doc.db / index.faiss.
        """
        self.memory_root_dir = Path(memory_root_dir)
        self.memory_root_dir.mkdir(parents=True, exist_ok=True)
        self.mem_db: MemoryDatabase | None = None
        self.vec_db: FaissVecDB | None = None
        self._initialized = False

    async def initialize(
        self,
        embedding_provider: EmbeddingProvider,
        merge_llm_provider: LLMProvider,
    ):
        """Initialize memory database and vector database.

        Args:
            embedding_provider: Provider that embeds memory text for the VecDB.
            merge_llm_provider: LLM used to consolidate similar memories.
        """
        # Initialize MemoryDB
        db_path = self.memory_root_dir / "memory.db"
        self.mem_db = MemoryDatabase(db_path.as_posix())
        await self.mem_db.initialize()
        self.embedding_provider = embedding_provider
        self.merge_llm_provider = merge_llm_provider
        # Initialize VecDB
        doc_store_path = self.memory_root_dir / "doc.db"
        index_store_path = self.memory_root_dir / "index.faiss"
        self.vec_db = FaissVecDB(
            doc_store_path=doc_store_path.as_posix(),
            index_store_path=index_store_path.as_posix(),
            embedding_provider=self.embedding_provider,
        )
        await self.vec_db.initialize()
        logger.info("Memory manager initialized")
        self._initialized = True

    async def terminate(self):
        """Close all database connections and mark the manager uninitialized."""
        if self.vec_db:
            await self.vec_db.close()
        if self.mem_db:
            await self.mem_db.close()
        # Fix: drop the closed handles and clear the flag so status checks do
        # not report a closed manager as ready and initialize() can run again.
        self.vec_db = None
        self.mem_db = None
        self._initialized = False

    async def add_memory(
        self,
        fact: str,
        owner_id: str,
        memory_type: str = "fact",
    ) -> MemoryChunk:
        """Add a new memory with similarity check and merge logic

        Implements the ADD MEMORY workflow from _README.md:
        1. Search for similar memories using VecDB
        2. If similarity >= merge_threshold, merge with existing memory
        3. Otherwise, create new memory
        4. Apply Hebbian learning to similar memories (similarity >= hebb_threshold)

        Args:
            fact: Memory content
            owner_id: User identifier
            memory_type: Memory type ('persona', 'fact', 'ephemeral')

        Returns:
            The created or updated MemoryChunk

        Raises:
            RuntimeError: If the manager has not been initialized.
        """
        if not self.vec_db or not self.mem_db:
            raise RuntimeError("Memory manager not initialized")
        current_time = datetime.now(timezone.utc)
        # Step 1: Search for similar memories
        similar_results = await self.vec_db.retrieve(
            query=fact,
            k=20,
            fetch_k=50,
            metadata_filters={"owner_id": owner_id},
        )
        # Step 2: Check if we should merge with existing memories (top 3 similar ones)
        merge_candidates = [
            r for r in similar_results[:3] if r.similarity >= MERGE_THRESHOLD
        ]
        if merge_candidates:
            # Get all candidate memories from database
            candidate_memories: list[tuple[str, MemoryChunk]] = []
            for candidate in merge_candidates:
                mem_id = json.loads(candidate.data["metadata"])["mem_id"]
                memory = await self.mem_db.get_memory_by_id(mem_id)
                if memory:
                    candidate_memories.append((mem_id, memory))
            if candidate_memories:
                # Use the most similar memory as the base
                base_mem_id, base_memory = candidate_memories[0]
                # Collect all facts to merge (existing candidates + new fact)
                all_facts = [mem.fact for _, mem in candidate_memories] + [fact]
                merged_fact = await self._merge_multiple_memories(all_facts)
                # Update the base memory
                base_memory.fact = merged_fact
                base_memory.last_retrieval_at = current_time
                base_memory.retrieval_count += 1
                updated_memory = await self.mem_db.update_memory(base_memory)
                # Update VecDB for base memory (delete + insert re-embeds the
                # merged text under the same id)
                await self.vec_db.delete(base_mem_id)
                await self.vec_db.insert(
                    content=merged_fact,
                    metadata={
                        "mem_id": base_mem_id,
                        "owner_id": owner_id,
                        "memory_type": memory_type,
                    },
                    id=base_mem_id,
                )
                # Deactivate and remove other merged memories
                for mem_id, _ in candidate_memories[1:]:
                    await self.mem_db.deactivate_memory(mem_id)
                    await self.vec_db.delete(mem_id)
                logger.info(
                    f"Merged {len(candidate_memories)} memories into {base_mem_id} for user {owner_id}"
                )
                return updated_memory
        # Step 3: Create new memory
        mem_id = str(uuid.uuid4())
        new_memory = MemoryChunk(
            mem_id=mem_id,
            fact=fact,
            owner_id=owner_id,
            memory_type=memory_type,
            created_at=current_time,
            last_retrieval_at=current_time,
            retrieval_count=1,
            is_active=True,
        )
        # Insert into MemoryDB
        created_memory = await self.mem_db.insert_memory(new_memory)
        # Insert into VecDB
        await self.vec_db.insert(
            content=fact,
            metadata={
                "mem_id": mem_id,
                "owner_id": owner_id,
                "memory_type": memory_type,
            },
            id=mem_id,
        )
        # Step 4: Apply Hebbian learning to similar memories
        hebb_mem_ids = [
            json.loads(r.data["metadata"])["mem_id"]
            for r in similar_results
            if r.similarity >= HEBB_THRESHOLD
        ]
        if hebb_mem_ids:
            await self.mem_db.update_retrieval_stats(hebb_mem_ids, current_time)
            logger.debug(
                f"Applied Hebbian learning to {len(hebb_mem_ids)} memories for user {owner_id}",
            )
        logger.info(f"Created new memory {mem_id} for user {owner_id}")
        return created_memory

    async def query_memory(
        self,
        owner_id: str,
        top_k: int = 5,
    ) -> list[MemoryChunk]:
        """Query user's memories using static retrieval with decay score ranking

        Implements the QUERY MEMORY (STATIC) workflow from _README.md:
        1. Get all active memories for user from MemoryDB
        2. Compute decay_score for each memory
        3. Sort by decay_score and return top_k
        4. Update retrieval statistics for returned memories

        Args:
            owner_id: User identifier
            top_k: Number of memories to return

        Returns:
            List of top_k MemoryChunk sorted by decay score

        Raises:
            RuntimeError: If the manager has not been initialized.
        """
        if not self.mem_db:
            raise RuntimeError("Memory manager not initialized")
        current_time = datetime.now(timezone.utc)
        # Step 1: Get all active memories for user
        all_memories = await self.mem_db.get_active_memories(owner_id)
        if not all_memories:
            return []
        # Step 2-3: Compute decay scores and sort (highest score first)
        memories_with_scores = [
            (mem, mem.compute_decay_score(current_time)) for mem in all_memories
        ]
        memories_with_scores.sort(key=lambda x: x[1], reverse=True)
        # Get top_k memories
        top_memories = [mem for mem, _ in memories_with_scores[:top_k]]
        # Step 4: Update retrieval statistics
        mem_ids = [mem.mem_id for mem in top_memories]
        await self.mem_db.update_retrieval_stats(mem_ids, current_time)
        logger.debug(f"Retrieved {len(top_memories)} memories for user {owner_id}")
        return top_memories

    async def _merge_multiple_memories(self, facts: list[str]) -> str:
        """Merge multiple memory facts using LLM in one call

        Args:
            facts: List of memory facts to merge

        Returns:
            Merged memory content (falls back to a space-joined concatenation
            when no merge LLM is configured or the LLM call fails)
        """
        if not self.merge_llm_provider:
            return " ".join(facts)
        if len(facts) == 1:
            return facts[0]
        try:
            # Format all facts as a numbered list
            facts_list = "\n".join(f"{i + 1}. {fact}" for i, fact in enumerate(facts))
            user_prompt = (
                f"Please merge the following {len(facts)} related memory entries "
                "into a single, comprehensive memory:"
                f"\n{facts_list}\n\nOutput only the merged memory content."
            )
            response = await self.merge_llm_provider.text_chat(
                prompt=user_prompt,
                system_prompt=MERGE_SYSTEM_PROMPT,
            )
            merged_content = response.completion_text.strip()
            return merged_content if merged_content else " ".join(facts)
        except Exception as e:
            logger.warning(f"Failed to merge memories with LLM: {e}, using fallback")
            return " ".join(facts)

View File

@@ -0,0 +1,156 @@
from pydantic import Field
from pydantic.dataclasses import dataclass
from astrbot.core.agent.tool import FunctionTool, ToolExecResult
from astrbot.core.astr_agent_context import AstrAgentContext, ContextWrapper
@dataclass
class AddMemory(FunctionTool[AstrAgentContext]):
    """Tool for adding memories to user's long-term memory storage"""

    # Tool identifier exposed to the LLM tool-calling interface.
    name: str = "astr_add_memory"
    description: str = (
        "Add a new memory to the user's long-term memory storage. "
        "Use this tool only when the user explicitly asks you to remember something, "
        "or when they share stable preferences, identity, or long-term goals that will be useful in future interactions."
    )
    # JSON-schema parameter spec consumed by the tool-calling LLM.
    parameters: dict = Field(
        default_factory=lambda: {
            "type": "object",
            "properties": {
                "fact": {
                    "type": "string",
                    "description": (
                        "The concrete memory content to store, such as a user preference, "
                        "identity detail, long-term goal, or stable profile fact."
                    ),
                },
                "memory_type": {
                    "type": "string",
                    "enum": ["persona", "fact", "ephemeral"],
                    "description": (
                        "The relative importance of this memory. "
                        "Use 'persona' for core identity or highly impactful information, "
                        "'fact' for normal long-term preferences, "
                        "and 'ephemeral' for minor or tentative facts."
                    ),
                },
            },
            "required": ["fact", "memory_type"],
        }
    )

    async def call(
        self, context: ContextWrapper[AstrAgentContext], **kwargs
    ) -> ToolExecResult:
        """Add a memory to long-term storage

        Args:
            context: Agent context
            **kwargs: Must contain 'fact' and 'memory_type'

        Returns:
            A human-readable result string (success or error message);
            presumably str is an acceptable ToolExecResult — TODO confirm
        """
        mm = context.context.context.memory_manager
        fact = kwargs.get("fact")
        memory_type = kwargs.get("memory_type", "fact")
        if not fact:
            return "Missing required parameter: fact"
        try:
            # Get owner_id from context
            owner_id = context.context.event.unified_msg_origin
            # Add memory using memory manager
            memory = await mm.add_memory(
                fact=fact,
                owner_id=owner_id,
                memory_type=memory_type,
            )
            return f"Memory added successfully (ID: {memory.mem_id})"
        except Exception as e:
            # Report failures back to the LLM instead of raising.
            return f"Failed to add memory: {str(e)}"
@dataclass
class QueryMemory(FunctionTool[AstrAgentContext]):
    """Tool that retrieves the most relevant entries from a user's long-term memory."""

    name: str = "astr_query_memory"
    description: str = (
        "Query the user's long-term memory storage and return the most relevant memories. "
        "Use this tool when you need user-specific context, preferences, or past facts "
        "that are not explicitly present in the current conversation."
    )
    parameters: dict = Field(
        default_factory=lambda: {
            "type": "object",
            "properties": {
                "top_k": {
                    "type": "integer",
                    "description": (
                        "Maximum number of memories to retrieve after retention-based ranking. "
                        "Typically between 3 and 10."
                    ),
                    "default": 5,
                    "minimum": 1,
                    "maximum": 20,
                },
            },
            "required": [],
        }
    )

    async def call(
        self, context: ContextWrapper[AstrAgentContext], **kwargs
    ) -> ToolExecResult:
        """Query memories from long-term storage

        Args:
            context: Agent context
            **kwargs: Optional 'top_k' parameter

        Returns:
            ToolExecResult with formatted memory list
        """
        manager = context.context.context.memory_manager
        limit = kwargs.get("top_k", 5)
        try:
            # The session origin doubles as the memory owner id.
            session_owner = context.context.event.unified_msg_origin
            memories = await manager.query_memory(
                owner_id=session_owner,
                top_k=limit,
            )
            if not memories:
                return "No memories found for this user."
            # Render each memory as a numbered, annotated line.
            lines = [
                f"{idx}. [{mem.memory_type.upper()}] {mem.fact} "
                f"(retrieved {mem.retrieval_count} times, "
                f"last: {mem.last_retrieval_at.strftime('%Y-%m-%d')})"
                for idx, mem in enumerate(memories, 1)
            ]
            return "Retrieved memories:\n" + "\n".join(lines)
        except Exception as e:
            return f"Failed to query memories: {str(e)}"
# Module-level singleton tool instances, registered into agent tool sets.
ADD_MEMORY_TOOL = AddMemory()
QUERY_MEMORY_TOOL = QueryMemory()

View File

@@ -30,6 +30,7 @@ from ....astr_agent_context import AgentContextWrapper
from ....astr_agent_hooks import MAIN_AGENT_HOOKS from ....astr_agent_hooks import MAIN_AGENT_HOOKS
from ....astr_agent_run_util import AgentRunner, run_agent from ....astr_agent_run_util import AgentRunner, run_agent
from ....astr_agent_tool_exec import FunctionToolExecutor from ....astr_agent_tool_exec import FunctionToolExecutor
from ....memory.tools import ADD_MEMORY_TOOL, QUERY_MEMORY_TOOL
from ...context import PipelineContext, call_event_hook from ...context import PipelineContext, call_event_hook
from ..stage import Stage from ..stage import Stage
from ..utils import KNOWLEDGE_BASE_QUERY_TOOL, retrieve_knowledge_base from ..utils import KNOWLEDGE_BASE_QUERY_TOOL, retrieve_knowledge_base
@@ -124,6 +125,15 @@ class LLMRequestSubStage(Stage):
req.func_tool = ToolSet() req.func_tool = ToolSet()
req.func_tool.add_tool(KNOWLEDGE_BASE_QUERY_TOOL) req.func_tool.add_tool(KNOWLEDGE_BASE_QUERY_TOOL)
async def _apply_memory(self, req: ProviderRequest):
mm = self.ctx.plugin_manager.context.memory_manager
if not mm or not mm._initialized:
return
if req.func_tool is None:
req.func_tool = ToolSet()
req.func_tool.add_tool(ADD_MEMORY_TOOL)
req.func_tool.add_tool(QUERY_MEMORY_TOOL)
def _truncate_contexts( def _truncate_contexts(
self, self,
contexts: list[dict], contexts: list[dict],
@@ -377,6 +387,9 @@ class LLMRequestSubStage(Stage):
# apply knowledge base feature # apply knowledge base feature
await self._apply_kb(event, req) await self._apply_kb(event, req)
# apply memory feature
await self._apply_memory(req)
# fix contexts json str # fix contexts json str
if isinstance(req.contexts, str): if isinstance(req.contexts, str):
req.contexts = json.loads(req.contexts) req.contexts = json.loads(req.contexts)

View File

@@ -14,6 +14,7 @@ from astrbot.core.config.astrbot_config import AstrBotConfig
from astrbot.core.conversation_mgr import ConversationManager from astrbot.core.conversation_mgr import ConversationManager
from astrbot.core.db import BaseDatabase from astrbot.core.db import BaseDatabase
from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager
from astrbot.core.memory.memory_manager import MemoryManager
from astrbot.core.message.message_event_result import MessageChain from astrbot.core.message.message_event_result import MessageChain
from astrbot.core.persona_mgr import PersonaManager from astrbot.core.persona_mgr import PersonaManager
from astrbot.core.platform import Platform from astrbot.core.platform import Platform
@@ -65,6 +66,7 @@ class Context:
persona_manager: PersonaManager, persona_manager: PersonaManager,
astrbot_config_mgr: AstrBotConfigManager, astrbot_config_mgr: AstrBotConfigManager,
knowledge_base_manager: KnowledgeBaseManager, knowledge_base_manager: KnowledgeBaseManager,
memory_manager: MemoryManager,
): ):
self._event_queue = event_queue self._event_queue = event_queue
"""事件队列。消息平台通过事件队列传递消息事件。""" """事件队列。消息平台通过事件队列传递消息事件。"""
@@ -79,6 +81,7 @@ class Context:
self.persona_manager = persona_manager self.persona_manager = persona_manager
self.astrbot_config_mgr = astrbot_config_mgr self.astrbot_config_mgr = astrbot_config_mgr
self.kb_manager = knowledge_base_manager self.kb_manager = knowledge_base_manager
self.memory_manager = memory_manager
async def llm_generate( async def llm_generate(
self, self,

View File

@@ -5,6 +5,7 @@ from .conversation import ConversationRoute
from .file import FileRoute from .file import FileRoute
from .knowledge_base import KnowledgeBaseRoute from .knowledge_base import KnowledgeBaseRoute
from .log import LogRoute from .log import LogRoute
from .memory import MemoryRoute
from .persona import PersonaRoute from .persona import PersonaRoute
from .plugin import PluginRoute from .plugin import PluginRoute
from .session_management import SessionManagementRoute from .session_management import SessionManagementRoute
@@ -21,6 +22,7 @@ __all__ = [
"FileRoute", "FileRoute",
"KnowledgeBaseRoute", "KnowledgeBaseRoute",
"LogRoute", "LogRoute",
"MemoryRoute",
"PersonaRoute", "PersonaRoute",
"PluginRoute", "PluginRoute",
"SessionManagementRoute", "SessionManagementRoute",

View File

@@ -0,0 +1,174 @@
"""Memory management API routes"""
from quart import jsonify, request
from astrbot.core import logger
from astrbot.core.core_lifecycle import AstrBotCoreLifecycle
from astrbot.core.db import BaseDatabase
from .route import Response, Route, RouteContext
class MemoryRoute(Route):
    """Memory management routes"""

    def __init__(
        self,
        context: RouteContext,
        db: BaseDatabase,
        core_lifecycle: AstrBotCoreLifecycle,
    ):
        """Register the /memory/* API endpoints.

        Args:
            context: Shared route context.
            db: Main AstrBot database handle.
            core_lifecycle: Core lifecycle exposing memory/provider managers.
        """
        super().__init__(context)
        self.db = db
        self.core_lifecycle = core_lifecycle
        self.memory_manager = core_lifecycle.memory_manager
        self.provider_manager = core_lifecycle.provider_manager
        self.routes = [
            ("/memory/status", ("GET", self.get_status)),
            ("/memory/initialize", ("POST", self.initialize)),
            ("/memory/update_merge_llm", ("POST", self.update_merge_llm)),
        ]
        self.register_routes()

    async def get_status(self):
        """Get memory system status (initialized flag and provider ids)."""
        try:
            is_initialized = self.memory_manager._initialized
            status_data = {
                "initialized": is_initialized,
                "embedding_provider_id": None,
                "merge_llm_provider_id": None,
            }
            if is_initialized:
                # Get embedding provider info
                if self.memory_manager.embedding_provider:
                    status_data["embedding_provider_id"] = (
                        self.memory_manager.embedding_provider.provider_config["id"]
                    )
                # Get merge LLM provider info
                if self.memory_manager.merge_llm_provider:
                    status_data["merge_llm_provider_id"] = (
                        self.memory_manager.merge_llm_provider.provider_config["id"]
                    )
            return jsonify(Response().ok(status_data).__dict__)
        except Exception as e:
            logger.error(f"Failed to get memory status: {e}")
            return jsonify(Response().error(str(e)).__dict__)

    async def initialize(self):
        """Initialize memory system with embedding and merge LLM providers"""
        try:
            # Fix: get_json() returns None for a missing/invalid JSON body;
            # fall back to {} so validation below yields a clear error rather
            # than an AttributeError swallowed by the generic handler.
            data = await request.get_json(silent=True) or {}
            embedding_provider_id = data.get("embedding_provider_id")
            merge_llm_provider_id = data.get("merge_llm_provider_id")
            if not embedding_provider_id or not merge_llm_provider_id:
                return jsonify(
                    Response()
                    .error(
                        "embedding_provider_id and merge_llm_provider_id are required"
                    )
                    .__dict__,
                )
            # Check if already initialized
            if self.memory_manager._initialized:
                return jsonify(
                    Response()
                    .error(
                        "Memory system already initialized. Embedding provider cannot be changed.",
                    )
                    .__dict__,
                )
            # Get providers
            embedding_provider = await self.provider_manager.get_provider_by_id(
                embedding_provider_id,
            )
            merge_llm_provider = await self.provider_manager.get_provider_by_id(
                merge_llm_provider_id,
            )
            if not embedding_provider:
                return jsonify(
                    Response()
                    .error(f"Embedding provider {embedding_provider_id} not found")
                    .__dict__,
                )
            if not merge_llm_provider:
                return jsonify(
                    Response()
                    .error(f"Merge LLM provider {merge_llm_provider_id} not found")
                    .__dict__,
                )
            # Initialize memory manager
            await self.memory_manager.initialize(
                embedding_provider=embedding_provider,
                merge_llm_provider=merge_llm_provider,
            )
            logger.info(
                f"Memory system initialized with embedding: {embedding_provider_id}, "
                f"merge LLM: {merge_llm_provider_id}",
            )
            return jsonify(
                Response()
                .ok({"message": "Memory system initialized successfully"})
                .__dict__,
            )
        except Exception as e:
            logger.error(f"Failed to initialize memory system: {e}")
            return jsonify(Response().error(str(e)).__dict__)

    async def update_merge_llm(self):
        """Update merge LLM provider (only allowed after initialization)"""
        try:
            # Fix: tolerate a missing/invalid JSON body (see initialize()).
            data = await request.get_json(silent=True) or {}
            merge_llm_provider_id = data.get("merge_llm_provider_id")
            if not merge_llm_provider_id:
                return jsonify(
                    Response().error("merge_llm_provider_id is required").__dict__,
                )
            # Check if initialized
            if not self.memory_manager._initialized:
                return jsonify(
                    Response()
                    .error("Memory system not initialized. Please initialize first.")
                    .__dict__,
                )
            # Get new merge LLM provider
            merge_llm_provider = await self.provider_manager.get_provider_by_id(
                merge_llm_provider_id,
            )
            if not merge_llm_provider:
                return jsonify(
                    Response()
                    .error(f"Merge LLM provider {merge_llm_provider_id} not found")
                    .__dict__,
                )
            # Update merge LLM provider (hot-swap; embedding stays fixed)
            self.memory_manager.merge_llm_provider = merge_llm_provider
            logger.info(f"Updated merge LLM provider to: {merge_llm_provider_id}")
            return jsonify(
                Response()
                .ok({"message": "Merge LLM provider updated successfully"})
                .__dict__,
            )
        except Exception as e:
            logger.error(f"Failed to update merge LLM provider: {e}")
            return jsonify(Response().error(str(e)).__dict__)

View File

@@ -79,6 +79,7 @@ class AstrBotDashboard:
self.persona_route = PersonaRoute(self.context, db, core_lifecycle) self.persona_route = PersonaRoute(self.context, db, core_lifecycle)
self.t2i_route = T2iRoute(self.context, core_lifecycle) self.t2i_route = T2iRoute(self.context, core_lifecycle)
self.kb_route = KnowledgeBaseRoute(self.context, core_lifecycle) self.kb_route = KnowledgeBaseRoute(self.context, core_lifecycle)
self.memory_route = MemoryRoute(self.context, db, core_lifecycle)
self.app.add_url_rule( self.app.add_url_rule(
"/api/plug/<path:subpath>", "/api/plug/<path:subpath>",

View File

@@ -293,4 +293,3 @@ function handleSidebarMouseLeave() {
} }
} }
</style> </style>

View File

@@ -301,4 +301,3 @@ export function useMessages(
toggleStreaming toggleStreaming
}; };
} }

View File

@@ -12,6 +12,7 @@
"console": "Console", "console": "Console",
"alkaid": "Alkaid Lab", "alkaid": "Alkaid Lab",
"knowledgeBase": "Knowledge Base", "knowledgeBase": "Knowledge Base",
"memory": "Long-term Memory",
"about": "About", "about": "About",
"settings": "Settings", "settings": "Settings",
"documentation": "Documentation", "documentation": "Documentation",

View File

@@ -12,6 +12,7 @@
"console": "控制台", "console": "控制台",
"alkaid": "Alkaid", "alkaid": "Alkaid",
"knowledgeBase": "知识库", "knowledgeBase": "知识库",
"memory": "长期记忆",
"about": "关于", "about": "关于",
"settings": "设置", "settings": "设置",
"documentation": "官方文档", "documentation": "官方文档",

View File

@@ -48,6 +48,11 @@ const sidebarItem: menu[] = [
icon: 'mdi-book-open-variant', icon: 'mdi-book-open-variant',
to: '/knowledge-base', to: '/knowledge-base',
}, },
{
title: 'core.navigation.memory',
icon: 'mdi-brain',
to: '/memory',
},
{ {
title: 'core.navigation.chat', title: 'core.navigation.chat',
icon: 'mdi-chat', icon: 'mdi-chat',

View File

@@ -90,6 +90,11 @@ const MainRoutes = {
} }
] ]
}, },
{
name: 'Memory',
path: '/memory',
component: () => import('@/views/MemoryPage.vue')
},
// 旧版本的知识库路由 // 旧版本的知识库路由
{ {

View File

@@ -0,0 +1,358 @@
<template>
<div class="memory-page">
<v-container fluid class="pa-0">
<!-- 页面标题 -->
<v-row class="d-flex justify-space-between align-center px-4 py-3 pb-8">
<div>
<h1 class="text-h1 font-weight-bold mb-2">
<v-icon color="black" class="me-2">mdi-brain</v-icon>{{ t('core.navigation.memory') }}
</h1>
<p class="text-subtitle-1 text-medium-emphasis mb-4">
管理长期记忆系统的配置
</p>
</div>
</v-row>
<!-- 加载状态 -->
<v-row v-if="loading">
<v-col cols="12">
<v-card>
<v-card-text class="text-center">
<v-progress-circular indeterminate color="primary"></v-progress-circular>
</v-card-text>
</v-card>
</v-col>
</v-row>
<!-- 主内容 -->
<v-row v-else>
<v-col cols="12" md="8" lg="6">
<v-card rounded="lg">
<v-card-title class="d-flex align-center">
<v-icon class="mr-2">mdi-cog</v-icon>
记忆系统配置
</v-card-title>
<v-divider></v-divider>
<v-card-text>
<!-- 状态显示 -->
<v-alert
:type="memoryStatus.initialized ? 'success' : 'info'"
variant="tonal"
class="mb-4"
>
<div class="d-flex align-center">
<v-icon class="mr-2">
{{ memoryStatus.initialized ? 'mdi-check-circle' : 'mdi-information' }}
</v-icon>
<div>
<strong>状态</strong>
{{ memoryStatus.initialized ? '已初始化' : '未初始化' }}
</div>
</div>
</v-alert>
<!-- 未初始化时显示初始化表单 -->
<div v-if="!memoryStatus.initialized">
<v-form @submit.prevent="initializeMemory">
<v-select
v-model="selectedEmbeddingProvider"
:items="embeddingProviders"
item-title="text"
item-value="value"
label="Embedding 模型 *"
hint="用于生成向量表示,初始化后不可更改"
persistent-hint
class="mb-4"
required
:disabled="initializing"
></v-select>
<v-select
v-model="selectedMergeLLM"
:items="llmProviders"
item-title="text"
item-value="value"
label="合并 LLM *"
hint="用于合并相似记忆,可在初始化后更改"
persistent-hint
class="mb-4"
required
:disabled="initializing"
></v-select>
<v-btn
type="submit"
color="primary"
:loading="initializing"
:disabled="!selectedEmbeddingProvider || !selectedMergeLLM"
block
size="large"
>
初始化记忆系统
</v-btn>
</v-form>
</div>
<!-- 已初始化时显示配置信息 -->
<div v-else>
<v-list>
<v-list-item>
<template v-slot:prepend>
<v-icon>mdi-vector-triangle</v-icon>
</template>
<v-list-item-title>Embedding 模型</v-list-item-title>
<v-list-item-subtitle>
{{ getProviderName(memoryStatus.embedding_provider_id) }}
</v-list-item-subtitle>
</v-list-item>
<v-divider class="my-2"></v-divider>
<v-list-item>
<template v-slot:prepend>
<v-icon>mdi-robot</v-icon>
</template>
<v-list-item-title>合并 LLM</v-list-item-title>
<v-list-item-subtitle>
{{ getProviderName(memoryStatus.merge_llm_provider_id) }}
</v-list-item-subtitle>
</v-list-item>
</v-list>
<v-divider class="my-4"></v-divider>
<v-form @submit.prevent="updateMergeLLM">
<v-select
v-model="newMergeLLM"
:items="llmProviders"
item-title="text"
item-value="value"
label="更新合并 LLM"
hint="可以更换用于合并记忆的 LLM"
persistent-hint
class="mb-4"
:disabled="updating"
></v-select>
<v-btn
type="submit"
color="primary"
:loading="updating"
:disabled="!newMergeLLM || newMergeLLM === memoryStatus.merge_llm_provider_id"
block
variant="tonal"
>
更新合并 LLM
</v-btn>
</v-form>
</div>
</v-card-text>
</v-card>
</v-col>
<!-- 说明卡片 -->
<v-col cols="12" md="4" lg="6">
<v-card rounded="lg">
<v-card-title class="d-flex align-center">
<v-icon class="mr-2">mdi-information</v-icon>
说明
</v-card-title>
<v-divider></v-divider>
<v-card-text>
<v-list density="compact">
<v-list-item>
<v-list-item-title class="text-wrap">
<strong>Embedding 模型</strong>用于将文本转换为向量支持语义相似度搜索
<v-chip size="x-small" color="warning" class="ml-2">不可更改</v-chip>
</v-list-item-title>
</v-list-item>
<v-list-item>
<v-list-item-title class="text-wrap">
<strong>合并 LLM</strong>当检测到相似记忆时使用此模型合并为一条记忆
<v-chip size="x-small" color="success" class="ml-2">可更改</v-chip>
</v-list-item-title>
</v-list-item>
<v-list-item>
<v-list-item-title class="text-wrap">
<strong>注意</strong>Embedding 模型一旦选择后无法更改请谨慎选择
</v-list-item-title>
</v-list-item>
</v-list>
</v-card-text>
</v-card>
</v-col>
</v-row>
</v-container>
<!-- 提示框 -->
<v-snackbar v-model="snackbar.show" :color="snackbar.color" :timeout="3000">
{{ snackbar.message }}
</v-snackbar>
</div>
</template>
<script setup lang="ts">
import { ref, onMounted } from 'vue';
import axios from 'axios';
import { useI18n } from '@/i18n/composables';
const { t } = useI18n();
// Shape of the /api/memory/status response payload.
interface MemoryStatus {
  initialized: boolean;
  embedding_provider_id: string | null;
  merge_llm_provider_id: string | null;
}
// Option entry for the provider <v-select> controls.
interface Provider {
  value: string;
  text: string;
}
// UI busy flags for initial load, initialize and update actions.
const loading = ref(true);
const initializing = ref(false);
const updating = ref(false);
// Last-known memory-system status fetched from the backend.
const memoryStatus = ref<MemoryStatus>({
  initialized: false,
  embedding_provider_id: null,
  merge_llm_provider_id: null,
});
// Selectable provider lists and the user's current selections.
const embeddingProviders = ref<Provider[]>([]);
const llmProviders = ref<Provider[]>([]);
const selectedEmbeddingProvider = ref<string>('');
const selectedMergeLLM = ref<string>('');
const newMergeLLM = ref<string>('');
// Snackbar notification state.
const snackbar = ref({
  show: false,
  message: '',
  color: 'success',
});
// Display a transient snackbar with the given text and color.
const showMessage = (message: string, color: string = 'success') => {
  Object.assign(snackbar.value, { message, color, show: true });
};
// Resolve a provider id to its display label, preferring the embedding
// list, then the LLM list, then the raw id itself.
const getProviderName = (providerId: string | null): string => {
  if (!providerId) return '未设置';
  const match = [...embeddingProviders.value, ...llmProviders.value].find(
    (p) => p.value === providerId
  );
  return match?.text || providerId;
};
// Load the embedding and chat-LLM provider lists for the selectors.
// The two requests are independent, so they are issued in parallel;
// allSettled keeps a successful list even if the other request fails.
const loadProviders = async () => {
  const listProviders = (provider_type: string) =>
    axios.get('/api/config/provider/list', { params: { provider_type } });
  const [embeddingResult, llmResult] = await Promise.allSettled([
    listProviders('embedding'),
    listProviders('chat_completion'),
  ]);
  let failure: unknown = null;
  if (embeddingResult.status === 'fulfilled') {
    const res = embeddingResult.value;
    if (res.data.status === 'ok') {
      embeddingProviders.value = (res.data.data || []).map((p: any) => ({
        value: p.id,
        text: `${p.embedding_model} (${p.id})`,
      }));
    }
  } else {
    failure = embeddingResult.reason;
  }
  if (llmResult.status === 'fulfilled') {
    const res = llmResult.value;
    if (res.data.status === 'ok') {
      llmProviders.value = (res.data.data || []).map((p: any) => ({
        value: p.id,
        text: `${p?.model_config?.model} (${p.id})`,
      }));
    }
  } else {
    failure = llmResult.reason;
  }
  if (failure !== null) {
    console.error('Failed to load providers:', failure);
    showMessage('加载提供商列表失败', 'error');
  }
};
// Fetch the current memory-system status and keep the merge-LLM
// selector in sync with the backend value.
const loadStatus = async () => {
  try {
    const { data } = await axios.get('/api/memory/status');
    if (data.status !== 'ok') return;
    memoryStatus.value = data.data;
    const currentMergeLLM = memoryStatus.value.merge_llm_provider_id;
    if (currentMergeLLM) {
      newMergeLLM.value = currentMergeLLM;
    }
  } catch (error) {
    console.error('Failed to load memory status:', error);
    showMessage('加载记忆系统状态失败', 'error');
  }
};
// Submit the chosen providers to initialize the memory system, then
// refresh the displayed status on success.
const initializeMemory = async () => {
  if (!selectedEmbeddingProvider.value || !selectedMergeLLM.value) {
    showMessage('请选择 Embedding 模型和合并 LLM', 'warning');
    return;
  }
  initializing.value = true;
  try {
    const payload = {
      embedding_provider_id: selectedEmbeddingProvider.value,
      merge_llm_provider_id: selectedMergeLLM.value,
    };
    const { data } = await axios.post('/api/memory/initialize', payload);
    if (data.status === 'ok') {
      showMessage('记忆系统初始化成功', 'success');
      await loadStatus();
    } else {
      showMessage(data.message || '初始化失败', 'error');
    }
  } catch (error: any) {
    console.error('Failed to initialize memory:', error);
    showMessage(error.response?.data?.message || '初始化失败', 'error');
  } finally {
    initializing.value = false;
  }
};
// Hot-swap the merge LLM provider on the backend and refresh status.
const updateMergeLLM = async () => {
  if (!newMergeLLM.value) {
    showMessage('请选择新的合并 LLM', 'warning');
    return;
  }
  updating.value = true;
  try {
    const payload = { merge_llm_provider_id: newMergeLLM.value };
    const { data } = await axios.post('/api/memory/update_merge_llm', payload);
    if (data.status === 'ok') {
      showMessage('合并 LLM 更新成功', 'success');
      await loadStatus();
    } else {
      showMessage(data.message || '更新失败', 'error');
    }
  } catch (error: any) {
    console.error('Failed to update merge LLM:', error);
    showMessage(error.response?.data?.message || '更新失败', 'error');
  } finally {
    updating.value = false;
  }
};
// Fetch the provider lists and memory status together before first render.
onMounted(async () => {
  loading.value = true;
  await Promise.all([loadProviders(), loadStatus()]);
  loading.value = false;
});
</script>
<style scoped>
.memory-page {
min-height: 100vh;
padding: 8px;
}
</style>