Compare commits
copilot/ad...v4.5.8 (28 commits)

Commit SHA1s:
adbb84530a, 6cf169f4f2, 5ab9ea12c0, fd9cb703db, 388c1ab16d, f867c2a271, 605bb2cb90,
5ea15dde5a, 3ca545c4c7, e200835074, 3a90348353, 5a11d8f0ee, 824af5eeea, 08ec787491,
b062e83d54, 17422ba9c3, 6849af2bad, 09c3da64f9, 2c8470e8ac, c4ea3db73d, 89e79863f6,
d19945009f, c77256ee0e, 7d823af627, 3957861878, 6ac43c600e, 27af9ebb6b, b360c8446e
```diff
@@ -36,7 +36,8 @@ from astrbot.core.star.config import *

 # provider
-from astrbot.core.provider import Provider, Personality, ProviderMetaData
+from astrbot.core.provider import Provider, ProviderMetaData
+from astrbot.core.db.po import Personality

 # platform
 from astrbot.core.platform import (
```
```diff
@@ -1,4 +1,5 @@
-from astrbot.core.provider import Personality, Provider, STTProvider
+from astrbot.core.db.po import Personality
+from astrbot.core.provider import Provider, STTProvider
 from astrbot.core.provider.entities import (
     LLMResponse,
     ProviderMetaData,
```
```diff
@@ -76,7 +76,7 @@ class ImageURLPart(ContentPart):
     """The ID of the image, to allow LLMs to distinguish different images."""

     type: str = "image_url"
-    image_url: str
+    image_url: ImageURL


 class AudioURLPart(ContentPart):
```
```diff
@@ -1,16 +1,21 @@
-from dataclasses import dataclass
 from typing import Any, Generic

+from pydantic import Field
+from pydantic.dataclasses import dataclass
 from typing_extensions import TypeVar

+from .message import Message

 TContext = TypeVar("TContext", default=Any)


-@dataclass
+@dataclass(config={"arbitrary_types_allowed": True})
 class ContextWrapper(Generic[TContext]):
     """A context for running an agent, which can be used to pass additional data or state."""

     context: TContext
+    messages: list[Message] = Field(default_factory=list)
+    """This field stores the llm message context for the agent run, agent runners will maintain this field automatically."""
     tool_call_timeout: int = 60  # Default tool call timeout in seconds

```
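The `config={"arbitrary_types_allowed": True}` argument matters here because pydantic dataclasses reject field types they cannot build a validation schema for. A minimal standalone sketch (not AstrBot code; `Engine` and `Holder` are hypothetical names):

```python
# Sketch: pydantic dataclasses raise a schema-generation error for plain
# classes like Engine unless arbitrary_types_allowed is set.
from pydantic import Field
from pydantic.dataclasses import dataclass


class Engine:
    """A plain class pydantic cannot introspect."""


@dataclass(config={"arbitrary_types_allowed": True})
class Holder:
    engine: Engine
    tags: list[str] = Field(default_factory=list)


print(Holder(engine=Engine()).tags)  # []
```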
```diff
@@ -40,6 +40,13 @@ class BaseAgentRunner(T.Generic[TContext]):
         """Process a single step of the agent."""
         ...

+    @abc.abstractmethod
+    async def step_until_done(
+        self, max_step: int
+    ) -> T.AsyncGenerator[AgentResponse, None]:
+        """Process steps until the agent is done."""
+        ...
+
     @abc.abstractmethod
     def done(self) -> bool:
         """Check if the agent has completed its task.
```
```diff
@@ -23,7 +23,7 @@ from astrbot.core.provider.entities import (
 from astrbot.core.provider.provider import Provider

 from ..hooks import BaseAgentRunHooks
-from ..message import AssistantMessageSegment, ToolCallMessageSegment
+from ..message import AssistantMessageSegment, Message, ToolCallMessageSegment
 from ..response import AgentResponseData
 from ..run_context import ContextWrapper, TContext
 from ..tool_executor import BaseFunctionToolExecutor
```
```diff
@@ -55,6 +55,20 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
         self.agent_hooks = agent_hooks
         self.run_context = run_context

+        messages = []
+        # append existing messages in the run context
+        for msg in request.contexts:
+            messages.append(Message.model_validate(msg))
+        if request.prompt is not None:
+            m = await request.assemble_context()
+            messages.append(Message.model_validate(m))
+        if request.system_prompt:
+            messages.insert(
+                0,
+                Message(role="system", content=request.system_prompt),
+            )
+        self.run_context.messages = messages
+
     def _transition_state(self, new_state: AgentState) -> None:
         """Transition the agent state."""
         if self._state != new_state:
```
```diff
@@ -96,13 +110,22 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                     type="streaming_delta",
                     data=AgentResponseData(chain=llm_response.result_chain),
                 )
-            else:
+            elif llm_response.completion_text:
                 yield AgentResponse(
                     type="streaming_delta",
                     data=AgentResponseData(
                         chain=MessageChain().message(llm_response.completion_text),
                     ),
                 )
+            elif llm_response.reasoning_content:
+                yield AgentResponse(
+                    type="streaming_delta",
+                    data=AgentResponseData(
+                        chain=MessageChain(type="reasoning").message(
+                            llm_response.reasoning_content,
+                        ),
+                    ),
+                )
             continue
         llm_resp_result = llm_response
         break  # got final response
```
```diff
@@ -130,6 +153,13 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
             # no tool calls; transition to the done state
             self.final_llm_resp = llm_resp
             self._transition_state(AgentState.DONE)
+            # record the final assistant message
+            self.run_context.messages.append(
+                Message(
+                    role="assistant",
+                    content=llm_resp.completion_text or "",
+                ),
+            )
             try:
                 await self.agent_hooks.on_agent_done(self.run_context, llm_resp)
             except Exception as e:
```
```diff
@@ -156,13 +186,16 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
             yield AgentResponse(
                 type="tool_call",
                 data=AgentResponseData(
-                    chain=MessageChain().message(f"🔨 调用工具: {tool_call_name}"),
+                    chain=MessageChain(type="tool_call").message(
+                        f"🔨 调用工具: {tool_call_name}"
+                    ),
                 ),
             )
             async for result in self._handle_function_tools(self.req, llm_resp):
                 if isinstance(result, list):
                     tool_call_result_blocks = result
                 elif isinstance(result, MessageChain):
+                    result.type = "tool_call_result"
                     yield AgentResponse(
                         type="tool_call_result",
                         data=AgentResponseData(chain=result),
```
```diff
@@ -175,8 +208,23 @@ class ToolLoopAgentRunner(BaseAgentRunner[TContext]):
                 ),
                 tool_calls_result=tool_call_result_blocks,
             )
+            # record the assistant message with tool calls
+            self.run_context.messages.extend(
+                tool_calls_result.to_openai_messages_model()
+            )
+
             self.req.append_tool_calls_result(tool_calls_result)

+    async def step_until_done(
+        self, max_step: int
+    ) -> T.AsyncGenerator[AgentResponse, None]:
+        """Process steps until the agent is done."""
+        step_count = 0
+        while not self.done() and step_count < max_step:
+            step_count += 1
+            async for resp in self.step():
+                yield resp
+
     async def _handle_function_tools(
         self,
         req: ProviderRequest,
```
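For reference, a hypothetical caller of the new `step_until_done` API (runner construction elided); it simply re-yields every `AgentResponse` that `step()` produces until the runner reports done or the step budget runs out:

```python
# Hypothetical driver loop; "runner" is assumed to be a fully reset
# ToolLoopAgentRunner instance.
async def drive(runner, max_step: int = 30) -> None:
    async for resp in runner.step_until_done(max_step):
        print(resp.type)  # e.g. "tool_call", "tool_call_result", "streaming_delta"
```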
```diff
@@ -4,12 +4,13 @@ from typing import Any, Generic
 import jsonschema
 import mcp
 from deprecated import deprecated
-from pydantic import model_validator
+from pydantic import Field, model_validator
 from pydantic.dataclasses import dataclass

 from .run_context import ContextWrapper, TContext

 ParametersType = dict[str, Any]
+ToolExecResult = str | mcp.types.CallToolResult


 @dataclass
```
```diff
@@ -55,15 +56,14 @@ class FunctionTool(ToolSchema, Generic[TContext]):
     def __repr__(self):
         return f"FuncTool(name={self.name}, parameters={self.parameters}, description={self.description})"

-    async def call(
-        self, context: ContextWrapper[TContext], **kwargs
-    ) -> str | mcp.types.CallToolResult:
+    async def call(self, context: ContextWrapper[TContext], **kwargs) -> ToolExecResult:
         """Run the tool with the given arguments. The handler field has priority."""
         raise NotImplementedError(
             "FunctionTool.call() must be implemented by subclasses or set a handler."
         )


+@dataclass
 class ToolSet:
     """A set of function tools that can be used in function calling.

```
```diff
@@ -71,8 +71,7 @@ class ToolSet:
     convert the tools to different API formats (OpenAI, Anthropic, Google GenAI).
     """

-    def __init__(self, tools: list[FunctionTool] | None = None):
-        self.tools: list[FunctionTool] = tools or []
+    tools: list[FunctionTool] = Field(default_factory=list)

     def empty(self) -> bool:
         """Check if the tool set is empty."""
```
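Replacing the hand-written `__init__` with a pydantic dataclass field keeps `ToolSet()` call sites working while avoiding a shared mutable default. A standalone sketch of the pattern (hypothetical `ToolSetSketch`):

```python
from pydantic import Field
from pydantic.dataclasses import dataclass


@dataclass
class ToolSetSketch:
    # default_factory gives each instance its own list
    tools: list = Field(default_factory=list)


a, b = ToolSetSketch(), ToolSetSketch()
a.tools.append("x")
print(b.tools)  # [] - each instance gets an independent list
```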
```diff
@@ -1,14 +1,19 @@
-from dataclasses import dataclass
+from pydantic import Field
+from pydantic.dataclasses import dataclass

+from astrbot.core.agent.run_context import ContextWrapper
 from astrbot.core.platform.astr_message_event import AstrMessageEvent
-from astrbot.core.provider import Provider
-from astrbot.core.provider.entities import ProviderRequest
+from astrbot.core.star.context import Context


-@dataclass
+@dataclass(config={"arbitrary_types_allowed": True})
 class AstrAgentContext:
-    provider: Provider
-    first_provider_request: ProviderRequest
-    curr_provider_request: ProviderRequest
-    streaming: bool
+    context: Context
+    """The star context instance"""
     event: AstrMessageEvent
+    """The message event associated with the agent context."""
+    extra: dict[str, str] = Field(default_factory=dict)
+    """Customized extra data."""
+
+
+AgentContextWrapper = ContextWrapper[AstrAgentContext]
```
astrbot/core/astr_agent_hooks.py (new file, 36 lines):

```python
from typing import Any

from mcp.types import CallToolResult

from astrbot.core.agent.hooks import BaseAgentRunHooks
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.tool import FunctionTool
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.pipeline.context_utils import call_event_hook
from astrbot.core.star.star_handler import EventType


class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
    async def on_agent_done(self, run_context, llm_response):
        # run the event hooks
        await call_event_hook(
            run_context.context.event,
            EventType.OnLLMResponseEvent,
            llm_response,
        )

    async def on_tool_end(
        self,
        run_context: ContextWrapper[AstrAgentContext],
        tool: FunctionTool[Any],
        tool_args: dict | None,
        tool_result: CallToolResult | None,
    ):
        run_context.context.event.clear_result()


class EmptyAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
    pass


MAIN_AGENT_HOOKS = MainAgentHooks()
```
astrbot/core/astr_agent_run_util.py (new file, 80 lines):

```python
import traceback
from collections.abc import AsyncGenerator

from astrbot.core import logger
from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.message.message_event_result import (
    MessageChain,
    MessageEventResult,
    ResultContentType,
)

AgentRunner = ToolLoopAgentRunner[AstrAgentContext]


async def run_agent(
    agent_runner: AgentRunner,
    max_step: int = 30,
    show_tool_use: bool = True,
    stream_to_general: bool = False,
    show_reasoning: bool = False,
) -> AsyncGenerator[MessageChain | None, None]:
    step_idx = 0
    astr_event = agent_runner.run_context.context.event
    while step_idx < max_step:
        step_idx += 1
        try:
            async for resp in agent_runner.step():
                if astr_event.is_stopped():
                    return
                if resp.type == "tool_call_result":
                    msg_chain = resp.data["chain"]
                    if msg_chain.type == "tool_direct_result":
                        # tool_direct_result marks content an llm tool wants
                        # sent directly to the user
                        await astr_event.send(resp.data["chain"])
                        continue
                    # other cases are not handled for now
                    continue
                elif resp.type == "tool_call":
                    if agent_runner.streaming:
                        # mark that the streaming response needs a new segment
                        yield MessageChain(chain=[], type="break")
                    if show_tool_use:
                        await astr_event.send(resp.data["chain"])
                    continue

                if stream_to_general and resp.type == "streaming_delta":
                    continue

                if stream_to_general or not agent_runner.streaming:
                    content_typ = (
                        ResultContentType.LLM_RESULT
                        if resp.type == "llm_result"
                        else ResultContentType.GENERAL_RESULT
                    )
                    astr_event.set_result(
                        MessageEventResult(
                            chain=resp.data["chain"].chain,
                            result_content_type=content_typ,
                        ),
                    )
                    yield
                    astr_event.clear_result()
                elif resp.type == "streaming_delta":
                    chain = resp.data["chain"]
                    if chain.type == "reasoning" and not show_reasoning:
                        # display the reasoning content only when configured
                        continue
                    yield resp.data["chain"]  # MessageChain
            if agent_runner.done():
                break

        except Exception as e:
            logger.error(traceback.format_exc())
            err_msg = f"\n\nAstrBot 请求失败。\n错误类型: {type(e).__name__}\n错误信息: {e!s}\n\n请在控制台查看和分享错误详情。\n"
            if agent_runner.streaming:
                yield MessageChain().message(err_msg)
            else:
                astr_event.set_result(MessageEventResult().message(err_msg))
            return
```
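A hedged sketch of how a caller might consume `run_agent`: a yielded `None` signals that a non-streaming result was already set on the event, while a `MessageChain` is a streaming delta to forward.

```python
# Hypothetical consumer; agent_runner is assumed to be a reset AgentRunner.
from astrbot.core.astr_agent_run_util import run_agent


async def consume(agent_runner) -> None:
    async for chain in run_agent(agent_runner, max_step=30, show_tool_use=True):
        if chain is None:
            continue  # result already delivered via astr_event.set_result()
        # forward the MessageChain delta to the platform here
        print(chain.type)
```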
astrbot/core/astr_agent_tool_exec.py (new file, 246 lines):

```python
import asyncio
import inspect
import traceback
import typing as T

import mcp

from astrbot import logger
from astrbot.core.agent.handoff import HandoffTool
from astrbot.core.agent.mcp_client import MCPTool
from astrbot.core.agent.run_context import ContextWrapper
from astrbot.core.agent.tool import FunctionTool, ToolSet
from astrbot.core.agent.tool_executor import BaseFunctionToolExecutor
from astrbot.core.astr_agent_context import AstrAgentContext
from astrbot.core.message.message_event_result import (
    CommandResult,
    MessageChain,
    MessageEventResult,
)
from astrbot.core.provider.register import llm_tools


class FunctionToolExecutor(BaseFunctionToolExecutor[AstrAgentContext]):
    @classmethod
    async def execute(cls, tool, run_context, **tool_args):
        """Execute a function call.

        Args:
            event (AstrMessageEvent): the event object; required when origin is local.
            **kwargs: arguments for the function call.

        Returns:
            AsyncGenerator[None | mcp.types.CallToolResult, None]

        """
        if isinstance(tool, HandoffTool):
            async for r in cls._execute_handoff(tool, run_context, **tool_args):
                yield r
            return

        elif isinstance(tool, MCPTool):
            async for r in cls._execute_mcp(tool, run_context, **tool_args):
                yield r
            return

        else:
            async for r in cls._execute_local(tool, run_context, **tool_args):
                yield r
            return

    @classmethod
    async def _execute_handoff(
        cls,
        tool: HandoffTool,
        run_context: ContextWrapper[AstrAgentContext],
        **tool_args,
    ):
        input_ = tool_args.get("input")

        # make toolset for the agent
        tools = tool.agent.tools
        if tools:
            toolset = ToolSet()
            for t in tools:
                if isinstance(t, str):
                    _t = llm_tools.get_func(t)
                    if _t:
                        toolset.add_tool(_t)
                elif isinstance(t, FunctionTool):
                    toolset.add_tool(t)
        else:
            toolset = None

        ctx = run_context.context.context
        event = run_context.context.event
        umo = event.unified_msg_origin
        prov_id = await ctx.get_current_chat_provider_id(umo)
        llm_resp = await ctx.tool_loop_agent(
            event=event,
            chat_provider_id=prov_id,
            prompt=input_,
            system_prompt=tool.agent.instructions,
            tools=toolset,
            max_steps=30,
            run_hooks=tool.agent.run_hooks,
        )
        yield mcp.types.CallToolResult(
            content=[mcp.types.TextContent(type="text", text=llm_resp.completion_text)]
        )

    @classmethod
    async def _execute_local(
        cls,
        tool: FunctionTool,
        run_context: ContextWrapper[AstrAgentContext],
        **tool_args,
    ):
        event = run_context.context.event
        if not event:
            raise ValueError("Event must be provided for local function tools.")

        is_override_call = False
        for ty in type(tool).mro():
            if "call" in ty.__dict__ and ty.__dict__["call"] is not FunctionTool.call:
                is_override_call = True
                break

        # check whether the tool has a run method
        if not tool.handler and not hasattr(tool, "run") and not is_override_call:
            raise ValueError("Tool must have a valid handler or override 'run' method.")

        awaitable = None
        method_name = ""
        if tool.handler:
            awaitable = tool.handler
            method_name = "decorator_handler"
        elif is_override_call:
            awaitable = tool.call
            method_name = "call"
        elif hasattr(tool, "run"):
            awaitable = getattr(tool, "run")
            method_name = "run"
        if awaitable is None:
            raise ValueError("Tool must have a valid handler or override 'run' method.")

        wrapper = call_local_llm_tool(
            context=run_context,
            handler=awaitable,
            method_name=method_name,
            **tool_args,
        )
        while True:
            try:
                resp = await asyncio.wait_for(
                    anext(wrapper),
                    timeout=run_context.tool_call_timeout,
                )
                if resp is not None:
                    if isinstance(resp, mcp.types.CallToolResult):
                        yield resp
                    else:
                        text_content = mcp.types.TextContent(
                            type="text",
                            text=str(resp),
                        )
                        yield mcp.types.CallToolResult(content=[text_content])
                else:
                    # NOTE: the tool asked to send a message directly to the user here
                    # TODO: should we check whether event.get_result() is empty?
                    # if empty, nothing was sent to the user and the return value is
                    # empty; a special TextContent such as "the tool returned no
                    # content" would be returned
                    if res := run_context.context.event.get_result():
                        if res.chain:
                            try:
                                await event.send(
                                    MessageChain(
                                        chain=res.chain,
                                        type="tool_direct_result",
                                    )
                                )
                            except Exception as e:
                                logger.error(
                                    f"Tool 直接发送消息失败: {e}",
                                    exc_info=True,
                                )
                    yield None
            except asyncio.TimeoutError:
                raise Exception(
                    f"tool {tool.name} execution timeout after {run_context.tool_call_timeout} seconds.",
                )
            except StopAsyncIteration:
                break

    @classmethod
    async def _execute_mcp(
        cls,
        tool: FunctionTool,
        run_context: ContextWrapper[AstrAgentContext],
        **tool_args,
    ):
        res = await tool.call(run_context, **tool_args)
        if not res:
            return
        yield res


async def call_local_llm_tool(
    context: ContextWrapper[AstrAgentContext],
    handler: T.Callable[..., T.Awaitable[T.Any]],
    method_name: str,
    *args,
    **kwargs,
) -> T.AsyncGenerator[T.Any, None]:
    """Run a local LLM tool handler and process its return value."""
    ready_to_call = None  # a coroutine or an async generator

    trace_ = None

    event = context.context.event

    try:
        if method_name == "run" or method_name == "decorator_handler":
            ready_to_call = handler(event, *args, **kwargs)
        elif method_name == "call":
            ready_to_call = handler(context, *args, **kwargs)
        else:
            raise ValueError(f"未知的方法名: {method_name}")
    except ValueError as e:
        logger.error(f"调用本地 LLM 工具时出错: {e}", exc_info=True)
    except TypeError:
        logger.error("处理函数参数不匹配,请检查 handler 的定义。", exc_info=True)
    except Exception as e:
        trace_ = traceback.format_exc()
        logger.error(f"调用本地 LLM 工具时出错: {e}\n{trace_}")

    if not ready_to_call:
        return

    if inspect.isasyncgen(ready_to_call):
        _has_yielded = False
        try:
            async for ret in ready_to_call:
                # step the async generator; handle each yielded ret below
                # (the yield value must be a MessageEventResult or None)
                _has_yielded = True
                if isinstance(ret, (MessageEventResult, CommandResult)):
                    # a MessageEventResult: set it as the result and continue
                    event.set_result(ret)
                    yield
                else:
                    # None: do not set a result; let the following stages run
                    yield ret
            if not _has_yielded:
                # the async generator never reached a yield branch
                yield
        except Exception as e:
            logger.error(f"Previous Error: {trace_}")
            raise e
    elif inspect.iscoroutine(ready_to_call):
        # a plain coroutine; just await it
        ret = await ready_to_call
        if isinstance(ret, (MessageEventResult, CommandResult)):
            event.set_result(ret)
            yield
        else:
            yield ret
```
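The timeout handling in `_execute_local` wraps each `anext()` of the tool's async generator in `asyncio.wait_for`, so every yield is bounded, not just the first. A minimal runnable sketch of that pattern (toy generator, not AstrBot code):

```python
import asyncio


async def ticks():
    for i in range(3):
        await asyncio.sleep(0.01)
        yield i


async def main() -> None:
    gen = ticks()
    while True:
        try:
            # each item must arrive within the timeout, mirroring tool_call_timeout
            item = await asyncio.wait_for(anext(gen), timeout=1.0)
            print(item)
        except StopAsyncIteration:
            break


asyncio.run(main())
```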
```diff
@@ -4,7 +4,7 @@ import os
 from astrbot.core.utils.astrbot_path import get_astrbot_data_path

-VERSION = "4.5.6"
+VERSION = "4.5.8"
 DB_PATH = os.path.join(get_astrbot_data_path(), "data_v4.db")

 # default configuration
```
```diff
@@ -68,7 +68,7 @@ DEFAULT_CONFIG = {
         "dequeue_context_length": 1,
         "streaming_response": False,
         "show_tool_use_status": False,
-        "streaming_segmented": False,
+        "unsupported_streaming_strategy": "realtime_segmenting",
         "max_agent_step": 30,
         "tool_call_timeout": 60,
     },
```
```diff
@@ -880,6 +880,23 @@ CONFIG_METADATA_2 = {
             "custom_extra_body": {},
             "modalities": ["text", "tool_use"],
         },
+        "Groq": {
+            "id": "groq_default",
+            "provider": "groq",
+            "type": "groq_chat_completion",
+            "provider_type": "chat_completion",
+            "enable": True,
+            "key": [],
+            "api_base": "https://api.groq.com/openai/v1",
+            "timeout": 120,
+            "model_config": {
+                "model": "openai/gpt-oss-20b",
+                "temperature": 0.4,
+            },
+            "custom_headers": {},
+            "custom_extra_body": {},
+            "modalities": ["text", "tool_use"],
+        },
         "302.AI": {
             "id": "302ai",
             "provider": "302ai",
```
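The new Groq template points `api_base` at Groq's OpenAI-compatible endpoint, so any OpenAI-style client can exercise the same configuration. A hedged sketch using the `openai` package (the key is a placeholder):

```python
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_GROQ_KEY",  # placeholder
    base_url="https://api.groq.com/openai/v1",
)
resp = client.chat.completions.create(
    model="openai/gpt-oss-20b",
    messages=[{"role": "user", "content": "ping"}],
)
print(resp.choices[0].message.content)
```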
```diff
@@ -1993,8 +2010,8 @@ CONFIG_METADATA_2 = {
         "show_tool_use_status": {
             "type": "bool",
         },
-        "streaming_segmented": {
-            "type": "bool",
+        "unsupported_streaming_strategy": {
+            "type": "string",
         },
         "max_agent_step": {
             "description": "工具调用轮数上限",
```
```diff
@@ -2299,9 +2316,15 @@ CONFIG_METADATA_3 = {
         "description": "流式回复",
         "type": "bool",
     },
-    "provider_settings.streaming_segmented": {
-        "description": "不支持流式回复的平台采取分段输出",
-        "type": "bool",
+    "provider_settings.unsupported_streaming_strategy": {
+        "description": "不支持流式回复的平台",
+        "type": "string",
+        "options": ["realtime_segmenting", "turn_off"],
+        "hint": "选择在不支持流式回复的平台上的处理方式。实时分段回复会在系统接收流式响应检测到诸如标点符号等分段点时,立即发送当前已接收的内容",
+        "labels": ["实时分段回复", "关闭流式回复"],
+        "condition": {
+            "provider_settings.streaming_response": True,
+        },
     },
     "provider_settings.max_context_length": {
         "description": "最多携带对话轮数",
```
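A hedged sketch of how a sender might branch on the new option; the helper name and wiring are hypothetical, but the two option values come from the metadata above:

```python
def resolve_delivery(platform_supports_streaming: bool, strategy: str) -> str:
    """Pick a delivery mode on platforms that cannot stream."""
    if platform_supports_streaming:
        return "stream"
    # "realtime_segmenting" flushes buffered text at segment boundaries
    # (e.g. punctuation); "turn_off" falls back to one non-streaming reply.
    return "segmented" if strategy == "realtime_segmenting" else "plain"
```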
astrbot/core/exceptions.py (new file, 9 lines):

```python
from __future__ import annotations


class AstrBotError(Exception):
    """Base exception for all AstrBot errors."""


class ProviderNotFoundError(AstrBotError):
    """Raised when a specified provider is not found."""
```
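A hedged usage sketch for the new hierarchy (the lookup helper is hypothetical); catching `AstrBotError` at a boundary covers every subclass:

```python
from astrbot.core.exceptions import AstrBotError, ProviderNotFoundError


def get_provider(providers: dict, pid: str):
    if pid not in providers:
        raise ProviderNotFoundError(f"provider {pid} not found")
    return providers[pid]


try:
    get_provider({}, "missing")
except AstrBotError as e:  # ProviderNotFoundError is caught here too
    print(e)
```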
```diff
@@ -3,7 +3,7 @@ from dataclasses import dataclass
 from astrbot.core.config import AstrBotConfig
 from astrbot.core.star import PluginManager

-from .context_utils import call_event_hook, call_handler, call_local_llm_tool
+from .context_utils import call_event_hook, call_handler


 @dataclass
```
```diff
@@ -15,4 +15,3 @@ class PipelineContext:
     astrbot_config_id: str
     call_handler = call_handler
     call_event_hook = call_event_hook
-    call_local_llm_tool = call_local_llm_tool
```
```diff
@@ -3,8 +3,6 @@ import traceback
 import typing as T

 from astrbot import logger
-from astrbot.core.agent.run_context import ContextWrapper
-from astrbot.core.astr_agent_context import AstrAgentContext
 from astrbot.core.message.message_event_result import CommandResult, MessageEventResult
 from astrbot.core.platform.astr_message_event import AstrMessageEvent
 from astrbot.core.star.star import star_map
```
```diff
@@ -107,66 +105,3 @@
             return True

     return event.is_stopped()
-
-
-async def call_local_llm_tool(
-    context: ContextWrapper[AstrAgentContext],
-    handler: T.Callable[..., T.Awaitable[T.Any]],
-    method_name: str,
-    *args,
-    **kwargs,
-) -> T.AsyncGenerator[T.Any, None]:
-    """Run a local LLM tool handler and process its return value."""
-    ready_to_call = None  # a coroutine or an async generator
-
-    trace_ = None
-
-    event = context.context.event
-
-    try:
-        if method_name == "run" or method_name == "decorator_handler":
-            ready_to_call = handler(event, *args, **kwargs)
-        elif method_name == "call":
-            ready_to_call = handler(context, *args, **kwargs)
-        else:
-            raise ValueError(f"未知的方法名: {method_name}")
-    except ValueError as e:
-        logger.error(f"调用本地 LLM 工具时出错: {e}", exc_info=True)
-    except TypeError:
-        logger.error("处理函数参数不匹配,请检查 handler 的定义。", exc_info=True)
-    except Exception as e:
-        trace_ = traceback.format_exc()
-        logger.error(f"调用本地 LLM 工具时出错: {e}\n{trace_}")
-
-    if not ready_to_call:
-        return
-
-    if inspect.isasyncgen(ready_to_call):
-        _has_yielded = False
-        try:
-            async for ret in ready_to_call:
-                # step the async generator; handle each yielded ret below
-                # (the yield value must be a MessageEventResult or None)
-                _has_yielded = True
-                if isinstance(ret, (MessageEventResult, CommandResult)):
-                    # a MessageEventResult: set it as the result and continue
-                    event.set_result(ret)
-                    yield
-                else:
-                    # None: do not set a result; let the following stages run
-                    yield ret
-            if not _has_yielded:
-                # the async generator never reached a yield branch
-                yield
-        except Exception as e:
-            logger.error(f"Previous Error: {trace_}")
-            raise e
-    elif inspect.iscoroutine(ready_to_call):
-        # a plain coroutine; just await it
-        ret = await ready_to_call
-        if isinstance(ret, (MessageEventResult, CommandResult)):
-            event.set_result(ret)
-            yield
-        else:
-            yield ret
```
```diff
@@ -3,20 +3,10 @@
 import asyncio
 import copy
 import json
-import traceback
 from collections.abc import AsyncGenerator
-from typing import Any
-
-from mcp.types import CallToolResult

 from astrbot.core import logger
-from astrbot.core.agent.handoff import HandoffTool
-from astrbot.core.agent.hooks import BaseAgentRunHooks
-from astrbot.core.agent.mcp_client import MCPTool
-from astrbot.core.agent.run_context import ContextWrapper
-from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
-from astrbot.core.agent.tool import FunctionTool, ToolSet
-from astrbot.core.agent.tool_executor import BaseFunctionToolExecutor
+from astrbot.core.agent.tool import ToolSet
 from astrbot.core.astr_agent_context import AstrAgentContext
 from astrbot.core.conversation_mgr import Conversation
 from astrbot.core.message.components import Image
```
```diff
@@ -31,324 +21,19 @@ from astrbot.core.provider.entities import (
     LLMResponse,
     ProviderRequest,
 )
-from astrbot.core.provider.register import llm_tools
 from astrbot.core.star.session_llm_manager import SessionServiceManager
 from astrbot.core.star.star_handler import EventType, star_map
 from astrbot.core.utils.metrics import Metric
+from astrbot.core.utils.session_lock import session_lock_manager

-from ...context import PipelineContext, call_event_hook, call_local_llm_tool
+from ....astr_agent_context import AgentContextWrapper
+from ....astr_agent_hooks import MAIN_AGENT_HOOKS
+from ....astr_agent_run_util import AgentRunner, run_agent
+from ....astr_agent_tool_exec import FunctionToolExecutor
+from ...context import PipelineContext, call_event_hook
 from ..stage import Stage
 from ..utils import inject_kb_context

-try:
-    import mcp
-except (ModuleNotFoundError, ImportError):
-    logger.warning("警告: 缺少依赖库 'mcp',将无法使用 MCP 服务。")
-
-
-AgentContextWrapper = ContextWrapper[AstrAgentContext]
-AgentRunner = ToolLoopAgentRunner[AstrAgentContext]
-
-
-class FunctionToolExecutor(BaseFunctionToolExecutor[AstrAgentContext]):
-    @classmethod
-    async def execute(cls, tool, run_context, **tool_args):
-        """Execute a function call.
-
-        Args:
-            event (AstrMessageEvent): the event object; required when origin is local.
-            **kwargs: arguments for the function call.
-
-        Returns:
-            AsyncGenerator[None | mcp.types.CallToolResult, None]
-
-        """
-        if isinstance(tool, HandoffTool):
-            async for r in cls._execute_handoff(tool, run_context, **tool_args):
-                yield r
-            return
-
-        elif isinstance(tool, MCPTool):
-            async for r in cls._execute_mcp(tool, run_context, **tool_args):
-                yield r
-            return
-
-        else:
-            async for r in cls._execute_local(tool, run_context, **tool_args):
-                yield r
-            return
-
-    @classmethod
-    async def _execute_handoff(
-        cls,
-        tool: HandoffTool,
-        run_context: ContextWrapper[AstrAgentContext],
-        **tool_args,
-    ):
-        input_ = tool_args.get("input", "agent")
-        agent_runner = AgentRunner()
-
-        # make toolset for the agent
-        tools = tool.agent.tools
-        if tools:
-            toolset = ToolSet()
-            for t in tools:
-                if isinstance(t, str):
-                    _t = llm_tools.get_func(t)
-                    if _t:
-                        toolset.add_tool(_t)
-                elif isinstance(t, FunctionTool):
-                    toolset.add_tool(t)
-        else:
-            toolset = None
-
-        request = ProviderRequest(
-            prompt=input_,
-            system_prompt=tool.description or "",
-            image_urls=[],  # do not pass the original agent's context for now
-            contexts=[],  # do not pass the original agent's context for now
-            func_tool=toolset,
-        )
-        astr_agent_ctx = AstrAgentContext(
-            provider=run_context.context.provider,
-            first_provider_request=run_context.context.first_provider_request,
-            curr_provider_request=request,
-            streaming=run_context.context.streaming,
-            event=run_context.context.event,
-        )
-
-        event = run_context.context.event
-
-        logger.debug(f"正在将任务委托给 Agent: {tool.agent.name}, input: {input_}")
-        await event.send(
-            MessageChain().message("✨ 正在将任务委托给 Agent: " + tool.agent.name),
-        )
-
-        await agent_runner.reset(
-            provider=run_context.context.provider,
-            request=request,
-            run_context=AgentContextWrapper(
-                context=astr_agent_ctx,
-                tool_call_timeout=run_context.tool_call_timeout,
-            ),
-            tool_executor=FunctionToolExecutor(),
-            agent_hooks=tool.agent.run_hooks or BaseAgentRunHooks[AstrAgentContext](),
-            streaming=run_context.context.streaming,
-        )
-
-        async for _ in run_agent(agent_runner, 15, True):
-            pass
-
-        if agent_runner.done():
-            llm_response = agent_runner.get_final_llm_resp()
-
-            if not llm_response:
-                text_content = mcp.types.TextContent(
-                    type="text",
-                    text=f"error when deligate task to {tool.agent.name}",
-                )
-                yield mcp.types.CallToolResult(content=[text_content])
-                return
-
-            logger.debug(
-                f"Agent {tool.agent.name} 任务完成, response: {llm_response.completion_text}",
-            )
-
-            result = (
-                f"Agent {tool.agent.name} respond with: {llm_response.completion_text}\n\n"
-                "Note: If the result is error or need user provide more information, please provide more information to the agent(you can ask user for more information first)."
-            )
-
-            text_content = mcp.types.TextContent(
-                type="text",
-                text=result,
-            )
-            yield mcp.types.CallToolResult(content=[text_content])
-        else:
-            text_content = mcp.types.TextContent(
-                type="text",
-                text=f"error when deligate task to {tool.agent.name}",
-            )
-            yield mcp.types.CallToolResult(content=[text_content])
-        return
-
-    @classmethod
-    async def _execute_local(
-        cls,
-        tool: FunctionTool,
-        run_context: ContextWrapper[AstrAgentContext],
-        **tool_args,
-    ):
-        event = run_context.context.event
-        if not event:
-            raise ValueError("Event must be provided for local function tools.")
-
-        is_override_call = False
-        for ty in type(tool).mro():
-            if "call" in ty.__dict__ and ty.__dict__["call"] is not FunctionTool.call:
-                logger.debug(f"Found call in: {ty}")
-                is_override_call = True
-                break
-
-        # check whether the tool has a run method
-        if not tool.handler and not hasattr(tool, "run") and not is_override_call:
-            raise ValueError("Tool must have a valid handler or override 'run' method.")
-
-        awaitable = None
-        method_name = ""
-        if tool.handler:
-            awaitable = tool.handler
-            method_name = "decorator_handler"
-        elif is_override_call:
-            awaitable = tool.call
-            method_name = "call"
-        elif hasattr(tool, "run"):
-            awaitable = getattr(tool, "run")
-            method_name = "run"
-        if awaitable is None:
-            raise ValueError("Tool must have a valid handler or override 'run' method.")
-
-        wrapper = call_local_llm_tool(
-            context=run_context,
-            handler=awaitable,
-            method_name=method_name,
-            **tool_args,
-        )
-        while True:
-            try:
-                resp = await asyncio.wait_for(
-                    anext(wrapper),
-                    timeout=run_context.tool_call_timeout,
-                )
-                if resp is not None:
-                    if isinstance(resp, mcp.types.CallToolResult):
-                        yield resp
-                    else:
-                        text_content = mcp.types.TextContent(
-                            type="text",
-                            text=str(resp),
-                        )
-                        yield mcp.types.CallToolResult(content=[text_content])
-                else:
-                    # NOTE: the tool asked to send a message directly to the user here
-                    # TODO: should we check whether event.get_result() is empty?
-                    # if empty, nothing was sent to the user and the return value is
-                    # empty; a special TextContent such as "the tool returned no
-                    # content" would be returned
-                    if res := run_context.context.event.get_result():
-                        if res.chain:
-                            try:
-                                await event.send(
-                                    MessageChain(
-                                        chain=res.chain,
-                                        type="tool_direct_result",
-                                    )
-                                )
-                            except Exception as e:
-                                logger.error(
-                                    f"Tool 直接发送消息失败: {e}",
-                                    exc_info=True,
-                                )
-                    yield None
-            except asyncio.TimeoutError:
-                raise Exception(
-                    f"tool {tool.name} execution timeout after {run_context.tool_call_timeout} seconds.",
-                )
-            except StopAsyncIteration:
-                break
-
-    @classmethod
-    async def _execute_mcp(
-        cls,
-        tool: FunctionTool,
-        run_context: ContextWrapper[AstrAgentContext],
-        **tool_args,
-    ):
-        res = await tool.call(run_context, **tool_args)
-        if not res:
-            return
-        yield res
-
-
-class MainAgentHooks(BaseAgentRunHooks[AstrAgentContext]):
-    async def on_agent_done(self, run_context, llm_response):
-        # run the event hooks
-        await call_event_hook(
-            run_context.context.event,
-            EventType.OnLLMResponseEvent,
-            llm_response,
-        )
-
-    async def on_tool_end(
-        self,
-        run_context: ContextWrapper[AstrAgentContext],
-        tool: FunctionTool[Any],
-        tool_args: dict | None,
-        tool_result: CallToolResult | None,
-    ):
-        run_context.context.event.clear_result()
-
-
-MAIN_AGENT_HOOKS = MainAgentHooks()
-
-
-async def run_agent(
-    agent_runner: AgentRunner,
-    max_step: int = 30,
-    show_tool_use: bool = True,
-) -> AsyncGenerator[MessageChain, None]:
-    step_idx = 0
-    astr_event = agent_runner.run_context.context.event
-    while step_idx < max_step:
-        step_idx += 1
-        try:
-            async for resp in agent_runner.step():
-                if astr_event.is_stopped():
-                    return
-                if resp.type == "tool_call_result":
-                    msg_chain = resp.data["chain"]
-                    if msg_chain.type == "tool_direct_result":
-                        # tool_direct_result marks content an llm tool wants
-                        # sent directly to the user
-                        resp.data["chain"].type = "tool_call_result"
-                        await astr_event.send(resp.data["chain"])
-                        continue
-                    # other cases are not handled for now
-                    continue
-                elif resp.type == "tool_call":
-                    if agent_runner.streaming:
-                        # mark that the streaming response needs a new segment
-                        yield MessageChain(chain=[], type="break")
-                    if show_tool_use or astr_event.get_platform_name() == "webchat":
-                        resp.data["chain"].type = "tool_call"
-                        await astr_event.send(resp.data["chain"])
-                    continue
-
-                if not agent_runner.streaming:
-                    content_typ = (
-                        ResultContentType.LLM_RESULT
-                        if resp.type == "llm_result"
-                        else ResultContentType.GENERAL_RESULT
-                    )
-                    astr_event.set_result(
-                        MessageEventResult(
-                            chain=resp.data["chain"].chain,
-                            result_content_type=content_typ,
-                        ),
-                    )
-                    yield
-                    astr_event.clear_result()
-                elif resp.type == "streaming_delta":
-                    yield resp.data["chain"]  # MessageChain
-            if agent_runner.done():
-                break
-
-        except Exception as e:
-            logger.error(traceback.format_exc())
-            err_msg = f"\n\nAstrBot 请求失败。\n错误类型: {type(e).__name__}\n错误信息: {e!s}\n\n请在控制台查看和分享错误详情。\n"
-            if agent_runner.streaming:
-                yield MessageChain().message(err_msg)
-            else:
-                astr_event.set_result(MessageEventResult().message(err_msg))
-            return
-
-
 class LLMRequestSubStage(Stage):
     async def initialize(self, ctx: PipelineContext) -> None:
```
```diff
@@ -363,11 +48,15 @@ class LLMRequestSubStage(Stage):
             self.max_context_length - 1,
         )
         self.streaming_response: bool = settings["streaming_response"]
+        self.unsupported_streaming_strategy: str = settings[
+            "unsupported_streaming_strategy"
+        ]
         self.max_step: int = settings.get("max_agent_step", 30)
         self.tool_call_timeout: int = settings.get("tool_call_timeout", 60)
         if isinstance(self.max_step, bool):  # workaround: #2622
             self.max_step = 30
         self.show_tool_use: bool = settings.get("show_tool_use_status", True)
+        self.show_reasoning = settings.get("display_reasoning_text", False)

         for bwp in self.bot_wake_prefixs:
             if self.provider_wake_prefix.startswith(bwp):
```
```diff
@@ -406,67 +95,12 @@ class LLMRequestSubStage(Stage):
             raise RuntimeError("无法创建新的对话。")
         return conversation

-    async def process(
+    async def _apply_kb_context(
         self,
         event: AstrMessageEvent,
-        _nested: bool = False,
-    ) -> None | AsyncGenerator[None, None]:
-        req: ProviderRequest | None = None
-
-        if not self.ctx.astrbot_config["provider_settings"]["enable"]:
-            logger.debug("未启用 LLM 能力,跳过处理。")
-            return
-
-        # check the session-level LLM on/off state
-        if not SessionServiceManager.should_process_llm_request(event):
-            logger.debug(f"会话 {event.unified_msg_origin} 禁用了 LLM,跳过处理。")
-            return
-
-        provider = self._select_provider(event)
-        if provider is None:
-            return
-        if not isinstance(provider, Provider):
-            logger.error(f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。")
-            return
-
-        streaming_response = self.streaming_response
-        if (enable_streaming := event.get_extra("enable_streaming")) is not None:
-            streaming_response = bool(enable_streaming)
-
-        if event.get_extra("provider_request"):
-            req = event.get_extra("provider_request")
-            assert isinstance(req, ProviderRequest), (
-                "provider_request 必须是 ProviderRequest 类型。"
-            )
-            if req.conversation:
-                req.contexts = json.loads(req.conversation.history)
-        else:
-            req = ProviderRequest(prompt="", image_urls=[])
-            if sel_model := event.get_extra("selected_model"):
-                req.model = sel_model
-            if self.provider_wake_prefix:
-                if not event.message_str.startswith(self.provider_wake_prefix):
-                    return
-                req.prompt = event.message_str[len(self.provider_wake_prefix) :]
-            # func_tool selection has moved into the packages/astrbot plugin.
-            # req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
-            for comp in event.message_obj.message:
-                if isinstance(comp, Image):
-                    image_path = await comp.convert_to_file_path()
-                    req.image_urls.append(image_path)
-
-            conversation = await self._get_session_conv(event)
-            req.conversation = conversation
-            req.contexts = json.loads(conversation.history)
-
-        event.set_extra("provider_request", req)
-
-        if not req.prompt and not req.image_urls:
-            return
-
-        # apply the knowledge base
+        req: ProviderRequest,
+    ):
+        """Apply knowledge-base context to the request."""
         try:
             await inject_kb_context(
                 umo=event.unified_msg_origin,
```
```diff
@@ -476,43 +110,40 @@ class LLMRequestSubStage(Stage):
         except Exception as e:
             logger.error(f"调用知识库时遇到问题: {e}")

-        # run the pre-LLM-request event hooks.
-        if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
-            return
-
-        if isinstance(req.contexts, str):
-            req.contexts = json.loads(req.contexts)
-
-        # max context length
-        if (
-            self.max_context_length != -1  # -1 means unlimited
-            and len(req.contexts) // 2 > self.max_context_length
-        ):
-            logger.debug("上下文长度超过限制,将截断。")
-            req.contexts = req.contexts[
-                -(self.max_context_length - self.dequeue_context_length + 1) * 2 :
-            ]
-            # find the first index whose role is "user" to keep the context well-formed
-            index = next(
-                (
-                    i
-                    for i, item in enumerate(req.contexts)
-                    if item.get("role") == "user"
-                ),
-                None,
-            )
-            if index is not None and index > 0:
-                req.contexts = req.contexts[index:]
-
-        # session_id
-        if not req.session_id:
-            req.session_id = event.unified_msg_origin
-
-        # fix messages
-        req.contexts = self.fix_messages(req.contexts)
-
-        # check provider modalities
-        # if the provider does not support images/tool use but the request carries
-        # them, clear them; image-captioning detection and calls happen before this
+    def _truncate_contexts(
+        self,
+        contexts: list[dict],
+    ) -> list[dict]:
+        """Truncate the context list so it does not exceed the maximum length."""
+        if self.max_context_length == -1:
+            return contexts
+
+        if len(contexts) // 2 <= self.max_context_length:
+            return contexts
+
+        truncated_contexts = contexts[
+            -(self.max_context_length - self.dequeue_context_length + 1) * 2 :
+        ]
+        # find the first index whose role is "user" to keep the context well-formed
+        index = next(
+            (
+                i
+                for i, item in enumerate(truncated_contexts)
+                if item.get("role") == "user"
+            ),
+            None,
+        )
+        if index is not None and index > 0:
+            truncated_contexts = truncated_contexts[index:]
+
+        return truncated_contexts
+
+    def _modalities_fix(
+        self,
+        provider: Provider,
+        req: ProviderRequest,
+    ):
+        """Check the provider's modality support and strip unsupported content."""
         if req.image_urls:
             provider_cfg = provider.provider_config.get("modalities", ["image"])
             if "image" not in provider_cfg:
```
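A worked example of the truncation arithmetic, re-implemented standalone for illustration (same window formula as `_truncate_contexts`):

```python
def truncate(contexts: list[dict], max_len: int, dequeue_len: int) -> list[dict]:
    if max_len == -1 or len(contexts) // 2 <= max_len:
        return contexts
    kept = contexts[-(max_len - dequeue_len + 1) * 2 :]
    # realign so the window starts at a user turn
    idx = next((i for i, m in enumerate(kept) if m.get("role") == "user"), None)
    return kept[idx:] if idx is not None and idx > 0 else kept


msgs = [{"role": r, "content": str(i)} for i, r in enumerate(["user", "assistant"] * 4)]
print(len(truncate(msgs, max_len=2, dequeue_len=1)))  # 4: the last two pairs survive
```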
```diff
@@ -526,7 +157,13 @@ class LLMRequestSubStage(Stage):
                     f"用户设置提供商 {provider} 不支持工具使用,清空工具列表。",
                 )
             req.func_tool = None
-        # plugin availability settings
+
+    def _plugin_tool_fix(
+        self,
+        event: AstrMessageEvent,
+        req: ProviderRequest,
+    ):
+        """Filter the request's tool list based on the event's plugin settings."""
         if event.plugins_name is not None and req.func_tool:
             new_tool_set = ToolSet()
             for tool in req.func_tool.tools:
```
```diff
@@ -540,80 +177,6 @@ class LLMRequestSubStage(Stage):
                     new_tool_set.add_tool(tool)
             req.func_tool = new_tool_set
-
-        # back up req.contexts
-        backup_contexts = copy.deepcopy(req.contexts)
-
-        # run agent
-        agent_runner = AgentRunner()
-        logger.debug(
-            f"handle provider[id: {provider.provider_config['id']}] request: {req}",
-        )
-        astr_agent_ctx = AstrAgentContext(
-            provider=provider,
-            first_provider_request=req,
-            curr_provider_request=req,
-            streaming=streaming_response,
-            event=event,
-        )
-        await agent_runner.reset(
-            provider=provider,
-            request=req,
-            run_context=AgentContextWrapper(
-                context=astr_agent_ctx,
-                tool_call_timeout=self.tool_call_timeout,
-            ),
-            tool_executor=FunctionToolExecutor(),
-            agent_hooks=MAIN_AGENT_HOOKS,
-            streaming=streaming_response,
-        )
-
-        if streaming_response:
-            # streaming response
-            event.set_result(
-                MessageEventResult()
-                .set_result_content_type(ResultContentType.STREAMING_RESULT)
-                .set_async_stream(
-                    run_agent(agent_runner, self.max_step, self.show_tool_use),
-                ),
-            )
-            yield
-            if agent_runner.done():
-                if final_llm_resp := agent_runner.get_final_llm_resp():
-                    if final_llm_resp.completion_text:
-                        chain = (
-                            MessageChain().message(final_llm_resp.completion_text).chain
-                        )
-                    elif final_llm_resp.result_chain:
-                        chain = final_llm_resp.result_chain.chain
-                    else:
-                        chain = MessageChain().chain
-                    event.set_result(
-                        MessageEventResult(
-                            chain=chain,
-                            result_content_type=ResultContentType.STREAMING_FINISH,
-                        ),
-                    )
-        else:
-            async for _ in run_agent(agent_runner, self.max_step, self.show_tool_use):
-                yield
-
-        # restore the backed-up contexts
-        req.contexts = backup_contexts
-
-        await self._save_to_history(event, req, agent_runner.get_final_llm_resp())
-
-        # handle the WebChat special case asynchronously
-        if event.get_platform_name() == "webchat":
-            asyncio.create_task(self._handle_webchat(event, req, provider))
-
-        asyncio.create_task(
-            Metric.upload(
-                llm_tick=1,
-                model_name=agent_runner.provider.get_model(),
-                provider_type=agent_runner.provider.meta().type,
-            ),
-        )
-
     async def _handle_webchat(
         self,
         event: AstrMessageEvent,
```
@@ -661,9 +224,6 @@ class LLMRequestSubStage(Stage):
             ),
         )
         if llm_resp and llm_resp.completion_text:
-            logger.debug(
-                f"WebChat 对话标题生成响应: {llm_resp.completion_text.strip()}",
-            )
             title = llm_resp.completion_text.strip()
             if not title or "<None>" in title:
                 return
@@ -691,6 +251,9 @@ class LLMRequestSubStage(Stage):
             logger.debug("LLM 响应为空,不保存记录。")
             return
 
+        if req.contexts is None:
+            req.contexts = []
+
         # 历史上下文
         messages = copy.deepcopy(req.contexts)
         # 这一轮对话请求的用户输入
@@ -710,7 +273,7 @@ class LLMRequestSubStage(Stage):
             history=messages,
         )
 
-    def fix_messages(self, messages: list[dict]) -> list[dict]:
+    def _fix_messages(self, messages: list[dict]) -> list[dict]:
         """验证并且修复上下文"""
         fixed_messages = []
         for message in messages:
@@ -725,3 +288,184 @@ class LLMRequestSubStage(Stage):
             else:
                 fixed_messages.append(message)
         return fixed_messages
+
+    async def process(
+        self,
+        event: AstrMessageEvent,
+        _nested: bool = False,
+    ) -> None | AsyncGenerator[None, None]:
+        req: ProviderRequest | None = None
+
+        if not self.ctx.astrbot_config["provider_settings"]["enable"]:
+            logger.debug("未启用 LLM 能力,跳过处理。")
+            return
+
+        # 检查会话级别的LLM启停状态
+        if not SessionServiceManager.should_process_llm_request(event):
+            logger.debug(f"会话 {event.unified_msg_origin} 禁用了 LLM,跳过处理。")
+            return
+
+        provider = self._select_provider(event)
+        if provider is None:
+            return
+        if not isinstance(provider, Provider):
+            logger.error(f"选择的提供商类型无效({type(provider)}),跳过 LLM 请求处理。")
+            return
+
+        streaming_response = self.streaming_response
+        if (enable_streaming := event.get_extra("enable_streaming")) is not None:
+            streaming_response = bool(enable_streaming)
+
+        logger.debug("ready to request llm provider")
+        async with session_lock_manager.acquire_lock(event.unified_msg_origin):
+            logger.debug("acquired session lock for llm request")
+            if event.get_extra("provider_request"):
+                req = event.get_extra("provider_request")
+                assert isinstance(req, ProviderRequest), (
+                    "provider_request 必须是 ProviderRequest 类型。"
+                )
+
+                if req.conversation:
+                    req.contexts = json.loads(req.conversation.history)
+
+            else:
+                req = ProviderRequest()
+                req.prompt = ""
+                req.image_urls = []
+                if sel_model := event.get_extra("selected_model"):
+                    req.model = sel_model
+                if self.provider_wake_prefix and not event.message_str.startswith(
+                    self.provider_wake_prefix
+                ):
+                    return
+
+                req.prompt = event.message_str[len(self.provider_wake_prefix) :]
+                # func_tool selection 现在已经转移到 packages/astrbot 插件中进行选择。
+                # req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()
+                for comp in event.message_obj.message:
+                    if isinstance(comp, Image):
+                        image_path = await comp.convert_to_file_path()
+                        req.image_urls.append(image_path)
+
+                conversation = await self._get_session_conv(event)
+                req.conversation = conversation
+                req.contexts = json.loads(conversation.history)
+
+                event.set_extra("provider_request", req)
+
+            if not req.prompt and not req.image_urls:
+                return
+
+            # apply knowledge base context
+            await self._apply_kb_context(event, req)
+
+            # call event hook
+            if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
+                return
+
+            # fix contexts json str
+            if isinstance(req.contexts, str):
+                req.contexts = json.loads(req.contexts)
+
+            # truncate contexts to fit max length
+            if req.contexts:
+                req.contexts = self._truncate_contexts(req.contexts)
+                self._fix_messages(req.contexts)
+
+            # session_id
+            if not req.session_id:
+                req.session_id = event.unified_msg_origin
+
+            # check provider modalities, if provider does not support image/tool_use, clear them in request.
+            self._modalities_fix(provider, req)
+
+            # filter tools, only keep tools from this pipeline's selected plugins
+            self._plugin_tool_fix(event, req)
+
+            stream_to_general = (
+                self.unsupported_streaming_strategy == "turn_off"
+                and not event.platform_meta.support_streaming_message
+            )
+            # 备份 req.contexts
+            backup_contexts = copy.deepcopy(req.contexts)
+
+            # run agent
+            agent_runner = AgentRunner()
+            logger.debug(
+                f"handle provider[id: {provider.provider_config['id']}] request: {req}",
+            )
+            astr_agent_ctx = AstrAgentContext(
+                context=self.ctx.plugin_manager.context,
+                event=event,
+            )
+            await agent_runner.reset(
+                provider=provider,
+                request=req,
+                run_context=AgentContextWrapper(
+                    context=astr_agent_ctx,
+                    tool_call_timeout=self.tool_call_timeout,
+                ),
+                tool_executor=FunctionToolExecutor(),
+                agent_hooks=MAIN_AGENT_HOOKS,
+                streaming=streaming_response,
+            )
+
+            if streaming_response and not stream_to_general:
+                # 流式响应
+                event.set_result(
+                    MessageEventResult()
+                    .set_result_content_type(ResultContentType.STREAMING_RESULT)
+                    .set_async_stream(
+                        run_agent(
+                            agent_runner,
+                            self.max_step,
+                            self.show_tool_use,
+                            show_reasoning=self.show_reasoning,
+                        ),
+                    ),
+                )
+                yield
+                if agent_runner.done():
+                    if final_llm_resp := agent_runner.get_final_llm_resp():
+                        if final_llm_resp.completion_text:
+                            chain = (
+                                MessageChain()
+                                .message(final_llm_resp.completion_text)
+                                .chain
+                            )
+                        elif final_llm_resp.result_chain:
+                            chain = final_llm_resp.result_chain.chain
+                        else:
+                            chain = MessageChain().chain
+                        event.set_result(
+                            MessageEventResult(
+                                chain=chain,
+                                result_content_type=ResultContentType.STREAMING_FINISH,
+                            ),
+                        )
+            else:
+                async for _ in run_agent(
+                    agent_runner,
+                    self.max_step,
+                    self.show_tool_use,
+                    stream_to_general,
+                    show_reasoning=self.show_reasoning,
+                ):
+                    yield
+
+            # 恢复备份的 contexts
+            req.contexts = backup_contexts
+
+            await self._save_to_history(event, req, agent_runner.get_final_llm_resp())
+
+            # 异步处理 WebChat 特殊情况
+            if event.get_platform_name() == "webchat":
+                asyncio.create_task(self._handle_webchat(event, req, provider))
+
+            asyncio.create_task(
+                Metric.upload(
+                    llm_tick=1,
+                    model_name=agent_runner.provider.get_model(),
+                    provider_type=agent_runner.provider.meta().type,
+                ),
+            )
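The rewritten `process()` folds the streaming decision into one boolean: true streaming is kept only when the platform reports `support_streaming_message`; otherwise, under the `turn_off` strategy, the run degrades to a whole-message reply. A reduced sketch of that decision, with simplified names standing in for the stage's attributes:

```python
def resolve_streaming(
    streaming_requested: bool,
    platform_supports_streaming: bool,
    strategy: str = "turn_off",
) -> tuple[bool, bool]:
    """Return (use_streaming, stream_to_general)."""
    stream_to_general = strategy == "turn_off" and not platform_supports_streaming
    return streaming_requested and not stream_to_general, stream_to_general


assert resolve_streaming(True, False) == (False, True)  # fall back to a normal reply
assert resolve_streaming(True, True) == (True, False)   # keep real streaming
```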
@@ -10,7 +10,6 @@ from astrbot.core.message.message_event_result import MessageChain, ResultConten
 from astrbot.core.platform.astr_message_event import AstrMessageEvent
 from astrbot.core.star.star_handler import EventType
 from astrbot.core.utils.path_util import path_Mapping
-from astrbot.core.utils.session_lock import session_lock_manager
 
 from ..context import PipelineContext, call_event_hook
 from ..stage import Stage, register_stage
@@ -169,12 +168,15 @@ class RespondStage(Stage):
             logger.warning("async_stream 为空,跳过发送。")
             return
         # 流式结果直接交付平台适配器处理
-        use_fallback = self.config.get("provider_settings", {}).get(
-            "streaming_segmented",
-            False,
+        realtime_segmenting = (
+            self.config.get("provider_settings", {}).get(
+                "unsupported_streaming_strategy",
+                "realtime_segmenting",
+            )
+            == "realtime_segmenting"
         )
         logger.info(f"应用流式输出({event.get_platform_id()})")
-        await event.send_streaming(result.async_stream, use_fallback)
+        await event.send_streaming(result.async_stream, realtime_segmenting)
         return
         if len(result.chain) > 0:
             # 检查路径映射
@@ -218,21 +220,20 @@ class RespondStage(Stage):
                 f"实际消息链为空, 跳过发送阶段。header_chain: {header_comps}, actual_chain: {result.chain}",
             )
             return
-        async with session_lock_manager.acquire_lock(event.unified_msg_origin):
-            for comp in result.chain:
-                i = await self._calc_comp_interval(comp)
-                await asyncio.sleep(i)
-                try:
-                    if comp.type in need_separately:
-                        await event.send(MessageChain([comp]))
-                    else:
-                        await event.send(MessageChain([*header_comps, comp]))
-                        header_comps.clear()
-                except Exception as e:
-                    logger.error(
-                        f"发送消息链失败: chain = {MessageChain([comp])}, error = {e}",
-                        exc_info=True,
-                    )
+        for comp in result.chain:
+            i = await self._calc_comp_interval(comp)
+            await asyncio.sleep(i)
+            try:
+                if comp.type in need_separately:
+                    await event.send(MessageChain([comp]))
+                else:
+                    await event.send(MessageChain([*header_comps, comp]))
+                    header_comps.clear()
+            except Exception as e:
+                logger.error(
+                    f"发送消息链失败: chain = {MessageChain([comp])}, error = {e}",
+                    exc_info=True,
+                )
         else:
             if all(
                 comp.type in {ComponentType.Reply, ComponentType.At}
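The respond stage now derives the second argument of `send_streaming` from `unsupported_streaming_strategy` instead of the removed `streaming_segmented` flag. A minimal reading of that key, with a plain dict standing in for the real config object:

```python
config: dict = {"provider_settings": {"unsupported_streaming_strategy": "turn_off"}}

realtime_segmenting = (
    config.get("provider_settings", {}).get(
        "unsupported_streaming_strategy", "realtime_segmenting"
    )
    == "realtime_segmenting"
)
print(realtime_segmenting)  # False: segmented delivery is disabled for this config
```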
@@ -16,3 +16,6 @@ class PlatformMetadata:
     """显示在 WebUI 配置页中的平台名称,如空则是 name"""
     logo_path: str | None = None
     """平台适配器的 logo 文件路径(相对于插件目录)"""
+
+    support_streaming_message: bool = True
+    """平台是否支持真实流式传输"""
@@ -14,6 +14,7 @@ def register_platform_adapter(
     default_config_tmpl: dict | None = None,
     adapter_display_name: str | None = None,
     logo_path: str | None = None,
+    support_streaming_message: bool = True,
 ):
     """用于注册平台适配器的带参装饰器。
 
@@ -42,6 +43,7 @@ def register_platform_adapter(
         default_config_tmpl=default_config_tmpl,
         adapter_display_name=adapter_display_name,
         logo_path=logo_path,
+        support_streaming_message=support_streaming_message,
     )
     platform_registry.append(pm)
     platform_cls_map[adapter_name] = cls
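With the new keyword, an adapter that cannot push incremental messages can declare that fact at registration time and the pipeline will fall back to whole replies. Hypothetical usage, assuming the imports this module already provides; the adapter name and class body are illustrative only:

```python
@register_platform_adapter(
    "my_platform",
    "An example adapter that cannot stream partial messages",
    support_streaming_message=False,
)
class MyPlatformAdapter(Platform):
    ...
```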
@@ -29,6 +29,7 @@ from .aiocqhttp_message_event import AiocqhttpMessageEvent
 @register_platform_adapter(
     "aiocqhttp",
     "适用于 OneBot V11 标准的消息平台适配器,支持反向 WebSockets。",
+    support_streaming_message=False,
 )
 class AiocqhttpAdapter(Platform):
     def __init__(
@@ -49,6 +50,7 @@ class AiocqhttpAdapter(Platform):
             name="aiocqhttp",
             description="适用于 OneBot 标准的消息平台适配器,支持反向 WebSockets。",
             id=self.config.get("id"),
+            support_streaming_message=False,
         )
 
         self.bot = CQHttp(
@@ -37,7 +37,9 @@ class MyEventHandler(dingtalk_stream.EventHandler):
         return AckMessage.STATUS_OK, "OK"
 
 
-@register_platform_adapter("dingtalk", "钉钉机器人官方 API 适配器")
+@register_platform_adapter(
+    "dingtalk", "钉钉机器人官方 API 适配器", support_streaming_message=False
+)
 class DingtalkPlatformAdapter(Platform):
     def __init__(
         self,
@@ -74,6 +76,14 @@ class DingtalkPlatformAdapter(Platform):
         )
         self.client_ = client  # 用于 websockets 的 client
 
+    def _id_to_sid(self, dingtalk_id: str | None) -> str | None:
+        if not dingtalk_id:
+            return dingtalk_id
+        prefix = "$:LWCP_v1:$"
+        if dingtalk_id.startswith(prefix):
+            return dingtalk_id[len(prefix) :]
+        return dingtalk_id
+
     async def send_by_session(
         self,
         session: MessageSesion,
@@ -86,6 +96,7 @@ class DingtalkPlatformAdapter(Platform):
             name="dingtalk",
             description="钉钉机器人官方 API 适配器",
             id=self.config.get("id"),
+            support_streaming_message=False,
         )
 
     async def convert_msg(
@@ -102,10 +113,10 @@ class DingtalkPlatformAdapter(Platform):
             else MessageType.FRIEND_MESSAGE
         )
         abm.sender = MessageMember(
-            user_id=message.sender_id,
+            user_id=self._id_to_sid(message.sender_id),
             nickname=message.sender_nick,
         )
-        abm.self_id = message.chatbot_user_id
+        abm.self_id = self._id_to_sid(message.chatbot_user_id)
         abm.message_id = message.message_id
         abm.raw_message = message
 
@@ -113,8 +124,8 @@ class DingtalkPlatformAdapter(Platform):
         # 处理所有被 @ 的用户(包括机器人自己,因 at_users 已包含)
         if message.at_users:
             for user in message.at_users:
-                if user.dingtalk_id:
-                    abm.message.append(At(qq=user.dingtalk_id))
+                if id := self._id_to_sid(user.dingtalk_id):
+                    abm.message.append(At(qq=id))
         abm.group_id = message.conversation_id
         if self.unique_session:
             abm.session_id = abm.sender.user_id
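`_id_to_sid` only strips DingTalk's `$:LWCP_v1:$` prefix; plain ids and `None` pass through unchanged. The same logic checked standalone, as a module-level function for brevity:

```python
def id_to_sid(dingtalk_id: str | None) -> str | None:
    if not dingtalk_id:
        return dingtalk_id
    prefix = "$:LWCP_v1:$"
    if dingtalk_id.startswith(prefix):
        return dingtalk_id[len(prefix):]  # normalize to the bare session id
    return dingtalk_id


assert id_to_sid("$:LWCP_v1:$abc123") == "abc123"
assert id_to_sid("abc123") == "abc123"
assert id_to_sid(None) is None
```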
@@ -34,7 +34,9 @@ else:
 
 
 # 注册平台适配器
-@register_platform_adapter("discord", "Discord 适配器 (基于 Pycord)")
+@register_platform_adapter(
+    "discord", "Discord 适配器 (基于 Pycord)", support_streaming_message=False
+)
 class DiscordPlatformAdapter(Platform):
     def __init__(
         self,
@@ -111,6 +113,7 @@ class DiscordPlatformAdapter(Platform):
             "Discord 适配器",
             id=self.config.get("id"),
             default_config_tmpl=self.config,
+            support_streaming_message=False,
         )
 
     @override
@@ -23,7 +23,9 @@ from ...register import register_platform_adapter
 from .lark_event import LarkMessageEvent
 
 
-@register_platform_adapter("lark", "飞书机器人官方 API 适配器")
+@register_platform_adapter(
+    "lark", "飞书机器人官方 API 适配器", support_streaming_message=False
+)
 class LarkPlatformAdapter(Platform):
     def __init__(
         self,
@@ -115,6 +117,7 @@ class LarkPlatformAdapter(Platform):
             name="lark",
             description="飞书机器人官方 API 适配器",
             id=self.config.get("id"),
+            support_streaming_message=False,
         )
 
     async def convert_msg(self, event: lark.im.v1.P2ImMessageReceiveV1):
@@ -45,7 +45,9 @@ MAX_FILE_UPLOAD_COUNT = 16
 DEFAULT_UPLOAD_CONCURRENCY = 3
 
 
-@register_platform_adapter("misskey", "Misskey 平台适配器")
+@register_platform_adapter(
+    "misskey", "Misskey 平台适配器", support_streaming_message=False
+)
 class MisskeyPlatformAdapter(Platform):
     def __init__(
         self,
@@ -120,6 +122,7 @@ class MisskeyPlatformAdapter(Platform):
             description="Misskey 平台适配器",
             id=self.config.get("id", "misskey"),
             default_config_tmpl=default_config,
+            support_streaming_message=False,
         )
 
     async def run(self):
@@ -29,8 +29,7 @@ from astrbot.core.platform.astr_message_event import MessageSession
 
 
 @register_platform_adapter(
-    "satori",
-    "Satori 协议适配器",
+    "satori", "Satori 协议适配器", support_streaming_message=False
 )
 class SatoriPlatformAdapter(Platform):
     def __init__(
@@ -60,6 +59,7 @@ class SatoriPlatformAdapter(Platform):
             name="satori",
             description="Satori 通用协议适配器",
             id=self.config["id"],
+            support_streaming_message=False,
         )
 
         self.ws: ClientConnection | None = None
@@ -30,6 +30,7 @@ from .slack_event import SlackMessageEvent
 @register_platform_adapter(
     "slack",
     "适用于 Slack 的消息平台适配器,支持 Socket Mode 和 Webhook Mode。",
+    support_streaming_message=False,
 )
 class SlackAdapter(Platform):
     def __init__(
@@ -68,6 +69,7 @@ class SlackAdapter(Platform):
             name="slack",
             description="适用于 Slack 的消息平台适配器,支持 Socket Mode 和 Webhook Mode。",
             id=self.config.get("id"),
+            support_streaming_message=False,
         )
 
         # 初始化 Slack Web Client
@@ -109,6 +109,7 @@ class WebChatMessageEvent(AstrMessageEvent):
 
     async def send_streaming(self, generator, use_fallback: bool = False):
         final_data = ""
+        reasoning_content = ""
         cid = self.session_id.split("!")[-1]
         web_chat_back_queue = webchat_queue_mgr.get_or_create_back_queue(cid)
         async for chain in generator:
@@ -124,16 +125,22 @@ class WebChatMessageEvent(AstrMessageEvent):
                 )
                 final_data = ""
                 continue
-            final_data += await WebChatMessageEvent._send(
+            r = await WebChatMessageEvent._send(
                 chain,
                 session_id=self.session_id,
                 streaming=True,
             )
+            if chain.type == "reasoning":
+                reasoning_content += chain.get_plain_text()
+            else:
+                final_data += r
 
         await web_chat_back_queue.put(
             {
                 "type": "complete",  # complete means we return the final result
                 "data": final_data,
+                "reasoning": reasoning_content,
                 "streaming": True,
                 "cid": cid,
             },
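The WebChat event now routes `reasoning` chunks into a separate buffer, so the final `complete` payload carries answer text and reasoning side by side. The accumulation logic in isolation, with chunks modeled as simple `(kind, text)` tuples instead of message chains:

```python
def accumulate(chunks: list[tuple[str, str]]) -> dict:
    final_data, reasoning_content = "", ""
    for kind, text in chunks:
        if kind == "reasoning":
            reasoning_content += text   # kept out of the visible answer
        else:
            final_data += text
    return {"type": "complete", "data": final_data, "reasoning": reasoning_content}


out = accumulate([("reasoning", "think..."), ("text", "Hello"), ("text", "!")])
assert out["data"] == "Hello!" and out["reasoning"] == "think..."
```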
@@ -32,7 +32,9 @@ except ImportError as e:
     )
 
 
-@register_platform_adapter("wechatpadpro", "WeChatPadPro 消息平台适配器")
+@register_platform_adapter(
+    "wechatpadpro", "WeChatPadPro 消息平台适配器", support_streaming_message=False
+)
 class WeChatPadProAdapter(Platform):
     def __init__(
         self,
@@ -51,6 +53,7 @@ class WeChatPadProAdapter(Platform):
             name="wechatpadpro",
             description="WeChatPadPro 消息平台适配器",
             id=self.config.get("id", "wechatpadpro"),
+            support_streaming_message=False,
         )
 
         # 保存配置信息
@@ -110,7 +110,7 @@ class WecomServer:
         await self.shutdown_event.wait()
 
 
-@register_platform_adapter("wecom", "wecom 适配器")
+@register_platform_adapter("wecom", "wecom 适配器", support_streaming_message=False)
 class WecomPlatformAdapter(Platform):
     def __init__(
         self,
@@ -196,6 +196,7 @@ class WecomPlatformAdapter(Platform):
             "wecom",
             "wecom 适配器",
             id=self.config.get("id", "wecom"),
+            support_streaming_message=False,
         )
 
     @override
@@ -113,7 +113,9 @@ class WecomServer:
         await self.shutdown_event.wait()
 
 
-@register_platform_adapter("weixin_official_account", "微信公众平台 适配器")
+@register_platform_adapter(
+    "weixin_official_account", "微信公众平台 适配器", support_streaming_message=False
+)
 class WeixinOfficialAccountPlatformAdapter(Platform):
     def __init__(
         self,
@@ -195,6 +197,7 @@ class WeixinOfficialAccountPlatformAdapter(Platform):
             "weixin_official_account",
             "微信公众平台 适配器",
             id=self.config.get("id", "weixin_official_account"),
+            support_streaming_message=False,
         )
 
     @override
@@ -1,4 +1,4 @@
 from .entities import ProviderMetaData
-from .provider import Personality, Provider, STTProvider
+from .provider import Provider, STTProvider
 
-__all__ = ["Personality", "Provider", "ProviderMetaData", "STTProvider"]
+__all__ = ["Provider", "ProviderMetaData", "STTProvider"]
@@ -30,18 +30,31 @@ class ProviderType(enum.Enum):
 
 
 @dataclass
-class ProviderMetaData:
-    type: str
-    """提供商适配器名称,如 openai, ollama"""
-    desc: str = ""
-    """提供商适配器描述"""
-    provider_type: ProviderType = ProviderType.CHAT_COMPLETION
-    cls_type: Any = None
+class ProviderMeta:
+    """The basic metadata of a provider instance."""
+
+    id: str
+    """the unique id of the provider instance that user configured"""
+    model: str | None
+    """the model name of the provider instance currently used"""
+    type: str
+    """the name of the provider adapter, such as openai, ollama"""
+    provider_type: ProviderType = ProviderType.CHAT_COMPLETION
+    """the capability type of the provider adapter"""
+
+
+@dataclass
+class ProviderMetaData(ProviderMeta):
+    """The metadata of a provider adapter for registration."""
+
+    desc: str = ""
+    """the short description of the provider adapter"""
+    cls_type: Any = None
+    """the class type of the provider adapter"""
     default_config_tmpl: dict | None = None
-    """平台的默认配置模板"""
+    """the default configuration template of the provider adapter"""
     provider_display_name: str | None = None
-    """显示在 WebUI 配置页中的提供商名称,如空则是 type"""
+    """the display name of the provider shown in the WebUI configuration page; if empty, the type is used"""
 
 
 @dataclass
@@ -60,12 +73,20 @@ class ToolCallsResult:
         ]
         return ret
 
+    def to_openai_messages_model(
+        self,
+    ) -> list[AssistantMessageSegment | ToolCallMessageSegment]:
+        return [
+            self.tool_calls_info,
+            *self.tool_calls_result,
+        ]
+
 
 @dataclass
 class ProviderRequest:
-    prompt: str
+    prompt: str | None = None
     """提示词"""
-    session_id: str = ""
+    session_id: str | None = ""
     """会话 ID"""
     image_urls: list[str] = field(default_factory=list)
     """图片 URL 列表"""
@@ -181,25 +202,28 @@ class ProviderRequest:
 @dataclass
 class LLMResponse:
     role: str
-    """角色, assistant, tool, err"""
+    """The role of the message, e.g., assistant, tool, err"""
     result_chain: MessageChain | None = None
-    """返回的消息链"""
+    """A chain of message components representing the text completion from LLM."""
     tools_call_args: list[dict[str, Any]] = field(default_factory=list)
-    """工具调用参数"""
+    """Tool call arguments."""
    tools_call_name: list[str] = field(default_factory=list)
-    """工具调用名称"""
+    """Tool call names."""
     tools_call_ids: list[str] = field(default_factory=list)
-    """工具调用 ID"""
+    """Tool call IDs."""
+    reasoning_content: str = ""
+    """The reasoning content extracted from the LLM, if any."""
+
     raw_completion: (
         ChatCompletion | GenerateContentResponse | AnthropicMessage | None
     ) = None
-    _new_record: dict[str, Any] | None = None
+    """The raw completion response from the LLM provider."""
+
     _completion_text: str = ""
+    """The plain text of the completion."""
+
     is_chunk: bool = False
-    """是否是流式输出的单个 Chunk"""
+    """Indicates if the response is a chunked response."""
 
     def __init__(
         self,
@@ -213,7 +237,6 @@ class LLMResponse:
         | GenerateContentResponse
         | AnthropicMessage
         | None = None,
-        _new_record: dict[str, Any] | None = None,
         is_chunk: bool = False,
     ):
         """初始化 LLMResponse
@@ -241,7 +264,6 @@ class LLMResponse:
         self.tools_call_name = tools_call_name
         self.tools_call_ids = tools_call_ids
         self.raw_completion = raw_completion
-        self._new_record = _new_record
         self.is_chunk = is_chunk
 
     @property
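After this split, `ProviderMetaData` inherits the instance fields from `ProviderMeta`, so registration metadata and runtime metadata share one shape. A standalone sketch of the same layering, with simplified names:

```python
from dataclasses import dataclass


@dataclass
class Meta:
    id: str
    model: str | None
    type: str


@dataclass
class RegistrationMeta(Meta):
    desc: str = ""
    cls_type: object | None = None


pm = RegistrationMeta(id="default", model=None, type="openai", desc="demo")
assert isinstance(pm, Meta)  # callers can treat both layers uniformly
```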
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import asyncio
+import copy
 import json
 import os
 from collections.abc import Awaitable, Callable
@@ -24,7 +25,16 @@ SUPPORTED_TYPES = [
     "boolean",
 ]  # json schema 支持的数据类型
 
+PY_TO_JSON_TYPE = {
+    "int": "number",
+    "float": "number",
+    "bool": "boolean",
+    "str": "string",
+    "dict": "object",
+    "list": "array",
+    "tuple": "array",
+    "set": "array",
+}
 # alias
 FuncTool = FunctionTool
 
@@ -106,7 +116,7 @@ class FunctionToolManager:
     def spec_to_func(
         self,
         name: str,
-        func_args: list,
+        func_args: list[dict],
         desc: str,
         handler: Callable[..., Awaitable[Any]],
     ) -> FuncTool:
@@ -115,10 +125,9 @@ class FunctionToolManager:
             "properties": {},
         }
         for param in func_args:
-            params["properties"][param["name"]] = {
-                "type": param["type"],
-                "description": param["description"],
-            }
+            p = copy.deepcopy(param)
+            p.pop("name", None)
+            params["properties"][param["name"]] = p
         return FuncTool(
             name=name,
             parameters=params,
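`spec_to_func` no longer whitelists `type` and `description`: every key of the arg spec except `name` is copied into the JSON-schema property, which lets extras such as `enum` or `items` survive. The copying step on its own:

```python
import copy


def args_to_properties(func_args: list[dict]) -> dict:
    properties = {}
    for param in func_args:
        p = copy.deepcopy(param)
        p.pop("name", None)  # "name" becomes the property key, not a schema field
        properties[param["name"]] = p
    return properties


props = args_to_properties(
    [{"name": "city", "type": "string", "description": "city name", "enum": ["SH", "BJ"]}]
)
assert props["city"]["enum"] == ["SH", "BJ"]  # extra keys are preserved now
```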
@@ -241,6 +241,8 @@ class ProviderManager:
                 )
             case "zhipu_chat_completion":
                 from .sources.zhipu_source import ProviderZhipu as ProviderZhipu
+            case "groq_chat_completion":
+                from .sources.groq_source import ProviderGroq as ProviderGroq
             case "anthropic_chat_completion":
                 from .sources.anthropic_source import (
                     ProviderAnthropic as ProviderAnthropic,
@@ -354,6 +356,8 @@ class ProviderManager:
             logger.error(f"无法找到 {provider_metadata.type} 的类")
             return
 
+        provider_metadata.id = provider_config["id"]
+
         if provider_metadata.provider_type == ProviderType.SPEECH_TO_TEXT:
             # STT 任务
             inst = cls_type(provider_config, self.provider_settings)
@@ -394,7 +398,6 @@ class ProviderManager:
             inst = cls_type(
                 provider_config,
                 self.provider_settings,
-                self.selected_default_persona,
             )
 
             if getattr(inst, "initialize", None):
@@ -1,28 +1,18 @@
 import abc
 import asyncio
 from collections.abc import AsyncGenerator
-from dataclasses import dataclass
 
 from astrbot.core.agent.message import Message
 from astrbot.core.agent.tool import ToolSet
-from astrbot.core.db.po import Personality
 from astrbot.core.provider.entities import (
     LLMResponse,
-    ProviderType,
+    ProviderMeta,
     RerankResult,
     ToolCallsResult,
 )
 from astrbot.core.provider.register import provider_cls_map
 
 
-@dataclass
-class ProviderMeta:
-    id: str
-    model: str
-    type: str
-    provider_type: ProviderType
-
-
 class AbstractProvider(abc.ABC):
     """Provider Abstract Class"""
 
@@ -43,15 +33,15 @@ class AbstractProvider(abc.ABC):
         """Get the provider metadata"""
         provider_type_name = self.provider_config["type"]
         meta_data = provider_cls_map.get(provider_type_name)
-        provider_type = meta_data.provider_type if meta_data else None
-        if provider_type is None:
-            raise ValueError(f"Cannot find provider type: {provider_type_name}")
-        return ProviderMeta(
-            id=self.provider_config["id"],
+        if not meta_data:
+            raise ValueError(f"Provider type {provider_type_name} not registered")
+        meta = ProviderMeta(
+            id=self.provider_config.get("id", "default"),
             model=self.get_model(),
             type=provider_type_name,
-            provider_type=provider_type,
+            provider_type=meta_data.provider_type,
         )
+        return meta
 
 
 class Provider(AbstractProvider):
@@ -61,15 +51,10 @@ class Provider(AbstractProvider):
         self,
         provider_config: dict,
         provider_settings: dict,
-        default_persona: Personality | None = None,
     ) -> None:
         super().__init__(provider_config)
 
         self.provider_settings = provider_settings
 
-        self.curr_personality = default_persona
-        """维护了当前的使用的 persona,即人格。可能为 None"""
-
     @abc.abstractmethod
     def get_current_key(self) -> str:
         raise NotImplementedError
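`meta()` now fails fast with a `ValueError` when the configured type was never registered, instead of building a partially populated result. Roughly, with a hypothetical registry dict in place of `provider_cls_map`:

```python
registry: dict[str, object] = {}  # stand-in for provider_cls_map


def meta(provider_config: dict):
    provider_type_name = provider_config["type"]
    meta_data = registry.get(provider_type_name)
    if not meta_data:
        raise ValueError(f"Provider type {provider_type_name} not registered")
    return meta_data


try:
    meta({"type": "unknown_chat_completion"})
except ValueError as e:
    print(e)  # Provider type unknown_chat_completion not registered
```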
@@ -36,6 +36,8 @@ def register_provider_adapter(
         default_config_tmpl["id"] = provider_type_name
 
     pm = ProviderMetaData(
+        id="default",  # will be replaced when instantiated
+        model=None,
         type=provider_type_name,
         desc=desc,
         provider_type=provider_type,
@@ -25,12 +25,10 @@ class ProviderAnthropic(Provider):
         self,
         provider_config,
         provider_settings,
-        default_persona=None,
     ) -> None:
         super().__init__(
             provider_config,
             provider_settings,
-            default_persona,
         )
 
         self.chosen_api_key: str = ""
@@ -20,12 +20,10 @@ class ProviderCoze(Provider):
         self,
         provider_config,
         provider_settings,
-        default_persona=None,
     ) -> None:
         super().__init__(
             provider_config,
             provider_settings,
-            default_persona,
         )
         self.api_key = provider_config.get("coze_api_key", "")
         if not self.api_key:
@@ -8,7 +8,7 @@ from dashscope.app.application_response import ApplicationResponse
 from astrbot.core import logger, sp
 from astrbot.core.message.message_event_result import MessageChain
 
-from .. import Personality, Provider
+from .. import Provider
 from ..entities import LLMResponse
 from ..register import register_provider_adapter
 from .openai_source import ProviderOpenAIOfficial
@@ -20,13 +20,11 @@ class ProviderDashscope(ProviderOpenAIOfficial):
         self,
         provider_config: dict,
         provider_settings: dict,
-        default_persona: Personality | None = None,
     ) -> None:
         Provider.__init__(
             self,
             provider_config,
             provider_settings,
-            default_persona,
         )
         self.api_key = provider_config.get("dashscope_api_key", "")
         if not self.api_key:
@@ -18,12 +18,10 @@ class ProviderDify(Provider):
         self,
         provider_config,
         provider_settings,
-        default_persona=None,
     ) -> None:
         super().__init__(
             provider_config,
             provider_settings,
-            default_persona,
         )
         self.api_key = provider_config.get("dify_api_key", "")
         if not self.api_key:
@@ -53,12 +53,10 @@ class ProviderGoogleGenAI(Provider):
         self,
         provider_config,
         provider_settings,
-        default_persona=None,
     ) -> None:
         super().__init__(
             provider_config,
             provider_settings,
-            default_persona,
         )
         self.api_keys: list = super().get_keys()
         self.chosen_api_key: str = self.api_keys[0] if len(self.api_keys) > 0 else ""
@@ -326,8 +324,18 @@ class ProviderGoogleGenAI(Provider):
 
         return gemini_contents
 
-    @staticmethod
+    def _extract_reasoning_content(self, candidate: types.Candidate) -> str:
+        """Extract reasoning content from candidate parts"""
+        if not candidate.content or not candidate.content.parts:
+            return ""
+
+        thought_buf: list[str] = [
+            (p.text or "") for p in candidate.content.parts if p.thought
+        ]
+        return "".join(thought_buf).strip()
+
     def _process_content_parts(
+        self,
         candidate: types.Candidate,
         llm_response: LLMResponse,
     ) -> MessageChain:
@@ -358,6 +366,11 @@ class ProviderGoogleGenAI(Provider):
             logger.warning(f"收到的 candidate.content.parts 为空: {candidate}")
             raise Exception("API 返回的 candidate.content.parts 为空。")
 
+        # 提取 reasoning content
+        reasoning = self._extract_reasoning_content(candidate)
+        if reasoning:
+            llm_response.reasoning_content = reasoning
+
         chain = []
         part: types.Part
 
@@ -515,6 +528,7 @@ class ProviderGoogleGenAI(Provider):
 
         # Accumulate the complete response text for the final response
         accumulated_text = ""
+        accumulated_reasoning = ""
         final_response = None
 
         async for chunk in result:
@@ -539,9 +553,19 @@ class ProviderGoogleGenAI(Provider):
                     yield llm_response
                     return
 
+                _f = False
+
+                # 提取 reasoning content
+                reasoning = self._extract_reasoning_content(chunk.candidates[0])
+                if reasoning:
+                    _f = True
+                    accumulated_reasoning += reasoning
+                    llm_response.reasoning_content = reasoning
                 if chunk.text:
+                    _f = True
                     accumulated_text += chunk.text
                     llm_response.result_chain = MessageChain(chain=[Comp.Plain(chunk.text)])
+                if _f:
                     yield llm_response
 
                 if chunk.candidates[0].finish_reason:
@@ -559,6 +583,10 @@ class ProviderGoogleGenAI(Provider):
         if not final_response:
             final_response = LLMResponse("assistant", is_chunk=False)
 
+        # Set the complete accumulated reasoning in the final response
+        if accumulated_reasoning:
+            final_response.reasoning_content = accumulated_reasoning
+
         # Set the complete accumulated text in the final response
         if accumulated_text:
             final_response.result_chain = MessageChain(
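Gemini marks reasoning with a `thought` flag on each content part, and the new helper simply concatenates the flagged parts' text. The same filter over a list of hypothetical part objects:

```python
from dataclasses import dataclass


@dataclass
class Part:
    text: str | None = None
    thought: bool = False


parts = [Part("plan...", thought=True), Part("Answer."), Part("more...", thought=True)]
# Keep only parts flagged as thoughts, mirroring the helper above.
reasoning = "".join((p.text or "") for p in parts if p.thought).strip()
assert reasoning == "plan...more..."
```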
astrbot/core/provider/sources/groq_source.py (new file, +15 lines)
@@ -0,0 +1,15 @@
+from ..register import register_provider_adapter
+from .openai_source import ProviderOpenAIOfficial
+
+
+@register_provider_adapter(
+    "groq_chat_completion", "Groq Chat Completion Provider Adapter"
+)
+class ProviderGroq(ProviderOpenAIOfficial):
+    def __init__(
+        self,
+        provider_config: dict,
+        provider_settings: dict,
+    ) -> None:
+        super().__init__(provider_config, provider_settings)
+        self.reasoning_key = "reasoning"
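The entire Groq adapter is this one override: Groq's OpenAI-compatible responses put reasoning under `reasoning` rather than `reasoning_content`, and since `_extract_reasoning_content` reads the attribute via `getattr(..., self.reasoning_key)`, changing a single field is enough. A sketch of why that works, using `SimpleNamespace` as a stand-in message object:

```python
from types import SimpleNamespace


def extract(message, reasoning_key: str) -> str:
    value = getattr(message, reasoning_key, None)
    return str(value) if value else ""


groq_msg = SimpleNamespace(reasoning="chain of thought", content="hi")
assert extract(groq_msg, "reasoning") == "chain of thought"
assert extract(groq_msg, "reasoning_content") == ""  # default key finds nothing
```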
@@ -4,12 +4,14 @@ import inspect
 import json
 import os
 import random
+import re
 from collections.abc import AsyncGenerator
 
 from openai import AsyncAzureOpenAI, AsyncOpenAI
 from openai._exceptions import NotFoundError, UnprocessableEntityError
 from openai.lib.streaming.chat._completions import ChatCompletionStreamState
 from openai.types.chat.chat_completion import ChatCompletion
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 
 import astrbot.core.message.components as Comp
 from astrbot import logger
@@ -28,17 +30,8 @@ from ..register import register_provider_adapter
     "OpenAI API Chat Completion 提供商适配器",
 )
 class ProviderOpenAIOfficial(Provider):
-    def __init__(
-        self,
-        provider_config,
-        provider_settings,
-        default_persona=None,
-    ) -> None:
-        super().__init__(
-            provider_config,
-            provider_settings,
-            default_persona,
-        )
+    def __init__(self, provider_config, provider_settings) -> None:
+        super().__init__(provider_config, provider_settings)
         self.chosen_api_key = None
         self.api_keys: list = super().get_keys()
         self.chosen_api_key = self.api_keys[0] if len(self.api_keys) > 0 else None
@@ -53,9 +46,8 @@ class ProviderOpenAIOfficial(Provider):
         for key in self.custom_headers:
             self.custom_headers[key] = str(self.custom_headers[key])
 
-        # 适配 azure openai #332
         if "api_version" in provider_config:
-            # 使用 azure api
+            # Using Azure OpenAI API
             self.client = AsyncAzureOpenAI(
                 api_key=self.chosen_api_key,
                 api_version=provider_config.get("api_version", None),
@@ -64,7 +56,7 @@ class ProviderOpenAIOfficial(Provider):
                 timeout=self.timeout,
             )
         else:
-            # 使用 openai api
+            # Using OpenAI Official API
             self.client = AsyncOpenAI(
                 api_key=self.chosen_api_key,
                 base_url=provider_config.get("api_base", None),
@@ -80,6 +72,8 @@ class ProviderOpenAIOfficial(Provider):
         model = model_config.get("model", "unknown")
         self.set_model(model)
 
+        self.reasoning_key = "reasoning_content"
+
     def _maybe_inject_xai_search(self, payloads: dict, **kwargs):
         """当开启 xAI 原生搜索时,向请求体注入 Live Search 参数。
 
@@ -157,7 +151,7 @@ class ProviderOpenAIOfficial(Provider):
 
         logger.debug(f"completion: {completion}")
 
-        llm_response = await self.parse_openai_completion(completion, tools)
+        llm_response = await self._parse_openai_completion(completion, tools)
 
         return llm_response
 
@@ -210,36 +204,78 @@ class ProviderOpenAIOfficial(Provider):
             if len(chunk.choices) == 0:
                 continue
             delta = chunk.choices[0].delta
-            # 处理文本内容
+            # logger.debug(f"chunk delta: {delta}")
+            # handle the content delta
+            reasoning = self._extract_reasoning_content(chunk)
+            _y = False
+            if reasoning:
+                llm_response.reasoning_content = reasoning
+                _y = True
             if delta.content:
                 completion_text = delta.content
                 llm_response.result_chain = MessageChain(
                     chain=[Comp.Plain(completion_text)],
                 )
+                _y = True
+            if _y:
                 yield llm_response
 
         final_completion = state.get_final_completion()
-        llm_response = await self.parse_openai_completion(final_completion, tools)
+        llm_response = await self._parse_openai_completion(final_completion, tools)
 
         yield llm_response
 
-    async def parse_openai_completion(
+    def _extract_reasoning_content(
+        self,
+        completion: ChatCompletion | ChatCompletionChunk,
+    ) -> str:
+        """Extract reasoning content from OpenAI ChatCompletion if available."""
+        reasoning_text = ""
+        if len(completion.choices) == 0:
+            return reasoning_text
+        if isinstance(completion, ChatCompletion):
+            choice = completion.choices[0]
+            reasoning_attr = getattr(choice.message, self.reasoning_key, None)
+            if reasoning_attr:
+                reasoning_text = str(reasoning_attr)
+        elif isinstance(completion, ChatCompletionChunk):
+            delta = completion.choices[0].delta
+            reasoning_attr = getattr(delta, self.reasoning_key, None)
+            if reasoning_attr:
+                reasoning_text = str(reasoning_attr)
+        return reasoning_text
+
+    async def _parse_openai_completion(
         self, completion: ChatCompletion, tools: ToolSet | None
     ) -> LLMResponse:
-        """解析 OpenAI 的 ChatCompletion 响应"""
+        """Parse OpenAI ChatCompletion into LLMResponse"""
         llm_response = LLMResponse("assistant")
 
         if len(completion.choices) == 0:
             raise Exception("API 返回的 completion 为空。")
         choice = completion.choices[0]
 
+        # parse the text completion
         if choice.message.content is not None:
             # text completion
             completion_text = str(choice.message.content).strip()
+            # specially, some providers may set <think> tags around reasoning content in the completion text,
+            # we use regex to remove them, and store then in reasoning_content field
+            reasoning_pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL)
+            matches = reasoning_pattern.findall(completion_text)
+            if matches:
+                llm_response.reasoning_content = "\n".join(
+                    [match.strip() for match in matches],
+                )
+                completion_text = reasoning_pattern.sub("", completion_text).strip()
             llm_response.result_chain = MessageChain().message(completion_text)
 
+        # parse the reasoning content if any
+        # the priority is higher than the <think> tag extraction
+        llm_response.reasoning_content = self._extract_reasoning_content(completion)
+
+        # parse tool calls if any
         if choice.message.tool_calls and tools is not None:
-            # tools call (function calling)
             args_ls = []
             func_name_ls = []
             tool_call_ids = []
@@ -265,11 +301,11 @@ class ProviderOpenAIOfficial(Provider):
         llm_response.tools_call_name = func_name_ls
         llm_response.tools_call_ids = tool_call_ids
 
+        # specially handle finish reason
         if choice.finish_reason == "content_filter":
             raise Exception(
                 "API 返回的 completion 由于内容安全过滤被拒绝(非 AstrBot)。",
             )
 
         if llm_response.completion_text is None and not llm_response.tools_call_args:
             logger.error(f"API 返回的 completion 无法解析:{completion}。")
             raise Exception(f"API 返回的 completion 无法解析:{completion}。")
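The `<think>` fallback in `_parse_openai_completion` is a plain regex pass: providers that inline reasoning in the completion text get it moved into `reasoning_content` while the visible answer is stripped clean. The pattern in isolation:

```python
import re

text = "<think>weigh options</think>The answer is 42."
pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL)

reasoning = "\n".join(m.strip() for m in pattern.findall(text))
answer = pattern.sub("", text).strip()
assert reasoning == "weigh options"
assert answer == "The answer is 42."
```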
@@ -12,10 +12,5 @@ class ProviderZhipu(ProviderOpenAIOfficial):
         self,
         provider_config: dict,
         provider_settings: dict,
-        default_persona=None,
     ) -> None:
-        super().__init__(
-            provider_config,
-            provider_settings,
-            default_persona,
-        )
+        super().__init__(provider_config, provider_settings)
@@ -5,6 +5,10 @@ from typing import Any

 from deprecated import deprecated

+from astrbot.core.agent.hooks import BaseAgentRunHooks
+from astrbot.core.agent.message import Message
+from astrbot.core.agent.runners.tool_loop_agent_runner import ToolLoopAgentRunner
+from astrbot.core.agent.tool import ToolSet
 from astrbot.core.astrbot_config_mgr import AstrBotConfigManager
 from astrbot.core.config.astrbot_config import AstrBotConfig
 from astrbot.core.conversation_mgr import ConversationManager
@@ -13,10 +17,10 @@ from astrbot.core.knowledge_base.kb_mgr import KnowledgeBaseManager
 from astrbot.core.message.message_event_result import MessageChain
 from astrbot.core.persona_mgr import PersonaManager
 from astrbot.core.platform import Platform
-from astrbot.core.platform.astr_message_event import MessageSesion
+from astrbot.core.platform.astr_message_event import AstrMessageEvent, MessageSesion
 from astrbot.core.platform.manager import PlatformManager
 from astrbot.core.platform_message_history_mgr import PlatformMessageHistoryManager
-from astrbot.core.provider.entities import ProviderType
+from astrbot.core.provider.entities import LLMResponse, ProviderRequest, ProviderType
 from astrbot.core.provider.func_tool_manager import FunctionTool, FunctionToolManager
 from astrbot.core.provider.manager import ProviderManager
 from astrbot.core.provider.provider import (
@@ -31,6 +35,7 @@ from astrbot.core.star.filter.platform_adapter_type import (
     PlatformAdapterType,
 )

+from ..exceptions import ProviderNotFoundError
 from .filter.command import CommandFilter
 from .filter.regex import RegexFilter
 from .star import StarMetadata, star_map, star_registry
@@ -75,6 +80,153 @@ class Context:
         self.astrbot_config_mgr = astrbot_config_mgr
         self.kb_manager = knowledge_base_manager

+    async def llm_generate(
+        self,
+        *,
+        chat_provider_id: str,
+        prompt: str | None = None,
+        image_urls: list[str] | None = None,
+        tools: ToolSet | None = None,
+        system_prompt: str | None = None,
+        contexts: list[Message] | None = None,
+        **kwargs: Any,
+    ) -> LLMResponse:
+        """Call the LLM to generate a response. The method will not automatically execute tool calls. If you want to use tool calls, please use `tool_loop_agent()`.
+
+        .. versionadded:: 4.5.7 (sdk)
+
+        Args:
+            chat_provider_id: The chat provider ID to use.
+            prompt: The prompt to send to the LLM. If `contexts` and `prompt` are both provided, `prompt` will be appended as the last user message.
+            image_urls: List of image URLs to include in the prompt. If `contexts` and `prompt` are both provided, `image_urls` will be appended to the last user message.
+            tools: ToolSet of tools available to the LLM.
+            system_prompt: System prompt to guide the LLM's behavior. If provided, it will always be inserted as the first system message in the context.
+            contexts: Context messages for the LLM.
+            **kwargs: Additional keyword arguments for LLM generation, OpenAI compatible.
+
+        Raises:
+            ChatProviderNotFoundError: If the specified chat provider ID is not found.
+            Exception: For other errors during LLM generation.
+        """
+        prov = await self.provider_manager.get_provider_by_id(chat_provider_id)
+        if not prov or not isinstance(prov, Provider):
+            raise ProviderNotFoundError(f"Provider {chat_provider_id} not found")
+        llm_resp = await prov.text_chat(
+            prompt=prompt,
+            image_urls=image_urls,
+            func_tool=tools,
+            contexts=contexts,
+            system_prompt=system_prompt,
+            **kwargs,
+        )
+        return llm_resp
+
+    async def tool_loop_agent(
+        self,
+        *,
+        event: AstrMessageEvent,
+        chat_provider_id: str,
+        prompt: str | None = None,
+        image_urls: list[str] | None = None,
+        tools: ToolSet | None = None,
+        system_prompt: str | None = None,
+        contexts: list[Message] | None = None,
+        max_steps: int = 30,
+        tool_call_timeout: int = 60,
+        **kwargs: Any,
+    ) -> LLMResponse:
+        """Run an agent loop that allows the LLM to call tools iteratively until a final answer is produced.
+        If you do not pass the agent_context parameter, the method will create a new agent context.
+
+        .. versionadded:: 4.5.7 (sdk)
+
+        Args:
+            chat_provider_id: The chat provider ID to use.
+            prompt: The prompt to send to the LLM. If `contexts` and `prompt` are both provided, `prompt` will be appended as the last user message.
+            image_urls: List of image URLs to include in the prompt. If `contexts` and `prompt` are both provided, `image_urls` will be appended to the last user message.
+            tools: ToolSet of tools available to the LLM.
+            system_prompt: System prompt to guide the LLM's behavior. If provided, it will always be inserted as the first system message in the context.
+            contexts: Context messages for the LLM.
+            max_steps: Maximum number of tool calls before stopping the loop.
+            **kwargs: Additional keyword arguments. The kwargs will not be passed to the LLM directly for now, but can include:
+                agent_hooks: BaseAgentRunHooks[AstrAgentContext] - hooks to run during agent execution
+                agent_context: AstrAgentContext - context to use for the agent
+
+        Returns:
+            The final LLMResponse after tool calls are completed.
+
+        Raises:
+            ChatProviderNotFoundError: If the specified chat provider ID is not found.
+            Exception: For other errors during LLM generation.
+        """
+        # Import here to avoid circular imports
+        from astrbot.core.astr_agent_context import (
+            AgentContextWrapper,
+            AstrAgentContext,
+        )
+        from astrbot.core.astr_agent_tool_exec import FunctionToolExecutor
+
+        prov = await self.provider_manager.get_provider_by_id(chat_provider_id)
+        if not prov or not isinstance(prov, Provider):
+            raise ProviderNotFoundError(f"Provider {chat_provider_id} not found")

+        agent_hooks = kwargs.get("agent_hooks") or BaseAgentRunHooks[AstrAgentContext]()
+        agent_context = kwargs.get("agent_context")
+
+        context_ = []
+        for msg in contexts or []:
+            if isinstance(msg, Message):
+                context_.append(msg.model_dump())
+            else:
+                context_.append(msg)
+
+        request = ProviderRequest(
+            prompt=prompt,
+            image_urls=image_urls or [],
+            func_tool=tools,
+            contexts=context_,
+            system_prompt=system_prompt or "",
+        )
+        if agent_context is None:
+            agent_context = AstrAgentContext(
+                context=self,
+                event=event,
+            )
+        agent_runner = ToolLoopAgentRunner()
+        tool_executor = FunctionToolExecutor()
+        await agent_runner.reset(
+            provider=prov,
+            request=request,
+            run_context=AgentContextWrapper(
+                context=agent_context,
+                tool_call_timeout=tool_call_timeout,
+            ),
+            tool_executor=tool_executor,
+            agent_hooks=agent_hooks,
+            streaming=kwargs.get("stream", False),
+        )
+        async for _ in agent_runner.step_until_done(max_steps):
+            pass
+        llm_resp = agent_runner.get_final_llm_resp()
+        if not llm_resp:
+            raise Exception("Agent did not produce a final LLM response")
+        return llm_resp
+
+    async def get_current_chat_provider_id(self, umo: str) -> str:
+        """Get the ID of the currently used chat provider.
+
+        Args:
+            umo(str): unified_message_origin value. If provided and the user has enabled provider session isolation, the provider preferred by that session will be used.
+
+        Raises:
+            ProviderNotFoundError: If the specified chat provider is not found.
+        """
+        prov = self.get_using_provider(umo)
+        if not prov:
+            raise ProviderNotFoundError("Provider not found")
+        return prov.meta().id
+
     def get_registered_star(self, star_name: str) -> StarMetadata | None:
         """根据插件名获取插件的 Metadata"""
         for star in star_registry:
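
As a usage sketch, the two new entry points could be called from a plugin handler like this (the provider ID "openai_default" is an illustrative placeholder and error handling is omitted; only the method signatures come from the diff):

# Inside a star.Star subclass; self.context is the Context instance above
async def on_message(self, event: AstrMessageEvent):
    # Plain generation: tool calls, if any, are returned but not executed
    resp = await self.context.llm_generate(
        chat_provider_id="openai_default",
        prompt="Summarize this conversation in one sentence.",
    )
    # Agent loop: tools are executed iteratively until a final answer is produced
    final = await self.context.tool_loop_agent(
        event=event,
        chat_provider_id="openai_default",
        prompt="Look up the weather, then suggest an outfit.",
        max_steps=10,
    )
    return resp.completion_text, final.completion_text
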
@@ -107,10 +259,6 @@ class Context:
         """
         return self.provider_manager.llm_tools.deactivate_llm_tool(name)

-    def register_provider(self, provider: Provider):
-        """注册一个 LLM Provider(Chat_Completion 类型)。"""
-        self.provider_manager.provider_insts.append(provider)
-
     def get_provider_by_id(
         self,
         provider_id: str,
@@ -189,45 +337,6 @@ class Context:
             return self._config
         return self.astrbot_config_mgr.get_conf(umo)

-    def get_db(self) -> BaseDatabase:
-        """获取 AstrBot 数据库。"""
-        return self._db
-
-    def get_event_queue(self) -> Queue:
-        """获取事件队列。"""
-        return self._event_queue
-
-    @deprecated(version="4.0.0", reason="Use get_platform_inst instead")
-    def get_platform(self, platform_type: PlatformAdapterType | str) -> Platform | None:
-        """获取指定类型的平台适配器。
-
-        该方法已经过时,请使用 get_platform_inst 方法。(>= AstrBot v4.0.0)
-        """
-        for platform in self.platform_manager.platform_insts:
-            name = platform.meta().name
-            if isinstance(platform_type, str):
-                if name == platform_type:
-                    return platform
-            elif (
-                name in ADAPTER_NAME_2_TYPE
-                and ADAPTER_NAME_2_TYPE[name] & platform_type
-            ):
-                return platform
-
-    def get_platform_inst(self, platform_id: str) -> Platform | None:
-        """获取指定 ID 的平台适配器实例。
-
-        Args:
-            platform_id (str): 平台适配器的唯一标识符。你可以通过 event.get_platform_id() 获取。
-
-        Returns:
-            Platform: 平台适配器实例,如果未找到则返回 None。
-
-        """
-        for platform in self.platform_manager.platform_insts:
-            if platform.meta().id == platform_id:
-                return platform
-
     async def send_message(
         self,
         session: str | MessageSesion,
@@ -300,6 +409,49 @@ class Context:
         以下的方法已经不推荐使用。请从 AstrBot 文档查看更好的注册方式。
         """

+    def get_event_queue(self) -> Queue:
+        """获取事件队列。"""
+        return self._event_queue
+
+    @deprecated(version="4.0.0", reason="Use get_platform_inst instead")
+    def get_platform(self, platform_type: PlatformAdapterType | str) -> Platform | None:
+        """获取指定类型的平台适配器。
+
+        该方法已经过时,请使用 get_platform_inst 方法。(>= AstrBot v4.0.0)
+        """
+        for platform in self.platform_manager.platform_insts:
+            name = platform.meta().name
+            if isinstance(platform_type, str):
+                if name == platform_type:
+                    return platform
+            elif (
+                name in ADAPTER_NAME_2_TYPE
+                and ADAPTER_NAME_2_TYPE[name] & platform_type
+            ):
+                return platform
+
+    def get_platform_inst(self, platform_id: str) -> Platform | None:
+        """获取指定 ID 的平台适配器实例。
+
+        Args:
+            platform_id (str): 平台适配器的唯一标识符。你可以通过 event.get_platform_id() 获取。
+
+        Returns:
+            Platform: 平台适配器实例,如果未找到则返回 None。
+
+        """
+        for platform in self.platform_manager.platform_insts:
+            if platform.meta().id == platform_id:
+                return platform
+
+    def get_db(self) -> BaseDatabase:
+        """获取 AstrBot 数据库。"""
+        return self._db
+
+    def register_provider(self, provider: Provider):
+        """注册一个 LLM Provider(Chat_Completion 类型)。"""
+        self.provider_manager.provider_insts.append(provider)
+
     def register_llm_tool(
         self,
         name: str,
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import re
 from collections.abc import Awaitable, Callable
 from typing import Any

@@ -11,7 +12,7 @@ from astrbot.core.agent.handoff import HandoffTool
 from astrbot.core.agent.hooks import BaseAgentRunHooks
 from astrbot.core.agent.tool import FunctionTool
 from astrbot.core.astr_agent_context import AstrAgentContext
-from astrbot.core.provider.func_tool_manager import SUPPORTED_TYPES
+from astrbot.core.provider.func_tool_manager import PY_TO_JSON_TYPE, SUPPORTED_TYPES
 from astrbot.core.provider.register import llm_tools

 from ..filter.command import CommandFilter
@@ -417,18 +418,37 @@ def register_llm_tool(name: str | None = None, **kwargs):
     docstring = docstring_parser.parse(func_doc)
     args = []
     for arg in docstring.params:
-        if arg.type_name not in SUPPORTED_TYPES:
+        sub_type_name = None
+        type_name = arg.type_name
+        if not type_name:
+            raise ValueError(
+                f"LLM 函数工具 {awaitable.__module__}_{llm_tool_name} 的参数 {arg.arg_name} 缺少类型注释。",
+            )
+        # parse type_name to handle cases like "list[string]"
+        match = re.match(r"(\w+)\[(\w+)\]", type_name)
+        if match:
+            type_name = match.group(1)
+            sub_type_name = match.group(2)
+        type_name = PY_TO_JSON_TYPE.get(type_name, type_name)
+        if sub_type_name:
+            sub_type_name = PY_TO_JSON_TYPE.get(sub_type_name, sub_type_name)
+        if type_name not in SUPPORTED_TYPES or (
+            sub_type_name and sub_type_name not in SUPPORTED_TYPES
+        ):
             raise ValueError(
                 f"LLM 函数工具 {awaitable.__module__}_{llm_tool_name} 不支持的参数类型:{arg.type_name}",
             )
-        args.append(
-            {
-                "type": arg.type_name,
-                "name": arg.arg_name,
-                "description": arg.description,
-            },
-        )
-        # print(llm_tool_name, registering_agent)
+        arg_json_schema = {
+            "type": type_name,
+            "name": arg.arg_name,
+            "description": arg.description,
+        }
+        if sub_type_name:
+            if type_name == "array":
+                arg_json_schema["items"] = {"type": sub_type_name}
+        args.append(arg_json_schema)

     if not registering_agent:
         doc_desc = docstring.description.strip() if docstring.description else ""
         md = get_handler_or_create(awaitable, EventType.OnCallingFuncToolEvent)
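
To illustrate what the reworked parameter parsing accepts, here is a standalone sketch of the same mapping (the PY_TO_JSON_TYPE table below is a stand-in for the one imported from func_tool_manager, whose exact contents are not shown in this diff):

import re

# Stand-in mapping; the real table lives in astrbot.core.provider.func_tool_manager
PY_TO_JSON_TYPE = {"str": "string", "int": "number", "float": "number",
                   "bool": "boolean", "list": "array", "dict": "object"}

def param_schema(type_name: str) -> dict:
    sub_type_name = None
    match = re.match(r"(\w+)\[(\w+)\]", type_name)  # handles e.g. "list[str]"
    if match:
        type_name, sub_type_name = match.group(1), match.group(2)
    type_name = PY_TO_JSON_TYPE.get(type_name, type_name)
    schema = {"type": type_name}
    if sub_type_name and type_name == "array":
        schema["items"] = {"type": PY_TO_JSON_TYPE.get(sub_type_name, sub_type_name)}
    return schema

print(param_schema("list[str]"))  # {'type': 'array', 'items': {'type': 'string'}}
print(param_schema("string"))     # {'type': 'string'}
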
@@ -204,6 +204,8 @@ class ChatRoute(Route):
         ):
             # 追加机器人消息
             new_his = {"type": "bot", "message": result_text}
+            if "reasoning" in result:
+                new_his["reasoning"] = result["reasoning"]
             await self.platform_history_mgr.insert(
                 platform_id="webchat",
                 user_id=webchat_conv_id,
changelogs/v4.5.7.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+## What's Changed
+
+1. New: support custom request headers for OpenAI API providers ([#3581](https://github.com/AstrBotDevs/AstrBot/issues/3581))
+2. New: show the thinking process of reasoning ("Thinking") models in WebChat; support quick switching between streaming and non-streaming output. ([#3632](https://github.com/AstrBotDevs/AstrBot/issues/3632))
+3. New: streamline how plugins call LLMs and agents by adding several convenience methods to the Context class ([#3636](https://github.com/AstrBotDevs/AstrBot/issues/3636))
+4. Improved: better fallback strategy for messaging platforms that do not support streaming output ([#3547](https://github.com/AstrBotDevs/AstrBot/issues/3547))
+5. Improved: queue concurrent requests within the same session (umo) to avoid context mix-ups caused by concurrency ([#3607](https://github.com/AstrBotDevs/AstrBot/issues/3607))
+6. Improved: polish the WebUI login page and the Changelog page
+7. Fixed: the "select knowledge base" button rendered incorrectly when a knowledge base name was too long ([#3582](https://github.com/AstrBotDevs/AstrBot/issues/3582))
+8. Fixed: a deadlock when sending segmented messages in some cases (introduced by PR #3607)
+9. Fixed: some commands not taking effect on the DingTalk adapter ([#3634](https://github.com/AstrBotDevs/AstrBot/issues/3634))
+10. Misc: add the missing send_streaming method to some adapters ([#3545](https://github.com/AstrBotDevs/AstrBot/issues/3545))
changelogs/v4.5.8.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+## What's Changed
+
+Hotfix for 4.5.7.
+
+fix: images could not be sent; sending failed with `pydantic_core._pydantic_core.ValidationError`
@@ -146,21 +146,6 @@
         <span>Hello, I'm</span>
         <span class="bot-name">AstrBot ⭐</span>
       </div>
-      <div class="welcome-hint markdown-content">
-        <span>{{ t('core.common.type') }}</span>
-        <code>help</code>
-        <span>{{ tm('shortcuts.help') }} 😊</span>
-      </div>
-      <div class="welcome-hint markdown-content">
-        <span>{{ t('core.common.longPress') }}</span>
-        <code>Ctrl + B</code>
-        <span>{{ tm('shortcuts.voiceRecord') }} 🎤</span>
-      </div>
-      <div class="welcome-hint markdown-content">
-        <span>{{ t('core.common.press') }}</span>
-        <code>Ctrl + V</code>
-        <span>{{ tm('shortcuts.pasteImage') }} 🏞️</span>
-      </div>
     </div>

     <!-- 输入区域 -->
@@ -1031,17 +1016,26 @@ export default {
             "content": bot_resp
           });
         } else if (chunk_json.type === 'plain') {
+          const chain_type = chunk_json.chain_type || 'normal';
+
           if (!in_streaming) {
             message_obj = {
               type: 'bot',
-              message: this.ref(chunk_json.data),
+              message: this.ref(chain_type === 'reasoning' ? '' : chunk_json.data),
+              reasoning: this.ref(chain_type === 'reasoning' ? chunk_json.data : ''),
             }
             this.messages.push({
               "content": message_obj
             });
             in_streaming = true;
           } else {
-            message_obj.message.value += chunk_json.data;
+            if (chain_type === 'reasoning') {
+              // Append to reasoning content
+              message_obj.reasoning.value += chunk_json.data;
+            } else {
+              // Append to normal message
+              message_obj.message.value += chunk_json.data;
+            }
           }
         } else if (chunk_json.type === 'update_title') {
           // 更新对话标题
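
For reference, the stream payloads this handler distinguishes look roughly like the following (shape inferred from the field accesses above; the server-side serialization itself is not part of this diff):

# What the frontend reads: chunk_json.type, chunk_json.chain_type, chunk_json.data
reasoning_chunk = {"type": "plain", "chain_type": "reasoning", "data": "Weighing both options..."}
normal_chunk = {"type": "plain", "data": "Here is my answer."}  # chain_type falls back to 'normal'

Reasoning chunks accumulate into message_obj.reasoning, everything else into message_obj.message.
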
@@ -37,6 +37,19 @@
           </v-avatar>
           <div class="bot-message-content">
             <div class="message-bubble bot-bubble">
+              <!-- Reasoning Block (Collapsible) -->
+              <div v-if="msg.content.reasoning && msg.content.reasoning.trim()" class="reasoning-container">
+                <div class="reasoning-header" @click="toggleReasoning(index)">
+                  <v-icon size="small" class="reasoning-icon">
+                    {{ isReasoningExpanded(index) ? 'mdi-chevron-down' : 'mdi-chevron-right' }}
+                  </v-icon>
+                  <span class="reasoning-label">{{ tm('reasoning.thinking') }}</span>
+                </div>
+                <div v-if="isReasoningExpanded(index)" class="reasoning-content">
+                  <div v-html="md.render(msg.content.reasoning)" class="markdown-content reasoning-text"></div>
+                </div>
+              </div>
+
               <!-- Text -->
               <div v-if="msg.content.message && msg.content.message.trim()"
                 v-html="md.render(msg.content.message)" class="markdown-content"></div>
@@ -125,7 +138,8 @@ export default {
       copiedMessages: new Set(),
       isUserNearBottom: true,
       scrollThreshold: 1,
-      scrollTimer: null
+      scrollTimer: null,
+      expandedReasoning: new Set(), // Track which reasoning blocks are expanded
     };
   },
   mounted() {
@@ -142,6 +156,22 @@ export default {
     }
   },
   methods: {
+    // Toggle reasoning expansion state
+    toggleReasoning(messageIndex) {
+      if (this.expandedReasoning.has(messageIndex)) {
+        this.expandedReasoning.delete(messageIndex);
+      } else {
+        this.expandedReasoning.add(messageIndex);
+      }
+      // Force reactivity
+      this.expandedReasoning = new Set(this.expandedReasoning);
+    },
+
+    // Check if reasoning is expanded
+    isReasoningExpanded(messageIndex) {
+      return this.expandedReasoning.has(messageIndex);
+    },
+
     // 复制代码到剪贴板
     copyCodeToClipboard(code) {
       navigator.clipboard.writeText(code).then(() => {
@@ -348,7 +378,7 @@ export default {
 @keyframes fadeIn {
   from {
     opacity: 0;
-    transform: translateY(10px);
+    transform: translateY(0);
   }

   to {
@@ -539,6 +569,69 @@ export default {
 .fade-in {
   animation: fadeIn 0.3s ease-in-out;
 }
+
+/* Reasoning 区块样式 */
+.reasoning-container {
+  margin-bottom: 12px;
+  margin-top: 6px;
+  border: 1px solid var(--v-theme-border);
+  border-radius: 8px;
+  overflow: hidden;
+  width: fit-content;
+}
+
+.v-theme--dark .reasoning-container {
+  background-color: rgba(103, 58, 183, 0.08);
+}
+
+.reasoning-header {
+  display: inline-flex;
+  align-items: center;
+  padding: 8px 8px;
+  cursor: pointer;
+  user-select: none;
+  transition: background-color 0.2s ease;
+  border-radius: 8px;
+}
+
+.reasoning-header:hover {
+  background-color: rgba(103, 58, 183, 0.08);
+}
+
+.v-theme--dark .reasoning-header:hover {
+  background-color: rgba(103, 58, 183, 0.15);
+}
+
+.reasoning-icon {
+  margin-right: 6px;
+  color: var(--v-theme-secondary);
+  transition: transform 0.2s ease;
+}
+
+.reasoning-label {
+  font-size: 13px;
+  font-weight: 500;
+  color: var(--v-theme-secondary);
+  letter-spacing: 0.3px;
+}
+
+.reasoning-content {
+  padding: 0px 12px;
+  border-top: 1px solid var(--v-theme-border);
+  color: gray;
+  animation: fadeIn 0.2s ease-in-out;
+  font-style: italic;
+}
+
+.reasoning-text {
+  font-size: 14px;
+  line-height: 1.6;
+  color: var(--v-theme-secondaryText);
+}
+
+.v-theme--dark .reasoning-text {
+  opacity: 0.85;
+}
 </style>

 <style>
@@ -5,6 +5,9 @@
     v-if="selectedProviderId && selectedModelName" @click="openDialog">
     {{ selectedProviderId }} / {{ selectedModelName }}
   </v-chip>
+  <v-chip variant="tonal" rounded="xl" size="x-small" v-else @click="openDialog">
+    选择模型
+  </v-chip>

   <!-- 选择提供商和模型对话框 -->
   <v-dialog v-model="showDialog" max-width="800" persistent>
@@ -154,7 +154,8 @@ function hasVisibleItemsAfter(items, currentIndex) {
   <div class="w-100" v-if="!itemMeta?._special">
     <!-- Select input for JSON selector -->
     <v-select v-if="itemMeta?.options" v-model="createSelectorModel(itemKey).value"
-      :items="itemMeta?.options" :disabled="itemMeta?.readonly" density="compact" variant="outlined"
+      :items="itemMeta?.labels ? itemMeta.options.map((value, index) => ({ title: itemMeta.labels[index] || value, value: value })) : itemMeta.options"
+      :disabled="itemMeta?.readonly" density="compact" variant="outlined"
       class="config-field" hide-details></v-select>

     <!-- Code Editor for JSON selector -->
@@ -56,6 +56,9 @@
     "linkText": "View master branch commit history (click copy on the right to copy)",
     "confirm": "Confirm Switch"
   },
+  "releaseNotes": {
+    "title": "Release Notes"
+  },
   "dashboardUpdate": {
     "title": "Update Dashboard to Latest Version Only",
     "currentVersion": "Current Version",
@@ -63,6 +63,9 @@
     "on": "Stream",
     "off": "Normal"
   },
+  "reasoning": {
+    "thinking": "Thinking Process"
+  },
   "connection": {
     "title": "Connection Status Notice",
     "message": "The system detected that the chat connection needs to be re-established.",
@@ -55,6 +55,9 @@
     "linkText": "查看 master 分支提交记录(点击右边的 copy 即可复制)",
     "confirm": "确定切换"
   },
+  "releaseNotes": {
+    "title": "更新日志"
+  },
   "dashboardUpdate": {
     "title": "单独更新管理面板到最新版本",
     "currentVersion": "当前版本",
@@ -63,6 +63,9 @@
     "on": "流式",
     "off": "普通"
   },
+  "reasoning": {
+    "thinking": "思考过程"
+  },
   "connection": {
     "title": "连接状态提醒",
     "message": "系统检测到聊天连接需要重新建立。",
@@ -43,6 +43,11 @@ let devCommits = ref<{ sha: string; date: string; message: string }[]>([]);
 let updatingDashboardLoading = ref(false);
 let installLoading = ref(false);

+// Release Notes Modal
+let releaseNotesDialog = ref(false);
+let selectedReleaseNotes = ref('');
+let selectedReleaseTag = ref('');
+
 let tab = ref(0);

 const releasesHeader = computed(() => [
@@ -283,6 +288,12 @@ function toggleDarkMode() {
   theme.global.name.value = newTheme;
 }

+function openReleaseNotesDialog(body: string, tag: string) {
+  selectedReleaseNotes.value = body;
+  selectedReleaseTag.value = tag;
+  releaseNotesDialog.value = true;
+}
+
 getVersion();
 checkUpdate();

@@ -417,13 +428,10 @@ commonStore.getStartTime();
             </v-chip>
           </div>
         </template>
-        <template v-slot:item.body="{ item }: { item: { body: string } }">
-          <v-tooltip :text="item.body">
-            <template v-slot:activator="{ props }">
-              <v-btn v-bind="props" rounded="xl" variant="tonal" color="primary" size="x-small">{{
-                t('core.header.updateDialog.table.view') }}</v-btn>
-            </template>
-          </v-tooltip>
+        <template v-slot:item.body="{ item }: { item: { body: string; tag_name: string } }">
+          <v-btn @click="openReleaseNotesDialog(item.body, item.tag_name)" rounded="xl" variant="tonal"
+            color="primary" size="x-small">{{
+            t('core.header.updateDialog.table.view') }}</v-btn>
         </template>
         <template v-slot:item.switch="{ item }: { item: { tag_name: string } }">
           <v-btn @click="switchVersion(item.tag_name)" rounded="xl" variant="plain" color="primary">
@@ -502,6 +510,25 @@ commonStore.getStartTime();
       </v-card>
     </v-dialog>

+    <!-- Release Notes Modal -->
+    <v-dialog v-model="releaseNotesDialog" max-width="800">
+      <v-card>
+        <v-card-title class="text-h5">
+          {{ t('core.header.updateDialog.releaseNotes.title') }}: {{ selectedReleaseTag }}
+        </v-card-title>
+        <v-card-text
+          style="font-size: 14px; max-height: 400px; overflow-y: auto;"
+          v-html="md.render(selectedReleaseNotes)" class="markdown-content">
+        </v-card-text>
+        <v-card-actions>
+          <v-spacer></v-spacer>
+          <v-btn color="blue-darken-1" variant="text" @click="releaseNotesDialog = false">
+            {{ t('core.common.close') }}
+          </v-btn>
+        </v-card-actions>
+      </v-card>
+    </v-dialog>
+
     <!-- 账户对话框 -->
     <v-dialog v-model="dialog" persistent :max-width="$vuetify.display.xs ? '90%' : '500'">
       <template v-slot:activator="{ props }">
@@ -31,6 +31,7 @@ export function getProviderIcon(type) {
     '302ai': 'https://registry.npmmirror.com/@lobehub/icons-static-svg/1.53.0/files/icons/ai302-color.svg',
     'microsoft': 'https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/microsoft.svg',
     'vllm': 'https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/vllm.svg',
+    'groq': 'https://registry.npmmirror.com/@lobehub/icons-static-svg/latest/files/icons/groq.svg',
   };
   return icons[type] || '';
 }
@@ -3,7 +3,7 @@ import traceback
 from astrbot.api import star
 from astrbot.api.event import AstrMessageEvent, filter
 from astrbot.api.message_components import Image, Plain
-from astrbot.api.provider import ProviderRequest
+from astrbot.api.provider import LLMResponse, ProviderRequest
 from astrbot.core import logger
 from astrbot.core.provider.sources.dify_source import ProviderDify

@@ -334,6 +334,17 @@ class Main(star.Star):
         except BaseException as e:
             logger.error(f"ltm: {e}")

+    @filter.on_llm_response()
+    async def inject_reasoning(self, event: AstrMessageEvent, resp: LLMResponse):
+        """在 LLM 响应后基于配置注入思考过程文本"""
+        umo = event.unified_msg_origin
+        cfg = self.context.get_config(umo).get("provider_settings", {})
+        show_reasoning = cfg.get("display_reasoning_text", False)
+        if show_reasoning and resp.reasoning_content:
+            resp.completion_text = (
+                f"🤔 思考: {resp.reasoning_content}\n\n{resp.completion_text}"
+            )
+
     @filter.after_message_sent()
     async def after_llm_req(self, event: AstrMessageEvent):
         """在 LLM 请求后记录对话"""
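
The same hook surface is available to any plugin; a minimal sketch of the pattern used above (the class and method names are illustrative, while the decorator and handler signature come from the diff):

from astrbot.api import star
from astrbot.api.event import AstrMessageEvent, filter
from astrbot.api.provider import LLMResponse

class ReasoningTagger(star.Star):
    @filter.on_llm_response()
    async def tag_reasoning(self, event: AstrMessageEvent, resp: LLMResponse):
        # Prepend the model's reasoning, if the provider returned any
        if resp.reasoning_content:
            resp.completion_text = f"[thought] {resp.reasoning_content}\n\n{resp.completion_text}"

Hooks mutate the LLMResponse in place, which is how the built-in handler above injects the thinking text.
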
@@ -1,208 +0,0 @@
-import json
-import logging
-import re
-from typing import Any
-
-from openai.types.chat.chat_completion import ChatCompletion
-
-from astrbot.api.event import AstrMessageEvent, filter
-from astrbot.api.provider import LLMResponse
-from astrbot.api.star import Context, Star
-
-try:
-    # 谨慎引入,避免在未安装 google-genai 的环境下报错
-    from google.genai.types import GenerateContentResponse
-except Exception:  # pragma: no cover - 兼容无此依赖的运行环境
-    GenerateContentResponse = None  # type: ignore
-
-
-class R1Filter(Star):
-    def __init__(self, context: Context):
-        super().__init__(context)
-
-    @filter.on_llm_response()
-    async def resp(self, event: AstrMessageEvent, response: LLMResponse):
-        cfg = self.context.get_config(umo=event.unified_msg_origin).get(
-            "provider_settings",
-            {},
-        )
-        show_reasoning = cfg.get("display_reasoning_text", False)
-
-        # --- Gemini: 过滤/展示 thought:true 片段 ---
-        # Gemini 可能在 parts 中注入 {"thought": true, "text": "..."}
-        # 官方 SDK 默认不会返回此字段。
-        if GenerateContentResponse is not None and isinstance(
-            response.raw_completion,
-            GenerateContentResponse,
-        ):
-            thought_text, answer_text = self._extract_gemini_texts(
-                response.raw_completion,
-            )
-
-            if thought_text or answer_text:
-                # 有明确的思考/正文分离信号,则按配置处理
-                if show_reasoning:
-                    merged = (
-                        (f"🤔思考:{thought_text}\n\n" if thought_text else "")
-                        + (answer_text or "")
-                    ).strip()
-                    if merged:
-                        response.completion_text = merged
-                        return
-                # 默认隐藏思考内容,仅保留正文
-                elif answer_text:
-                    response.completion_text = answer_text
-                    return
-
-        # --- 非 Gemini 或无明确 thought:true 情况 ---
-        if show_reasoning:
-            # 显示推理内容的处理逻辑
-            if (
-                response
-                and response.raw_completion
-                and isinstance(response.raw_completion, ChatCompletion)
-                and len(response.raw_completion.choices) > 0
-                and response.raw_completion.choices[0].message
-            ):
-                message = response.raw_completion.choices[0].message
-                reasoning_content = ""  # 初始化 reasoning_content
-
-                # 检查 Groq deepseek-r1-distill-llama-70b 模型的 'reasoning' 属性
-                if hasattr(message, "reasoning") and message.reasoning:
-                    reasoning_content = message.reasoning
-                # 检查 DeepSeek deepseek-reasoner 模型的 'reasoning_content'
-                elif (
-                    hasattr(message, "reasoning_content") and message.reasoning_content
-                ):
-                    reasoning_content = message.reasoning_content
-
-                if reasoning_content:
-                    response.completion_text = (
-                        f"🤔思考:{reasoning_content}\n\n{message.content}"
-                    )
-                else:
-                    response.completion_text = message.content
-        else:
-            # 过滤推理标签的处理逻辑
-            completion_text = response.completion_text
-
-            # 检查并移除 <think> 标签
-            if r"<think>" in completion_text or r"</think>" in completion_text:
-                # 移除配对的标签及其内容
-                completion_text = re.sub(
-                    r"<think>.*?</think>",
-                    "",
-                    completion_text,
-                    flags=re.DOTALL,
-                ).strip()
-
-                # 移除可能残留的单个标签
-                completion_text = (
-                    completion_text.replace(r"<think>", "")
-                    .replace(r"</think>", "")
-                    .strip()
-                )
-
-                response.completion_text = completion_text
-
-    # ------------------------
-    # helpers
-    # ------------------------
-    def _get_part_dict(self, p: Any) -> dict:
-        """优先使用 SDK 标准序列化方法获取字典,失败则逐级回退。
-
-        顺序: model_dump → model_dump_json → json → to_dict → dict → __dict__。
-        """
-        for getter in ("model_dump", "model_dump_json", "json", "to_dict", "dict"):
-            fn = getattr(p, getter, None)
-            if callable(fn):
-                try:
-                    result = fn()
-                    if isinstance(result, (str, bytes)):
-                        try:
-                            if isinstance(result, bytes):
-                                result = result.decode("utf-8", "ignore")
-                            return json.loads(result) or {}
-                        except json.JSONDecodeError:
-                            continue
-                    if isinstance(result, dict):
-                        return result
-                except (AttributeError, TypeError):
-                    continue
-                except Exception as e:
-                    logging.exception(
-                        f"Unexpected error when calling {getter} on {type(p).__name__}: {e}",
-                    )
-                    continue
-        try:
-            d = getattr(p, "__dict__", None)
-            if isinstance(d, dict):
-                return d
-        except (AttributeError, TypeError):
-            pass
-        except Exception as e:
-            logging.exception(
-                f"Unexpected error when accessing __dict__ on {type(p).__name__}: {e}",
-            )
-        return {}
-
-    def _is_thought_part(self, p: Any) -> bool:
-        """判断是否为思考片段。
-
-        规则:
-        1) 直接 thought 属性
-        2) 字典字段 thought 或 metadata.thought
-        3) data/raw/extra/_raw 中嵌入的 JSON 串包含 thought: true
-        """
-        try:
-            if getattr(p, "thought", False):
-                return True
-        except Exception:
-            # best-effort
-            pass
-
-        d = self._get_part_dict(p)
-        if d.get("thought") is True:
-            return True
-        meta = d.get("metadata")
-        if isinstance(meta, dict) and meta.get("thought") is True:
-            return True
-        for k in ("data", "raw", "extra", "_raw"):
-            v = d.get(k)
-            if isinstance(v, (str, bytes)):
-                try:
-                    if isinstance(v, bytes):
-                        v = v.decode("utf-8", "ignore")
-                    parsed = json.loads(v)
-                    if isinstance(parsed, dict) and parsed.get("thought") is True:
-                        return True
-                except json.JSONDecodeError:
-                    continue
-        return False
-
-    def _extract_gemini_texts(self, resp: Any) -> tuple[str, str]:
-        """从 GenerateContentResponse 中提取 (思考文本, 正文文本)。"""
-        try:
-            cand0 = next(iter(getattr(resp, "candidates", []) or []), None)
-            if not cand0:
-                return "", ""
-            content = getattr(cand0, "content", None)
-            parts = getattr(content, "parts", None) or []
-        except (AttributeError, TypeError, ValueError):
-            return "", ""
-
-        thought_buf: list[str] = []
-        answer_buf: list[str] = []
-        for p in parts:
-            txt = getattr(p, "text", None)
-            if txt is None:
-                continue
-            txt_str = str(txt).strip()
-            if not txt_str:
-                continue
-            if self._is_thought_part(p):
-                thought_buf.append(txt_str)
-            else:
-                answer_buf.append(txt_str)
-
-        return "\n".join(thought_buf).strip(), "\n".join(answer_buf).strip()
@@ -1,5 +0,0 @@
-name: thinking_filter
-desc: 可选择是否过滤推理模型的思考内容
-author: Soulter
-version: 1.0.0
-repo: https://astrbot.app
@@ -1,6 +1,6 @@
 [project]
 name = "AstrBot"
-version = "4.5.6"
+version = "4.5.8"
 description = "Easy-to-use multi-platform LLM chatbot and development framework"
 readme = "README.md"
 requires-python = ">=3.10"