"""Prompt builder for the agent system."""
|
|
|
|
import json
|
|
from typing import Any
|
|
|
|
from alfred.infrastructure.persistence import get_memory
|
|
|
|
from .registry import Tool
|
|
|
|
|
|
class PromptBuilder:
    """Builds system prompts for the agent with memory context.

    Renders the agent's memory layers into prompt sections:
    long-term configuration (``memory.ltm``), short-term conversational
    state (``memory.stm``), and episodic events (``memory.episodic``:
    searches, pending questions, downloads, errors, background events).
    Also exposes the tool registry in the LLM function-calling format.
    """

    def __init__(self, tools: dict[str, Tool]):
        # Tool registry keyed by tool name; drives both the API spec
        # and the human-readable tool listing in the prompt.
        self.tools = tools

    def build_tools_spec(self) -> list[dict[str, Any]]:
        """Build the tool specification for the LLM API.

        Returns:
            One ``{"type": "function", "function": {...}}`` entry per
            registered tool, in registry iteration order.
        """
        # Comprehension replaces the original append loop — same
        # entries, same order.
        return [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters,
                },
            }
            for tool in self.tools.values()
        ]

    def _format_tools_description(self) -> str:
        """Format tools with their descriptions and parameters.

        Returns:
            One two-line entry per tool; empty string when no tools are
            registered so ``build_system_prompt`` can skip the section.
        """
        if not self.tools:
            return ""
        return "\n".join(
            f"- {tool.name}: {tool.description}\n"
            f"  Parameters: {json.dumps(tool.parameters, ensure_ascii=False)}"
            for tool in self.tools.values()
        )

    def _format_episodic_context(self, memory) -> str:
        """Format episodic memory context for the prompt.

        Args:
            memory: Memory object whose ``episodic`` namespace exposes
                ``last_search_results``, ``pending_question``,
                ``active_downloads``, ``recent_errors`` and
                ``background_events`` (dicts / lists of dicts).

        Returns:
            Newline-joined context; empty string when nothing is pending.
        """
        lines = []

        if memory.episodic.last_search_results:
            results = memory.episodic.last_search_results
            result_list = results.get("results", [])
            lines.append(
                f"\nLAST SEARCH: '{results.get('query')}' ({len(result_list)} results)"
            )
            # Show only the first 5 results to keep the prompt compact.
            for i, result in enumerate(result_list[:5]):
                name = result.get("name", "Unknown")
                lines.append(f"  {i + 1}. {name}")
            if len(result_list) > 5:
                lines.append(f"  ... and {len(result_list) - 5} more")

        if memory.episodic.pending_question:
            question = memory.episodic.pending_question
            lines.append(f"\nPENDING QUESTION: {question.get('question')}")
            lines.append(f"  Type: {question.get('type')}")
            # Fetch once (the original looked the key up twice).
            options = question.get("options")
            if options:
                lines.append(f"  Options: {len(options)}")

        if memory.episodic.active_downloads:
            lines.append(f"\nACTIVE DOWNLOADS: {len(memory.episodic.active_downloads)}")
            for dl in memory.episodic.active_downloads[:3]:
                lines.append(f"  - {dl.get('name')}: {dl.get('progress', 0)}%")

        if memory.episodic.recent_errors:
            lines.append("\nRECENT ERRORS (up to 3):")
            for error in memory.episodic.recent_errors[-3:]:
                lines.append(
                    f"  - Action '{error.get('action')}' failed: {error.get('error')}"
                )

        # Surface background events the user has not been shown yet.
        unread = [e for e in memory.episodic.background_events if not e.get("read")]
        if unread:
            lines.append(f"\nUNREAD EVENTS: {len(unread)}")
            for event in unread[:3]:
                lines.append(f"  - {event.get('type')}: {event.get('data')}")

        return "\n".join(lines)

    def _format_stm_context(self, memory) -> str:
        """Format short-term memory context for the prompt.

        Args:
            memory: Memory object whose ``stm`` namespace exposes
                ``current_workflow`` (dict or None), ``current_topic``,
                ``extracted_entities`` (dict) and ``language``.

        Returns:
            Newline-joined context; empty string when STM is empty.
        """
        lines = []

        if memory.stm.current_workflow:
            workflow = memory.stm.current_workflow
            lines.append(
                f"CURRENT WORKFLOW: {workflow.get('type')} (stage: {workflow.get('stage')})"
            )
            if workflow.get("target"):
                lines.append(f"  Target: {workflow.get('target')}")

        if memory.stm.current_topic:
            lines.append(f"CURRENT TOPIC: {memory.stm.current_topic}")

        if memory.stm.extracted_entities:
            lines.append("EXTRACTED ENTITIES:")
            for key, value in memory.stm.extracted_entities.items():
                lines.append(f"  - {key}: {value}")

        if memory.stm.language:
            lines.append(f"CONVERSATION LANGUAGE: {memory.stm.language}")

        return "\n".join(lines)

    def _format_config_context(self, memory) -> str:
        """Format configuration context from long-term memory.

        Always returns a section (header plus either the ``ltm.config``
        key/value pairs or a "(no configuration set)" placeholder).
        """
        lines = ["CURRENT CONFIGURATION:"]
        if memory.ltm.config:
            for key, value in memory.ltm.config.items():
                lines.append(f"  - {key}: {value}")
        else:
            lines.append("  (no configuration set)")
        return "\n".join(lines)

    def build_system_prompt(self) -> str:
        """Build the complete system prompt.

        Assembles the base instruction, language-detection directive,
        tool listing, configuration, STM/episodic context, rules and
        examples into one prompt string.
        """
        # Get memory once for all context formatting.
        memory = get_memory()

        # Base instruction
        base = "You are a helpful AI assistant for managing a media library."

        # Language instruction
        language_instruction = (
            "Your first task is to determine the user's language from their message "
            "and use the `set_language` tool if it's different from the current one. "
            "After that, proceed to help the user."
        )

        # Available tools (section omitted entirely when no tools exist)
        tools_desc = self._format_tools_description()
        tools_section = f"\nAVAILABLE TOOLS:\n{tools_desc}" if tools_desc else ""

        # Configuration — prefix a newline to separate from prior section
        config_section = self._format_config_context(memory)
        if config_section:
            config_section = f"\n{config_section}"

        # STM context
        stm_context = self._format_stm_context(memory)
        if stm_context:
            stm_context = f"\n{stm_context}"

        # Episodic context (its own entries already carry leading "\n")
        episodic_context = self._format_episodic_context(memory)

        # Important rules
        rules = """
IMPORTANT RULES:
- Use tools to accomplish tasks
- When search results are available, reference them by index (e.g., "add_torrent_by_index")
- Always confirm actions with the user before executing destructive operations
- Provide clear, concise responses
"""

        # Examples
        examples = """
EXAMPLES:
- User: "Find Inception" → Use find_media_imdb_id, then find_torrent
- User: "download the 3rd one" → Use add_torrent_by_index with index=3
- User: "List my downloads" → Use list_folder with folder_type="download"
"""

        return f"""{base}

{language_instruction}
{tools_section}
{config_section}
{stm_context}
{episodic_context}
{rules}
{examples}
"""
|