# agent/agent.py
from typing import Any, Dict, List

import json

from .llm import DeepSeekClient
from .memory import Memory
from .registry import make_tools, Tool
from .prompts import PromptBuilder


class Agent:
    """LLM-driven agent that answers a user request via an iterative tool loop.

    The LLM is prompted to either answer in plain text or emit a JSON
    "intent" naming a tool to run. Tool results are fed back into the
    conversation until the LLM answers in text or the iteration cap is hit.
    """

    def __init__(self, llm: DeepSeekClient, memory: Memory, max_tool_iterations: int = 5):
        self.llm = llm
        self.memory = memory
        # Tool registry keyed by tool name; tools may read/write memory.
        self.tools: Dict[str, Tool] = make_tools(memory)
        self.prompt_builder = PromptBuilder(self.tools)
        # Safety cap on the think->act loop so a misbehaving LLM cannot
        # chain tool calls forever.
        self.max_tool_iterations = max_tool_iterations

    def _parse_intent(self, text: str) -> Dict[str, Any] | None:
        """Parse *text* as a JSON tool intent.

        Returns the decoded dict when it has the shape
        ``{"action": {"name": <str>, ...}, ...}``; returns ``None`` for
        anything else (meaning the LLM answered in plain text).
        """
        try:
            data = json.loads(text)
        except json.JSONDecodeError:
            return None

        if not isinstance(data, dict):
            return None

        action = data.get("action")
        if not isinstance(action, dict):
            return None

        if not isinstance(action.get("name"), str):
            return None

        return data

    def _execute_action(self, intent: Dict[str, Any]) -> Dict[str, Any]:
        """Run the tool named in *intent* and return its result.

        Failures are returned as structured error dicts rather than raised,
        so the LLM can observe the error and recover on the next iteration.
        """
        action = intent["action"]
        name: str = action["name"]
        args = action.get("args", {}) or {}
        if not isinstance(args, dict):
            # Guard: e.g. "args": [...] from the LLM would crash on **args.
            return {"error": "bad_args", "message": "args must be a JSON object"}

        tool = self.tools.get(name)
        if not tool:
            return {"error": "unknown_tool", "tool": name}

        try:
            result = tool.func(**args)
        except TypeError as e:
            # Wrong or missing arguments for the tool's signature.
            return {"error": "bad_args", "message": str(e)}
        except Exception as e:
            # Any other tool failure: report it instead of crashing the step.
            return {"error": "tool_error", "tool": name, "message": str(e)}

        return result

    def _record_exchange(self, user_input: str, response: str) -> None:
        """Persist the user/assistant exchange to memory history."""
        self.memory.append_history("user", user_input)
        self.memory.append_history("assistant", response)

    def step(self, user_input: str) -> str:
        """
        Execute one agent step with iterative tool execution:
        - Build system prompt
        - Query LLM
        - Loop: If JSON intent -> execute tool, add result to conversation, query LLM again
        - Continue until LLM responds with text (no tool call) or max iterations reached
        - Return final text response
        """
        print("Starting a new step...")
        print("User input:", user_input)

        print("Current memory state:", self.memory.data)

        # Build system prompt using PromptBuilder
        system_prompt = self.prompt_builder.build_system_prompt(self.memory.data)

        # Initialize conversation with user input
        messages: List[Dict[str, Any]] = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_input},
        ]

        # Tool execution loop
        iteration = 0
        while iteration < self.max_tool_iterations:
            print(f"\n--- Iteration {iteration + 1} ---")

            # Get LLM response
            llm_response = self.llm.complete(messages)
            print("LLM response:", llm_response)

            # Try to parse as tool intent
            intent = self._parse_intent(llm_response)

            if not intent:
                # No tool call - this is the final text response
                print("No tool intent detected, returning final response")
                self._record_exchange(user_input, llm_response)
                return llm_response

            # Tool call detected - execute it
            print("Intent detected:", intent)
            tool_result = self._execute_action(intent)
            print("Tool result:", tool_result)

            # Feed the tool call and its result back into the conversation
            # (result is sent as a "user" turn for LLMs without a tool role).
            messages.append({
                "role": "assistant",
                "content": json.dumps(intent, ensure_ascii=False),
            })
            messages.append({
                "role": "user",
                "content": json.dumps(
                    {"tool_result": tool_result},
                    ensure_ascii=False,
                ),
            })

            iteration += 1

        # Max iterations reached - ask LLM for final response
        print(f"\n--- Max iterations ({self.max_tool_iterations}) reached, requesting final response ---")
        messages.append({
            "role": "user",
            "content": "Merci pour ces résultats. Peux-tu maintenant me donner une réponse finale en texte naturel ?"
        })

        final_response = self.llm.complete(messages)
        self._record_exchange(user_input, final_response)
        return final_response