6 Commits

32 changed files with 1109 additions and 593 deletions

View File

@@ -22,8 +22,7 @@ venv
.venv .venv
env env
.env .env
.env.* .env-
# IDE # IDE
.vscode .vscode
.idea .idea

View File

@@ -1,53 +1,93 @@
# Configuration MAX_HISTORY_MESSAGES=10
LIBRECHAT_VERSION=v0.8.1 MAX_TOOL_ITERATIONS=10
RAG_VERSION=v0.7.0 REQUEST_TIMEOUT=30
# Keys # LLM Settings
# - Deepseek API LLM_TEMPERATURE=0.2
DEEPSEEK_API_KEY=
# - Google API # Persistence
GOOGLE_API_KEY= DATA_STORAGE_DIR=data
#GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite #TODO:Update models
# - Anthropic API # Network configuration
ANTHROPIC_API_KEY= HOST=0.0.0.0
PORT=3080
# - Kimi API # Build informations (Synced with pyproject.toml via bootstrap)
KIMI_API_KEY= ALFRED_VERSION=
IMAGE_NAME=
LIBRECHAT_VERSION=
PYTHON_VERSION=
PYTHON_VERSION_SHORT=
RAG_VERSION=
RUNNER=
SERVICE_NAME=
# - ChatGPT/Open API # --- SECURITY KEYS (CRITICAL) ---
OPENAI_API_KEY= # These are used for session tokens and encrypting sensitive data in MongoDB.
# If you lose these, you lose access to encrypted stored credentials.
# - Themoviedb.org API (media metadata)
TMDB_API_KEY=
# - Security keys
JWT_SECRET= JWT_SECRET=
JWT_REFRESH_SECRET= JWT_REFRESH_SECRET=
CREDS_KEY= CREDS_KEY=
CREDS_IV= CREDS_IV=
# Local LLM # --- DATABASES (AUTO-SECURED) ---
OLLAMA_BASE_URL= # Alfred uses MongoDB for application state and PostgreSQL for Vector RAG.
OLLAMA_MODEL= # Passwords will be generated as 24-character secure tokens if left blank.
# Alfred Configuration # MongoDB (Application Data)
LLM_PROVIDER=deepseek MONGO_URI=
MONGO_HOST=mongodb
MONGO_PORT=27017
MONGO_USER=alfred
MONGO_PASSWORD=
MONGO_DB_NAME=alfred
# Memory storage directory (inside container) # PostgreSQL (Vector Database / RAG)
MEMORY_STORAGE_DIR=/data/memory POSTGRES_URI=
POSTGRES_HOST=vectordb
# qBittorrent Configuration POSTGRES_PORT=5432
QBITTORRENT_URL= POSTGRES_USER=alfred
QBITTORRENT_USERNAME=admin
QBITTORRENT_PASSWORD=adminadmin
# Debug Options
DEBUG_LOGGING=false
DEBUG_CONSOLE=false
# Postgres (RAG)
POSTGRES_DB=
POSTGRES_USER=
POSTGRES_PASSWORD= POSTGRES_PASSWORD=
POSTGRES_DB_NAME=alfred
# --- EXTERNAL SERVICES ---
# Media Metadata (Required)
# Get your key at https://www.themoviedb.org/
TMDB_API_KEY=
TMDB_BASE_URL=https://api.themoviedb.org/3
# qBittorrent integration
QBITTORRENT_URL=http://qbittorrent:16140
QBITTORRENT_USERNAME=admin
QBITTORRENT_PASSWORD=
QBITTORRENT_PORT=16140
# Meilisearch
MEILI_ENABLED=FALSE
MEILI_NO_ANALYTICS=TRUE
MEILI_HOST=http://meilisearch:7700
MEILI_MASTER_KEY=
# --- LLM CONFIGURATION ---
# Providers: 'local', 'openai', 'anthropic', 'deepseek', 'google', 'kimi'
DEFAULT_LLM_PROVIDER=local
# Local LLM (Ollama)
OLLAMA_BASE_URL=http://ollama:11434
OLLAMA_MODEL=llama3.3:latest
# --- API KEYS (OPTIONAL) ---
# Fill only the ones you intend to use.
ANTHROPIC_API_KEY=
DEEPSEEK_API_KEY=
GOOGLE_API_KEY=
KIMI_API_KEY=
OPENAI_API_KEY=
# --- RAG ENGINE ---
# Enable/Disable the Retrieval Augmented Generation system
RAG_ENABLED=TRUE
RAG_API_URL=http://rag_api:8000
RAG_API_PORT=8000
EMBEDDINGS_PROVIDER=ollama
EMBEDDINGS_MODEL=nomic-embed-text

View File

@@ -43,6 +43,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \
uv pip install --system -r pyproject.toml; \ uv pip install --system -r pyproject.toml; \
fi fi
COPY scripts/ ./scripts/
COPY .env.example ./
# =========================================== # ===========================================
# Stage 2: Testing # Stage 2: Testing
# =========================================== # ===========================================
@@ -60,7 +63,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \
fi fi
COPY alfred/ ./alfred COPY alfred/ ./alfred
COPY tests/ ./tests COPY scripts ./scripts
COPY tests/ ./tests
# =========================================== # ===========================================
# Stage 3: Runtime # Stage 3: Runtime
@@ -69,10 +73,11 @@ FROM python:${PYTHON_VERSION}-slim-bookworm AS runtime
ARG PYTHON_VERSION_SHORT ARG PYTHON_VERSION_SHORT
# TODO: A-t-on encore besoin de toutes les clés ?
ENV LLM_PROVIDER=deepseek \ ENV LLM_PROVIDER=deepseek \
MEMORY_STORAGE_DIR=/data/memory \ MEMORY_STORAGE_DIR=/data/memory \
PYTHONDONTWRITEBYTECODE=1 \ PYTHONDONTWRITEBYTECODE=1 \
PYTHONPATH=/home/appuser/app \ PYTHONPATH=/home/appuser \
PYTHONUNBUFFERED=1 PYTHONUNBUFFERED=1
# Install runtime dependencies (needs root) # Install runtime dependencies (needs root)
@@ -85,8 +90,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN useradd -m -u 1000 -s /bin/bash appuser RUN useradd -m -u 1000 -s /bin/bash appuser
# Create data directories (needs root for /data) # Create data directories (needs root for /data)
RUN mkdir -p /data/memory /data/logs \ RUN mkdir -p /data /logs \
&& chown -R appuser:appuser /data && chown -R appuser:appuser /data /logs
# Switch to non-root user # Switch to non-root user
USER appuser USER appuser
@@ -100,9 +105,12 @@ COPY --from=builder /usr/local/bin /usr/local/bin
# Copy application code (already owned by appuser) # Copy application code (already owned by appuser)
COPY --chown=appuser:appuser alfred/ ./alfred COPY --chown=appuser:appuser alfred/ ./alfred
COPY --chown=appuser:appuser scripts/ ./scripts
COPY --chown=appuser:appuser .env.example ./
COPY --chown=appuser:appuser pyproject.toml ./
# Create volumes for persistent data # Create volumes for persistent data
VOLUME ["/data/memory", "/data/logs"] VOLUME ["/data", "/logs"]
# Expose port # Expose port
EXPOSE 8000 EXPOSE 8000
@@ -111,5 +119,4 @@ EXPOSE 8000
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:8000/health', timeout=5).raise_for_status()" || exit 1 CMD python -c "import requests; requests.get('http://localhost:8000/health', timeout=5).raise_for_status()" || exit 1
# Run the application CMD ["python", "-m", "uvicorn", "alfred.app:app", "--host", "0.0.0.0", "--port", "8000"]
CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]

115
Makefile
View File

@@ -1,48 +1,37 @@
.DEFAULT_GOAL := help .DEFAULT_GOAL := help
# --- Config --- # --- Load Config from pyproject.toml ---
export IMAGE_NAME := alfred_media_organizer -include .env.make
export LIBRECHAT_VERSION := v0.8.1
export PYTHON_VERSION := 3.14.2 # --- Profiles management ---
export PYTHON_VERSION_SHORT := 3.14 # Usage: make up p=rag,meili
export RAG_VERSION := v0.7.0 p ?= core
export RUNNER := poetry PROFILES_PARAM := COMPOSE_PROFILES=$(p)
export SERVICE_NAME := alfred
# --- Commands --- # --- Commands ---
CLI := python3 cli.py
DOCKER_COMPOSE := docker compose DOCKER_COMPOSE := docker compose
DOCKER_BUILD := docker build \ DOCKER_BUILD := docker build --no-cache \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \ --build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \ --build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
--build-arg RUNNER=$(RUNNER) --build-arg RUNNER=$(RUNNER)
# --- Phony --- # --- Phony ---
.PHONY: setup status check .PHONY: .env up down restart logs ps shell build build-test install update \
.PHONY: up down restart logs ps shell install-hooks test coverage lint format clean major minor patch help
.PHONY: build build-test
.PHONY: install update install-hooks
.PHONY: test coverage lint format clean prune
.PHONY: major minor patch
.PHONY: help
# --- Setup --- # --- Setup ---
setup: .env .env.make:
@echo "Initializing environment..." @echo "Initializing environment..."
@$(CLI) setup \ @python scripts/bootstrap.py \
&& echo "✓ Environment ready" \ && echo "✓ Environment ready" \
|| (echo "✗ Setup failed" && exit 1) || (echo "✗ Environment setup failed" && exit 1)
status: bootstrap: .env .env.make
@$(CLI) status
check:
@$(CLI) check
# --- Docker --- # --- Docker ---
up: check up: .env
@echo "Starting containers..." @echo "Starting containers with profiles: [$(p)]..."
@$(DOCKER_COMPOSE) up -d --remove-orphans \ @$(PROFILES_PARAM) $(DOCKER_COMPOSE) up -d --remove-orphans \
&& echo "✓ Containers started" \ && echo "✓ Containers started" \
|| (echo "✗ Failed to start containers" && exit 1) || (echo "✗ Failed to start containers" && exit 1)
@@ -54,30 +43,30 @@ down:
restart: restart:
@echo "Restarting containers..." @echo "Restarting containers..."
@$(DOCKER_COMPOSE) restart \ @$(PROFILES_PARAM) $(DOCKER_COMPOSE) restart \
&& echo "✓ Containers restarted" \ && echo "✓ Containers restarted" \
|| (echo "✗ Failed to restart containers" && exit 1) || (echo "✗ Failed to restart containers" && exit 1)
logs: logs:
@echo "Following logs (Ctrl+C to exit)..." @echo "Following logs (Ctrl+C to exit)..."
@$(DOCKER_COMPOSE) logs -f @$(PROFILES_PARAM) $(DOCKER_COMPOSE) logs -f
ps: ps:
@echo "Container status:" @echo "Container status:"
@$(DOCKER_COMPOSE) ps @$(PROFILES_PARAM) $(DOCKER_COMPOSE) ps
shell: shell:
@echo "Opening shell in $(SERVICE_NAME)..." @echo "Opening shell in $(SERVICE_NAME)..."
@$(DOCKER_COMPOSE) exec $(SERVICE_NAME) /bin/bash @$(DOCKER_COMPOSE) exec $(SERVICE_NAME) /bin/bash
# --- Build --- # --- Build ---
build: check build: .env.make
@echo "Building image $(IMAGE_NAME):latest ..." @echo "Building image $(IMAGE_NAME):latest ..."
@$(DOCKER_BUILD) -t $(IMAGE_NAME):latest . \ @$(DOCKER_BUILD) -t $(IMAGE_NAME):latest . \
&& echo "✓ Build complete" \ && echo "✓ Build complete" \
|| (echo "✗ Build failed" && exit 1) || (echo "✗ Build failed" && exit 1)
build-test: check build-test: .env.make
@echo "Building test image $(IMAGE_NAME):test..." @echo "Building test image $(IMAGE_NAME):test..."
@$(DOCKER_BUILD) --target test -t $(IMAGE_NAME):test . \ @$(DOCKER_BUILD) --target test -t $(IMAGE_NAME):test . \
&& echo "✓ Test image built" \ && echo "✓ Test image built" \
@@ -90,18 +79,18 @@ install:
&& echo "✓ Dependencies installed" \ && echo "✓ Dependencies installed" \
|| (echo "✗ Installation failed" && exit 1) || (echo "✗ Installation failed" && exit 1)
update:
@echo "Updating dependencies with $(RUNNER)..."
@$(RUNNER) update \
&& echo "✓ Dependencies updated" \
|| (echo "✗ Update failed" && exit 1)
install-hooks: install-hooks:
@echo "Installing pre-commit hooks..." @echo "Installing pre-commit hooks..."
@$(RUNNER) run pre-commit install \ @$(RUNNER) run pre-commit install \
&& echo "✓ Hooks installed" \ && echo "✓ Hooks installed" \
|| (echo "✗ Hook installation failed" && exit 1) || (echo "✗ Hook installation failed" && exit 1)
update:
@echo "Updating dependencies with $(RUNNER)..."
@$(RUNNER) update \
&& echo "✓ Dependencies updated" \
|| (echo "✗ Update failed" && exit 1)
# --- Quality --- # --- Quality ---
test: test:
@echo "Running tests..." @echo "Running tests..."
@@ -133,12 +122,6 @@ clean:
@find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true @find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
@echo "✓ Cleanup complete" @echo "✓ Cleanup complete"
prune:
@echo "Pruning Docker system..."
@docker system prune -af \
&& echo "✓ Docker pruned" \
|| (echo "✗ Prune failed" && exit 1)
# --- Versioning --- # --- Versioning ---
major minor patch: _check-main major minor patch: _check-main
@echo "Bumping $@ version..." @echo "Bumping $@ version..."
@@ -151,6 +134,7 @@ major minor patch: _check-main
&& echo "✓ Tags pushed" \ && echo "✓ Tags pushed" \
|| (echo "✗ Push failed" && exit 1) || (echo "✗ Push failed" && exit 1)
# CI/CD helpers
_ci-dump-config: _ci-dump-config:
@echo "image_name=$(IMAGE_NAME)" @echo "image_name=$(IMAGE_NAME)"
@echo "python_version=$(PYTHON_VERSION)" @echo "python_version=$(PYTHON_VERSION)"
@@ -173,31 +157,26 @@ _check-main:
# --- Help --- # --- Help ---
help: help:
@echo "Usage: make [target]" @echo "Cleverly Crafted Unawareness - Management Commands"
@echo "" @echo ""
@echo "Setup:" @echo "Usage: make [target] [p=profile1,profile2]"
@echo " setup Initialize .env"
@echo " status Show project status"
@echo "" @echo ""
@echo "Docker:" @echo "Docker:"
@echo " up Start containers" @echo " up Start containers (default profile: core)"
@echo " down Stop containers" @echo " Example: make up p=rag,meili"
@echo " restart Restart containers" @echo " down Stop all containers"
@echo " logs Follow logs" @echo " restart Restart containers (supports p=...)"
@echo " ps Container status" @echo " logs Follow logs (supports p=...)"
@echo " shell Shell into container" @echo " ps Status of containers"
@echo " build Build image" @echo " shell Open bash in the core container"
@echo " build Build the production Docker image"
@echo "" @echo ""
@echo "Dev:" @echo "Dev & Quality:"
@echo " install Install dependencies" @echo " setup Bootstrap .env and security keys"
@echo " update Update dependencies" @echo " install Install dependencies via $(RUNNER)"
@echo " test Run tests" @echo " test Run pytest suite"
@echo " coverage Run tests with coverage" @echo " coverage Run tests and generate HTML report"
@echo " lint Lint code" @echo " lint/format Quality and style checks"
@echo " format Format code"
@echo " clean Clean artifacts"
@echo "" @echo ""
@echo "Release:" @echo "Release:"
@echo " patch Bump patch version" @echo " major|minor|patch Bump version and push tags (main branch only)"
@echo " minor Bump minor version"
@echo " major Bump major version"

0
alfred/__init__.py Normal file
View File

View File

@@ -1,6 +1,7 @@
"""Agent module for media library management.""" """Agent module for media library management."""
from alfred.settings import settings
from .agent import Agent from .agent import Agent
from .config import settings
__all__ = ["Agent", "settings"] __all__ = ["Agent", "settings"]

View File

@@ -6,8 +6,8 @@ from collections.abc import AsyncGenerator
from typing import Any from typing import Any
from alfred.infrastructure.persistence import get_memory from alfred.infrastructure.persistence import get_memory
from alfred.settings import settings
from .config import settings
from .prompts import PromptBuilder from .prompts import PromptBuilder
from .registry import Tool, make_tools from .registry import Tool, make_tools
@@ -21,17 +21,20 @@ class Agent:
Uses OpenAI-compatible tool calling API. Uses OpenAI-compatible tool calling API.
""" """
def __init__(self, llm, max_tool_iterations: int = 5): def __init__(self, settings, llm, max_tool_iterations: int = 5):
""" """
Initialize the agent. Initialize the agent.
Args: Args:
settings: Application settings instance
llm: LLM client with complete() method llm: LLM client with complete() method
max_tool_iterations: Maximum number of tool execution iterations max_tool_iterations: Maximum number of tool execution iterations
""" """
self.settings = settings
self.llm = llm self.llm = llm
self.tools: dict[str, Tool] = make_tools() self.tools: dict[str, Tool] = make_tools(settings)
self.prompt_builder = PromptBuilder(self.tools) self.prompt_builder = PromptBuilder(self.tools)
self.settings = settings
self.max_tool_iterations = max_tool_iterations self.max_tool_iterations = max_tool_iterations
def step(self, user_input: str) -> str: def step(self, user_input: str) -> str:
@@ -78,7 +81,7 @@ class Agent:
tools_spec = self.prompt_builder.build_tools_spec() tools_spec = self.prompt_builder.build_tools_spec()
# Tool execution loop # Tool execution loop
for _iteration in range(self.max_tool_iterations): for _iteration in range(self.settings.max_tool_iterations):
# Call LLM with tools # Call LLM with tools
llm_result = self.llm.complete(messages, tools=tools_spec) llm_result = self.llm.complete(messages, tools=tools_spec)
@@ -230,7 +233,7 @@ class Agent:
tools_spec = self.prompt_builder.build_tools_spec() tools_spec = self.prompt_builder.build_tools_spec()
# Tool execution loop # Tool execution loop
for _iteration in range(self.max_tool_iterations): for _iteration in range(self.settings.max_tool_iterations):
# Call LLM with tools # Call LLM with tools
llm_result = self.llm.complete(messages, tools=tools_spec) llm_result = self.llm.complete(messages, tools=tools_spec)

View File

@@ -1,115 +0,0 @@
"""Configuration management with validation."""
import os
from dataclasses import dataclass, field
from pathlib import Path
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class ConfigurationError(Exception):
"""Raised when configuration is invalid."""
pass
@dataclass
class Settings:
"""Application settings loaded from environment variables."""
# LLM Configuration
deepseek_api_key: str = field(
default_factory=lambda: os.getenv("DEEPSEEK_API_KEY", "")
)
deepseek_base_url: str = field(
default_factory=lambda: os.getenv(
"DEEPSEEK_BASE_URL", "https://api.deepseek.com"
)
)
model: str = field(
default_factory=lambda: os.getenv("DEEPSEEK_MODEL", "deepseek-chat")
)
temperature: float = field(
default_factory=lambda: float(os.getenv("TEMPERATURE", "0.2"))
)
# TMDB Configuration
tmdb_api_key: str = field(default_factory=lambda: os.getenv("TMDB_API_KEY", ""))
tmdb_base_url: str = field(
default_factory=lambda: os.getenv(
"TMDB_BASE_URL", "https://api.themoviedb.org/3"
)
)
# Storage Configuration
memory_file: str = field(
default_factory=lambda: os.getenv("MEMORY_FILE", "memory.json")
)
# Security Configuration
max_tool_iterations: int = field(
default_factory=lambda: int(os.getenv("MAX_TOOL_ITERATIONS", "5"))
)
request_timeout: int = field(
default_factory=lambda: int(os.getenv("REQUEST_TIMEOUT", "30"))
)
# Memory Configuration
max_history_messages: int = field(
default_factory=lambda: int(os.getenv("MAX_HISTORY_MESSAGES", "10"))
)
def __post_init__(self):
"""Validate settings after initialization."""
self._validate()
def _validate(self) -> None:
"""Validate configuration values."""
# Validate temperature
if not 0.0 <= self.temperature <= 2.0:
raise ConfigurationError(
f"Temperature must be between 0.0 and 2.0, got {self.temperature}"
)
# Validate max_tool_iterations
if self.max_tool_iterations < 1 or self.max_tool_iterations > 20:
raise ConfigurationError(
f"max_tool_iterations must be between 1 and 20, got {self.max_tool_iterations}"
)
# Validate request_timeout
if self.request_timeout < 1 or self.request_timeout > 300:
raise ConfigurationError(
f"request_timeout must be between 1 and 300 seconds, got {self.request_timeout}"
)
# Validate URLs
if not self.deepseek_base_url.startswith(("http://", "https://")):
raise ConfigurationError(
f"Invalid deepseek_base_url: {self.deepseek_base_url}"
)
if not self.tmdb_base_url.startswith(("http://", "https://")):
raise ConfigurationError(f"Invalid tmdb_base_url: {self.tmdb_base_url}")
# Validate memory file path
memory_path = Path(self.memory_file)
if memory_path.exists() and not memory_path.is_file():
raise ConfigurationError(
f"memory_file exists but is not a file: {self.memory_file}"
)
def is_deepseek_configured(self) -> bool:
"""Check if DeepSeek API is properly configured."""
return bool(self.deepseek_api_key and self.deepseek_base_url)
def is_tmdb_configured(self) -> bool:
"""Check if TMDB API is properly configured."""
return bool(self.tmdb_api_key and self.tmdb_base_url)
# Global settings instance
settings = Settings()

View File

@@ -6,7 +6,8 @@ from typing import Any
import requests import requests
from requests.exceptions import HTTPError, RequestException, Timeout from requests.exceptions import HTTPError, RequestException, Timeout
from ..config import settings from alfred.settings import Settings, settings
from .exceptions import LLMAPIError, LLMConfigurationError from .exceptions import LLMAPIError, LLMConfigurationError
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -21,6 +22,7 @@ class DeepSeekClient:
base_url: str | None = None, base_url: str | None = None,
model: str | None = None, model: str | None = None,
timeout: int | None = None, timeout: int | None = None,
settings: Settings | None = None,
): ):
""" """
Initialize DeepSeek client. Initialize DeepSeek client.
@@ -34,10 +36,10 @@ class DeepSeekClient:
Raises: Raises:
LLMConfigurationError: If API key is missing LLMConfigurationError: If API key is missing
""" """
self.api_key = api_key or settings.deepseek_api_key self.api_key = api_key or self.settings.deepseek_api_key
self.base_url = base_url or settings.deepseek_base_url self.base_url = base_url or self.settings.deepseek_base_url
self.model = model or settings.model self.model = model or self.settings.deepseek_model
self.timeout = timeout or settings.request_timeout self.timeout = timeout or self.settings.request_timeout
if not self.api_key: if not self.api_key:
raise LLMConfigurationError( raise LLMConfigurationError(
@@ -94,7 +96,7 @@ class DeepSeekClient:
payload = { payload = {
"model": self.model, "model": self.model,
"messages": messages, "messages": messages,
"temperature": settings.temperature, "temperature": settings.llm_temperature,
} }
# Add tools if provided # Add tools if provided

View File

@@ -1,13 +1,13 @@
"""Ollama LLM client with robust error handling.""" """Ollama LLM client with robust error handling."""
import logging import logging
import os
from typing import Any from typing import Any
import requests import requests
from requests.exceptions import HTTPError, RequestException, Timeout from requests.exceptions import HTTPError, RequestException, Timeout
from ..config import settings from alfred.settings import Settings
from .exceptions import LLMAPIError, LLMConfigurationError from .exceptions import LLMAPIError, LLMConfigurationError
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -32,6 +32,7 @@ class OllamaClient:
model: str | None = None, model: str | None = None,
timeout: int | None = None, timeout: int | None = None,
temperature: float | None = None, temperature: float | None = None,
settings: Settings | None = None,
): ):
""" """
Initialize Ollama client. Initialize Ollama client.
@@ -45,13 +46,11 @@ class OllamaClient:
Raises: Raises:
LLMConfigurationError: If configuration is invalid LLMConfigurationError: If configuration is invalid
""" """
self.base_url = base_url or os.getenv( self.base_url = base_url or settings.ollama_base_url
"OLLAMA_BASE_URL", "http://localhost:11434" self.model = model or settings.ollama_model
)
self.model = model or os.getenv("OLLAMA_MODEL", "llama3.2")
self.timeout = timeout or settings.request_timeout self.timeout = timeout or settings.request_timeout
self.temperature = ( self.temperature = (
temperature if temperature is not None else settings.temperature temperature if temperature is not None else settings.llm_temperature
) )
if not self.base_url: if not self.base_url:

View File

@@ -78,10 +78,13 @@ def _create_tool_from_function(func: Callable) -> Tool:
) )
def make_tools() -> dict[str, Tool]: def make_tools(settings) -> dict[str, Tool]:
""" """
Create and register all available tools. Create and register all available tools.
Args:
settings: Application settings instance
Returns: Returns:
Dictionary mapping tool names to Tool objects Dictionary mapping tool names to Tool objects
""" """

View File

@@ -2,22 +2,21 @@
import json import json
import logging import logging
import os
import time import time
import uuid import uuid
from pathlib import Path
from typing import Any from typing import Any
from fastapi import FastAPI, HTTPException from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field, validator from pydantic import BaseModel, Field, validator
from alfred.agent.agent import Agent from alfred.agent.agent import Agent
from alfred.agent.config import settings
from alfred.agent.llm.deepseek import DeepSeekClient from alfred.agent.llm.deepseek import DeepSeekClient
from alfred.agent.llm.exceptions import LLMAPIError, LLMConfigurationError from alfred.agent.llm.exceptions import LLMAPIError, LLMConfigurationError
from alfred.agent.llm.ollama import OllamaClient from alfred.agent.llm.ollama import OllamaClient
from alfred.infrastructure.persistence import get_memory, init_memory from alfred.infrastructure.persistence import get_memory, init_memory
from alfred.settings import settings
logging.basicConfig( logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
@@ -30,37 +29,33 @@ app = FastAPI(
version="0.2.0", version="0.2.0",
) )
# TODO: Make a variable memory_path = Path(settings.data_storage) / "memory"
manifests = "manifests" init_memory(storage_dir=str(memory_path))
# Sécurité : on vérifie que le dossier existe pour ne pas faire planter l'app au démarrage logger.info(f"Memory context initialized (path: {memory_path})")
if os.path.exists(manifests):
app.mount("/manifests", StaticFiles(directory=manifests), name="manifests")
else:
print(
f"⚠️ ATTENTION : Le dossier '{manifests}' est introuvable. Le plugin ne marchera pas."
)
# Initialize memory context at startup
storage_dir = os.getenv("MEMORY_STORAGE_DIR", "memory_data")
init_memory(storage_dir=storage_dir)
logger.info(f"Memory context initialized (storage: {storage_dir})")
# Initialize LLM based on environment variable # Initialize LLM based on environment variable
llm_provider = os.getenv("LLM_PROVIDER", "deepseek").lower() llm_provider = settings.default_llm_provider.lower()
try: try:
if llm_provider == "ollama": if llm_provider == "local":
logger.info("Using Ollama LLM") logger.info("Using local Ollama LLM")
llm = OllamaClient() llm = OllamaClient(settings=settings)
else: elif llm_provider == "deepseek":
logger.info("Using DeepSeek LLM") logger.info("Using DeepSeek LLM")
llm = DeepSeekClient() llm = DeepSeekClient()
elif llm_provider == "claude":
raise ValueError(f"LLM provider not fully implemented: {llm_provider}")
else:
raise ValueError(f"Unknown LLM provider: {llm_provider}")
except LLMConfigurationError as e: except LLMConfigurationError as e:
logger.error(f"Failed to initialize LLM: {e}") logger.error(f"Failed to initialize LLM: {e}")
raise raise
# Initialize agent # Initialize agent
agent = Agent(llm=llm, max_tool_iterations=settings.max_tool_iterations) agent = Agent(
settings=settings, llm=llm, max_tool_iterations=settings.max_tool_iterations
)
logger.info("Agent Media API initialized") logger.info("Agent Media API initialized")
@@ -115,7 +110,7 @@ def extract_last_user_content(messages: list[dict[str, Any]]) -> str:
@app.get("/health") @app.get("/health")
async def health_check(): async def health_check():
"""Health check endpoint.""" """Health check endpoint."""
return {"status": "healthy", "version": "0.2.0"} return {"status": "healthy", "version": f"v{settings.alfred_version}"}
@app.get("/v1/models") @app.get("/v1/models")

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests import requests
from requests.exceptions import HTTPError, RequestException, Timeout from requests.exceptions import HTTPError, RequestException, Timeout
from alfred.agent.config import Settings, settings from alfred.settings import Settings, settings
from .dto import TorrentResult from .dto import TorrentResult
from .exceptions import KnabenAPIError, KnabenNotFoundError from .exceptions import KnabenAPIError, KnabenNotFoundError

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests import requests
from requests.exceptions import HTTPError, RequestException, Timeout from requests.exceptions import HTTPError, RequestException, Timeout
from alfred.agent.config import Settings, settings from alfred.settings import Settings, settings
from .dto import TorrentInfo from .dto import TorrentInfo
from .exceptions import QBittorrentAPIError, QBittorrentAuthError from .exceptions import QBittorrentAPIError, QBittorrentAuthError

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests import requests
from requests.exceptions import HTTPError, RequestException, Timeout from requests.exceptions import HTTPError, RequestException, Timeout
from alfred.agent.config import Settings, settings from alfred.settings import Settings, settings
from .dto import MediaResult from .dto import MediaResult
from .exceptions import ( from .exceptions import (

209
alfred/settings.py Normal file
View File

@@ -0,0 +1,209 @@
import secrets
from pathlib import Path
from typing import NamedTuple
import tomllib
from pydantic import Field, computed_field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
BASE_DIR = Path(__file__).resolve().parent.parent
ENV_FILE_PATH = BASE_DIR / ".env"
toml_path = BASE_DIR / "pyproject.toml"
class ConfigurationError(Exception):
"""Raised when configuration is invalid."""
pass
class ProjectVersions(NamedTuple):
"""
Immutable structure for project versions.
Forces explicit naming and prevents accidental swaps.
"""
librechat: str
rag: str
alfred: str
def get_versions_from_toml() -> ProjectVersions:
"""
Reads versioning information from pyproject.toml.
Returns the default value if the file or key is missing.
"""
if not toml_path.exists():
raise FileNotFoundError(f"pyproject.toml not found: {toml_path}")
with open(toml_path, "rb") as f:
data = tomllib.load(f)
try:
return ProjectVersions(
librechat=data["tool"]["alfred"]["settings"]["librechat_version"],
rag=data["tool"]["alfred"]["settings"]["rag_version"],
alfred=data["tool"]["poetry"]["version"],
)
except KeyError as e:
raise KeyError(f"Error: Missing key {e} in pyproject.toml") from e
# Load versions once
VERSIONS: ProjectVersions = get_versions_from_toml()
class Settings(BaseSettings):
model_config = SettingsConfigDict(
env_file=ENV_FILE_PATH,
env_file_encoding="utf-8",
extra="ignore",
case_sensitive=False,
)
# --- GENERAL SETTINGS ---
host: str = "0.0.0.0"
port: int = 3080
debug_logging: bool = False
debug_console: bool = False
data_storage: str = "data"
librechat_version: str = Field(VERSIONS.librechat, description="Librechat version")
rag_version: str = Field(VERSIONS.rag, description="RAG engine version")
alfred_version: str = Field(VERSIONS.alfred, description="Alfred version")
# --- CONTEXT SETTINGS ---
max_history_messages: int = 10
max_tool_iterations: int = 10
request_timeout: int = 30
# TODO: Finish
deepseek_base_url: str = "https://api.deepseek.com"
deepseek_model: str = "deepseek-chat"
# --- API KEYS ---
anthropic_api_key: str | None = Field(None, description="Claude API key")
deepseek_api_key: str | None = Field(None, description="Deepseek API key")
google_api_key: str | None = Field(None, description="Gemini API key")
kimi_api_key: str | None = Field(None, description="Kimi API key")
openai_api_key: str | None = Field(None, description="ChatGPT API key")
# --- SECURITY KEYS ---
# Generated automatically if not in .env to ensure "Secure by Default"
jwt_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
jwt_refresh_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
# We keep these for encryption of keys in MongoDB (AES-256 Hex format)
creds_key: str = Field(default_factory=lambda: secrets.token_hex(32))
creds_iv: str = Field(default_factory=lambda: secrets.token_hex(16))
# --- SERVICES ---
qbittorrent_url: str = "http://qbittorrent:16140"
qbittorrent_username: str = "admin"
qbittorrent_password: str = Field(default_factory=lambda: secrets.token_urlsafe(16))
mongo_host: str = "mongodb"
mongo_user: str = "alfred"
mongo_password: str = Field(
default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
)
mongo_port: int = 27017
mongo_db_name: str = "alfred"
@computed_field(repr=False)
@property
def mongo_uri(self) -> str:
return (
f"mongodb://{self.mongo_user}:{self.mongo_password}"
f"@{self.mongo_host}:{self.mongo_port}/{self.mongo_db_name}"
f"?authSource=admin"
)
postgres_host: str = "vectordb"
postgres_user: str = "alfred"
postgres_password: str = Field(
default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
)
postgres_port: int = 5432
postgres_db_name: str = "alfred"
@computed_field(repr=False)
@property
def postgres_uri(self) -> str:
return (
f"postgresql://{self.postgres_user}:{self.postgres_password}"
f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db_name}"
)
tmdb_api_key: str | None = Field(None, description="The Movie Database API key")
tmdb_base_url: str = "https://api.themoviedb.org/3"
# --- LLM PICKER & CONFIG ---
# Providers: 'local', 'deepseek', ...
default_llm_provider: str = "local"
ollama_base_url: str = "http://ollama:11434"
# Models: ...
ollama_model: str = "llama3.3:latest"
llm_temperature: float = 0.2
# --- RAG ENGINE ---
rag_enabled: bool = True # TODO: Handle False
rag_api_url: str = "http://rag_api:8000"
embeddings_provider: str = "ollama"
# Models: ...
embeddings_model: str = "nomic-embed-text"
# --- MEILISEARCH ---
meili_enabled: bool = Field(True, description="Enable meili")
meili_no_analytics: bool = True
meili_host: str = "http://meilisearch:7700"
meili_master_key: str = Field(
default_factory=lambda: secrets.token_urlsafe(32),
description="Master key for Meilisearch",
repr=False,
)
# --- VALIDATORS ---
@field_validator("llm_temperature")
@classmethod
def validate_temperature(cls, v: float) -> float:
if not 0.0 <= v <= 2.0:
raise ConfigurationError(
f"Temperature must be between 0.0 and 2.0, got {v}"
)
return v
@field_validator("max_tool_iterations")
@classmethod
def validate_max_iterations(cls, v: int) -> int:
if not 1 <= v <= 20:
raise ConfigurationError(
f"max_tool_iterations must be between 1 and 50, got {v}"
)
return v
@field_validator("request_timeout")
@classmethod
def validate_timeout(cls, v: int) -> int:
if not 1 <= v <= 300:
raise ConfigurationError(
f"request_timeout must be between 1 and 300 seconds, got {v}"
)
return v
@field_validator("deepseek_base_url", "tmdb_base_url")
@classmethod
def validate_url(cls, v: str, info) -> str:
if not v.startswith(("http://", "https://")):
raise ConfigurationError(f"Invalid {info.field_name}")
return v
def is_tmdb_configured(self):
return bool(self.tmdb_api_key)
def is_deepseek_configured(self):
return bool(self.deepseek_api_key)
def dump_safe(self):
return self.model_dump(exclude_none=False)
settings = Settings()

View File

@@ -1,4 +1,20 @@
services: services:
# - CORE SERVICES -
# --- .ENV INIT ---
alfred-init:
container_name: alfred-init
build:
context: .
target: builder
args:
PYTHON_VERSION: ${PYTHON_VERSION}
PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT}
RUNNER: ${RUNNER}
command: python scripts/bootstrap.py
networks:
- alfred-net
# --- MAIN APPLICATION ---
alfred: alfred:
container_name: alfred-core container_name: alfred-core
build: build:
@@ -8,47 +24,38 @@ services:
PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT} PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT}
RUNNER: ${RUNNER} RUNNER: ${RUNNER}
depends_on: depends_on:
- librechat alfred-init:
condition: service_completed_successfully
restart: unless-stopped restart: unless-stopped
env_file: env_file:
- .env - path: .env
environment: required: true
# LLM Configuration
LLM_PROVIDER: ${LLM_PROVIDER:-deepseek}
DEEPSEEK_API_KEY: ${DEEPSEEK_API_KEY:-}
# Memory storage
MEMORY_STORAGE_DIR: /data/memory
# External services
TMDB_API_KEY: ${TMDB_API_KEY:-}
QBITTORRENT_URL: ${QBITTORRENT_URL:-}
QBITTORRENT_USERNAME: ${QBITTORRENT_USERNAME:-}
QBITTORRENT_PASSWORD: ${QBITTORRENT_PASSWORD:-}
volumes: volumes:
- ./data/memory:/data/memory - ./data:/data
- ./logs:/data/logs - ./logs:/logs
# TODO: Development: mount code for hot reload (comment out in production) # TODO: Hot reload (comment out in production)
# - ./alfred:/app/alfred #- ./alfred:/home/appuser/alfred
networks:
- alfred-net
# --- FRONTEND LIBRECHAT ---
librechat: librechat:
container_name: alfred-librechat container_name: alfred-librechat
image: ghcr.io/danny-avila/librechat:${LIBRECHAT_VERSION} image: ghcr.io/danny-avila/librechat:${LIBRECHAT_VERSION}
depends_on: depends_on:
- mongodb alfred-init:
- meilisearch condition: service_completed_successfully
- rag_api mongodb:
condition: service_healthy
restart: unless-stopped restart: unless-stopped
env_file: env_file:
- .env - path: .env
required: true
environment: environment:
- HOST=0.0.0.0 # Remap value name
- MONGO_URI=mongodb://mongodb:27017/LibreChat - SEARCH=${MEILI_ENABLED}
- MEILI_HOST=http://meilisearch:7700
- RAG_PORT=${RAG_PORT:-8000}
- RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
ports: ports:
- "${LIBRECHAT_PORT:-3080}:3080" - "${PORT}:${PORT}"
volumes: volumes:
- ./data/librechat/images:/app/client/public/images - ./data/librechat/images:/app/client/public/images
- ./data/librechat/uploads:/app/client/uploads - ./data/librechat/uploads:/app/client/uploads
@@ -56,47 +63,144 @@ services:
# Mount custom endpoint # Mount custom endpoint
- ./librechat/manifests:/app/manifests:ro - ./librechat/manifests:/app/manifests:ro
- ./librechat/librechat.yaml:/app/librechat.yaml:ro - ./librechat/librechat.yaml:/app/librechat.yaml:ro
networks:
- alfred-net
# --- DATABASE #1 - APP STATE ---
mongodb: mongodb:
container_name: alfred-mongodb container_name: alfred-mongodb
image: mongo:latest image: mongo:latest
restart: unless-stopped restart: unless-stopped
depends_on:
alfred-init:
condition: service_completed_successfully
env_file:
- path: .env
required: true
environment:
# Remap value name
- MONGO_INITDB_ROOT_USERNAME=${MONGO_USER}
- MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}
ports:
- "${MONGO_PORT}:${MONGO_PORT}"
volumes: volumes:
- ./data/mongo:/data/db - ./data/mongo:/data/db
command: mongod --noauth command: mongod --quiet --setParameter logComponentVerbosity='{"network":{"verbosity":0}}'
healthcheck:
test: |
mongosh --quiet --eval "db.adminCommand('ping')" || \
mongosh --quiet -u "${MONGO_USER}" -p "${MONGO_PASSWORD}" --authenticationDatabase admin --eval "db.adminCommand('ping')"
interval: 10s
timeout: 5s
retries: 5
networks:
- alfred-net
# --- OLLAMA - LOCAL LLM ENGINE ---
ollama:
image: ollama/ollama:latest
container_name: alfred-ollama
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
volumes:
- ./data/ollama:/root/.ollama
networks:
- alfred-net
# - OPTIONAL SERVICES -
# --- SEARCH ENGINE SUPER FAST (Optional) ---
meilisearch: meilisearch:
container_name: alfred-meilisearch container_name: alfred-meilisearch
image: getmeili/meilisearch:v1.12.3 image: getmeili/meilisearch:v1.12.3
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped restart: unless-stopped
environment: env_file:
- MEILI_NO_ANALYTICS=true - path: .env
required: true
volumes: volumes:
- ./data/meili:/meili_data - ./data/meilisearch:/meili_data
#profiles: ["meili", "full"] profiles: ["meili", "full"]
networks:
- alfred-net
# --- RETRIEVAL AUGMENTED GENERATION SYSTEM (Optional) ---
rag_api: rag_api:
container_name: alfred-rag container_name: alfred-rag
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:${RAG_VERSION} image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:${RAG_VERSION}
depends_on:
alfred-init:
condition: service_completed_successfully
vectordb:
condition: service_healthy
restart: unless-stopped restart: unless-stopped
env_file: env_file:
- .env - path: .env
environment: required: true
- DB_HOST=vectordb
- DB_PORT=5432
- RAG_PORT=${RAG_PORT:-8000}
ports: ports:
- "${RAG_PORT:-8000}:${RAG_PORT:-8000}" - "${RAG_API_PORT}:${RAG_API_PORT}"
#profiles: ["rag", "full"] volumes:
- ./data/rag/uploads:/app/uploads
profiles: ["rag", "full"]
networks:
- alfred-net
# --- DATABASE #2 - Vector RAG (Optional) ---
vectordb: vectordb:
container_name: alfred-vectordb container_name: alfred-vectordb
image: pgvector/pgvector:0.8.0-pg16-bookworm image: pgvector/pgvector:0.8.0-pg16-bookworm
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped restart: unless-stopped
env_file: env_file:
- .env - path: .env
required: true
ports: ports:
- "${VECTOR_DB_PORT:-5432}:5432" - "${POSTGRES_PORT}:${POSTGRES_PORT}"
volumes: volumes:
- ./data/vectordb:/var/lib/postgresql/data - ./data/vectordb:/var/lib/postgresql/data
#profiles: ["rag", "full"] profiles: ["rag", "full"]
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-alfred} -d ${POSTGRES_DB_NAME:-alfred}" ]
interval: 5s
timeout: 5s
retries: 5
networks:
- alfred-net
# --- QBITTORENT (Optional) ---
qbittorrent:
image: lscr.io/linuxserver/qbittorrent:latest
container_name: alfred-qbittorrent
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
- WEBUI_PORT=${QBITTORRENT_PORT}
volumes:
- ./data/qbittorrent/config:/config
- ./data/qbittorrent/downloads:/downloads
profiles: ["qbittorrent", "full"]
ports:
- "${QBITTORRENT_PORT}:${QBITTORRENT_PORT}"
networks:
- alfred-net
networks:
alfred-net:
name: alfred-internal
driver: bridge

View File

@@ -4,6 +4,16 @@
version: 1.2.1 version: 1.2.1
cache: true cache: true
endpoints: endpoints:
anthropic:
apiKey: "${ANTHROPIC_API_KEY}"
models:
default: ["claude-sonnet-4-5", "claude-haiku-4-5", "claude-opus-4-5"]
fetch: false
titleConvo: true
titleModel: "claude-haiku-4-5"
modelDisplayLabel: "Claude AI"
streamRate: 1
custom: custom:
# Deepseek # Deepseek
- name: "Deepseek" - name: "Deepseek"

202
poetry.lock generated
View File

@@ -52,17 +52,17 @@ files = [
[[package]] [[package]]
name = "bump-my-version" name = "bump-my-version"
version = "1.2.5" version = "1.2.6"
description = "Version bump your Python project" description = "Version bump your Python project"
optional = false optional = false
python-versions = ">=3.8" python-versions = ">=3.8"
files = [ files = [
{file = "bump_my_version-1.2.5-py3-none-any.whl", hash = "sha256:57e5718d9fe7d7b6f5ceb68e70cd3c4bd0570d300b4aade15fd1e355febdd351"}, {file = "bump_my_version-1.2.6-py3-none-any.whl", hash = "sha256:a2f567c10574a374b81a9bd6d2bd3cb2ca74befe5c24c3021123773635431659"},
{file = "bump_my_version-1.2.5.tar.gz", hash = "sha256:827af6c7b13111c62b45340f25defd105f566fe0cdbbb70e2c4b2f005b667e1f"}, {file = "bump_my_version-1.2.6.tar.gz", hash = "sha256:1f2f0daa5d699904e9739be8efb51c4c945461bad83cd4da4c89d324d9a18343"},
] ]
[package.dependencies] [package.dependencies]
click = "<8.2.2" click = "<8.4"
httpx = ">=0.28.1" httpx = ">=0.28.1"
pydantic = ">=2.0.0" pydantic = ">=2.0.0"
pydantic-settings = "*" pydantic-settings = "*"
@@ -218,13 +218,13 @@ files = [
[[package]] [[package]]
name = "click" name = "click"
version = "8.2.1" version = "8.3.1"
description = "Composable command line interface toolkit" description = "Composable command line interface toolkit"
optional = false optional = false
python-versions = ">=3.10" python-versions = ">=3.10"
files = [ files = [
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"},
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, {file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"},
] ]
[package.dependencies] [package.dependencies]
@@ -243,103 +243,103 @@ files = [
[[package]] [[package]]
name = "coverage" name = "coverage"
version = "7.13.0" version = "7.13.1"
description = "Code coverage measurement for Python" description = "Code coverage measurement for Python"
optional = false optional = false
python-versions = ">=3.10" python-versions = ">=3.10"
files = [ files = [
{file = "coverage-7.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:02d9fb9eccd48f6843c98a37bd6817462f130b86da8660461e8f5e54d4c06070"}, {file = "coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147"},
{file = "coverage-7.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:367449cf07d33dc216c083f2036bb7d976c6e4903ab31be400ad74ad9f85ce98"}, {file = "coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d"},
{file = "coverage-7.13.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cdb3c9f8fef0a954c632f64328a3935988d33a6604ce4bf67ec3e39670f12ae5"}, {file = "coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0"},
{file = "coverage-7.13.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d10fd186aac2316f9bbb46ef91977f9d394ded67050ad6d84d94ed6ea2e8e54e"}, {file = "coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90"},
{file = "coverage-7.13.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f88ae3e69df2ab62fb0bc5219a597cb890ba5c438190ffa87490b315190bb33"}, {file = "coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d"},
{file = "coverage-7.13.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c4be718e51e86f553bcf515305a158a1cd180d23b72f07ae76d6017c3cc5d791"}, {file = "coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a00d3a393207ae12f7c49bb1c113190883b500f48979abb118d8b72b8c95c032"}, {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a7b1cd820e1b6116f92c6128f1188e7afe421c7e1b35fa9836b11444e53ebd9"}, {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:37eee4e552a65866f15dedd917d5e5f3d59805994260720821e2c1b51ac3248f"}, {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62d7c4f13102148c78d7353c6052af6d899a7f6df66a32bddcc0c0eb7c5326f8"}, {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29"},
{file = "coverage-7.13.0-cp310-cp310-win32.whl", hash = "sha256:24e4e56304fdb56f96f80eabf840eab043b3afea9348b88be680ec5986780a0f"}, {file = "coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f"},
{file = "coverage-7.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:74c136e4093627cf04b26a35dab8cbfc9b37c647f0502fc313376e11726ba303"}, {file = "coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1"},
{file = "coverage-7.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0dfa3855031070058add1a59fdfda0192fd3e8f97e7c81de0596c145dea51820"}, {file = "coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88"},
{file = "coverage-7.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fdb6f54f38e334db97f72fa0c701e66d8479af0bc3f9bfb5b90f1c30f54500f"}, {file = "coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3"},
{file = "coverage-7.13.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7e442c013447d1d8d195be62852270b78b6e255b79b8675bad8479641e21fd96"}, {file = "coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9"},
{file = "coverage-7.13.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ed5630d946859de835a85e9a43b721123a8a44ec26e2830b296d478c7fd4259"}, {file = "coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee"},
{file = "coverage-7.13.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f15a931a668e58087bc39d05d2b4bf4b14ff2875b49c994bbdb1c2217a8daeb"}, {file = "coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf"},
{file = "coverage-7.13.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30a3a201a127ea57f7e14ba43c93c9c4be8b7d17a26e03bb49e6966d019eede9"}, {file = "coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a485ff48fbd231efa32d58f479befce52dcb6bfb2a88bb7bf9a0b89b1bc8030"}, {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22486cdafba4f9e471c816a2a5745337742a617fef68e890d8baf9f3036d7833"}, {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:263c3dbccc78e2e331e59e90115941b5f53e85cfcc6b3b2fbff1fd4e3d2c6ea8"}, {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5330fa0cc1f5c3c4c3bb8e101b742025933e7848989370a1d4c8c5e401ea753"}, {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba"},
{file = "coverage-7.13.0-cp311-cp311-win32.whl", hash = "sha256:0f4872f5d6c54419c94c25dd6ae1d015deeb337d06e448cd890a1e89a8ee7f3b"}, {file = "coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19"},
{file = "coverage-7.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51a202e0f80f241ccb68e3e26e19ab5b3bf0f813314f2c967642f13ebcf1ddfe"}, {file = "coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a"},
{file = "coverage-7.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:d2a9d7f1c11487b1c69367ab3ac2d81b9b3721f097aa409a3191c3e90f8f3dd7"}, {file = "coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c"},
{file = "coverage-7.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0b3d67d31383c4c68e19a88e28fc4c2e29517580f1b0ebec4a069d502ce1e0bf"}, {file = "coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3"},
{file = "coverage-7.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:581f086833d24a22c89ae0fe2142cfaa1c92c930adf637ddf122d55083fb5a0f"}, {file = "coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e"},
{file = "coverage-7.13.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0a3a30f0e257df382f5f9534d4ce3d4cf06eafaf5192beb1a7bd066cb10e78fb"}, {file = "coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c"},
{file = "coverage-7.13.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:583221913fbc8f53b88c42e8dbb8fca1d0f2e597cb190ce45916662b8b9d9621"}, {file = "coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62"},
{file = "coverage-7.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f5d9bd30756fff3e7216491a0d6d520c448d5124d3d8e8f56446d6412499e74"}, {file = "coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968"},
{file = "coverage-7.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a23e5a1f8b982d56fa64f8e442e037f6ce29322f1f9e6c2344cd9e9f4407ee57"}, {file = "coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9b01c22bc74a7fb44066aaf765224c0d933ddf1f5047d6cdfe4795504a4493f8"}, {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:898cce66d0836973f48dda4e3514d863d70142bdf6dfab932b9b6a90ea5b222d"}, {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3ab483ea0e251b5790c2aac03acde31bff0c736bf8a86829b89382b407cd1c3b"}, {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d84e91521c5e4cb6602fe11ece3e1de03b2760e14ae4fcf1a4b56fa3c801fcd"}, {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c"},
{file = "coverage-7.13.0-cp312-cp312-win32.whl", hash = "sha256:193c3887285eec1dbdb3f2bd7fbc351d570ca9c02ca756c3afbc71b3c98af6ef"}, {file = "coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7"},
{file = "coverage-7.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f3e223b2b2db5e0db0c2b97286aba0036ca000f06aca9b12112eaa9af3d92ae"}, {file = "coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6"},
{file = "coverage-7.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:086cede306d96202e15a4b77ace8472e39d9f4e5f9fd92dd4fecdfb2313b2080"}, {file = "coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c"},
{file = "coverage-7.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:28ee1c96109974af104028a8ef57cec21447d42d0e937c0275329272e370ebcf"}, {file = "coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78"},
{file = "coverage-7.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d1e97353dcc5587b85986cda4ff3ec98081d7e84dd95e8b2a6d59820f0545f8a"}, {file = "coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b"},
{file = "coverage-7.13.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:99acd4dfdfeb58e1937629eb1ab6ab0899b131f183ee5f23e0b5da5cba2fec74"}, {file = "coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd"},
{file = "coverage-7.13.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ff45e0cd8451e293b63ced93161e189780baf444119391b3e7d25315060368a6"}, {file = "coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992"},
{file = "coverage-7.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f4f72a85316d8e13234cafe0a9f81b40418ad7a082792fa4165bd7d45d96066b"}, {file = "coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4"},
{file = "coverage-7.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:11c21557d0e0a5a38632cbbaca5f008723b26a89d70db6315523df6df77d6232"}, {file = "coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76541dc8d53715fb4f7a3a06b34b0dc6846e3c69bc6204c55653a85dd6220971"}, {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6e9e451dee940a86789134b6b0ffbe31c454ade3b849bb8a9d2cca2541a8e91d"}, {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c67dace46f361125e6b9cace8fe0b729ed8479f47e70c89b838d319375c8137"}, {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f59883c643cb19630500f57016f76cfdcd6845ca8c5b5ea1f6e17f74c8e5f511"}, {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784"},
{file = "coverage-7.13.0-cp313-cp313-win32.whl", hash = "sha256:58632b187be6f0be500f553be41e277712baa278147ecb7559983c6d9faf7ae1"}, {file = "coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461"},
{file = "coverage-7.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:73419b89f812f498aca53f757dd834919b48ce4799f9d5cad33ca0ae442bdb1a"}, {file = "coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500"},
{file = "coverage-7.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb76670874fdd6091eedcc856128ee48c41a9bbbb9c3f1c7c3cf169290e3ffd6"}, {file = "coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9"},
{file = "coverage-7.13.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6e63ccc6e0ad8986386461c3c4b737540f20426e7ec932f42e030320896c311a"}, {file = "coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc"},
{file = "coverage-7.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:494f5459ffa1bd45e18558cd98710c36c0b8fbfa82a5eabcbe671d80ecffbfe8"}, {file = "coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:06cac81bf10f74034e055e903f5f946e3e26fc51c09fc9f584e4a1605d977053"}, {file = "coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f2ffc92b46ed6e6760f1d47a71e56b5664781bc68986dbd1836b2b70c0ce2071"}, {file = "coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0602f701057c6823e5db1b74530ce85f17c3c5be5c85fc042ac939cbd909426e"}, {file = "coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:25dc33618d45456ccb1d37bce44bc78cf269909aa14c4db2e03d63146a8a1493"}, {file = "coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:71936a8b3b977ddd0b694c28c6a34f4fff2e9dd201969a4ff5d5fc7742d614b0"}, {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:936bc20503ce24770c71938d1369461f0c5320830800933bc3956e2a4ded930e"}, {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:af0a583efaacc52ae2521f8d7910aff65cdb093091d76291ac5820d5e947fc1c"}, {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f1c23e24a7000da892a312fb17e33c5f94f8b001de44b7cf8ba2e36fbd15859e"}, {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53"},
{file = "coverage-7.13.0-cp313-cp313t-win32.whl", hash = "sha256:5f8a0297355e652001015e93be345ee54393e45dc3050af4a0475c5a2b767d46"}, {file = "coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842"},
{file = "coverage-7.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6abb3a4c52f05e08460bd9acf04fec027f8718ecaa0d09c40ffbc3fbd70ecc39"}, {file = "coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2"},
{file = "coverage-7.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:3ad968d1e3aa6ce5be295ab5fe3ae1bf5bb4769d0f98a80a0252d543a2ef2e9e"}, {file = "coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09"},
{file = "coverage-7.13.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:453b7ec753cf5e4356e14fe858064e5520c460d3bbbcb9c35e55c0d21155c256"}, {file = "coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894"},
{file = "coverage-7.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:af827b7cbb303e1befa6c4f94fd2bf72f108089cfa0f8abab8f4ca553cf5ca5a"}, {file = "coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a"},
{file = "coverage-7.13.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9987a9e4f8197a1000280f7cc089e3ea2c8b3c0a64d750537809879a7b4ceaf9"}, {file = "coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f"},
{file = "coverage-7.13.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3188936845cd0cb114fa6a51842a304cdbac2958145d03be2377ec41eb285d19"}, {file = "coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909"},
{file = "coverage-7.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2bdb3babb74079f021696cb46b8bb5f5661165c385d3a238712b031a12355be"}, {file = "coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4"},
{file = "coverage-7.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7464663eaca6adba4175f6c19354feea61ebbdd735563a03d1e472c7072d27bb"}, {file = "coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8069e831f205d2ff1f3d355e82f511eb7c5522d7d413f5db5756b772ec8697f8"}, {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6fb2d5d272341565f08e962cce14cdf843a08ac43bd621783527adb06b089c4b"}, {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5e70f92ef89bac1ac8a99b3324923b4749f008fdbd7aa9cb35e01d7a284a04f9"}, {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4b5de7d4583e60d5fd246dd57fcd3a8aa23c6e118a8c72b38adf666ba8e7e927"}, {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9"},
{file = "coverage-7.13.0-cp314-cp314-win32.whl", hash = "sha256:a6c6e16b663be828a8f0b6c5027d36471d4a9f90d28444aa4ced4d48d7d6ae8f"}, {file = "coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5"},
{file = "coverage-7.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:0900872f2fdb3ee5646b557918d02279dc3af3dfb39029ac4e945458b13f73bc"}, {file = "coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a"},
{file = "coverage-7.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:3a10260e6a152e5f03f26db4a407c4c62d3830b9af9b7c0450b183615f05d43b"}, {file = "coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0"},
{file = "coverage-7.13.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9097818b6cc1cfb5f174e3263eba4a62a17683bcfe5c4b5d07f4c97fa51fbf28"}, {file = "coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a"},
{file = "coverage-7.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0018f73dfb4301a89292c73be6ba5f58722ff79f51593352759c1790ded1cabe"}, {file = "coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:166ad2a22ee770f5656e1257703139d3533b4a0b6909af67c6b4a3adc1c98657"}, {file = "coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f6aaef16d65d1787280943f1c8718dc32e9cf141014e4634d64446702d26e0ff"}, {file = "coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e999e2dcc094002d6e2c7bbc1fb85b58ba4f465a760a8014d97619330cdbbbf3"}, {file = "coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:00c3d22cf6fb1cf3bf662aaaa4e563be8243a5ed2630339069799835a9cc7f9b"}, {file = "coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22ccfe8d9bb0d6134892cbe1262493a8c70d736b9df930f3f3afae0fe3ac924d"}, {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:9372dff5ea15930fea0445eaf37bbbafbc771a49e70c0aeed8b4e2c2614cc00e"}, {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:69ac2c492918c2461bc6ace42d0479638e60719f2a4ef3f0815fa2df88e9f940"}, {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:739c6c051a7540608d097b8e13c76cfa85263ced467168dc6b477bae3df7d0e2"}, {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416"},
{file = "coverage-7.13.0-cp314-cp314t-win32.whl", hash = "sha256:fe81055d8c6c9de76d60c94ddea73c290b416e061d40d542b24a5871bad498b7"}, {file = "coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f"},
{file = "coverage-7.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:445badb539005283825959ac9fa4a28f712c214b65af3a2c464f1adc90f5fcbc"}, {file = "coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79"},
{file = "coverage-7.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:de7f6748b890708578fc4b7bb967d810aeb6fcc9bff4bb77dbca77dab2f9df6a"}, {file = "coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4"},
{file = "coverage-7.13.0-py3-none-any.whl", hash = "sha256:850d2998f380b1e266459ca5b47bc9e7daf9af1d070f66317972f382d46f1904"}, {file = "coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573"},
{file = "coverage-7.13.0.tar.gz", hash = "sha256:a394aa27f2d7ff9bc04cf703817773a59ad6dfbd577032e690f961d2460ee936"}, {file = "coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd"},
] ]
[package.extras] [package.extras]
@@ -1218,4 +1218,4 @@ files = [
[metadata] [metadata]
lock-version = "2.0" lock-version = "2.0"
python-versions = "==3.14.2" python-versions = "==3.14.2"
content-hash = "7046b2edca4660e38f5f14ef0282854a4bb7892af5028c4af9e968f2c65590c5" content-hash = "ec920fd78ea55c063bf2e4696c328056b50d8d1694f057c2d455ca2619938aac"

View File

@@ -6,22 +6,33 @@ authors = ["Francwa <francois.hodiaumont@gmail.com>"]
readme = "README.md" readme = "README.md"
package-mode = false package-mode = false
[tool.alfred] [tool.alfred.settings]
image_name = "alfred_media_organizer" image_name = "alfred_media_organizer"
librechat_version = "v0.8.1" librechat_version = "v0.8.1"
rag_version = "v0.7.0" rag_version = "v0.7.0"
runner = "poetry" runner = "poetry"
service_name = "alfred" service_name = "alfred"
[tool.alfred.security]
jwt_secret = "32:b64"
jwt_refresh_secret = "32:b64"
creds_key = "32:b64"
creds_iv = "16:b64"
meili_master_key = "32:b64"
mongo_password = "16:hex"
postgres_password = "16:hex"
qbittorrent_password = "16:hex"
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "==3.14.2" python = "==3.14.2"
python-dotenv = "^1.0.0" python-dotenv = "^1.0.0"
requests = "^2.32.5" requests = "^2.32.5"
fastapi = "^0.127.0" fastapi = "^0.127.1"
pydantic = "^2.12.4" pydantic = "^2.12.4"
uvicorn = "^0.40.0" uvicorn = "^0.40.0"
pytest-xdist = "^3.8.0" pytest-xdist = "^3.8.0"
httpx = "^0.28.1" httpx = "^0.28.1"
pydantic-settings = "^2.12.0"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
pytest = "^8.0.0" pytest = "^8.0.0"

245
scripts/bootstrap.py Normal file
View File

@@ -0,0 +1,245 @@
import re
import secrets
from pathlib import Path
import tomllib
def generate_secret(rule: str) -> str:
"""
Generates a cryptographically secure secret based on a spec string.
Example specs: '32:b64', '16:hex'.
"""
chunks: list[str] = rule.split(":")
size: int = int(chunks[0])
tech: str = chunks[1]
if tech == "b64":
return secrets.token_urlsafe(size)
elif tech == "hex":
return secrets.token_hex(size)
else:
raise ValueError(f"Invalid security format: {tech}")
def extract_python_version(version_string: str) -> tuple[str, str]:
"""
Extract Python version from poetry dependency string.
Examples:
"==3.14.2" -> ("3.14.2", "3.14")
"^3.14.2" -> ("3.14.2", "3.14")
"~3.14.2" -> ("3.14.2", "3.14")
"3.14.2" -> ("3.14.2", "3.14")
"""
# Remove poetry version operators (==, ^, ~, >=, etc.)
clean_version = re.sub(r"^[=^~><]+", "", version_string.strip())
# Extract version parts
parts = clean_version.split(".")
if len(parts) >= 2:
full_version = clean_version
short_version = f"{parts[0]}.{parts[1]}"
return full_version, short_version
else:
raise ValueError(f"Invalid Python version format: {version_string}")
# TODO: Refactor
def bootstrap(): # noqa: PLR0912, PLR0915
"""
Initializes the .env file by merging .env.example with generated secrets
and build variables from pyproject.toml.
Also generates .env.make for Makefile.
ALWAYS preserves existing secrets!
"""
base_dir = Path(__file__).resolve().parent.parent
env_path = base_dir / ".env"
example_path = base_dir / ".env.example"
if not example_path.exists():
print(f"{example_path.name} not found.")
return
toml_path = base_dir / "pyproject.toml"
if not toml_path.exists():
print(f"{toml_path.name} not found.")
return
# ALWAYS load existing .env if it exists
existing_env = {}
if env_path.exists():
print("🔄 Reading existing .env...")
with open(env_path) as f:
for line in f:
if "=" in line and not line.strip().startswith("#"):
key, value = line.split("=", 1)
existing_env[key.strip()] = value.strip()
print(f" Found {len(existing_env)} existing keys")
print("🔧 Updating .env file (keeping secrets)...")
else:
print("🔧 Initializing: Creating secure .env file...")
# Load data from pyproject.toml
with open(toml_path, "rb") as f:
data = tomllib.load(f)
security_keys = data["tool"]["alfred"]["security"]
settings_keys = data["tool"]["alfred"]["settings"]
dependencies = data["tool"]["poetry"]["dependencies"]
alfred_version = data["tool"]["poetry"]["version"]
# Normalize TOML keys to UPPER_CASE for .env format (done once)
security_keys_upper = {k.upper(): v for k, v in security_keys.items()}
settings_keys_upper = {k.upper(): v for k, v in settings_keys.items()}
# Extract Python version
python_version_full, python_version_short = extract_python_version(
dependencies["python"]
)
# Read .env.example
with open(example_path) as f:
example_lines = f.readlines()
new_lines = []
# Process each line from .env.example
for raw_line in example_lines:
line = raw_line.strip()
if line and not line.startswith("#") and "=" in line:
key, value = line.split("=", 1)
key = key.strip()
# Check if key exists in current .env (update mode)
if key in existing_env:
# Keep existing value for secrets
if key in security_keys_upper:
new_lines.append(f"{key}={existing_env[key]}\n")
print(f" ↻ Kept existing {key}")
# Update build vars from pyproject.toml
elif key in settings_keys_upper:
new_value = settings_keys_upper[key]
if existing_env[key] != new_value:
new_lines.append(f"{key}={new_value}\n")
print(f" ↻ Updated {key}: {existing_env[key]}{new_value}")
else:
new_lines.append(f"{key}={existing_env[key]}\n")
print(f" ↻ Kept {key}={existing_env[key]}")
# Update Python versions
elif key == "PYTHON_VERSION":
if existing_env[key] != python_version_full:
new_lines.append(f"{key}={python_version_full}\n")
print(
f" ↻ Updated Python: {existing_env[key]}{python_version_full}"
)
else:
new_lines.append(f"{key}={existing_env[key]}\n")
print(f" ↻ Kept Python: {existing_env[key]}")
elif key == "PYTHON_VERSION_SHORT":
if existing_env[key] != python_version_short:
new_lines.append(f"{key}={python_version_short}\n")
print(
f" ↻ Updated Python (short): {existing_env[key]}{python_version_short}"
)
else:
new_lines.append(f"{key}={existing_env[key]}\n")
print(f" ↻ Kept Python (short): {existing_env[key]}")
elif key == "ALFRED_VERSION":
if existing_env.get(key) != alfred_version:
new_lines.append(f"{key}={alfred_version}\n")
print(f" ↻ Updated Alfred version: {existing_env.get(key, 'N/A')}{alfred_version}")
else:
new_lines.append(f"{key}={alfred_version}\n")
print(f" ↻ Kept Alfred version: {alfred_version}")
# Keep other existing values
else:
new_lines.append(f"{key}={existing_env[key]}\n")
# Key doesn't exist, generate/add it
elif key in security_keys_upper:
rule = security_keys_upper[key]
secret = generate_secret(rule)
new_lines.append(f"{key}={secret}\n")
print(f" + Secret generated for {key} ({rule})")
elif key in settings_keys_upper:
value = settings_keys_upper[key]
new_lines.append(f"{key}={value}\n")
print(f" + Setting added: {key}={value}")
elif key == "PYTHON_VERSION":
new_lines.append(f"{key}={python_version_full}\n")
print(f" + Python version: {python_version_full}")
elif key == "PYTHON_VERSION_SHORT":
new_lines.append(f"{key}={python_version_short}\n")
print(f" + Python version (short): {python_version_short}")
elif key == "ALFRED_VERSION":
new_lines.append(f"{key}={alfred_version}\n")
print(f" + Alfred version: {alfred_version}")
else:
new_lines.append(raw_line)
else:
# Keep comments and empty lines
new_lines.append(raw_line)
# Compute database URIs from the generated values
final_env = {}
for line in new_lines:
if "=" in line and not line.strip().startswith("#"):
key, value = line.split("=", 1)
final_env[key.strip()] = value.strip()
# Compute MONGO_URI
if "MONGO_USER" in final_env and "MONGO_PASSWORD" in final_env:
mongo_uri = (
f"mongodb://{final_env.get('MONGO_USER', 'alfred')}:"
f"{final_env.get('MONGO_PASSWORD', '')}@"
f"{final_env.get('MONGO_HOST', 'mongodb')}:"
f"{final_env.get('MONGO_PORT', '27017')}/"
f"{final_env.get('MONGO_DB_NAME', 'alfred')}?authSource=admin"
)
# Update MONGO_URI in new_lines
for i, line in enumerate(new_lines):
if line.startswith("MONGO_URI="):
new_lines[i] = f"MONGO_URI={mongo_uri}\n"
print(" ✓ Computed MONGO_URI")
break
# Compute POSTGRES_URI
if "POSTGRES_USER" in final_env and "POSTGRES_PASSWORD" in final_env:
postgres_uri = (
f"postgresql://{final_env.get('POSTGRES_USER', 'alfred')}:"
f"{final_env.get('POSTGRES_PASSWORD', '')}@"
f"{final_env.get('POSTGRES_HOST', 'vectordb')}:"
f"{final_env.get('POSTGRES_PORT', '5432')}/"
f"{final_env.get('POSTGRES_DB_NAME', 'alfred')}"
)
# Update POSTGRES_URI in new_lines
for i, line in enumerate(new_lines):
if line.startswith("POSTGRES_URI="):
new_lines[i] = f"POSTGRES_URI={postgres_uri}\n"
print(" ✓ Computed POSTGRES_URI")
break
# Write .env file
with open(env_path, "w", encoding="utf-8") as f:
f.writelines(new_lines)
print(f"\n{env_path.name} generated successfully.")
# Generate .env.make for Makefile
env_make_path = base_dir / ".env.make"
with open(env_make_path, "w", encoding="utf-8") as f:
f.write("# Auto-generated from pyproject.toml by bootstrap.py\n")
f.write(f"export ALFRED_VERSION={alfred_version}\n")
f.write(f"export PYTHON_VERSION={python_version_full}\n")
f.write(f"export PYTHON_VERSION_SHORT={python_version_short}\n")
f.write(f"export RUNNER={settings_keys['runner']}\n")
f.write(f"export IMAGE_NAME={settings_keys['image_name']}\n")
f.write(f"export SERVICE_NAME={settings_keys['service_name']}\n")
f.write(f"export LIBRECHAT_VERSION={settings_keys['librechat_version']}\n")
f.write(f"export RAG_VERSION={settings_keys['rag_version']}\n")
print(f"{env_make_path.name} generated for Makefile.")
print("\n⚠️ Reminder: Please manually add your API keys to the .env file.")
if __name__ == "__main__":
bootstrap()

View File

@@ -12,6 +12,13 @@ from unittest.mock import MagicMock, Mock
import pytest import pytest
from alfred.infrastructure.persistence import Memory, set_memory from alfred.infrastructure.persistence import Memory, set_memory
from alfred.settings import settings
@pytest.fixture
def mock_settings():
"""Create a mock Settings instance for testing."""
return settings
@pytest.fixture @pytest.fixture

View File

@@ -9,24 +9,24 @@ from alfred.infrastructure.persistence import get_memory
class TestAgentInit: class TestAgentInit:
"""Tests for Agent initialization.""" """Tests for Agent initialization."""
def test_init(self, memory, mock_llm): def test_init(self, memory, mock_settings, mock_llm):
"""Should initialize agent with LLM.""" """Should initialize agent with LLM."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=10)
assert agent.llm is mock_llm assert agent.llm is mock_llm
assert agent.tools is not None assert agent.tools is not None
assert agent.prompt_builder is not None assert agent.prompt_builder is not None
assert agent.max_tool_iterations == 5 assert agent.max_tool_iterations == 10
def test_init_custom_iterations(self, memory, mock_llm): def test_init_custom_iterations(self, memory, mock_settings, mock_llm):
"""Should accept custom max iterations.""" """Should accept custom max iterations."""
agent = Agent(llm=mock_llm, max_tool_iterations=10) agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=10)
assert agent.max_tool_iterations == 10 assert agent.max_tool_iterations == 10
def test_tools_registered(self, memory, mock_llm): def test_tools_registered(self, memory, mock_settings, mock_llm):
"""Should register all tools.""" """Should register all tools."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
expected_tools = [ expected_tools = [
"set_path_for_folder", "set_path_for_folder",
@@ -46,9 +46,9 @@ class TestAgentInit:
class TestExecuteToolCall: class TestExecuteToolCall:
"""Tests for _execute_tool_call method.""" """Tests for _execute_tool_call method."""
def test_execute_known_tool(self, memory, mock_llm, real_folder): def test_execute_known_tool(self, memory, mock_settings, mock_llm, real_folder):
"""Should execute known tool.""" """Should execute known tool."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
tool_call = { tool_call = {
@@ -62,9 +62,9 @@ class TestExecuteToolCall:
assert result["status"] == "ok" assert result["status"] == "ok"
def test_execute_unknown_tool(self, memory, mock_llm): def test_execute_unknown_tool(self, memory, mock_settings, mock_llm):
"""Should return error for unknown tool.""" """Should return error for unknown tool."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = { tool_call = {
"id": "call_123", "id": "call_123",
@@ -75,9 +75,9 @@ class TestExecuteToolCall:
assert result["error"] == "unknown_tool" assert result["error"] == "unknown_tool"
assert "available_tools" in result assert "available_tools" in result
def test_execute_with_bad_args(self, memory, mock_llm): def test_execute_with_bad_args(self, memory, mock_settings, mock_llm):
"""Should return error for bad arguments.""" """Should return error for bad arguments."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = { tool_call = {
"id": "call_123", "id": "call_123",
@@ -87,9 +87,9 @@ class TestExecuteToolCall:
assert result["error"] == "bad_args" assert result["error"] == "bad_args"
def test_execute_tracks_errors(self, memory, mock_llm): def test_execute_tracks_errors(self, memory, mock_settings, mock_llm):
"""Should track errors in episodic memory.""" """Should track errors in episodic memory."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
# Use invalid arguments to trigger a TypeError # Use invalid arguments to trigger a TypeError
tool_call = { tool_call = {
@@ -104,9 +104,9 @@ class TestExecuteToolCall:
mem = get_memory() mem = get_memory()
assert len(mem.episodic.recent_errors) > 0 assert len(mem.episodic.recent_errors) > 0
def test_execute_with_invalid_json(self, memory, mock_llm): def test_execute_with_invalid_json(self, memory, mock_settings, mock_llm):
"""Should handle invalid JSON arguments.""" """Should handle invalid JSON arguments."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = { tool_call = {
"id": "call_123", "id": "call_123",
@@ -120,17 +120,17 @@ class TestExecuteToolCall:
class TestStep: class TestStep:
"""Tests for step method.""" """Tests for step method."""
def test_step_text_response(self, memory, mock_llm): def test_step_text_response(self, memory, mock_settings, mock_llm):
"""Should return text response when no tool call.""" """Should return text response when no tool call."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
response = agent.step("Hello") response = agent.step("Hello")
assert response == "I found what you're looking for!" assert response == "I found what you're looking for!"
def test_step_saves_to_history(self, memory, mock_llm): def test_step_saves_to_history(self, memory, mock_settings, mock_llm):
"""Should save conversation to STM history.""" """Should save conversation to STM history."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("Hi there") agent.step("Hi there")
@@ -141,11 +141,13 @@ class TestStep:
assert history[0]["content"] == "Hi there" assert history[0]["content"] == "Hi there"
assert history[1]["role"] == "assistant" assert history[1]["role"] == "assistant"
def test_step_with_tool_call(self, memory, mock_llm_with_tool_call, real_folder): def test_step_with_tool_call(
self, memory, mock_settings, mock_llm_with_tool_call, real_folder
):
"""Should execute tool and continue.""" """Should execute tool and continue."""
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
agent = Agent(llm=mock_llm_with_tool_call) agent = Agent(settings=mock_settings, llm=mock_llm_with_tool_call)
response = agent.step("List my downloads") response = agent.step("List my downloads")
@@ -157,7 +159,7 @@ class TestStep:
assert first_call_args[1]["tools"] is not None, "Tools not passed to LLM!" assert first_call_args[1]["tools"] is not None, "Tools not passed to LLM!"
assert len(first_call_args[1]["tools"]) > 0, "Tools list is empty!" assert len(first_call_args[1]["tools"]) > 0, "Tools list is empty!"
def test_step_max_iterations(self, memory, mock_llm): def test_step_max_iterations(self, memory, mock_settings, mock_llm):
"""Should stop after max iterations.""" """Should stop after max iterations."""
call_count = [0] call_count = [0]
@@ -185,15 +187,15 @@ class TestStep:
return {"role": "assistant", "content": "I couldn't complete the task."} return {"role": "assistant", "content": "I couldn't complete the task."}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3) agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Do something") agent.step("Do something")
assert call_count[0] == 4 assert call_count[0] == 4
def test_step_includes_history(self, memory_with_history, mock_llm): def test_step_includes_history(self, memory_with_history, mock_settings, mock_llm):
"""Should include conversation history in prompt.""" """Should include conversation history in prompt."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("New message") agent.step("New message")
@@ -201,10 +203,10 @@ class TestStep:
messages_content = [m.get("content", "") for m in call_args] messages_content = [m.get("content", "") for m in call_args]
assert any("Hello" in str(c) for c in messages_content) assert any("Hello" in str(c) for c in messages_content)
def test_step_includes_events(self, memory, mock_llm): def test_step_includes_events(self, memory, mock_settings, mock_llm):
"""Should include unread events in prompt.""" """Should include unread events in prompt."""
memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"}) memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"})
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("What's new?") agent.step("What's new?")
@@ -212,9 +214,9 @@ class TestStep:
messages_content = [m.get("content", "") for m in call_args] messages_content = [m.get("content", "") for m in call_args]
assert any("download" in str(c).lower() for c in messages_content) assert any("download" in str(c).lower() for c in messages_content)
def test_step_saves_ltm(self, memory, mock_llm, temp_dir): def test_step_saves_ltm(self, memory, mock_settings, mock_llm, temp_dir):
"""Should save LTM after step.""" """Should save LTM after step."""
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("Hello") agent.step("Hello")
@@ -225,7 +227,7 @@ class TestStep:
class TestAgentIntegration: class TestAgentIntegration:
"""Integration tests for Agent.""" """Integration tests for Agent."""
def test_multiple_tool_calls(self, memory, mock_llm, real_folder): def test_multiple_tool_calls(self, memory, mock_settings, mock_llm, real_folder):
"""Should handle multiple tool calls in sequence.""" """Should handle multiple tool calls in sequence."""
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
memory.ltm.set_config("movie_folder", str(real_folder["movies"])) memory.ltm.set_config("movie_folder", str(real_folder["movies"]))
@@ -276,7 +278,7 @@ class TestAgentIntegration:
} }
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("List my downloads and movies") agent.step("List my downloads and movies")

View File

@@ -6,6 +6,7 @@ import pytest
from alfred.agent.agent import Agent from alfred.agent.agent import Agent
from alfred.infrastructure.persistence import get_memory from alfred.infrastructure.persistence import get_memory
from alfred.settings import settings
class TestExecuteToolCallEdgeCases: class TestExecuteToolCallEdgeCases:
@@ -13,7 +14,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_returns_none(self, memory, mock_llm): def test_tool_returns_none(self, memory, mock_llm):
"""Should handle tool returning None.""" """Should handle tool returning None."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
# Mock a tool that returns None # Mock a tool that returns None
from alfred.agent.registry import Tool from alfred.agent.registry import Tool
@@ -32,7 +33,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_raises_keyboard_interrupt(self, memory, mock_llm): def test_tool_raises_keyboard_interrupt(self, memory, mock_llm):
"""Should propagate KeyboardInterrupt.""" """Should propagate KeyboardInterrupt."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
from alfred.agent.registry import Tool from alfred.agent.registry import Tool
@@ -53,7 +54,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_with_extra_args(self, memory, mock_llm, real_folder): def test_tool_with_extra_args(self, memory, mock_llm, real_folder):
"""Should handle extra arguments gracefully.""" """Should handle extra arguments gracefully."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
tool_call = { tool_call = {
@@ -70,7 +71,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_with_wrong_type_args(self, memory, mock_llm): def test_tool_with_wrong_type_args(self, memory, mock_llm):
"""Should handle wrong argument types.""" """Should handle wrong argument types."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
tool_call = { tool_call = {
"id": "call_123", "id": "call_123",
@@ -90,7 +91,7 @@ class TestStepEdgeCases:
def test_step_with_empty_input(self, memory, mock_llm): def test_step_with_empty_input(self, memory, mock_llm):
"""Should handle empty user input.""" """Should handle empty user input."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("") response = agent.step("")
@@ -98,7 +99,7 @@ class TestStepEdgeCases:
def test_step_with_very_long_input(self, memory, mock_llm): def test_step_with_very_long_input(self, memory, mock_llm):
"""Should handle very long user input.""" """Should handle very long user input."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
long_input = "x" * 100000 long_input = "x" * 100000
response = agent.step(long_input) response = agent.step(long_input)
@@ -112,7 +113,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": "日本語の応答"} return {"role": "assistant", "content": "日本語の応答"}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("日本語の質問") response = agent.step("日本語の質問")
@@ -125,7 +126,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": ""} return {"role": "assistant", "content": ""}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("Hello") response = agent.step("Hello")
@@ -134,7 +135,7 @@ class TestStepEdgeCases:
def test_step_llm_raises_exception(self, memory, mock_llm): def test_step_llm_raises_exception(self, memory, mock_llm):
"""Should propagate LLM exceptions.""" """Should propagate LLM exceptions."""
mock_llm.complete.side_effect = Exception("LLM Error") mock_llm.complete.side_effect = Exception("LLM Error")
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
with pytest.raises(Exception, match="LLM Error"): with pytest.raises(Exception, match="LLM Error"):
agent.step("Hello") agent.step("Hello")
@@ -162,7 +163,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": "Done looping"} return {"role": "assistant", "content": "Done looping"}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3) agent = Agent(settings=settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Loop test") agent.step("Loop test")
@@ -170,7 +171,7 @@ class TestStepEdgeCases:
def test_step_preserves_history_order(self, memory, mock_llm): def test_step_preserves_history_order(self, memory, mock_llm):
"""Should preserve message order in history.""" """Should preserve message order in history."""
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("First") agent.step("First")
agent.step("Second") agent.step("Second")
@@ -189,7 +190,7 @@ class TestStepEdgeCases:
[{"index": 1, "label": "Option 1"}], [{"index": 1, "label": "Option 1"}],
{}, {},
) )
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello") agent.step("Hello")
@@ -206,7 +207,7 @@ class TestStepEdgeCases:
"progress": 50, "progress": 50,
} }
) )
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello") agent.step("Hello")
@@ -217,7 +218,7 @@ class TestStepEdgeCases:
def test_step_clears_events_after_notification(self, memory, mock_llm): def test_step_clears_events_after_notification(self, memory, mock_llm):
"""Should mark events as read after notification.""" """Should mark events as read after notification."""
memory.episodic.add_background_event("test_event", {"data": "test"}) memory.episodic.add_background_event("test_event", {"data": "test"})
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello") agent.step("Hello")
@@ -230,8 +231,8 @@ class TestAgentConcurrencyEdgeCases:
def test_multiple_agents_same_memory(self, memory, mock_llm): def test_multiple_agents_same_memory(self, memory, mock_llm):
"""Should handle multiple agents with same memory.""" """Should handle multiple agents with same memory."""
agent1 = Agent(llm=mock_llm) agent1 = Agent(settings=settings, llm=mock_llm)
agent2 = Agent(llm=mock_llm) agent2 = Agent(settings=settings, llm=mock_llm)
agent1.step("From agent 1") agent1.step("From agent 1")
agent2.step("From agent 2") agent2.step("From agent 2")
@@ -266,7 +267,7 @@ class TestAgentConcurrencyEdgeCases:
return {"role": "assistant", "content": "Path set successfully."} return {"role": "assistant", "content": "Path set successfully."}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("Set movie folder") agent.step("Set movie folder")
@@ -300,7 +301,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "The folder is not configured."} return {"role": "assistant", "content": "The folder is not configured."}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("List downloads") response = agent.step("List downloads")
@@ -329,7 +330,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "Error occurred."} return {"role": "assistant", "content": "Error occurred."}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm) agent = Agent(settings=settings, llm=mock_llm)
agent.step("Set folder") agent.step("Set folder")
@@ -359,7 +360,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "All attempts failed."} return {"role": "assistant", "content": "All attempts failed."}
mock_llm.complete = Mock(side_effect=mock_complete) mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3) agent = Agent(settings=settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Try multiple times") agent.step("Try multiple times")

View File

@@ -2,12 +2,14 @@
from unittest.mock import Mock, patch from unittest.mock import Mock, patch
import pytest
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
class TestChatCompletionsEdgeCases: class TestChatCompletionsEdgeCases:
"""Edge case tests for /v1/chat/completions endpoint.""" """Edge case tests for /v1/chat/completions endpoint."""
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_very_long_message(self, memory): def test_very_long_message(self, memory):
"""Should handle very long user message.""" """Should handle very long user message."""
from alfred.agent import agent from alfred.agent import agent
@@ -31,6 +33,7 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 200 assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_unicode_message(self, memory): def test_unicode_message(self, memory):
"""Should handle unicode in message.""" """Should handle unicode in message."""
from alfred.agent import agent from alfred.agent import agent
@@ -57,6 +60,7 @@ class TestChatCompletionsEdgeCases:
content = response.json()["choices"][0]["message"]["content"] content = response.json()["choices"][0]["message"]["content"]
assert "日本語" in content or len(content) > 0 assert "日本語" in content or len(content) > 0
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_special_characters_in_message(self, memory): def test_special_characters_in_message(self, memory):
"""Should handle special characters.""" """Should handle special characters."""
from alfred.agent import agent from alfred.agent import agent
@@ -121,6 +125,7 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 422 assert response.status_code == 422
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_missing_content_field(self, memory): def test_missing_content_field(self, memory):
"""Should handle missing content field.""" """Should handle missing content field."""
with patch("alfred.app.DeepSeekClient") as mock_llm_class: with patch("alfred.app.DeepSeekClient") as mock_llm_class:
@@ -185,6 +190,7 @@ class TestChatCompletionsEdgeCases:
# Should reject or ignore invalid role # Should reject or ignore invalid role
assert response.status_code in [200, 400, 422] assert response.status_code in [200, 400, 422]
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_many_messages(self, memory): def test_many_messages(self, memory):
"""Should handle many messages in conversation.""" """Should handle many messages in conversation."""
from alfred.agent import agent from alfred.agent import agent
@@ -299,6 +305,7 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 422 assert response.status_code == 422
# Pydantic validation error # Pydantic validation error
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_extra_fields_in_request(self, memory): def test_extra_fields_in_request(self, memory):
"""Should ignore extra fields in request.""" """Should ignore extra fields in request."""
from alfred.agent import agent from alfred.agent import agent
@@ -369,6 +376,7 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 200 assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_concurrent_requests_simulation(self, memory): def test_concurrent_requests_simulation(self, memory):
"""Should handle rapid sequential requests.""" """Should handle rapid sequential requests."""
from alfred.agent import agent from alfred.agent import agent
@@ -390,6 +398,7 @@ class TestChatCompletionsEdgeCases:
) )
assert response.status_code == 200 assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_llm_returns_json_in_response(self, memory): def test_llm_returns_json_in_response(self, memory):
"""Should handle LLM returning JSON in text response.""" """Should handle LLM returning JSON in text response."""
from alfred.agent import agent from alfred.agent import agent

View File

@@ -2,7 +2,7 @@
import pytest import pytest
from alfred.agent.config import ConfigurationError, Settings from alfred.settings import ConfigurationError, Settings
class TestConfigValidation: class TestConfigValidation:
@@ -11,17 +11,17 @@ class TestConfigValidation:
def test_invalid_temperature_raises_error(self): def test_invalid_temperature_raises_error(self):
"""Verify invalid temperature is rejected.""" """Verify invalid temperature is rejected."""
with pytest.raises(ConfigurationError, match="Temperature"): with pytest.raises(ConfigurationError, match="Temperature"):
Settings(temperature=3.0) # > 2.0 Settings(llm_temperature=3.0) # > 2.0
with pytest.raises(ConfigurationError, match="Temperature"): with pytest.raises(ConfigurationError, match="Temperature"):
Settings(temperature=-0.1) # < 0.0 Settings(llm_temperature=-0.1) # < 0.0
def test_valid_temperature_accepted(self): def test_valid_temperature_accepted(self):
"""Verify valid temperature is accepted.""" """Verify valid temperature is accepted."""
# Should not raise # Should not raise
Settings(temperature=0.0) Settings(llm_temperature=0.0)
Settings(temperature=1.0) Settings(llm_temperature=1.0)
Settings(temperature=2.0) Settings(llm_temperature=2.0)
def test_invalid_max_iterations_raises_error(self): def test_invalid_max_iterations_raises_error(self):
"""Verify invalid max_iterations is rejected.""" """Verify invalid max_iterations is rejected."""
@@ -126,7 +126,7 @@ class TestConfigDefaults:
"""Verify default temperature is reasonable.""" """Verify default temperature is reasonable."""
settings = Settings() settings = Settings()
assert 0.0 <= settings.temperature <= 2.0 assert 0.0 <= settings.llm_temperature <= 2.0
def test_default_max_iterations(self): def test_default_max_iterations(self):
"""Verify default max_iterations is reasonable.""" """Verify default max_iterations is reasonable."""
@@ -153,11 +153,11 @@ class TestConfigEnvironmentVariables:
def test_loads_temperature_from_env(self, monkeypatch): def test_loads_temperature_from_env(self, monkeypatch):
"""Verify temperature is loaded from environment.""" """Verify temperature is loaded from environment."""
monkeypatch.setenv("TEMPERATURE", "0.5") monkeypatch.setenv("LLM_TEMPERATURE", "0.5")
settings = Settings() settings = Settings()
assert settings.temperature == 0.5 assert settings.llm_temperature == 0.5
def test_loads_max_iterations_from_env(self, monkeypatch): def test_loads_max_iterations_from_env(self, monkeypatch):
"""Verify max_iterations is loaded from environment.""" """Verify max_iterations is loaded from environment."""
@@ -185,7 +185,7 @@ class TestConfigEnvironmentVariables:
def test_invalid_env_value_raises_error(self, monkeypatch): def test_invalid_env_value_raises_error(self, monkeypatch):
"""Verify invalid environment value raises error.""" """Verify invalid environment value raises error."""
monkeypatch.setenv("TEMPERATURE", "invalid") monkeypatch.setenv("LLM_TEMPERATURE", "invalid")
with pytest.raises(ValueError): with pytest.raises(ValueError):
Settings() Settings()

View File

@@ -5,13 +5,13 @@ from unittest.mock import patch
import pytest import pytest
from alfred.agent.config import ConfigurationError, Settings
from alfred.agent.parameters import ( from alfred.agent.parameters import (
REQUIRED_PARAMETERS, REQUIRED_PARAMETERS,
ParameterSchema, ParameterSchema,
format_parameters_for_prompt, format_parameters_for_prompt,
get_missing_required_parameters, get_missing_required_parameters,
) )
from alfred.settings import ConfigurationError, Settings
class TestSettingsEdgeCases: class TestSettingsEdgeCases:
@@ -22,31 +22,31 @@ class TestSettingsEdgeCases:
with patch.dict(os.environ, {}, clear=True): with patch.dict(os.environ, {}, clear=True):
settings = Settings() settings = Settings()
assert settings.temperature == 0.2 assert settings.llm_temperature == 0.2
assert settings.max_tool_iterations == 5 assert settings.max_tool_iterations == 10
assert settings.request_timeout == 30 assert settings.request_timeout == 30
def test_temperature_boundary_low(self): def test_temperature_boundary_low(self):
"""Should accept temperature at lower boundary.""" """Should accept temperature at lower boundary."""
with patch.dict(os.environ, {"TEMPERATURE": "0.0"}, clear=True): with patch.dict(os.environ, {"LLM_TEMPERATURE": "0.0"}, clear=True):
settings = Settings() settings = Settings()
assert settings.temperature == 0.0 assert settings.llm_temperature == 0.0
def test_temperature_boundary_high(self): def test_temperature_boundary_high(self):
"""Should accept temperature at upper boundary.""" """Should accept temperature at upper boundary."""
with patch.dict(os.environ, {"TEMPERATURE": "2.0"}, clear=True): with patch.dict(os.environ, {"LLM_TEMPERATURE": "2.0"}, clear=True):
settings = Settings() settings = Settings()
assert settings.temperature == 2.0 assert settings.llm_temperature == 2.0
def test_temperature_below_boundary(self): def test_temperature_below_boundary(self):
"""Should reject temperature below 0.""" """Should reject temperature below 0."""
with patch.dict(os.environ, {"TEMPERATURE": "-0.1"}, clear=True): with patch.dict(os.environ, {"LLM_TEMPERATURE": "-0.1"}, clear=True):
with pytest.raises(ConfigurationError): with pytest.raises(ConfigurationError):
Settings() Settings()
def test_temperature_above_boundary(self): def test_temperature_above_boundary(self):
"""Should reject temperature above 2.""" """Should reject temperature above 2."""
with patch.dict(os.environ, {"TEMPERATURE": "2.1"}, clear=True): with patch.dict(os.environ, {"LLM_TEMPERATURE": "2.1"}, clear=True):
with pytest.raises(ConfigurationError): with pytest.raises(ConfigurationError):
Settings() Settings()
@@ -162,7 +162,7 @@ class TestSettingsEdgeCases:
def test_non_numeric_temperature(self): def test_non_numeric_temperature(self):
"""Should handle non-numeric temperature.""" """Should handle non-numeric temperature."""
with patch.dict(os.environ, {"TEMPERATURE": "not-a-number"}, clear=True): with patch.dict(os.environ, {"LLM_TEMPERATURE": "not-a-number"}, clear=True):
with pytest.raises((ConfigurationError, ValueError)): with pytest.raises((ConfigurationError, ValueError)):
Settings() Settings()

View File

@@ -2,6 +2,7 @@
from alfred.agent.prompts import PromptBuilder from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilder: class TestPromptBuilder:
@@ -9,14 +10,14 @@ class TestPromptBuilder:
def test_init(self, memory): def test_init(self, memory):
"""Should initialize with tools.""" """Should initialize with tools."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
assert builder.tools is tools assert builder.tools is tools
def test_build_system_prompt(self, memory): def test_build_system_prompt(self, memory):
"""Should build a complete system prompt.""" """Should build a complete system prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -27,7 +28,7 @@ class TestPromptBuilder:
def test_includes_tools(self, memory): def test_includes_tools(self, memory):
"""Should include all tool descriptions.""" """Should include all tool descriptions."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -38,7 +39,7 @@ class TestPromptBuilder:
def test_includes_config(self, memory): def test_includes_config(self, memory):
"""Should include current configuration.""" """Should include current configuration."""
memory.ltm.set_config("download_folder", "/path/to/downloads") memory.ltm.set_config("download_folder", "/path/to/downloads")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -47,7 +48,7 @@ class TestPromptBuilder:
def test_includes_search_results(self, memory_with_search_results): def test_includes_search_results(self, memory_with_search_results):
"""Should include search results summary.""" """Should include search results summary."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -58,7 +59,7 @@ class TestPromptBuilder:
def test_includes_search_result_names(self, memory_with_search_results): def test_includes_search_result_names(self, memory_with_search_results):
"""Should include search result names.""" """Should include search result names."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -74,7 +75,7 @@ class TestPromptBuilder:
"progress": 50, "progress": 50,
} }
) )
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -89,7 +90,7 @@ class TestPromptBuilder:
[{"index": 1, "label": "Option 1"}, {"index": 2, "label": "Option 2"}], [{"index": 1, "label": "Option 1"}, {"index": 2, "label": "Option 2"}],
{}, {},
) )
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -100,7 +101,7 @@ class TestPromptBuilder:
def test_includes_last_error(self, memory): def test_includes_last_error(self, memory):
"""Should include last error.""" """Should include last error."""
memory.episodic.add_error("find_torrent", "API timeout") memory.episodic.add_error("find_torrent", "API timeout")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -111,7 +112,7 @@ class TestPromptBuilder:
def test_includes_workflow(self, memory): def test_includes_workflow(self, memory):
"""Should include current workflow.""" """Should include current workflow."""
memory.stm.start_workflow("download", {"title": "Inception"}) memory.stm.start_workflow("download", {"title": "Inception"})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -122,7 +123,7 @@ class TestPromptBuilder:
def test_includes_topic(self, memory): def test_includes_topic(self, memory):
"""Should include current topic.""" """Should include current topic."""
memory.stm.set_topic("selecting_torrent") memory.stm.set_topic("selecting_torrent")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -134,7 +135,7 @@ class TestPromptBuilder:
"""Should include extracted entities.""" """Should include extracted entities."""
memory.stm.set_entity("movie_title", "Inception") memory.stm.set_entity("movie_title", "Inception")
memory.stm.set_entity("year", 2010) memory.stm.set_entity("year", 2010)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -144,7 +145,7 @@ class TestPromptBuilder:
def test_includes_rules(self, memory): def test_includes_rules(self, memory):
"""Should include important rules.""" """Should include important rules."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -154,7 +155,7 @@ class TestPromptBuilder:
def test_includes_examples(self, memory): def test_includes_examples(self, memory):
"""Should include usage examples.""" """Should include usage examples."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -164,7 +165,7 @@ class TestPromptBuilder:
def test_empty_context(self, memory): def test_empty_context(self, memory):
"""Should handle empty context gracefully.""" """Should handle empty context gracefully."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -179,7 +180,7 @@ class TestPromptBuilder:
results = [{"name": f"Torrent {i}", "seeders": i} for i in range(20)] results = [{"name": f"Torrent {i}", "seeders": i} for i in range(20)]
memory.episodic.store_search_results("test", results) memory.episodic.store_search_results("test", results)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -198,7 +199,7 @@ class TestFormatToolsDescription:
def test_format_all_tools(self, memory): def test_format_all_tools(self, memory):
"""Should format all tools.""" """Should format all tools."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
desc = builder._format_tools_description() desc = builder._format_tools_description()
@@ -209,7 +210,7 @@ class TestFormatToolsDescription:
def test_includes_parameters(self, memory): def test_includes_parameters(self, memory):
"""Should include parameter schemas.""" """Should include parameter schemas."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
desc = builder._format_tools_description() desc = builder._format_tools_description()
@@ -223,7 +224,7 @@ class TestFormatEpisodicContext:
def test_empty_episodic(self, memory): def test_empty_episodic(self, memory):
"""Should return empty string for empty episodic.""" """Should return empty string for empty episodic."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory) context = builder._format_episodic_context(memory)
@@ -232,7 +233,7 @@ class TestFormatEpisodicContext:
def test_with_search_results(self, memory_with_search_results): def test_with_search_results(self, memory_with_search_results):
"""Should format search results.""" """Should format search results."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory_with_search_results) context = builder._format_episodic_context(memory_with_search_results)
@@ -246,7 +247,7 @@ class TestFormatEpisodicContext:
memory.episodic.add_active_download({"task_id": "1", "name": "Download"}) memory.episodic.add_active_download({"task_id": "1", "name": "Download"})
memory.episodic.add_error("action", "error") memory.episodic.add_error("action", "error")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory) context = builder._format_episodic_context(memory)
@@ -261,7 +262,7 @@ class TestFormatStmContext:
def test_empty_stm(self, memory): def test_empty_stm(self, memory):
"""Should return language info even for empty STM.""" """Should return language info even for empty STM."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)
@@ -273,7 +274,7 @@ class TestFormatStmContext:
"""Should format workflow.""" """Should format workflow."""
memory.stm.start_workflow("download", {"title": "Test"}) memory.stm.start_workflow("download", {"title": "Test"})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)
@@ -287,7 +288,7 @@ class TestFormatStmContext:
memory.stm.set_topic("searching") memory.stm.set_topic("searching")
memory.stm.set_entity("key", "value") memory.stm.set_entity("key", "value")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)

View File

@@ -2,6 +2,7 @@
from alfred.agent.prompts import PromptBuilder from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilderToolsInjection: class TestPromptBuilderToolsInjection:
@@ -9,7 +10,7 @@ class TestPromptBuilderToolsInjection:
def test_system_prompt_includes_all_tools(self, memory): def test_system_prompt_includes_all_tools(self, memory):
"""CRITICAL: Verify all tools are mentioned in system prompt.""" """CRITICAL: Verify all tools are mentioned in system prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -21,7 +22,7 @@ class TestPromptBuilderToolsInjection:
def test_tools_spec_contains_all_registered_tools(self, memory): def test_tools_spec_contains_all_registered_tools(self, memory):
"""CRITICAL: Verify build_tools_spec() returns all tools.""" """CRITICAL: Verify build_tools_spec() returns all tools."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -32,7 +33,7 @@ class TestPromptBuilderToolsInjection:
def test_tools_spec_is_not_empty(self, memory): def test_tools_spec_is_not_empty(self, memory):
"""CRITICAL: Verify tools spec is never empty.""" """CRITICAL: Verify tools spec is never empty."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -40,7 +41,7 @@ class TestPromptBuilderToolsInjection:
def test_tools_spec_format_matches_openai(self, memory): def test_tools_spec_format_matches_openai(self, memory):
"""CRITICAL: Verify tools spec format is OpenAI-compatible.""" """CRITICAL: Verify tools spec format is OpenAI-compatible."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -58,7 +59,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_current_topic(self, memory): def test_prompt_includes_current_topic(self, memory):
"""Verify current topic is included in prompt.""" """Verify current topic is included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.stm.set_topic("test_topic") memory.stm.set_topic("test_topic")
@@ -68,7 +69,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_extracted_entities(self, memory): def test_prompt_includes_extracted_entities(self, memory):
"""Verify extracted entities are included in prompt.""" """Verify extracted entities are included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.stm.set_entity("test_key", "test_value") memory.stm.set_entity("test_key", "test_value")
@@ -78,7 +79,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_search_results(self, memory_with_search_results): def test_prompt_includes_search_results(self, memory_with_search_results):
"""Verify search results are included in prompt.""" """Verify search results are included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -88,7 +89,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_active_downloads(self, memory): def test_prompt_includes_active_downloads(self, memory):
"""Verify active downloads are included in prompt.""" """Verify active downloads are included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.episodic.add_active_download( memory.episodic.add_active_download(
@@ -102,7 +103,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_recent_errors(self, memory): def test_prompt_includes_recent_errors(self, memory):
"""Verify recent errors are included in prompt.""" """Verify recent errors are included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.episodic.add_error("test_action", "test error message") memory.episodic.add_error("test_action", "test error message")
@@ -113,7 +114,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_configuration(self, memory): def test_prompt_includes_configuration(self, memory):
"""Verify configuration is included in prompt.""" """Verify configuration is included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.ltm.set_config("download_folder", "/test/downloads") memory.ltm.set_config("download_folder", "/test/downloads")
@@ -124,7 +125,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_language(self, memory): def test_prompt_includes_language(self, memory):
"""Verify language is included in prompt.""" """Verify language is included in prompt."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.stm.set_language("fr") memory.stm.set_language("fr")
@@ -139,7 +140,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_is_not_empty(self, memory): def test_system_prompt_is_not_empty(self, memory):
"""Verify system prompt is never empty.""" """Verify system prompt is never empty."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -148,7 +149,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_base_instruction(self, memory): def test_system_prompt_includes_base_instruction(self, memory):
"""Verify system prompt includes base instruction.""" """Verify system prompt includes base instruction."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -156,7 +157,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_rules(self, memory): def test_system_prompt_includes_rules(self, memory):
"""Verify system prompt includes important rules.""" """Verify system prompt includes important rules."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -164,7 +165,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_examples(self, memory): def test_system_prompt_includes_examples(self, memory):
"""Verify system prompt includes examples.""" """Verify system prompt includes examples."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -172,7 +173,7 @@ class TestPromptBuilderStructure:
def test_tools_description_format(self, memory): def test_tools_description_format(self, memory):
"""Verify tools are properly formatted in description.""" """Verify tools are properly formatted in description."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
description = builder._format_tools_description() description = builder._format_tools_description()
@@ -185,7 +186,7 @@ class TestPromptBuilderStructure:
def test_episodic_context_format(self, memory_with_search_results): def test_episodic_context_format(self, memory_with_search_results):
"""Verify episodic context is properly formatted.""" """Verify episodic context is properly formatted."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory_with_search_results) context = builder._format_episodic_context(memory_with_search_results)
@@ -195,7 +196,7 @@ class TestPromptBuilderStructure:
def test_stm_context_format(self, memory): def test_stm_context_format(self, memory):
"""Verify STM context is properly formatted.""" """Verify STM context is properly formatted."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.stm.set_topic("test_topic") memory.stm.set_topic("test_topic")
@@ -208,7 +209,7 @@ class TestPromptBuilderStructure:
def test_config_context_format(self, memory): def test_config_context_format(self, memory):
"""Verify config context is properly formatted.""" """Verify config context is properly formatted."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.ltm.set_config("test_key", "test_value") memory.ltm.set_config("test_key", "test_value")
@@ -224,7 +225,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_no_memory_context(self, memory): def test_prompt_with_no_memory_context(self, memory):
"""Verify prompt works with empty memory.""" """Verify prompt works with empty memory."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
# Memory is empty # Memory is empty
@@ -254,7 +255,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_unicode_in_memory(self, memory): def test_prompt_with_unicode_in_memory(self, memory):
"""Verify prompt handles unicode in memory.""" """Verify prompt handles unicode in memory."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
memory.stm.set_entity("movie", "Amélie 🎬") memory.stm.set_entity("movie", "Amélie 🎬")
@@ -266,7 +267,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_long_search_results(self, memory): def test_prompt_with_long_search_results(self, memory):
"""Verify prompt handles many search results.""" """Verify prompt handles many search results."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
# Add many results # Add many results

View File

@@ -2,6 +2,7 @@
from alfred.agent.prompts import PromptBuilder from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilderEdgeCases: class TestPromptBuilderEdgeCases:
@@ -9,7 +10,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_empty_memory(self, memory): def test_prompt_with_empty_memory(self, memory):
"""Should build prompt with completely empty memory.""" """Should build prompt with completely empty memory."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -22,7 +23,7 @@ class TestPromptBuilderEdgeCases:
memory.ltm.set_config("folder_日本語", "/path/to/日本語") memory.ltm.set_config("folder_日本語", "/path/to/日本語")
memory.ltm.set_config("emoji_folder", "/path/🎬") memory.ltm.set_config("emoji_folder", "/path/🎬")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -35,7 +36,7 @@ class TestPromptBuilderEdgeCases:
long_path = "/very/long/path/" + "x" * 1000 long_path = "/very/long/path/" + "x" * 1000
memory.ltm.set_config("download_folder", long_path) memory.ltm.set_config("download_folder", long_path)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -47,7 +48,7 @@ class TestPromptBuilderEdgeCases:
"""Should escape special characters in config.""" """Should escape special characters in config."""
memory.ltm.set_config("path", '/path/with "quotes" and \\backslash') memory.ltm.set_config("path", '/path/with "quotes" and \\backslash')
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -60,7 +61,7 @@ class TestPromptBuilderEdgeCases:
results = [{"name": f"Torrent {i}", "seeders": i} for i in range(50)] results = [{"name": f"Torrent {i}", "seeders": i} for i in range(50)]
memory.episodic.store_search_results("test query", results) memory.episodic.store_search_results("test query", results)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -79,7 +80,7 @@ class TestPromptBuilderEdgeCases:
] ]
memory.episodic.store_search_results("test", results) memory.episodic.store_search_results("test", results)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -98,7 +99,7 @@ class TestPromptBuilderEdgeCases:
} }
) )
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -112,7 +113,7 @@ class TestPromptBuilderEdgeCases:
for i in range(10): for i in range(10):
memory.episodic.add_error(f"action_{i}", f"Error {i}") memory.episodic.add_error(f"action_{i}", f"Error {i}")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -125,7 +126,7 @@ class TestPromptBuilderEdgeCases:
options = [{"index": i, "label": f"Option {i}"} for i in range(20)] options = [{"index": i, "label": f"Option {i}"} for i in range(20)]
memory.episodic.set_pending_question("Choose one:", options, {}) memory.episodic.set_pending_question("Choose one:", options, {})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -146,7 +147,7 @@ class TestPromptBuilderEdgeCases:
) )
memory.stm.update_workflow_stage("searching_torrents") memory.stm.update_workflow_stage("searching_torrents")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -160,7 +161,7 @@ class TestPromptBuilderEdgeCases:
for i in range(50): for i in range(50):
memory.stm.set_entity(f"entity_{i}", f"value_{i}") memory.stm.set_entity(f"entity_{i}", f"value_{i}")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -174,7 +175,7 @@ class TestPromptBuilderEdgeCases:
memory.stm.set_entity("zero", 0) memory.stm.set_entity("zero", 0)
memory.stm.set_entity("false", False) memory.stm.set_entity("false", False)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -187,7 +188,7 @@ class TestPromptBuilderEdgeCases:
memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"}) memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"})
memory.episodic.add_background_event("new_files", {"count": 5}) memory.episodic.add_background_event("new_files", {"count": 5})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -223,7 +224,7 @@ class TestPromptBuilderEdgeCases:
# Events # Events
memory.episodic.add_background_event("event", {}) memory.episodic.add_background_event("event", {})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -244,7 +245,7 @@ class TestPromptBuilderEdgeCases:
memory.ltm.set_config("key", {"nested": [1, 2, 3]}) memory.ltm.set_config("key", {"nested": [1, 2, 3]})
memory.stm.set_entity("complex", {"a": {"b": {"c": "d"}}}) memory.stm.set_entity("complex", {"a": {"b": {"c": "d"}}})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
prompt = builder.build_system_prompt() prompt = builder.build_system_prompt()
@@ -306,7 +307,7 @@ class TestFormatEpisodicContextEdgeCases:
"""Should handle empty search query.""" """Should handle empty search query."""
memory.episodic.store_search_results("", [{"name": "Result"}]) memory.episodic.store_search_results("", [{"name": "Result"}])
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory) context = builder._format_episodic_context(memory)
@@ -324,7 +325,7 @@ class TestFormatEpisodicContextEdgeCases:
], ],
) )
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory) context = builder._format_episodic_context(memory)
@@ -336,7 +337,7 @@ class TestFormatEpisodicContextEdgeCases:
"""Should handle download without progress.""" """Should handle download without progress."""
memory.episodic.add_active_download({"task_id": "1", "name": "Test"}) memory.episodic.add_active_download({"task_id": "1", "name": "Test"})
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory) context = builder._format_episodic_context(memory)
@@ -355,7 +356,7 @@ class TestFormatStmContextEdgeCases:
"stage": "started", "stage": "started",
} }
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)
@@ -366,7 +367,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle workflow with None target.""" """Should handle workflow with None target."""
memory.stm.start_workflow("download", None) memory.stm.start_workflow("download", None)
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
try: try:
@@ -380,7 +381,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle empty topic.""" """Should handle empty topic."""
memory.stm.set_topic("") memory.stm.set_topic("")
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)
@@ -392,7 +393,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle entities containing JSON strings.""" """Should handle entities containing JSON strings."""
memory.stm.set_entity("json_string", '{"key": "value"}') memory.stm.set_entity("json_string", '{"key": "value"}')
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
context = builder._format_stm_context(memory) context = builder._format_stm_context(memory)

View File

@@ -6,6 +6,7 @@ import pytest
from alfred.agent.prompts import PromptBuilder from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import Tool, _create_tool_from_function, make_tools from alfred.agent.registry import Tool, _create_tool_from_function, make_tools
from alfred.settings import settings
class TestToolSpecFormat: class TestToolSpecFormat:
@@ -13,7 +14,7 @@ class TestToolSpecFormat:
def test_tool_spec_format_is_openai_compatible(self): def test_tool_spec_format_is_openai_compatible(self):
"""CRITICAL: Verify tool specs are OpenAI-compatible.""" """CRITICAL: Verify tool specs are OpenAI-compatible."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -62,7 +63,7 @@ class TestToolSpecFormat:
def test_all_registered_tools_are_callable(self): def test_all_registered_tools_are_callable(self):
"""CRITICAL: Verify all registered tools are actually callable.""" """CRITICAL: Verify all registered tools are actually callable."""
tools = make_tools() tools = make_tools(settings)
assert len(tools) > 0, "No tools registered" assert len(tools) > 0, "No tools registered"
@@ -78,7 +79,7 @@ class TestToolSpecFormat:
def test_tools_spec_contains_all_registered_tools(self): def test_tools_spec_contains_all_registered_tools(self):
"""CRITICAL: Verify build_tools_spec() returns all registered tools.""" """CRITICAL: Verify build_tools_spec() returns all registered tools."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -119,7 +120,7 @@ class TestToolSpecFormat:
def test_tool_parameters_have_descriptions(self): def test_tool_parameters_have_descriptions(self):
"""Verify all tool parameters have descriptions.""" """Verify all tool parameters have descriptions."""
tools = make_tools() tools = make_tools(settings)
builder = PromptBuilder(tools) builder = PromptBuilder(tools)
specs = builder.build_tools_spec() specs = builder.build_tools_spec()
@@ -150,28 +151,28 @@ class TestToolRegistry:
def test_make_tools_returns_dict(self): def test_make_tools_returns_dict(self):
"""Verify make_tools returns a dictionary.""" """Verify make_tools returns a dictionary."""
tools = make_tools() tools = make_tools(settings)
assert isinstance(tools, dict) assert isinstance(tools, dict)
assert len(tools) > 0 assert len(tools) > 0
def test_all_tools_have_unique_names(self): def test_all_tools_have_unique_names(self):
"""Verify all tool names are unique.""" """Verify all tool names are unique."""
tools = make_tools() tools = make_tools(settings)
names = [tool.name for tool in tools.values()] names = [tool.name for tool in tools.values()]
assert len(names) == len(set(names)), "Duplicate tool names found" assert len(names) == len(set(names)), "Duplicate tool names found"
def test_tool_names_match_dict_keys(self): def test_tool_names_match_dict_keys(self):
"""Verify tool names match their dictionary keys.""" """Verify tool names match their dictionary keys."""
tools = make_tools() tools = make_tools(settings)
for key, tool in tools.items(): for key, tool in tools.items():
assert key == tool.name, f"Key {key} doesn't match tool name {tool.name}" assert key == tool.name, f"Key {key} doesn't match tool name {tool.name}"
def test_expected_tools_are_registered(self): def test_expected_tools_are_registered(self):
"""Verify all expected tools are registered.""" """Verify all expected tools are registered."""
tools = make_tools() tools = make_tools(settings)
expected_tools = [ expected_tools = [
"set_path_for_folder", "set_path_for_folder",
@@ -189,7 +190,7 @@ class TestToolRegistry:
def test_tool_functions_are_valid(self): def test_tool_functions_are_valid(self):
"""Verify all tool functions are properly structured.""" """Verify all tool functions are properly structured."""
tools = make_tools() tools = make_tools(settings)
# Verify structure without calling functions # Verify structure without calling functions
# (calling would require full setup with memory, clients, etc.) # (calling would require full setup with memory, clients, etc.)

View File

@@ -3,6 +3,7 @@
import pytest import pytest
from alfred.agent.registry import Tool, make_tools from alfred.agent.registry import Tool, make_tools
from alfred.settings import settings
class TestToolEdgeCases: class TestToolEdgeCases:
@@ -140,13 +141,13 @@ class TestMakeToolsEdgeCases:
def test_make_tools_returns_dict(self, memory): def test_make_tools_returns_dict(self, memory):
"""Should return dictionary of tools.""" """Should return dictionary of tools."""
tools = make_tools() tools = make_tools(settings)
assert isinstance(tools, dict) assert isinstance(tools, dict)
def test_make_tools_all_tools_have_required_fields(self, memory): def test_make_tools_all_tools_have_required_fields(self, memory):
"""Should have all required fields for each tool.""" """Should have all required fields for each tool."""
tools = make_tools() tools = make_tools(settings)
for name, tool in tools.items(): for name, tool in tools.items():
assert tool.name == name assert tool.name == name
@@ -157,14 +158,14 @@ class TestMakeToolsEdgeCases:
def test_make_tools_unique_names(self, memory): def test_make_tools_unique_names(self, memory):
"""Should have unique tool names.""" """Should have unique tool names."""
tools = make_tools() tools = make_tools(settings)
names = list(tools.keys()) names = list(tools.keys())
assert len(names) == len(set(names)) assert len(names) == len(set(names))
def test_make_tools_valid_parameter_schemas(self, memory): def test_make_tools_valid_parameter_schemas(self, memory):
"""Should have valid JSON Schema for parameters.""" """Should have valid JSON Schema for parameters."""
tools = make_tools() tools = make_tools(settings)
for tool in tools.values(): for tool in tools.values():
params = tool.parameters params = tool.parameters
@@ -176,7 +177,7 @@ class TestMakeToolsEdgeCases:
def test_make_tools_required_params_in_properties(self, memory): def test_make_tools_required_params_in_properties(self, memory):
"""Should have required params defined in properties.""" """Should have required params defined in properties."""
tools = make_tools() tools = make_tools(settings)
for tool in tools.values(): for tool in tools.values():
params = tool.parameters params = tool.parameters
@@ -188,21 +189,21 @@ class TestMakeToolsEdgeCases:
def test_make_tools_descriptions_not_empty(self, memory): def test_make_tools_descriptions_not_empty(self, memory):
"""Should have non-empty descriptions.""" """Should have non-empty descriptions."""
tools = make_tools() tools = make_tools(settings)
for tool in tools.values(): for tool in tools.values():
assert tool.description.strip() != "" assert tool.description.strip() != ""
def test_make_tools_funcs_callable(self, memory): def test_make_tools_funcs_callable(self, memory):
"""Should have callable functions.""" """Should have callable functions."""
tools = make_tools() tools = make_tools(settings)
for tool in tools.values(): for tool in tools.values():
assert callable(tool.func) assert callable(tool.func)
def test_make_tools_expected_tools_present(self, memory): def test_make_tools_expected_tools_present(self, memory):
"""Should have expected tools.""" """Should have expected tools."""
tools = make_tools() tools = make_tools(settings)
expected = [ expected = [
"set_path_for_folder", "set_path_for_folder",
@@ -220,14 +221,14 @@ class TestMakeToolsEdgeCases:
def test_make_tools_idempotent(self, memory): def test_make_tools_idempotent(self, memory):
"""Should return same tools on multiple calls.""" """Should return same tools on multiple calls."""
tools1 = make_tools() tools1 = make_tools(settings)
tools2 = make_tools() tools2 = make_tools(settings)
assert set(tools1.keys()) == set(tools2.keys()) assert set(tools1.keys()) == set(tools2.keys())
def test_make_tools_parameter_types(self, memory): def test_make_tools_parameter_types(self, memory):
"""Should have valid parameter types.""" """Should have valid parameter types."""
tools = make_tools() tools = make_tools(settings)
valid_types = ["string", "integer", "number", "boolean", "array", "object"] valid_types = ["string", "integer", "number", "boolean", "array", "object"]
@@ -241,7 +242,7 @@ class TestMakeToolsEdgeCases:
def test_make_tools_enum_values(self, memory): def test_make_tools_enum_values(self, memory):
"""Should have valid enum values.""" """Should have valid enum values."""
tools = make_tools() tools = make_tools(settings)
for tool in tools.values(): for tool in tools.values():
if "properties" in tool.parameters: if "properties" in tool.parameters:
@@ -256,7 +257,7 @@ class TestToolExecution:
def test_tool_returns_dict(self, memory, real_folder): def test_tool_returns_dict(self, memory, real_folder):
"""Should return dict from tool execution.""" """Should return dict from tool execution."""
tools = make_tools() tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
result = tools["list_folder"].func(folder_type="download") result = tools["list_folder"].func(folder_type="download")
@@ -265,7 +266,7 @@ class TestToolExecution:
def test_tool_returns_status(self, memory, real_folder): def test_tool_returns_status(self, memory, real_folder):
"""Should return status in result.""" """Should return status in result."""
tools = make_tools() tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
result = tools["list_folder"].func(folder_type="download") result = tools["list_folder"].func(folder_type="download")
@@ -274,14 +275,14 @@ class TestToolExecution:
def test_tool_handles_missing_args(self, memory): def test_tool_handles_missing_args(self, memory):
"""Should handle missing required arguments.""" """Should handle missing required arguments."""
tools = make_tools() tools = make_tools(settings)
with pytest.raises(TypeError): with pytest.raises(TypeError):
tools["set_path_for_folder"].func() # Missing required args tools["set_path_for_folder"].func() # Missing required args
def test_tool_handles_wrong_type_args(self, memory): def test_tool_handles_wrong_type_args(self, memory):
"""Should handle wrong type arguments.""" """Should handle wrong type arguments."""
tools = make_tools() tools = make_tools(settings)
# Pass wrong type - should either work or raise # Pass wrong type - should either work or raise
try: try:
@@ -293,7 +294,7 @@ class TestToolExecution:
def test_tool_handles_extra_args(self, memory, real_folder): def test_tool_handles_extra_args(self, memory, real_folder):
"""Should handle extra arguments.""" """Should handle extra arguments."""
tools = make_tools() tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"])) memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
# Extra args should raise TypeError # Extra args should raise TypeError