25 Commits

Author SHA1 Message Date
01a00a12af chore: bump version 0.1.6 → 0.1.7
Some checks failed
CI/CD Awesome Pipeline / Test (push) Successful in 5m54s
CI/CD Awesome Pipeline / Build & Push to Registry (push) Failing after 1m9s
2026-01-01 05:04:37 +01:00
504d0162bb infra: added proper settings handling & orchestration and app bootstrap (.env)
Reviewed-on: https://gitea.iswearihadsomethingforthis.net/francwa/alfred/pulls/18
2026-01-01 03:55:35 +00:00
cda23d074f feat: added current alfred version from pyproject.toml to healthcheck 2026-01-01 04:51:45 +01:00
0357108077 infra: added orchestration and app bootstrap (.env) 2026-01-01 04:48:32 +01:00
ab1df3dd0f fix: forgot to lint/format 2026-01-01 04:48:32 +01:00
c50091f6bf feat: added proper settings handling 2026-01-01 04:48:32 +01:00
8b406370f1 chore: updated some dependencies 2026-01-01 03:08:22 +01:00
c56bf2b92c feat: added Claude.AI to available providers 2025-12-29 02:13:11 +01:00
b1507db4d0 fix: fixed test image not being built before running tests
All checks were successful
Renovate Bot / renovate (push) Successful in 1m26s
2025-12-28 13:24:03 +01:00
3074962314 infra: proper librechat integration & improved configuration handling
Reviewed-on: https://gitea.iswearihadsomethingforthis.net/francwa/alfred/pulls/11
2025-12-28 11:56:53 +00:00
84799879bb fix: added data dir to .dockerignore 2025-12-28 12:02:44 +01:00
1052c1b619 infra: added modules version to .env.example 2025-12-28 06:07:34 +01:00
9958b8e848 feat: created placeholders for various API models 2025-12-28 06:06:28 +01:00
b15161dad7 fix: provided OpenAI API key and fixed docker-compose configuration to enable RAG service 2025-12-28 06:04:08 +01:00
52f025ae32 chore: ran linter & formatter again 2025-12-27 20:07:48 +01:00
2cfe7a035b infra: moved makefile logic to python and added .env setup 2025-12-27 19:51:40 +01:00
2441c2dc29 infra: rewrote docker-compose for proper integration of librechat 2025-12-27 19:49:43 +01:00
261a1f3918 fix: fixed real data directory being used in tests 2025-12-27 19:48:13 +01:00
253903a1e5 infra: made pyproject the SSOT 2025-12-27 19:46:27 +01:00
20a113e335 infra: updated .env.example 2025-12-27 19:43:31 +01:00
fed83e7d79 chore: bumped fastapi version 2025-12-27 19:42:16 +01:00
3880a4ec49 chore: ran linter and formatter 2025-12-27 19:41:22 +01:00
6195abbaa5 chore: fixed imports and tests configuration 2025-12-27 19:39:36 +01:00
b132554631 infra: updated .gitignore 2025-12-27 02:30:14 +01:00
561796cec1 infra: removed CORE_DIR variable from Makefile (not relevant anymore) 2025-12-24 11:28:28 +01:00
61 changed files with 1759 additions and 1150 deletions

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.1.6"
current_version = "0.1.7"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
search = "{current_version}"

View File

@@ -22,8 +22,7 @@ venv
.venv
env
.env
.env.*
.env-
# IDE
.vscode
.idea
@@ -41,11 +40,8 @@ docs/
*.md
!README.md
# Tests
tests/
pytest.ini
# Data (will be mounted as volumes)
# Data
data/
memory_data/
logs/
*.log

View File

@@ -1,23 +1,93 @@
# API Keys
# Deepseek API Key (for LLM in alfred)
DEEPSEEK_API_KEY=
MAX_HISTORY_MESSAGES=10
MAX_TOOL_ITERATIONS=10
REQUEST_TIMEOUT=30
# Alfred Configuration
# LLM Provider (deepseek or ollama)
LLM_PROVIDER=deepseek
# LLM Settings
LLM_TEMPERATURE=0.2
# Memory storage directory (inside container)
MEMORY_STORAGE_DIR=/data/memory
# Persistence
DATA_STORAGE_DIR=data
# External Services (Optional)
# TMDB API Key (for movie metadata)
# Network configuration
HOST=0.0.0.0
PORT=3080
# Build information (synced with pyproject.toml via bootstrap)
ALFRED_VERSION=
IMAGE_NAME=
LIBRECHAT_VERSION=
PYTHON_VERSION=
PYTHON_VERSION_SHORT=
RAG_VERSION=
RUNNER=
SERVICE_NAME=
# --- SECURITY KEYS (CRITICAL) ---
# These are used for session tokens and encrypting sensitive data in MongoDB.
# If you lose these, you lose access to encrypted stored credentials.
JWT_SECRET=
JWT_REFRESH_SECRET=
CREDS_KEY=
CREDS_IV=
# --- DATABASES (AUTO-SECURED) ---
# Alfred uses MongoDB for application state and PostgreSQL for Vector RAG.
# Passwords will be generated as 24-character secure tokens if left blank.
# MongoDB (Application Data)
MONGO_URI=
MONGO_HOST=mongodb
MONGO_PORT=27017
MONGO_USER=alfred
MONGO_PASSWORD=
MONGO_DB_NAME=alfred
# PostgreSQL (Vector Database / RAG)
POSTGRES_URI=
POSTGRES_HOST=vectordb
POSTGRES_PORT=5432
POSTGRES_USER=alfred
POSTGRES_PASSWORD=
POSTGRES_DB_NAME=alfred
# --- EXTERNAL SERVICES ---
# Media Metadata (Required)
# Get your key at https://www.themoviedb.org/
TMDB_API_KEY=
TMDB_BASE_URL=https://api.themoviedb.org/3
# qBittorrent Configuration
QBITTORRENT_URL=
# qBittorrent integration
QBITTORRENT_URL=http://qbittorrent:16140
QBITTORRENT_USERNAME=admin
QBITTORRENT_PASSWORD=adminadmin
QBITTORRENT_PASSWORD=
QBITTORRENT_PORT=16140
# Debug Options
DEBUG_LOGGING=false
DEBUG_CONSOLE=false
# Meilisearch
MEILI_ENABLED=FALSE
MEILI_NO_ANALYTICS=TRUE
MEILI_HOST=http://meilisearch:7700
MEILI_MASTER_KEY=
# --- LLM CONFIGURATION ---
# Providers: 'local', 'openai', 'anthropic', 'deepseek', 'google', 'kimi'
DEFAULT_LLM_PROVIDER=local
# Local LLM (Ollama)
OLLAMA_BASE_URL=http://ollama:11434
OLLAMA_MODEL=llama3.3:latest
# --- API KEYS (OPTIONAL) ---
# Fill only the ones you intend to use.
ANTHROPIC_API_KEY=
DEEPSEEK_API_KEY=
GOOGLE_API_KEY=
KIMI_API_KEY=
OPENAI_API_KEY=
# --- RAG ENGINE ---
# Enable/Disable the Retrieval Augmented Generation system
RAG_ENABLED=TRUE
RAG_API_URL=http://rag_api:8000
RAG_API_PORT=8000
EMBEDDINGS_PROVIDER=ollama
EMBEDDINGS_MODEL=nomic-embed-text

6
.gitignore vendored
View File

@@ -59,3 +59,9 @@ Thumbs.db
# Backup files
*.backup
# Application data dir
data/*
# Application logs
logs/*

View File

@@ -43,6 +43,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \
uv pip install --system -r pyproject.toml; \
fi
COPY scripts/ ./scripts/
COPY .env.example ./
# ===========================================
# Stage 2: Testing
# ===========================================
@@ -59,12 +62,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \
uv pip install --system -e .[dev]; \
fi
COPY alfred/agent/ ./agent/
COPY alfred/application/ ./application/
COPY alfred/domain/ ./domain/
COPY alfred/infrastructure/ ./infrastructure/
COPY alfred/app.py .
COPY tests/ ./tests/
COPY alfred/ ./alfred
COPY scripts ./scripts
COPY tests/ ./tests
# ===========================================
# Stage 3: Runtime
@@ -73,10 +73,11 @@ FROM python:${PYTHON_VERSION}-slim-bookworm AS runtime
ARG PYTHON_VERSION_SHORT
# TODO: A-t-on encore besoin de toutes les clés ?
ENV LLM_PROVIDER=deepseek \
MEMORY_STORAGE_DIR=/data/memory \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONPATH=/home/appuser/app \
PYTHONPATH=/home/appuser \
PYTHONUNBUFFERED=1
# Install runtime dependencies (needs root)
@@ -89,28 +90,27 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN useradd -m -u 1000 -s /bin/bash appuser
# Create data directories (needs root for /data)
RUN mkdir -p /data/memory /data/logs \
&& chown -R appuser:appuser /data
RUN mkdir -p /data /logs \
&& chown -R appuser:appuser /data /logs
# Switch to non-root user
USER appuser
# Set working directory (owned by appuser)
WORKDIR /home/appuser/app
WORKDIR /home/appuser
# Copy Python packages from builder stage
COPY --from=builder /usr/local/lib/python${PYTHON_VERSION_SHORT}/site-packages /usr/local/lib/python${PYTHON_VERSION_SHORT}/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy application code (already owned by appuser)
COPY --chown=appuser:appuser alfred/agent/ ./agent/
COPY --chown=appuser:appuser alfred/application/ ./application/
COPY --chown=appuser:appuser alfred/domain/ ./domain/
COPY --chown=appuser:appuser alfred/infrastructure/ ./infrastructure/
COPY --chown=appuser:appuser alfred/app.py .
COPY --chown=appuser:appuser alfred/ ./alfred
COPY --chown=appuser:appuser scripts/ ./scripts
COPY --chown=appuser:appuser .env.example ./
COPY --chown=appuser:appuser pyproject.toml ./
# Create volumes for persistent data
VOLUME ["/data/memory", "/data/logs"]
VOLUME ["/data", "/logs"]
# Expose port
EXPOSE 8000
@@ -119,5 +119,4 @@ EXPOSE 8000
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:8000/health', timeout=5).raise_for_status()" || exit 1
# Run the application
CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
CMD ["python", "-m", "uvicorn", "alfred.app:app", "--host", "0.0.0.0", "--port", "8000"]

385
Makefile
View File

@@ -1,249 +1,140 @@
.POSIX:
.SUFFIXES:
.DEFAULT_GOAL := help
# --- SETTINGS ---
CORE_DIR = alfred
IMAGE_NAME = alfred_media_organizer
# renovate: datasource=docker depName=python
PYTHON_VERSION = $(shell grep "python" $(CORE_DIR)/pyproject.toml | head -n 1 | sed -E 's/.*[=<>^~"]+ *([0-9]+\.[0-9]+(\.[0-9]+)?).*/\1/')
PYTHON_VERSION_SHORT = $(shell echo $(PYTHON_VERSION) | cut -d. -f1,2)
# Change to 'uv' when ready.
RUNNER ?= poetry
SERVICE_NAME = alfred
# --- Load Config from pyproject.toml ---
-include .env.make
export IMAGE_NAME
export PYTHON_VERSION
export PYTHON_VERSION_SHORT
export RUNNER
# --- Profiles management ---
# Usage: make up p=rag,meili
p ?= core
PROFILES_PARAM := COMPOSE_PROFILES=$(p)
# --- ADAPTERS ---
# UV uses "sync", Poetry uses "install". Both install DEV deps by default.
INSTALL_CMD = $(if $(filter uv,$(RUNNER)),sync,install)
# --- Commands ---
DOCKER_COMPOSE := docker compose
DOCKER_BUILD := docker build --no-cache \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
--build-arg RUNNER=$(RUNNER)
# --- MACROS ---
ARGS = $(filter-out $@,$(MAKECMDGOALS))
BUMP_CMD = cd $(CORE_DIR) && $(RUNNER) run bump-my-version bump
COMPOSE_CMD = docker-compose
DOCKER_CMD = docker build \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
--build-arg RUNNER=$(RUNNER) \
-f $(CORE_DIR)/Dockerfile \
-t $(IMAGE_NAME):latest .
# --- Phony ---
.PHONY: .env up down restart logs ps shell build build-test install update \
install-hooks test coverage lint format clean major minor patch help
RUNNER_ADD = cd $(CORE_DIR) && $(RUNNER) add
RUNNER_HOOKS = cd $(CORE_DIR) && $(RUNNER) run pre-commit install -c ../.pre-commit-config.yaml
RUNNER_INSTALL = cd $(CORE_DIR) && $(RUNNER) $(INSTALL_CMD)
RUNNER_RUN = cd $(CORE_DIR) && $(RUNNER) run
RUNNER_UPDATE = cd $(CORE_DIR) && $(RUNNER) update
# --- Setup ---
.env .env.make:
@echo "Initializing environment..."
@python scripts/bootstrap.py \
&& echo "✓ Environment ready" \
|| (echo "✗ Environment setup failed" && exit 1)
# --- STYLES ---
B = \033[1m
G = \033[32m
T = \033[36m
R = \033[0m
bootstrap: .env .env.make
# --- TARGETS ---
.PHONY: add build build-test check-docker check-runner clean coverage down format help init-dotenv install install-hooks lint logs major minor patch prune ps python-version restart run shell test up update _check_branch _ci-dump-config _ci-run-tests _push_tag
# --- Docker ---
up: .env
@echo "Starting containers with profiles: [$(p)]..."
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) up -d --remove-orphans \
&& echo "✓ Containers started" \
|| (echo "✗ Failed to start containers" && exit 1)
# Catch-all for args
%:
@:
down:
@echo "Stopping containers..."
@$(DOCKER_COMPOSE) down \
&& echo "✓ Containers stopped" \
|| (echo "✗ Failed to stop containers" && exit 1)
add: check-runner
@echo "$(T) Adding dependency ($(RUNNER)): $(ARGS)$(R)"
$(RUNNER_ADD) $(ARGS)
restart:
@echo "Restarting containers..."
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) restart \
&& echo "✓ Containers restarted" \
|| (echo "✗ Failed to restart containers" && exit 1)
build: check-docker
@echo "$(T)🐳 Building Docker image...$(R)"
$(DOCKER_CMD)
@echo "✅ Image $(IMAGE_NAME):latest ready."
logs:
@echo "Following logs (Ctrl+C to exit)..."
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) logs -f
build-test: check-docker
@echo "$(T)🐳 Building test image (with dev deps)...$(R)"
docker build \
--build-arg RUNNER=$(RUNNER) \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
-f $(CORE_DIR)/Dockerfile \
--target test \
-t $(IMAGE_NAME):test .
@echo "✅ Test image $(IMAGE_NAME):test ready."
ps:
@echo "Container status:"
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) ps
check-docker:
@command -v docker >/dev/null 2>&1 || { echo "$(R)❌ Docker not installed$(R)"; exit 1; }
@docker info >/dev/null 2>&1 || { echo "$(R)❌ Docker daemon not running$(R)"; exit 1; }
shell:
@echo "Opening shell in $(SERVICE_NAME)..."
@$(DOCKER_COMPOSE) exec $(SERVICE_NAME) /bin/bash
check-runner:
@command -v $(RUNNER) >/dev/null 2>&1 || { echo "$(R)$(RUNNER) not installed$(R)"; exit 1; }
# --- Build ---
build: .env.make
@echo "Building image $(IMAGE_NAME):latest ..."
@$(DOCKER_BUILD) -t $(IMAGE_NAME):latest . \
&& echo "✓ Build complete" \
|| (echo "✗ Build failed" && exit 1)
build-test: .env.make
@echo "Building test image $(IMAGE_NAME):test..."
@$(DOCKER_BUILD) --target test -t $(IMAGE_NAME):test . \
&& echo "✓ Test image built" \
|| (echo "✗ Build failed" && exit 1)
# --- Dependencies ---
install:
@echo "Installing dependencies with $(RUNNER)..."
@$(RUNNER) install \
&& echo "✓ Dependencies installed" \
|| (echo "✗ Installation failed" && exit 1)
install-hooks:
@echo "Installing pre-commit hooks..."
@$(RUNNER) run pre-commit install \
&& echo "✓ Hooks installed" \
|| (echo "✗ Hook installation failed" && exit 1)
update:
@echo "Updating dependencies with $(RUNNER)..."
@$(RUNNER) update \
&& echo "✓ Dependencies updated" \
|| (echo "✗ Update failed" && exit 1)
# --- Quality ---
test:
@echo "Running tests..."
@$(RUNNER) run pytest \
&& echo "✓ Tests passed" \
|| (echo "✗ Tests failed" && exit 1)
coverage:
@echo "Running tests with coverage..."
@$(RUNNER) run pytest --cov=. --cov-report=html --cov-report=term \
&& echo "✓ Coverage report generated" \
|| (echo "✗ Coverage failed" && exit 1)
lint:
@echo "Linting code..."
@$(RUNNER) run ruff check --fix . \
&& echo "✓ Linting complete" \
|| (echo "✗ Linting failed" && exit 1)
format:
@echo "Formatting code..."
@$(RUNNER) run ruff format . && $(RUNNER) run ruff check --fix . \
&& echo "✓ Code formatted" \
|| (echo "✗ Formatting failed" && exit 1)
clean:
@echo "$(T)🧹 Cleaning caches...$(R)"
cd $(CORE_DIR) && rm -rf .ruff_cache __pycache__ .pytest_cache
find $(CORE_DIR) -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
find $(CORE_DIR) -type d -name ".pytest_cache" -exec rm -rf {} + 2>/dev/null || true
find $(CORE_DIR) -type f -name "*.pyc" -delete 2>/dev/null || true
@echo "✅ Caches cleaned."
@echo "Cleaning build artifacts..."
@rm -rf .ruff_cache __pycache__ .pytest_cache htmlcov .coverage
@find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
@echo "✓ Cleanup complete"
coverage: check-runner
@echo "$(T)📊 Running tests with coverage...$(R)"
$(RUNNER_RUN) pytest --cov=. --cov-report=html --cov-report=term $(ARGS)
@echo "✅ Report generated in htmlcov/"
# --- Versioning ---
major minor patch: _check-main
@echo "Bumping $@ version..."
@$(RUNNER) run bump-my-version bump $@ \
&& echo "✓ Version bumped" \
|| (echo "✗ Version bump failed" && exit 1)
down: check-docker
@echo "$(T)🛑 Stopping containers...$(R)"
$(COMPOSE_CMD) down
@echo "✅ System stopped."
format: check-runner
@echo "$(T)✨ Formatting with Ruff...$(R)"
$(RUNNER_RUN) ruff format .
$(RUNNER_RUN) ruff check --fix .
@echo "✅ Code cleaned."
help:
@echo "$(B)Available commands:$(R)"
@echo ""
@echo "$(G)Setup:$(R)"
@echo " $(T)check-docker $(R) Verify Docker is installed and running."
@echo " $(T)check-runner $(R) Verify package manager ($(RUNNER))."
@echo " $(T)init-dotenv $(R) Create .env from .env.example with generated secrets."
@echo " $(T)install $(R) Install ALL dependencies (Prod + Dev)."
@echo " $(T)install-hooks $(R) Install git pre-commit hooks."
@echo ""
@echo "$(G)Docker:$(R)"
@echo " $(T)build $(R) Build the docker image (production)."
@echo " $(T)build-test $(R) Build the docker image (with dev deps for testing)."
@echo " $(T)down $(R) Stop and remove containers."
@echo " $(T)logs $(R) Follow logs."
@echo " $(T)prune $(R) Clean Docker system."
@echo " $(T)ps $(R) Show container status."
@echo " $(T)restart $(R) Restart all containers."
@echo " $(T)shell $(R) Open shell in container."
@echo " $(T)up $(R) Start the agent."
@echo ""
@echo "$(G)Development:$(R)"
@echo " $(T)add ... $(R) Add dependency (use --group dev or --dev if needed)."
@echo " $(T)clean $(R) Clean caches."
@echo " $(T)coverage $(R) Run tests with coverage."
@echo " $(T)format $(R) Format code (Ruff)."
@echo " $(T)lint $(R) Lint code without fixing."
@echo " $(T)test ... $(R) Run tests (local with $(RUNNER))."
@echo " $(T)update $(R) Update dependencies."
@echo ""
@echo "$(G)Versioning:$(R)"
@echo " $(T)major/minor/patch $(R) Bump version and push tag (triggers CI/CD)."
init-dotenv:
@echo "$(T)🔑 Initializing .env file...$(R)"
@if [ -f .env ]; then \
echo "$(R)⚠️ .env already exists. Skipping.$(R)"; \
exit 0; \
fi
@if [ ! -f .env.example ]; then \
echo "$(R)❌ .env.example not found$(R)"; \
exit 1; \
fi
@if ! command -v openssl >/dev/null 2>&1; then \
echo "$(R)❌ openssl not found. Please install it first.$(R)"; \
exit 1; \
fi
@echo "$(T) → Copying .env.example...$(R)"
@cp .env.example .env
@echo "$(T) → Generating secrets...$(R)"
@sed -i.bak "s|JWT_SECRET=.*|JWT_SECRET=$$(openssl rand -base64 32)|" .env
@sed -i.bak "s|JWT_REFRESH_SECRET=.*|JWT_REFRESH_SECRET=$$(openssl rand -base64 32)|" .env
@sed -i.bak "s|CREDS_KEY=.*|CREDS_KEY=$$(openssl rand -hex 16)|" .env
@sed -i.bak "s|CREDS_IV=.*|CREDS_IV=$$(openssl rand -hex 8)|" .env
@sed -i.bak "s|MEILI_MASTER_KEY=.*|MEILI_MASTER_KEY=$$(openssl rand -base64 32)|" .env
@rm -f .env.bak
@echo "$(G)✅ .env created with generated secrets!$(R)"
@echo "$(T)⚠️ Don't forget to add your API keys:$(R)"
@echo " - OPENAI_API_KEY"
@echo " - DEEPSEEK_API_KEY"
@echo " - TMDB_API_KEY (optional)"
install: check-runner
@echo "$(T)📦 Installing FULL environment ($(RUNNER))...$(R)"
$(RUNNER_INSTALL)
@echo "✅ Environment ready (Prod + Dev)."
install-hooks: check-runner
@echo "$(T)🔧 Installing hooks...$(R)"
$(RUNNER_HOOKS)
@echo "✅ Hooks ready."
lint: check-runner
@echo "$(T)🔍 Linting code...$(R)"
$(RUNNER_RUN) ruff check .
logs: check-docker
@echo "$(T)📋 Following logs...$(R)"
$(COMPOSE_CMD) logs -f
major: _check_branch
@echo "$(T)💥 Bumping major...$(R)"
SKIP=all $(BUMP_CMD) major
@$(MAKE) -s _push_tag
minor: _check_branch
@echo "$(T)✨ Bumping minor...$(R)"
SKIP=all $(BUMP_CMD) minor
@$(MAKE) -s _push_tag
patch: _check_branch
@echo "$(T)🚀 Bumping patch...$(R)"
SKIP=all $(BUMP_CMD) patch
@$(MAKE) -s _push_tag
prune: check-docker
@echo "$(T)🗑️ Pruning Docker resources...$(R)"
docker system prune -af
@echo "✅ Docker cleaned."
ps: check-docker
@echo "$(T)📋 Container status:$(R)"
@$(COMPOSE_CMD) ps
python-version:
@echo "🔍 Reading pyproject.toml..."
@echo "✅ Python version : $(PYTHON_VERSION)"
@echo " Sera utilisé pour : FROM python:$(PYTHON_VERSION)-slim"
restart: check-docker
@echo "$(T)🔄 Restarting containers...$(R)"
$(COMPOSE_CMD) restart
@echo "✅ Containers restarted."
run: check-runner
$(RUNNER_RUN) $(ARGS)
shell: check-docker
@echo "$(T)🐚 Opening shell in $(SERVICE_NAME)...$(R)"
$(COMPOSE_CMD) exec $(SERVICE_NAME) /bin/sh
test: check-runner
@echo "$(T)🧪 Running tests...$(R)"
$(RUNNER_RUN) pytest $(ARGS)
up: check-docker
@echo "$(T)🚀 Starting Agent Media...$(R)"
$(COMPOSE_CMD) up -d
@echo "✅ System is up."
update: check-runner
@echo "$(T)🔄 Updating dependencies...$(R)"
$(RUNNER_UPDATE)
@echo "✅ All packages up to date."
_check_branch:
@curr=$$(git rev-parse --abbrev-ref HEAD); \
if [ "$$curr" != "main" ]; then \
echo "❌ Error: not on the main branch"; exit 1; \
fi
@echo "Pushing tags..."
@git push --tags \
&& echo "✓ Tags pushed" \
|| (echo "✗ Push failed" && exit 1)
# CI/CD helpers
_ci-dump-config:
@echo "image_name=$(IMAGE_NAME)"
@echo "python_version=$(PYTHON_VERSION)"
@@ -251,15 +142,41 @@ _ci-dump-config:
@echo "runner=$(RUNNER)"
@echo "service_name=$(SERVICE_NAME)"
_ci-run-tests: build-test
@echo "$(T)🧪 Running tests in Docker...$(R)"
_ci-run-tests:build-test
@echo "Running tests in Docker..."
docker run --rm \
-e DEEPSEEK_API_KEY \
-e TMDB_API_KEY \
-e QBITTORRENT_URL \
$(IMAGE_NAME):test pytest
@echo " Tests passed."
@echo " Tests passed."
_push_tag:
@echo "$(T)📦 Pushing tag...$(R)"
git push --tags
@echo "✅ Tag pushed. Check CI for build status."
_check-main:
@test "$$(git rev-parse --abbrev-ref HEAD)" = "main" \
|| (echo "✗ ERROR: Not on main branch" && exit 1)
# --- Help ---
help:
@echo "Cleverly Crafted Unawareness - Management Commands"
@echo ""
@echo "Usage: make [target] [p=profile1,profile2]"
@echo ""
@echo "Docker:"
@echo " up Start containers (default profile: core)"
@echo " Example: make up p=rag,meili"
@echo " down Stop all containers"
@echo " restart Restart containers (supports p=...)"
@echo " logs Follow logs (supports p=...)"
@echo " ps Status of containers"
@echo " shell Open bash in the core container"
@echo " build Build the production Docker image"
@echo ""
@echo "Dev & Quality:"
@echo " setup Bootstrap .env and security keys"
@echo " install Install dependencies via $(RUNNER)"
@echo " test Run pytest suite"
@echo " coverage Run tests and generate HTML report"
@echo " lint/format Quality and style checks"
@echo ""
@echo "Release:"
@echo " major|minor|patch Bump version and push tags (main branch only)"

0
alfred/__init__.py Normal file
View File

View File

@@ -1,6 +1,7 @@
"""Agent module for media library management."""
from alfred.settings import settings
from .agent import Agent
from .config import settings
__all__ = ["Agent", "settings"]

View File

@@ -5,9 +5,9 @@ import logging
from collections.abc import AsyncGenerator
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from alfred.settings import settings
from .config import settings
from .prompts import PromptBuilder
from .registry import Tool, make_tools
@@ -21,17 +21,20 @@ class Agent:
Uses OpenAI-compatible tool calling API.
"""
def __init__(self, llm, max_tool_iterations: int = 5):
def __init__(self, settings, llm, max_tool_iterations: int = 5):
"""
Initialize the agent.
Args:
settings: Application settings instance
llm: LLM client with complete() method
max_tool_iterations: Maximum number of tool execution iterations
"""
self.settings = settings
self.llm = llm
self.tools: dict[str, Tool] = make_tools()
self.tools: dict[str, Tool] = make_tools(settings)
self.prompt_builder = PromptBuilder(self.tools)
self.settings = settings
self.max_tool_iterations = max_tool_iterations
def step(self, user_input: str) -> str:
@@ -78,7 +81,7 @@ class Agent:
tools_spec = self.prompt_builder.build_tools_spec()
# Tool execution loop
for _iteration in range(self.max_tool_iterations):
for _iteration in range(self.settings.max_tool_iterations):
# Call LLM with tools
llm_result = self.llm.complete(messages, tools=tools_spec)
@@ -230,7 +233,7 @@ class Agent:
tools_spec = self.prompt_builder.build_tools_spec()
# Tool execution loop
for _iteration in range(self.max_tool_iterations):
for _iteration in range(self.settings.max_tool_iterations):
# Call LLM with tools
llm_result = self.llm.complete(messages, tools=tools_spec)

View File

@@ -1,115 +0,0 @@
"""Configuration management with validation."""
import os
from dataclasses import dataclass, field
from pathlib import Path
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class ConfigurationError(Exception):
"""Raised when configuration is invalid."""
pass
@dataclass
class Settings:
"""Application settings loaded from environment variables."""
# LLM Configuration
deepseek_api_key: str = field(
default_factory=lambda: os.getenv("DEEPSEEK_API_KEY", "")
)
deepseek_base_url: str = field(
default_factory=lambda: os.getenv(
"DEEPSEEK_BASE_URL", "https://api.deepseek.com"
)
)
model: str = field(
default_factory=lambda: os.getenv("DEEPSEEK_MODEL", "deepseek-chat")
)
temperature: float = field(
default_factory=lambda: float(os.getenv("TEMPERATURE", "0.2"))
)
# TMDB Configuration
tmdb_api_key: str = field(default_factory=lambda: os.getenv("TMDB_API_KEY", ""))
tmdb_base_url: str = field(
default_factory=lambda: os.getenv(
"TMDB_BASE_URL", "https://api.themoviedb.org/3"
)
)
# Storage Configuration
memory_file: str = field(
default_factory=lambda: os.getenv("MEMORY_FILE", "memory.json")
)
# Security Configuration
max_tool_iterations: int = field(
default_factory=lambda: int(os.getenv("MAX_TOOL_ITERATIONS", "5"))
)
request_timeout: int = field(
default_factory=lambda: int(os.getenv("REQUEST_TIMEOUT", "30"))
)
# Memory Configuration
max_history_messages: int = field(
default_factory=lambda: int(os.getenv("MAX_HISTORY_MESSAGES", "10"))
)
def __post_init__(self):
"""Validate settings after initialization."""
self._validate()
def _validate(self) -> None:
"""Validate configuration values."""
# Validate temperature
if not 0.0 <= self.temperature <= 2.0:
raise ConfigurationError(
f"Temperature must be between 0.0 and 2.0, got {self.temperature}"
)
# Validate max_tool_iterations
if self.max_tool_iterations < 1 or self.max_tool_iterations > 20:
raise ConfigurationError(
f"max_tool_iterations must be between 1 and 20, got {self.max_tool_iterations}"
)
# Validate request_timeout
if self.request_timeout < 1 or self.request_timeout > 300:
raise ConfigurationError(
f"request_timeout must be between 1 and 300 seconds, got {self.request_timeout}"
)
# Validate URLs
if not self.deepseek_base_url.startswith(("http://", "https://")):
raise ConfigurationError(
f"Invalid deepseek_base_url: {self.deepseek_base_url}"
)
if not self.tmdb_base_url.startswith(("http://", "https://")):
raise ConfigurationError(f"Invalid tmdb_base_url: {self.tmdb_base_url}")
# Validate memory file path
memory_path = Path(self.memory_file)
if memory_path.exists() and not memory_path.is_file():
raise ConfigurationError(
f"memory_file exists but is not a file: {self.memory_file}"
)
def is_deepseek_configured(self) -> bool:
"""Check if DeepSeek API is properly configured."""
return bool(self.deepseek_api_key and self.deepseek_base_url)
def is_tmdb_configured(self) -> bool:
"""Check if TMDB API is properly configured."""
return bool(self.tmdb_api_key and self.tmdb_base_url)
# Global settings instance
settings = Settings()

View File

@@ -6,7 +6,8 @@ from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from ..config import settings
from alfred.settings import Settings, settings
from .exceptions import LLMAPIError, LLMConfigurationError
logger = logging.getLogger(__name__)
@@ -21,6 +22,7 @@ class DeepSeekClient:
base_url: str | None = None,
model: str | None = None,
timeout: int | None = None,
settings: Settings | None = None,
):
"""
Initialize DeepSeek client.
@@ -34,10 +36,10 @@ class DeepSeekClient:
Raises:
LLMConfigurationError: If API key is missing
"""
self.api_key = api_key or settings.deepseek_api_key
self.base_url = base_url or settings.deepseek_base_url
self.model = model or settings.model
self.timeout = timeout or settings.request_timeout
self.api_key = api_key or self.settings.deepseek_api_key
self.base_url = base_url or self.settings.deepseek_base_url
self.model = model or self.settings.deepseek_model
self.timeout = timeout or self.settings.request_timeout
if not self.api_key:
raise LLMConfigurationError(
@@ -94,7 +96,7 @@ class DeepSeekClient:
payload = {
"model": self.model,
"messages": messages,
"temperature": settings.temperature,
"temperature": settings.llm_temperature,
}
# Add tools if provided

View File

@@ -1,13 +1,13 @@
"""Ollama LLM client with robust error handling."""
import logging
import os
from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from ..config import settings
from alfred.settings import Settings
from .exceptions import LLMAPIError, LLMConfigurationError
logger = logging.getLogger(__name__)
@@ -32,6 +32,7 @@ class OllamaClient:
model: str | None = None,
timeout: int | None = None,
temperature: float | None = None,
settings: Settings | None = None,
):
"""
Initialize Ollama client.
@@ -45,13 +46,11 @@ class OllamaClient:
Raises:
LLMConfigurationError: If configuration is invalid
"""
self.base_url = base_url or os.getenv(
"OLLAMA_BASE_URL", "http://localhost:11434"
)
self.model = model or os.getenv("OLLAMA_MODEL", "llama3.2")
self.base_url = base_url or settings.ollama_base_url
self.model = model or settings.ollama_model
self.timeout = timeout or settings.request_timeout
self.temperature = (
temperature if temperature is not None else settings.temperature
temperature if temperature is not None else settings.llm_temperature
)
if not self.base_url:

View File

@@ -3,7 +3,7 @@
import json
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from .registry import Tool
@@ -52,7 +52,7 @@ class PromptBuilder:
# Show first 5 results
for i, result in enumerate(result_list[:5]):
name = result.get("name", "Unknown")
lines.append(f" {i+1}. {name}")
lines.append(f" {i + 1}. {name}")
if len(result_list) > 5:
lines.append(f" ... and {len(result_list) - 5} more")

View File

@@ -78,10 +78,13 @@ def _create_tool_from_function(func: Callable) -> Tool:
)
def make_tools() -> dict[str, Tool]:
def make_tools(settings) -> dict[str, Tool]:
"""
Create and register all available tools.
Args:
settings: Application settings instance
Returns:
Dictionary mapping tool names to Tool objects
"""

View File

@@ -3,12 +3,12 @@
import logging
from typing import Any
from application.movies import SearchMovieUseCase
from application.torrents import AddTorrentUseCase, SearchTorrentsUseCase
from infrastructure.api.knaben import knaben_client
from infrastructure.api.qbittorrent import qbittorrent_client
from infrastructure.api.tmdb import tmdb_client
from infrastructure.persistence import get_memory
from alfred.application.movies import SearchMovieUseCase
from alfred.application.torrents import AddTorrentUseCase, SearchTorrentsUseCase
from alfred.infrastructure.api.knaben import knaben_client
from alfred.infrastructure.api.qbittorrent import qbittorrent_client
from alfred.infrastructure.api.tmdb import tmdb_client
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -2,8 +2,8 @@
from typing import Any
from application.filesystem import ListFolderUseCase, SetFolderPathUseCase
from infrastructure.filesystem import FileManager
from alfred.application.filesystem import ListFolderUseCase, SetFolderPathUseCase
from alfred.infrastructure.filesystem import FileManager
def set_path_for_folder(folder_name: str, path_value: str) -> dict[str, Any]:

View File

@@ -3,7 +3,7 @@
import logging
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -2,22 +2,21 @@
import json
import logging
import os
import time
import uuid
from pathlib import Path
from typing import Any
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field, validator
from agent.agent import Agent
from agent.config import settings
from agent.llm.deepseek import DeepSeekClient
from agent.llm.exceptions import LLMAPIError, LLMConfigurationError
from agent.llm.ollama import OllamaClient
from infrastructure.persistence import get_memory, init_memory
from alfred.agent.agent import Agent
from alfred.agent.llm.deepseek import DeepSeekClient
from alfred.agent.llm.exceptions import LLMAPIError, LLMConfigurationError
from alfred.agent.llm.ollama import OllamaClient
from alfred.infrastructure.persistence import get_memory, init_memory
from alfred.settings import settings
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
@@ -30,38 +29,33 @@ app = FastAPI(
version="0.2.0",
)
# TODO: Make a variable
manifests = "manifests"
# Sécurité : on vérifie que le dossier existe pour ne pas faire planter l'app au démarrage
if os.path.exists(manifests):
app.mount("/manifests", StaticFiles(directory=manifests), name="manifests")
else:
print(
f"⚠️ ATTENTION : Le dossier '{manifests}' est introuvable. Le plugin ne marchera pas."
)
# Initialize memory context at startup
# Use /data/memory in Docker, fallback to memory_data for local dev
storage_dir = os.getenv("MEMORY_STORAGE_DIR", "memory_data")
init_memory(storage_dir=storage_dir)
logger.info(f"Memory context initialized (storage: {storage_dir})")
memory_path = Path(settings.data_storage) / "memory"
init_memory(storage_dir=str(memory_path))
logger.info(f"Memory context initialized (path: {memory_path})")
# Initialize LLM based on environment variable
llm_provider = os.getenv("LLM_PROVIDER", "deepseek").lower()
llm_provider = settings.default_llm_provider.lower()
try:
if llm_provider == "ollama":
logger.info("Using Ollama LLM")
llm = OllamaClient()
else:
if llm_provider == "local":
logger.info("Using local Ollama LLM")
llm = OllamaClient(settings=settings)
elif llm_provider == "deepseek":
logger.info("Using DeepSeek LLM")
llm = DeepSeekClient()
elif llm_provider == "claude":
raise ValueError(f"LLM provider not fully implemented: {llm_provider}")
else:
raise ValueError(f"Unknown LLM provider: {llm_provider}")
except LLMConfigurationError as e:
logger.error(f"Failed to initialize LLM: {e}")
raise
# Initialize agent
agent = Agent(llm=llm, max_tool_iterations=settings.max_tool_iterations)
agent = Agent(
settings=settings, llm=llm, max_tool_iterations=settings.max_tool_iterations
)
logger.info("Agent Media API initialized")
@@ -116,7 +110,7 @@ def extract_last_user_content(messages: list[dict[str, Any]]) -> str:
@app.get("/health")
async def health_check():
"""Health check endpoint."""
return {"status": "healthy", "version": "0.2.0"}
return {"status": "healthy", "version": f"v{settings.alfred_version}"}
@app.get("/v1/models")

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.filesystem import FileManager
from alfred.infrastructure.filesystem import FileManager
from .dto import ListFolderResponse

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.filesystem import FileManager
from alfred.infrastructure.filesystem import FileManager
from .dto import SetFolderPathResponse

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.api.tmdb import (
from alfred.infrastructure.api.tmdb import (
TMDBAPIError,
TMDBClient,
TMDBConfigurationError,

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.api.qbittorrent import (
from alfred.infrastructure.api.qbittorrent import (
QBittorrentAPIError,
QBittorrentAuthError,
QBittorrentClient,

View File

@@ -2,7 +2,11 @@
import logging
from infrastructure.api.knaben import KnabenAPIError, KnabenClient, KnabenNotFoundError
from alfred.infrastructure.api.knaben import (
KnabenAPIError,
KnabenClient,
KnabenNotFoundError,
)
from .dto import SearchTorrentsResponse

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from agent.config import Settings, settings
from alfred.settings import Settings, settings
from .dto import TorrentResult
from .exceptions import KnabenAPIError, KnabenNotFoundError

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from agent.config import Settings, settings
from alfred.settings import Settings, settings
from .dto import TorrentInfo
from .exceptions import QBittorrentAPIError, QBittorrentAuthError

View File

@@ -6,7 +6,7 @@ from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from agent.config import Settings, settings
from alfred.settings import Settings, settings
from .dto import MediaResult
from .exceptions import (

View File

@@ -7,7 +7,7 @@ from enum import Enum
from pathlib import Path
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from .exceptions import PathTraversalError

View File

@@ -3,9 +3,9 @@
import logging
from pathlib import Path
from domain.movies.entities import Movie
from domain.tv_shows.entities import Episode, Season, TVShow
from domain.tv_shows.value_objects import SeasonNumber
from alfred.domain.movies.entities import Movie
from alfred.domain.tv_shows.entities import Episode, Season, TVShow
from alfred.domain.tv_shows.value_objects import SeasonNumber
logger = logging.getLogger(__name__)

View File

@@ -6,7 +6,7 @@ without passing it explicitly through all function calls.
Usage:
# At application startup
from infrastructure.persistence import init_memory, get_memory
from alfred.infrastructure.persistence import init_memory, get_memory
init_memory("memory_data")

View File

@@ -4,11 +4,11 @@ import logging
from datetime import datetime
from typing import Any
from domain.movies.entities import Movie
from domain.movies.repositories import MovieRepository
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from infrastructure.persistence import get_memory
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.repositories import MovieRepository
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -3,11 +3,11 @@
import logging
from typing import Any
from domain.shared.value_objects import FilePath, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.repositories import SubtitleRepository
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from infrastructure.persistence import get_memory
from alfred.domain.shared.value_objects import FilePath, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.repositories import SubtitleRepository
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -4,11 +4,11 @@ import logging
from datetime import datetime
from typing import Any
from domain.shared.value_objects import ImdbId
from domain.tv_shows.entities import TVShow
from domain.tv_shows.repositories import TVShowRepository
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence import get_memory
from alfred.domain.shared.value_objects import ImdbId
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.repositories import TVShowRepository
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

209
alfred/settings.py Normal file
View File

@@ -0,0 +1,209 @@
import secrets
from pathlib import Path
from typing import NamedTuple
import tomllib
from pydantic import Field, computed_field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
# Project root: settings.py lives in alfred/, so go two levels up.
BASE_DIR = Path(__file__).resolve().parent.parent
# The .env file is read from the project root.
ENV_FILE_PATH = BASE_DIR / ".env"
# pyproject.toml holds the pinned component versions.
toml_path = BASE_DIR / "pyproject.toml"
class ConfigurationError(Exception):
    """Signals an invalid or out-of-range configuration value."""
class ProjectVersions(NamedTuple):
    """
    Immutable structure for project versions.
    Forces explicit naming and prevents accidental swaps.
    """
    # Pinned LibreChat image version (from [tool.alfred.settings]).
    librechat: str
    # Pinned RAG API image version (from [tool.alfred.settings]).
    rag: str
    # Alfred's own version (from [tool.poetry].version).
    alfred: str
def get_versions_from_toml() -> ProjectVersions:
    """Read component versions from pyproject.toml.

    Returns:
        ProjectVersions with the librechat/rag versions taken from the
        ``[tool.alfred.settings]`` table and the alfred version taken
        from ``[tool.poetry]``.

    Raises:
        FileNotFoundError: if pyproject.toml does not exist.
        KeyError: if any of the expected keys is missing.
    """
    if not toml_path.exists():
        raise FileNotFoundError(f"pyproject.toml not found: {toml_path}")
    with open(toml_path, "rb") as f:
        data = tomllib.load(f)
    try:
        # Versions are pinned in two different tables of pyproject.toml.
        alfred_settings = data["tool"]["alfred"]["settings"]
        return ProjectVersions(
            librechat=alfred_settings["librechat_version"],
            rag=alfred_settings["rag_version"],
            alfred=data["tool"]["poetry"]["version"],
        )
    except KeyError as e:
        raise KeyError(f"Error: Missing key {e} in pyproject.toml") from e
# Load versions once at import time; a missing or incomplete
# pyproject.toml fails fast here rather than at first use.
VERSIONS: ProjectVersions = get_versions_from_toml()
class Settings(BaseSettings):
    """Central application configuration.

    Values are loaded from the environment / the project-root .env file
    (pydantic-settings); any secret not supplied is generated at startup
    so the stack is "secure by default".
    """

    model_config = SettingsConfigDict(
        env_file=ENV_FILE_PATH,
        env_file_encoding="utf-8",
        extra="ignore",
        case_sensitive=False,
    )

    # --- GENERAL SETTINGS ---
    host: str = "0.0.0.0"
    port: int = 3080
    debug_logging: bool = False
    debug_console: bool = False
    data_storage: str = "data"
    librechat_version: str = Field(VERSIONS.librechat, description="Librechat version")
    rag_version: str = Field(VERSIONS.rag, description="RAG engine version")
    alfred_version: str = Field(VERSIONS.alfred, description="Alfred version")

    # --- CONTEXT SETTINGS ---
    max_history_messages: int = 10
    max_tool_iterations: int = 10
    request_timeout: int = 30

    # TODO: Finish
    deepseek_base_url: str = "https://api.deepseek.com"
    deepseek_model: str = "deepseek-chat"

    # --- API KEYS ---
    anthropic_api_key: str | None = Field(None, description="Claude API key")
    deepseek_api_key: str | None = Field(None, description="Deepseek API key")
    google_api_key: str | None = Field(None, description="Gemini API key")
    kimi_api_key: str | None = Field(None, description="Kimi API key")
    openai_api_key: str | None = Field(None, description="ChatGPT API key")

    # --- SECURITY KEYS ---
    # Generated automatically if not in .env to ensure "Secure by Default"
    jwt_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
    jwt_refresh_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
    # We keep these for encryption of keys in MongoDB (AES-256 Hex format)
    creds_key: str = Field(default_factory=lambda: secrets.token_hex(32))
    creds_iv: str = Field(default_factory=lambda: secrets.token_hex(16))

    # --- SERVICES ---
    qbittorrent_url: str = "http://qbittorrent:16140"
    qbittorrent_username: str = "admin"
    qbittorrent_password: str = Field(default_factory=lambda: secrets.token_urlsafe(16))

    mongo_host: str = "mongodb"
    mongo_user: str = "alfred"
    # repr=False/exclude=True keep the password out of repr() and dumps.
    mongo_password: str = Field(
        default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
    )
    mongo_port: int = 27017
    mongo_db_name: str = "alfred"

    @computed_field(repr=False)
    @property
    def mongo_uri(self) -> str:
        """Full MongoDB connection URI (contains the password)."""
        return (
            f"mongodb://{self.mongo_user}:{self.mongo_password}"
            f"@{self.mongo_host}:{self.mongo_port}/{self.mongo_db_name}"
            f"?authSource=admin"
        )

    postgres_host: str = "vectordb"
    postgres_user: str = "alfred"
    # repr=False/exclude=True keep the password out of repr() and dumps.
    postgres_password: str = Field(
        default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
    )
    postgres_port: int = 5432
    postgres_db_name: str = "alfred"

    @computed_field(repr=False)
    @property
    def postgres_uri(self) -> str:
        """Full PostgreSQL connection URI (contains the password)."""
        return (
            f"postgresql://{self.postgres_user}:{self.postgres_password}"
            f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db_name}"
        )

    tmdb_api_key: str | None = Field(None, description="The Movie Database API key")
    tmdb_base_url: str = "https://api.themoviedb.org/3"

    # --- LLM PICKER & CONFIG ---
    # Providers: 'local', 'deepseek', ...
    default_llm_provider: str = "local"
    ollama_base_url: str = "http://ollama:11434"
    # Models: ...
    ollama_model: str = "llama3.3:latest"
    llm_temperature: float = 0.2

    # --- RAG ENGINE ---
    rag_enabled: bool = True  # TODO: Handle False
    rag_api_url: str = "http://rag_api:8000"
    embeddings_provider: str = "ollama"
    # Models: ...
    embeddings_model: str = "nomic-embed-text"

    # --- MEILISEARCH ---
    meili_enabled: bool = Field(True, description="Enable meili")
    meili_no_analytics: bool = True
    meili_host: str = "http://meilisearch:7700"
    meili_master_key: str = Field(
        default_factory=lambda: secrets.token_urlsafe(32),
        description="Master key for Meilisearch",
        repr=False,
    )

    # --- VALIDATORS ---
    @field_validator("llm_temperature")
    @classmethod
    def validate_temperature(cls, v: float) -> float:
        """Reject temperatures outside the usual [0.0, 2.0] range."""
        if not 0.0 <= v <= 2.0:
            raise ConfigurationError(
                f"Temperature must be between 0.0 and 2.0, got {v}"
            )
        return v

    @field_validator("max_tool_iterations")
    @classmethod
    def validate_max_iterations(cls, v: int) -> int:
        """Cap tool-loop iterations to avoid runaway agent loops."""
        if not 1 <= v <= 20:
            # Message now matches the actual bound (was "1 and 50").
            raise ConfigurationError(
                f"max_tool_iterations must be between 1 and 20, got {v}"
            )
        return v

    @field_validator("request_timeout")
    @classmethod
    def validate_timeout(cls, v: int) -> int:
        """Keep HTTP timeouts within a sane 1-300 second window."""
        if not 1 <= v <= 300:
            raise ConfigurationError(
                f"request_timeout must be between 1 and 300 seconds, got {v}"
            )
        return v

    @field_validator("deepseek_base_url", "tmdb_base_url")
    @classmethod
    def validate_url(cls, v: str, info) -> str:
        """Require an explicit http(s) scheme on service base URLs."""
        if not v.startswith(("http://", "https://")):
            raise ConfigurationError(f"Invalid {info.field_name}")
        return v

    def is_tmdb_configured(self) -> bool:
        """Return True when a TMDB API key is set."""
        return bool(self.tmdb_api_key)

    def is_deepseek_configured(self) -> bool:
        """Return True when a DeepSeek API key is set."""
        return bool(self.deepseek_api_key)

    def dump_safe(self) -> dict:
        """Dump all settings; fields declared exclude=True (DB passwords) are omitted."""
        return self.model_dump(exclude_none=False)
settings = Settings()

231
cli.py Normal file
View File

@@ -0,0 +1,231 @@
#!/usr/bin/env python3
import os
import secrets
import shutil
import subprocess
import sys
from datetime import datetime
from enum import StrEnum
from pathlib import Path
from typing import NoReturn
# Values the user must supply interactively when absent from .env.
REQUIRED_VARS = ["DEEPSEEK_API_KEY", "TMDB_API_KEY", "QBITTORRENT_URL"]
# Size in bytes
# (secrets.token_hex(n) emits 2*n hex characters per generated key)
KEYS_TO_GENERATE = {
    "JWT_SECRET": 32,
    "JWT_REFRESH_SECRET": 32,
    "CREDS_KEY": 32,
    "CREDS_IV": 16,
}
class Style(StrEnum):
    """ANSI codes for styling output.
    Usage: f"{Style.RED}Error{Style.RESET}"
    """

    RESET = "\033[0m"  # restore default terminal attributes
    BOLD = "\033[1m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    CYAN = "\033[36m"
    DIM = "\033[2m"  # faint/dimmed text
# Only for terminals and if not specified otherwise
# (honors the NO_COLOR convention: any value of $NO_COLOR disables color)
USE_COLORS = sys.stdout.isatty() and "NO_COLOR" not in os.environ
def styled(text: str, color_code: str) -> str:
    """Wrap *text* in ANSI codes when the terminal supports color."""
    if not USE_COLORS:
        return text
    return f"{color_code}{text}{Style.RESET}"
def log(msg: str, color: str | None = None, prefix="") -> None:
    """Print *msg*, optionally colorized and with a leading prefix."""
    if color:
        body = styled(msg, color)
    else:
        body = msg
    print(prefix + body)
def error_exit(msg: str) -> NoReturn:
    """Print *msg* in red and terminate the process with exit code 1."""
    # The f"{msg}" wrapper was redundant; msg is already a str.
    log(msg, Style.RED)
    sys.exit(1)
def is_docker_running() -> bool:
    """Check if Docker is available and responsive.

    Exits the process when the docker binary is missing entirely;
    returns False when it is installed but the daemon is unreachable.
    """
    # (Fixed stray quote that broke the docstring opening.)
    if shutil.which("docker") is None:
        error_exit("Docker is not installed.")
    result = subprocess.run(
        ["docker", "info"],
        # Redirect stdout/stderr to keep output clean on success
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        # Prevent exception being raised
        check=False,
    )
    return result.returncode == 0
def parse_env(content: str) -> dict[str, str]:
    """Extract KEY=VALUE pairs from env-file text, skipping comments and blanks."""
    result: dict[str, str] = {}
    for entry in content.splitlines():
        stripped = entry.strip()
        # Only assignment lines count; blanks and comment lines are ignored.
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            continue
        name, _, raw_value = stripped.partition("=")
        result[name.strip()] = raw_value.strip()
    return result
def dump_env(content: str, data: dict[str, str]) -> str:
    """Render env-file text with values from *data* substituted in place.

    Comments, blank lines and inline comments are preserved; keys from
    *data* that do not appear in *content* are appended at the end.
    """
    lines_out: list[str] = []
    seen: set[str] = set()
    for original in content.splitlines():
        stripped = original.strip()
        # Pass through anything that is not an assignment line.
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            lines_out.append(original)
            continue
        left, right = original.split("=", 1)
        name = left.strip()
        # Keys not scheduled for update keep their original line.
        if name not in data:
            lines_out.append(original)
            continue
        seen.add(name)
        replacement = data[name]
        if " #" in right:
            # Preserve the inline comment that follows the old value.
            _, trailing = right.split(" #", 1)
            lines_out.append(f"{left}={replacement} #{trailing}")
        else:
            lines_out.append(f"{left}={replacement}")
    # Append any keys that were missing from the original content.
    for name, value in data.items():
        if name not in seen:
            lines_out.append(f"{name}={value}")
    return "\n".join(lines_out) + "\n"
def ensure_env() -> None:
    """Manage .env lifecycle: creation, secret generation, prompts.

    Reads .env (falling back to the .env.example template), generates any
    missing secrets, prompts for required values, then writes the result
    back — backing up a pre-existing .env first.
    """
    env_path = Path(".env")
    env_example_path = Path(".env.example")
    updated: bool = False

    # Read .env if exists, otherwise start from the example template
    if env_path.exists():
        content: str = env_path.read_text(encoding="utf-8")
    else:
        content: str = env_example_path.read_text(encoding="utf-8")

    existing_vars: dict[str, str] = parse_env(content)

    # Generate missing secrets
    for key, length in KEYS_TO_GENERATE.items():
        if key not in existing_vars or not existing_vars[key]:
            log(f"Generating {key}...", Style.GREEN, prefix=" ")
            existing_vars[key] = secrets.token_hex(length)
            updated = True
            log("Done", Style.GREEN, prefix=" ")

    # Prompt for missing mandatory keys
    color = Style.YELLOW if USE_COLORS else ""
    reset = Style.RESET if USE_COLORS else ""
    for key in REQUIRED_VARS:
        if key not in existing_vars or not existing_vars[key]:
            try:
                existing_vars[key] = input(
                    f" {color}Enter value for {key}: {reset}"
                ).strip()
                updated = True
            except KeyboardInterrupt:
                print()
                error_exit("Aborted by user.")

    # Write to disk when something changed OR when .env does not exist yet.
    # Previously a complete .env.example was never materialized as .env,
    # leaving docker-compose (env_file required) to fail later.
    if updated or not env_path.exists():
        # But backup original first
        if env_path.exists():
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_path = Path(f"{env_path}.{timestamp}.bak")
            shutil.copy(env_path, backup_path)
            log(f"Backup created: {backup_path}", Style.DIM)
        new_content = dump_env(content, existing_vars)
        env_path.write_text(new_content, encoding="utf-8")
        log(".env updated successfully.", Style.GREEN)
    else:
        log("Configuration is up to date.", Style.GREEN)
def setup() -> None:
    """Orchestrate initialization: verify Docker, then prepare .env."""
    # The return value was previously ignored, so setup continued even
    # when Docker was installed but the daemon was not running.
    if not is_docker_running():
        error_exit("Docker daemon is not running.")
    ensure_env()
def status() -> None:
    """Display simple dashboard."""
    # Hardcoded bold style for title if colors are enabled
    title_style = Style.BOLD if USE_COLORS else ""
    reset_style = Style.RESET if USE_COLORS else ""
    print(f"\n{title_style}ALFRED STATUS{reset_style}")
    print(f"{title_style}==============={reset_style}\n")
    # Docker Check
    docker_label = (
        styled("✓ running", Style.GREEN)
        if is_docker_running()
        else styled("✗ stopped", Style.RED)
    )
    print(f" Docker: {docker_label}")
    # Env Check
    if Path(".env").exists():
        env_label = styled("✓ present", Style.GREEN)
    else:
        env_label = styled("✗ missing", Style.RED)
    print(f" .env: {env_label}")
    print("")
def check() -> None:
    """Silent check for prerequisites (used by 'make up')."""
    # Delegates entirely to the normal setup flow.
    setup()
def main() -> None:
    """CLI entry point: dispatch on the first positional argument."""
    if len(sys.argv) < 2:
        print("Usage: python cli.py [setup|check|status]")
        sys.exit(1)
    # Table dispatch replaces the if/elif chain; behavior is unchanged.
    commands = {"setup": setup, "check": check, "status": status}
    cmd = sys.argv[1]
    handler = commands.get(cmd)
    if handler is None:
        error_exit(f"Unknown command: {cmd}")
    handler()
# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()

206
docker-compose.yaml Normal file
View File

@@ -0,0 +1,206 @@
services:
# - CORE SERVICES -
# --- .ENV INIT ---
alfred-init:
container_name: alfred-init
build:
context: .
target: builder
args:
PYTHON_VERSION: ${PYTHON_VERSION}
PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT}
RUNNER: ${RUNNER}
command: python scripts/bootstrap.py
networks:
- alfred-net
# --- MAIN APPLICATION ---
alfred:
container_name: alfred-core
build:
context: .
args:
PYTHON_VERSION: ${PYTHON_VERSION}
PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT}
RUNNER: ${RUNNER}
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
volumes:
- ./data:/data
- ./logs:/logs
# TODO: Hot reload (comment out in production)
#- ./alfred:/home/appuser/alfred
networks:
- alfred-net
# --- FRONTEND LIBRECHAT ---
librechat:
container_name: alfred-librechat
image: ghcr.io/danny-avila/librechat:${LIBRECHAT_VERSION}
depends_on:
alfred-init:
condition: service_completed_successfully
mongodb:
condition: service_healthy
restart: unless-stopped
env_file:
- path: .env
required: true
environment:
# Remap value name
- SEARCH=${MEILI_ENABLED}
ports:
- "${PORT}:${PORT}"
volumes:
- ./data/librechat/images:/app/client/public/images
- ./data/librechat/uploads:/app/client/uploads
- ./logs:/app/api/logs
# Mount custom endpoint
- ./librechat/manifests:/app/manifests:ro
- ./librechat/librechat.yaml:/app/librechat.yaml:ro
networks:
- alfred-net
# --- DATABASE #1 - APP STATE ---
mongodb:
container_name: alfred-mongodb
image: mongo:latest
restart: unless-stopped
depends_on:
alfred-init:
condition: service_completed_successfully
env_file:
- path: .env
required: true
environment:
# Remap value name
- MONGO_INITDB_ROOT_USERNAME=${MONGO_USER}
- MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}
ports:
- "${MONGO_PORT}:${MONGO_PORT}"
volumes:
- ./data/mongo:/data/db
command: mongod --quiet --setParameter logComponentVerbosity='{"network":{"verbosity":0}}'
healthcheck:
test: |
mongosh --quiet --eval "db.adminCommand('ping')" || \
mongosh --quiet -u "${MONGO_USER}" -p "${MONGO_PASSWORD}" --authenticationDatabase admin --eval "db.adminCommand('ping')"
interval: 10s
timeout: 5s
retries: 5
networks:
- alfred-net
# --- OLLAMA - LOCAL LLM ENGINE ---
ollama:
image: ollama/ollama:latest
container_name: alfred-ollama
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
volumes:
- ./data/ollama:/root/.ollama
networks:
- alfred-net
# - OPTIONAL SERVICES -
# --- SEARCH ENGINE SUPER FAST (Optional) ---
meilisearch:
container_name: alfred-meilisearch
image: getmeili/meilisearch:v1.12.3
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
volumes:
- ./data/meilisearch:/meili_data
profiles: ["meili", "full"]
networks:
- alfred-net
# --- RETRIEVAL AUGMENTED GENERATION SYSTEM (Optional) ---
rag_api:
container_name: alfred-rag
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:${RAG_VERSION}
depends_on:
alfred-init:
condition: service_completed_successfully
vectordb:
condition: service_healthy
restart: unless-stopped
env_file:
- path: .env
required: true
ports:
- "${RAG_API_PORT}:${RAG_API_PORT}"
volumes:
- ./data/rag/uploads:/app/uploads
profiles: ["rag", "full"]
networks:
- alfred-net
# --- DATABASE #2 - Vector RAG (Optional) ---
vectordb:
container_name: alfred-vectordb
image: pgvector/pgvector:0.8.0-pg16-bookworm
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
ports:
- "${POSTGRES_PORT}:${POSTGRES_PORT}"
volumes:
- ./data/vectordb:/var/lib/postgresql/data
profiles: ["rag", "full"]
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-alfred} -d ${POSTGRES_DB_NAME:-alfred}" ]
interval: 5s
timeout: 5s
retries: 5
networks:
- alfred-net
# --- QBITTORRENT (Optional) ---
qbittorrent:
image: lscr.io/linuxserver/qbittorrent:latest
container_name: alfred-qbittorrent
depends_on:
alfred-init:
condition: service_completed_successfully
restart: unless-stopped
env_file:
- path: .env
required: true
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Paris
- WEBUI_PORT=${QBITTORRENT_PORT}
volumes:
- ./data/qbittorrent/config:/config
- ./data/qbittorrent/downloads:/downloads
profiles: ["qbittorrent", "full"]
ports:
- "${QBITTORRENT_PORT}:${QBITTORRENT_PORT}"
networks:
- alfred-net
networks:
alfred-net:
name: alfred-internal
driver: bridge

View File

@@ -1,202 +0,0 @@
version: "3.4"
services:
alfred:
build:
context: .
args:
RUNNER: ${RUNNER} # Get it from Makefile
container_name: alfred
restart: unless-stopped
env_file: .env
ports:
- "8000:8000"
volumes:
# Persistent data volumes (outside container /app)
- agent-memory:/data/memory
- agent-logs:/data/logs
# Development: mount code for hot reload (comment out in production)
# - ./alfred:/app
environment:
# LLM Configuration
LLM_PROVIDER: ${LLM_PROVIDER:-deepseek}
DEEPSEEK_API_KEY: ${DEEPSEEK_API_KEY:-}
# Memory storage
MEMORY_STORAGE_DIR: /data/memory
# External services
TMDB_API_KEY: ${TMDB_API_KEY:-}
QBITTORRENT_URL: ${QBITTORRENT_URL:-}
QBITTORRENT_USERNAME: ${QBITTORRENT_USERNAME:-}
QBITTORRENT_PASSWORD: ${QBITTORRENT_PASSWORD:-}
networks:
- agent-network
# Da face (LibreChat)
librechat:
image: ghcr.io/danny-avila/librechat-dev:latest
container_name: librechat-frontend
restart: unless-stopped
ports:
- "3080:3080"
depends_on:
- mongodb
- meilisearch
- rag_api
- alfred
env_file: .env
environment:
# MongoDB connection (no auth, matching LibreChat default)
MONGO_URI: mongodb://mongodb:27017/LibreChat
# App configuration
HOST: 0.0.0.0
PORT: 3080
# Security
JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-key-change-this-in-production}
JWT_REFRESH_SECRET: ${JWT_REFRESH_SECRET:-your-super-secret-refresh-key-change-this-too}
CREDS_KEY: ${CREDS_KEY:-your-32-character-secret-key-here}
CREDS_IV: ${CREDS_IV:-your-16-character-iv-here}
# Session
SESSION_EXPIRY: ${SESSION_EXPIRY:-1000 * 60 * 15}
REFRESH_TOKEN_EXPIRY: ${REFRESH_TOKEN_EXPIRY:-1000 * 60 * 60 * 24 * 7}
# Domain
DOMAIN_CLIENT: ${DOMAIN_CLIENT:-http://localhost:3080}
DOMAIN_SERVER: ${DOMAIN_SERVER:-http://localhost:3080}
# Meilisearch
MEILI_HOST: http://meilisearch:7700
MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFU}
# RAG API
RAG_API_URL: http://rag_api:8000
# Endpoints
ENDPOINTS: custom
# Debug (optional)
DEBUG_LOGGING: ${DEBUG_LOGGING:-false}
DEBUG_CONSOLE: ${DEBUG_CONSOLE:-false}
volumes:
- ./librechat/librechat.yaml:/app/librechat.yaml:ro
- librechat-images:/app/client/public/images
- librechat-logs:/app/api/logs
networks:
- agent-network
# MongoDB for LibreChat
mongodb:
image: mongo:latest
container_name: librechat-mongodb
restart: unless-stopped
volumes:
- mongodb-data:/data/db
command: mongod --noauth
ports:
- "27017:27017"
networks:
- agent-network
# Meilisearch - Search engine for LibreChat
#TODO: Follow currently used version on librechat's github
meilisearch:
image: getmeili/meilisearch:v1.12.3
container_name: librechat-meilisearch
restart: unless-stopped
volumes:
- meilisearch-data:/meili_data
environment:
MEILI_HOST: http://meilisearch:7700
MEILI_HTTP_ADDR: meilisearch:7700
MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFU}
ports:
- "7700:7700"
networks:
- agent-network
# PostgreSQL with pgvector for RAG API
pgvector:
image: ankane/pgvector:latest
container_name: librechat-pgvector
restart: unless-stopped
environment:
POSTGRES_DB: ${POSTGRES_DB:-librechat_rag}
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
volumes:
- pgvector-data:/var/lib/postgresql/data
ports:
- "5432:5432"
networks:
- agent-network
# RAG API - Vector database for LibreChat
rag_api:
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
container_name: librechat-rag-api
restart: unless-stopped
depends_on:
- pgvector
environment:
PORT: 8000
HOST: 0.0.0.0
# PostgreSQL connection (multiple variable names for compatibility)
DB_HOST: pgvector
DB_PORT: 5432
DB_NAME: ${POSTGRES_DB:-librechat_rag}
DB_USER: ${POSTGRES_USER:-postgres}
DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
POSTGRES_DB: ${POSTGRES_DB:-librechat_rag}
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
# RAG configuration
COLLECTION_NAME: ${RAG_COLLECTION_NAME:-testcollection}
EMBEDDINGS_PROVIDER: ${RAG_EMBEDDINGS_PROVIDER:-openai}
EMBEDDINGS_MODEL: ${RAG_EMBEDDINGS_MODEL:-text-embedding-3-small}
OPENAI_API_KEY: ${OPENAI_API_KEY:-}
RAG_UPLOAD_DIR: /app/uploads
volumes:
- rag-uploads:/app/uploads
ports:
- "8001:8000"
networks:
- agent-network
# Named volumes for persistent data
volumes:
# MongoDB data
mongodb-data:
driver: local
# Meilisearch data
meilisearch-data:
driver: local
# PostgreSQL pgvector data
pgvector-data:
driver: local
# RAG API uploads
rag-uploads:
driver: local
# LibreChat data
librechat-images:
driver: local
librechat-logs:
driver: local
# Alfred data
agent-memory:
driver: local
agent-logs:
driver: local
# Network for inter-service communication
networks:
agent-network:
driver: bridge

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Script to generate secure keys for LibreChat
# Run this script to generate random secure keys for your .env file
echo "==================================="
echo "LibreChat Security Keys Generator"
echo "==================================="
echo ""
echo "# MongoDB Password"
echo "MONGO_PASSWORD=$(openssl rand -base64 24)"
echo ""
echo "# JWT Secrets"
echo "JWT_SECRET=$(openssl rand -base64 32)"
echo "JWT_REFRESH_SECRET=$(openssl rand -base64 32)"
echo ""
echo "# Credentials Encryption Keys"
echo "CREDS_KEY=$(openssl rand -hex 16)"
echo "CREDS_IV=$(openssl rand -hex 8)"
echo ""
echo "==================================="
echo "Copy these values to your .env file"
echo "==================================="

View File

@@ -4,6 +4,16 @@
version: 1.2.1
cache: true
endpoints:
anthropic:
apiKey: "${ANTHROPIC_API_KEY}"
models:
default: ["claude-sonnet-4-5", "claude-haiku-4-5", "claude-opus-4-5"]
fetch: false
titleConvo: true
titleModel: "claude-haiku-4-5"
modelDisplayLabel: "Claude AI"
streamRate: 1
custom:
# Deepseek
- name: "Deepseek"

208
poetry.lock generated
View File

@@ -52,17 +52,17 @@ files = [
[[package]]
name = "bump-my-version"
version = "1.2.5"
version = "1.2.6"
description = "Version bump your Python project"
optional = false
python-versions = ">=3.8"
files = [
{file = "bump_my_version-1.2.5-py3-none-any.whl", hash = "sha256:57e5718d9fe7d7b6f5ceb68e70cd3c4bd0570d300b4aade15fd1e355febdd351"},
{file = "bump_my_version-1.2.5.tar.gz", hash = "sha256:827af6c7b13111c62b45340f25defd105f566fe0cdbbb70e2c4b2f005b667e1f"},
{file = "bump_my_version-1.2.6-py3-none-any.whl", hash = "sha256:a2f567c10574a374b81a9bd6d2bd3cb2ca74befe5c24c3021123773635431659"},
{file = "bump_my_version-1.2.6.tar.gz", hash = "sha256:1f2f0daa5d699904e9739be8efb51c4c945461bad83cd4da4c89d324d9a18343"},
]
[package.dependencies]
click = "<8.2.2"
click = "<8.4"
httpx = ">=0.28.1"
pydantic = ">=2.0.0"
pydantic-settings = "*"
@@ -218,13 +218,13 @@ files = [
[[package]]
name = "click"
version = "8.2.1"
version = "8.3.1"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.10"
files = [
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
{file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"},
{file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"},
]
[package.dependencies]
@@ -243,103 +243,103 @@ files = [
[[package]]
name = "coverage"
version = "7.13.0"
version = "7.13.1"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.10"
files = [
{file = "coverage-7.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:02d9fb9eccd48f6843c98a37bd6817462f130b86da8660461e8f5e54d4c06070"},
{file = "coverage-7.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:367449cf07d33dc216c083f2036bb7d976c6e4903ab31be400ad74ad9f85ce98"},
{file = "coverage-7.13.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cdb3c9f8fef0a954c632f64328a3935988d33a6604ce4bf67ec3e39670f12ae5"},
{file = "coverage-7.13.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d10fd186aac2316f9bbb46ef91977f9d394ded67050ad6d84d94ed6ea2e8e54e"},
{file = "coverage-7.13.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f88ae3e69df2ab62fb0bc5219a597cb890ba5c438190ffa87490b315190bb33"},
{file = "coverage-7.13.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c4be718e51e86f553bcf515305a158a1cd180d23b72f07ae76d6017c3cc5d791"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a00d3a393207ae12f7c49bb1c113190883b500f48979abb118d8b72b8c95c032"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a7b1cd820e1b6116f92c6128f1188e7afe421c7e1b35fa9836b11444e53ebd9"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:37eee4e552a65866f15dedd917d5e5f3d59805994260720821e2c1b51ac3248f"},
{file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62d7c4f13102148c78d7353c6052af6d899a7f6df66a32bddcc0c0eb7c5326f8"},
{file = "coverage-7.13.0-cp310-cp310-win32.whl", hash = "sha256:24e4e56304fdb56f96f80eabf840eab043b3afea9348b88be680ec5986780a0f"},
{file = "coverage-7.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:74c136e4093627cf04b26a35dab8cbfc9b37c647f0502fc313376e11726ba303"},
{file = "coverage-7.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0dfa3855031070058add1a59fdfda0192fd3e8f97e7c81de0596c145dea51820"},
{file = "coverage-7.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fdb6f54f38e334db97f72fa0c701e66d8479af0bc3f9bfb5b90f1c30f54500f"},
{file = "coverage-7.13.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7e442c013447d1d8d195be62852270b78b6e255b79b8675bad8479641e21fd96"},
{file = "coverage-7.13.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ed5630d946859de835a85e9a43b721123a8a44ec26e2830b296d478c7fd4259"},
{file = "coverage-7.13.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f15a931a668e58087bc39d05d2b4bf4b14ff2875b49c994bbdb1c2217a8daeb"},
{file = "coverage-7.13.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30a3a201a127ea57f7e14ba43c93c9c4be8b7d17a26e03bb49e6966d019eede9"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a485ff48fbd231efa32d58f479befce52dcb6bfb2a88bb7bf9a0b89b1bc8030"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22486cdafba4f9e471c816a2a5745337742a617fef68e890d8baf9f3036d7833"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:263c3dbccc78e2e331e59e90115941b5f53e85cfcc6b3b2fbff1fd4e3d2c6ea8"},
{file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5330fa0cc1f5c3c4c3bb8e101b742025933e7848989370a1d4c8c5e401ea753"},
{file = "coverage-7.13.0-cp311-cp311-win32.whl", hash = "sha256:0f4872f5d6c54419c94c25dd6ae1d015deeb337d06e448cd890a1e89a8ee7f3b"},
{file = "coverage-7.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51a202e0f80f241ccb68e3e26e19ab5b3bf0f813314f2c967642f13ebcf1ddfe"},
{file = "coverage-7.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:d2a9d7f1c11487b1c69367ab3ac2d81b9b3721f097aa409a3191c3e90f8f3dd7"},
{file = "coverage-7.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0b3d67d31383c4c68e19a88e28fc4c2e29517580f1b0ebec4a069d502ce1e0bf"},
{file = "coverage-7.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:581f086833d24a22c89ae0fe2142cfaa1c92c930adf637ddf122d55083fb5a0f"},
{file = "coverage-7.13.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0a3a30f0e257df382f5f9534d4ce3d4cf06eafaf5192beb1a7bd066cb10e78fb"},
{file = "coverage-7.13.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:583221913fbc8f53b88c42e8dbb8fca1d0f2e597cb190ce45916662b8b9d9621"},
{file = "coverage-7.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f5d9bd30756fff3e7216491a0d6d520c448d5124d3d8e8f56446d6412499e74"},
{file = "coverage-7.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a23e5a1f8b982d56fa64f8e442e037f6ce29322f1f9e6c2344cd9e9f4407ee57"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9b01c22bc74a7fb44066aaf765224c0d933ddf1f5047d6cdfe4795504a4493f8"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:898cce66d0836973f48dda4e3514d863d70142bdf6dfab932b9b6a90ea5b222d"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3ab483ea0e251b5790c2aac03acde31bff0c736bf8a86829b89382b407cd1c3b"},
{file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d84e91521c5e4cb6602fe11ece3e1de03b2760e14ae4fcf1a4b56fa3c801fcd"},
{file = "coverage-7.13.0-cp312-cp312-win32.whl", hash = "sha256:193c3887285eec1dbdb3f2bd7fbc351d570ca9c02ca756c3afbc71b3c98af6ef"},
{file = "coverage-7.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f3e223b2b2db5e0db0c2b97286aba0036ca000f06aca9b12112eaa9af3d92ae"},
{file = "coverage-7.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:086cede306d96202e15a4b77ace8472e39d9f4e5f9fd92dd4fecdfb2313b2080"},
{file = "coverage-7.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:28ee1c96109974af104028a8ef57cec21447d42d0e937c0275329272e370ebcf"},
{file = "coverage-7.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d1e97353dcc5587b85986cda4ff3ec98081d7e84dd95e8b2a6d59820f0545f8a"},
{file = "coverage-7.13.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:99acd4dfdfeb58e1937629eb1ab6ab0899b131f183ee5f23e0b5da5cba2fec74"},
{file = "coverage-7.13.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ff45e0cd8451e293b63ced93161e189780baf444119391b3e7d25315060368a6"},
{file = "coverage-7.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f4f72a85316d8e13234cafe0a9f81b40418ad7a082792fa4165bd7d45d96066b"},
{file = "coverage-7.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:11c21557d0e0a5a38632cbbaca5f008723b26a89d70db6315523df6df77d6232"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76541dc8d53715fb4f7a3a06b34b0dc6846e3c69bc6204c55653a85dd6220971"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6e9e451dee940a86789134b6b0ffbe31c454ade3b849bb8a9d2cca2541a8e91d"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c67dace46f361125e6b9cace8fe0b729ed8479f47e70c89b838d319375c8137"},
{file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f59883c643cb19630500f57016f76cfdcd6845ca8c5b5ea1f6e17f74c8e5f511"},
{file = "coverage-7.13.0-cp313-cp313-win32.whl", hash = "sha256:58632b187be6f0be500f553be41e277712baa278147ecb7559983c6d9faf7ae1"},
{file = "coverage-7.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:73419b89f812f498aca53f757dd834919b48ce4799f9d5cad33ca0ae442bdb1a"},
{file = "coverage-7.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb76670874fdd6091eedcc856128ee48c41a9bbbb9c3f1c7c3cf169290e3ffd6"},
{file = "coverage-7.13.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6e63ccc6e0ad8986386461c3c4b737540f20426e7ec932f42e030320896c311a"},
{file = "coverage-7.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:494f5459ffa1bd45e18558cd98710c36c0b8fbfa82a5eabcbe671d80ecffbfe8"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:06cac81bf10f74034e055e903f5f946e3e26fc51c09fc9f584e4a1605d977053"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f2ffc92b46ed6e6760f1d47a71e56b5664781bc68986dbd1836b2b70c0ce2071"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0602f701057c6823e5db1b74530ce85f17c3c5be5c85fc042ac939cbd909426e"},
{file = "coverage-7.13.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:25dc33618d45456ccb1d37bce44bc78cf269909aa14c4db2e03d63146a8a1493"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:71936a8b3b977ddd0b694c28c6a34f4fff2e9dd201969a4ff5d5fc7742d614b0"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:936bc20503ce24770c71938d1369461f0c5320830800933bc3956e2a4ded930e"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:af0a583efaacc52ae2521f8d7910aff65cdb093091d76291ac5820d5e947fc1c"},
{file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f1c23e24a7000da892a312fb17e33c5f94f8b001de44b7cf8ba2e36fbd15859e"},
{file = "coverage-7.13.0-cp313-cp313t-win32.whl", hash = "sha256:5f8a0297355e652001015e93be345ee54393e45dc3050af4a0475c5a2b767d46"},
{file = "coverage-7.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6abb3a4c52f05e08460bd9acf04fec027f8718ecaa0d09c40ffbc3fbd70ecc39"},
{file = "coverage-7.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:3ad968d1e3aa6ce5be295ab5fe3ae1bf5bb4769d0f98a80a0252d543a2ef2e9e"},
{file = "coverage-7.13.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:453b7ec753cf5e4356e14fe858064e5520c460d3bbbcb9c35e55c0d21155c256"},
{file = "coverage-7.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:af827b7cbb303e1befa6c4f94fd2bf72f108089cfa0f8abab8f4ca553cf5ca5a"},
{file = "coverage-7.13.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9987a9e4f8197a1000280f7cc089e3ea2c8b3c0a64d750537809879a7b4ceaf9"},
{file = "coverage-7.13.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3188936845cd0cb114fa6a51842a304cdbac2958145d03be2377ec41eb285d19"},
{file = "coverage-7.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2bdb3babb74079f021696cb46b8bb5f5661165c385d3a238712b031a12355be"},
{file = "coverage-7.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7464663eaca6adba4175f6c19354feea61ebbdd735563a03d1e472c7072d27bb"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8069e831f205d2ff1f3d355e82f511eb7c5522d7d413f5db5756b772ec8697f8"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6fb2d5d272341565f08e962cce14cdf843a08ac43bd621783527adb06b089c4b"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5e70f92ef89bac1ac8a99b3324923b4749f008fdbd7aa9cb35e01d7a284a04f9"},
{file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4b5de7d4583e60d5fd246dd57fcd3a8aa23c6e118a8c72b38adf666ba8e7e927"},
{file = "coverage-7.13.0-cp314-cp314-win32.whl", hash = "sha256:a6c6e16b663be828a8f0b6c5027d36471d4a9f90d28444aa4ced4d48d7d6ae8f"},
{file = "coverage-7.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:0900872f2fdb3ee5646b557918d02279dc3af3dfb39029ac4e945458b13f73bc"},
{file = "coverage-7.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:3a10260e6a152e5f03f26db4a407c4c62d3830b9af9b7c0450b183615f05d43b"},
{file = "coverage-7.13.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9097818b6cc1cfb5f174e3263eba4a62a17683bcfe5c4b5d07f4c97fa51fbf28"},
{file = "coverage-7.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0018f73dfb4301a89292c73be6ba5f58722ff79f51593352759c1790ded1cabe"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:166ad2a22ee770f5656e1257703139d3533b4a0b6909af67c6b4a3adc1c98657"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f6aaef16d65d1787280943f1c8718dc32e9cf141014e4634d64446702d26e0ff"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e999e2dcc094002d6e2c7bbc1fb85b58ba4f465a760a8014d97619330cdbbbf3"},
{file = "coverage-7.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:00c3d22cf6fb1cf3bf662aaaa4e563be8243a5ed2630339069799835a9cc7f9b"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22ccfe8d9bb0d6134892cbe1262493a8c70d736b9df930f3f3afae0fe3ac924d"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:9372dff5ea15930fea0445eaf37bbbafbc771a49e70c0aeed8b4e2c2614cc00e"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:69ac2c492918c2461bc6ace42d0479638e60719f2a4ef3f0815fa2df88e9f940"},
{file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:739c6c051a7540608d097b8e13c76cfa85263ced467168dc6b477bae3df7d0e2"},
{file = "coverage-7.13.0-cp314-cp314t-win32.whl", hash = "sha256:fe81055d8c6c9de76d60c94ddea73c290b416e061d40d542b24a5871bad498b7"},
{file = "coverage-7.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:445badb539005283825959ac9fa4a28f712c214b65af3a2c464f1adc90f5fcbc"},
{file = "coverage-7.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:de7f6748b890708578fc4b7bb967d810aeb6fcc9bff4bb77dbca77dab2f9df6a"},
{file = "coverage-7.13.0-py3-none-any.whl", hash = "sha256:850d2998f380b1e266459ca5b47bc9e7daf9af1d070f66317972f382d46f1904"},
{file = "coverage-7.13.0.tar.gz", hash = "sha256:a394aa27f2d7ff9bc04cf703817773a59ad6dfbd577032e690f961d2460ee936"},
{file = "coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147"},
{file = "coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d"},
{file = "coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0"},
{file = "coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90"},
{file = "coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d"},
{file = "coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b"},
{file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6"},
{file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e"},
{file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae"},
{file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29"},
{file = "coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f"},
{file = "coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1"},
{file = "coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88"},
{file = "coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3"},
{file = "coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9"},
{file = "coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee"},
{file = "coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf"},
{file = "coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3"},
{file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef"},
{file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851"},
{file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb"},
{file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba"},
{file = "coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19"},
{file = "coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a"},
{file = "coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c"},
{file = "coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3"},
{file = "coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e"},
{file = "coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c"},
{file = "coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62"},
{file = "coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968"},
{file = "coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e"},
{file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f"},
{file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee"},
{file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf"},
{file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c"},
{file = "coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7"},
{file = "coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6"},
{file = "coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c"},
{file = "coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78"},
{file = "coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b"},
{file = "coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd"},
{file = "coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992"},
{file = "coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4"},
{file = "coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a"},
{file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766"},
{file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4"},
{file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398"},
{file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784"},
{file = "coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461"},
{file = "coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500"},
{file = "coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9"},
{file = "coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc"},
{file = "coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a"},
{file = "coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4"},
{file = "coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6"},
{file = "coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1"},
{file = "coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd"},
{file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c"},
{file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0"},
{file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e"},
{file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53"},
{file = "coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842"},
{file = "coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2"},
{file = "coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09"},
{file = "coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894"},
{file = "coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a"},
{file = "coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f"},
{file = "coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909"},
{file = "coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4"},
{file = "coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75"},
{file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9"},
{file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465"},
{file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864"},
{file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9"},
{file = "coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5"},
{file = "coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a"},
{file = "coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0"},
{file = "coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a"},
{file = "coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6"},
{file = "coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673"},
{file = "coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5"},
{file = "coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d"},
{file = "coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8"},
{file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486"},
{file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564"},
{file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7"},
{file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416"},
{file = "coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f"},
{file = "coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79"},
{file = "coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4"},
{file = "coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573"},
{file = "coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd"},
]
[package.extras]
@@ -372,13 +372,13 @@ testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastapi"
version = "0.127.0"
version = "0.127.1"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
files = [
{file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"},
{file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"},
{file = "fastapi-0.127.1-py3-none-any.whl", hash = "sha256:31d670a4f9373cc6d7994420f98e4dc46ea693145207abc39696746c83a44430"},
{file = "fastapi-0.127.1.tar.gz", hash = "sha256:946a87ee5d931883b562b6bada787d6c8178becee2683cb3f9b980d593206359"},
]
[package.dependencies]
@@ -1218,4 +1218,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = "==3.14.2"
content-hash = "7046b2edca4660e38f5f14ef0282854a4bb7892af5028c4af9e968f2c65590c5"
content-hash = "ec920fd78ea55c063bf2e4696c328056b50d8d1694f057c2d455ca2619938aac"

View File

@@ -1,20 +1,38 @@
[tool.poetry]
name = "alfred"
version = "0.1.6"
version = "0.1.7"
description = "AI agent for managing a local media library"
authors = ["Francwa <francois.hodiaumont@gmail.com>"]
readme = "README.md"
package-mode = false
[tool.alfred.settings]
image_name = "alfred_media_organizer"
librechat_version = "v0.8.1"
rag_version = "v0.7.0"
runner = "poetry"
service_name = "alfred"
[tool.alfred.security]
jwt_secret = "32:b64"
jwt_refresh_secret = "32:b64"
creds_key = "32:b64"
creds_iv = "16:b64"
meili_master_key = "32:b64"
mongo_password = "16:hex"
postgres_password = "16:hex"
qbittorrent_password = "16:hex"
[tool.poetry.dependencies]
python = "==3.14.2"
python-dotenv = "^1.0.0"
requests = "^2.32.5"
fastapi = "^0.127.0"
fastapi = "^0.127.1"
pydantic = "^2.12.4"
uvicorn = "^0.40.0"
pytest-xdist = "^3.8.0"
httpx = "^0.28.1"
pydantic-settings = "^2.12.0"
[tool.poetry.group.dev.dependencies]
pytest = "^8.0.0"
@@ -31,6 +49,8 @@ build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# Chemins où pytest cherche les tests
testpaths = ["tests"]
# Ajouter le répertoire racine au PYTHONPATH pour les imports
pythonpath = ["."]
# Patterns de fichiers/classes/fonctions à considérer comme tests
python_files = ["test_*.py"] # Fichiers commençant par "test_"

245
scripts/bootstrap.py Normal file
View File

@@ -0,0 +1,245 @@
import re
import secrets
from pathlib import Path
import tomllib
def generate_secret(rule: str) -> str:
    """
    Generate a cryptographically secure secret from a spec string.

    The spec has the form ``"<size>:<encoding>"`` where ``<size>`` is the
    number of random bytes and ``<encoding>`` selects the output format:
    ``b64`` (URL-safe base64 via ``secrets.token_urlsafe``) or ``hex``
    (lowercase hex via ``secrets.token_hex``).

    Args:
        rule: Spec string, e.g. ``"32:b64"`` or ``"16:hex"``.

    Returns:
        The generated secret string.

    Raises:
        ValueError: If the spec is malformed (missing ``:``, non-integer
            size) or names an unknown encoding.
    """
    size_part, sep, tech = rule.partition(":")
    if not sep:
        # A missing ":" previously escaped as a bare IndexError from
        # chunks[1]; report it as a descriptive ValueError instead.
        raise ValueError(f"Invalid secret spec (expected '<size>:<encoding>'): {rule}")
    try:
        size = int(size_part)
    except ValueError as err:
        raise ValueError(f"Invalid secret size in spec: {rule}") from err
    if tech == "b64":
        return secrets.token_urlsafe(size)
    if tech == "hex":
        return secrets.token_hex(size)
    raise ValueError(f"Invalid security format: {tech}")
def extract_python_version(version_string: str) -> tuple[str, str]:
    """
    Extract the full and major.minor Python versions from a poetry
    dependency string.

    Leading constraint operators (``==``, ``^``, ``~``, ``>=``, ...) are
    stripped before parsing.

    Examples:
        "==3.14.2" -> ("3.14.2", "3.14")
        "^3.14.2"  -> ("3.14.2", "3.14")
        "~3.14.2"  -> ("3.14.2", "3.14")
        "3.14.2"   -> ("3.14.2", "3.14")

    Raises:
        ValueError: If fewer than two dotted components remain after
            stripping the operators.
    """
    # lstrip with a character set removes the same leading operator
    # characters the original regex ^[=^~><]+ targeted.
    stripped = version_string.strip().lstrip("=^~><")
    major, sep, remainder = stripped.partition(".")
    if not sep:
        raise ValueError(f"Invalid Python version format: {version_string}")
    minor = remainder.split(".")[0]
    return stripped, f"{major}.{minor}"
# TODO: Refactor
def bootstrap() -> None:  # noqa: PLR0912, PLR0915
    """
    Initializes the .env file by merging .env.example with generated secrets
    and build variables from pyproject.toml.
    Also generates .env.make for Makefile.
    ALWAYS preserves existing secrets!

    Inputs (resolved relative to the repo root, the parent of this script's
    directory):
      - .env.example: template listing every expected key; drives output order.
      - pyproject.toml: [tool.alfred.security] secret specs (see
        generate_secret), [tool.alfred.settings] build vars, and the poetry
        version and dependencies tables.
      - .env (optional): existing values; keys listed under security are
        never regenerated when already present.

    Outputs:
      - .env: merged result, with MONGO_URI / POSTGRES_URI recomputed from
        their component keys.
      - .env.make: `export KEY=value` lines for Makefile consumption.

    Returns early (after printing a message) when .env.example or
    pyproject.toml is missing.
    """
    # Repo root = parent of the scripts/ directory holding this file.
    base_dir = Path(__file__).resolve().parent.parent
    env_path = base_dir / ".env"
    example_path = base_dir / ".env.example"
    if not example_path.exists():
        print(f"{example_path.name} not found.")
        return
    toml_path = base_dir / "pyproject.toml"
    if not toml_path.exists():
        print(f"{toml_path.name} not found.")
        return
    # ALWAYS load existing .env if it exists
    existing_env = {}
    if env_path.exists():
        print("🔄 Reading existing .env...")
        with open(env_path) as f:
            for line in f:
                # Only KEY=VALUE lines count; comment lines are skipped.
                # NOTE(review): values are str.strip()ed, so significant
                # leading/trailing whitespace in a value would be lost.
                if "=" in line and not line.strip().startswith("#"):
                    key, value = line.split("=", 1)
                    existing_env[key.strip()] = value.strip()
        print(f" Found {len(existing_env)} existing keys")
        print("🔧 Updating .env file (keeping secrets)...")
    else:
        print("🔧 Initializing: Creating secure .env file...")
    # Load data from pyproject.toml
    with open(toml_path, "rb") as f:
        data = tomllib.load(f)
    security_keys = data["tool"]["alfred"]["security"]
    settings_keys = data["tool"]["alfred"]["settings"]
    dependencies = data["tool"]["poetry"]["dependencies"]
    alfred_version = data["tool"]["poetry"]["version"]
    # Normalize TOML keys to UPPER_CASE for .env format (done once)
    security_keys_upper = {k.upper(): v for k, v in security_keys.items()}
    settings_keys_upper = {k.upper(): v for k, v in settings_keys.items()}
    # Extract Python version
    python_version_full, python_version_short = extract_python_version(
        dependencies["python"]
    )
    # Read .env.example
    with open(example_path) as f:
        example_lines = f.readlines()
    new_lines = []
    # Process each line from .env.example
    # The template drives the output: every KEY=VALUE line is resolved to
    # (in priority order) a preserved secret, a pyproject setting, a Python
    # or Alfred version, an existing value, or a freshly generated value.
    for raw_line in example_lines:
        line = raw_line.strip()
        if line and not line.startswith("#") and "=" in line:
            key, value = line.split("=", 1)
            key = key.strip()
            # Check if key exists in current .env (update mode)
            if key in existing_env:
                # Keep existing value for secrets
                if key in security_keys_upper:
                    new_lines.append(f"{key}={existing_env[key]}\n")
                    print(f" ↻ Kept existing {key}")
                # Update build vars from pyproject.toml
                elif key in settings_keys_upper:
                    new_value = settings_keys_upper[key]
                    if existing_env[key] != new_value:
                        new_lines.append(f"{key}={new_value}\n")
                        print(f" ↻ Updated {key}: {existing_env[key]}{new_value}")
                    else:
                        new_lines.append(f"{key}={existing_env[key]}\n")
                        print(f" ↻ Kept {key}={existing_env[key]}")
                # Update Python versions
                elif key == "PYTHON_VERSION":
                    if existing_env[key] != python_version_full:
                        new_lines.append(f"{key}={python_version_full}\n")
                        print(
                            f" ↻ Updated Python: {existing_env[key]}{python_version_full}"
                        )
                    else:
                        new_lines.append(f"{key}={existing_env[key]}\n")
                        print(f" ↻ Kept Python: {existing_env[key]}")
                elif key == "PYTHON_VERSION_SHORT":
                    if existing_env[key] != python_version_short:
                        new_lines.append(f"{key}={python_version_short}\n")
                        print(
                            f" ↻ Updated Python (short): {existing_env[key]}{python_version_short}"
                        )
                    else:
                        new_lines.append(f"{key}={existing_env[key]}\n")
                        print(f" ↻ Kept Python (short): {existing_env[key]}")
                elif key == "ALFRED_VERSION":
                    # Both branches write the pyproject version; only the
                    # log message differs (updated vs. already in sync).
                    if existing_env.get(key) != alfred_version:
                        new_lines.append(f"{key}={alfred_version}\n")
                        print(f" ↻ Updated Alfred version: {existing_env.get(key, 'N/A')}{alfred_version}")
                    else:
                        new_lines.append(f"{key}={alfred_version}\n")
                        print(f" ↻ Kept Alfred version: {alfred_version}")
                # Keep other existing values
                else:
                    new_lines.append(f"{key}={existing_env[key]}\n")
            # Key doesn't exist, generate/add it
            elif key in security_keys_upper:
                rule = security_keys_upper[key]
                secret = generate_secret(rule)
                new_lines.append(f"{key}={secret}\n")
                print(f" + Secret generated for {key} ({rule})")
            elif key in settings_keys_upper:
                value = settings_keys_upper[key]
                new_lines.append(f"{key}={value}\n")
                print(f" + Setting added: {key}={value}")
            elif key == "PYTHON_VERSION":
                new_lines.append(f"{key}={python_version_full}\n")
                print(f" + Python version: {python_version_full}")
            elif key == "PYTHON_VERSION_SHORT":
                new_lines.append(f"{key}={python_version_short}\n")
                print(f" + Python version (short): {python_version_short}")
            elif key == "ALFRED_VERSION":
                new_lines.append(f"{key}={alfred_version}\n")
                print(f" + Alfred version: {alfred_version}")
            else:
                # Unknown key with no existing value: copy the template line
                # verbatim (keeps the example's placeholder value).
                new_lines.append(raw_line)
        else:
            # Keep comments and empty lines
            new_lines.append(raw_line)
    # Compute database URIs from the generated values
    final_env = {}
    for line in new_lines:
        if "=" in line and not line.strip().startswith("#"):
            key, value = line.split("=", 1)
            final_env[key.strip()] = value.strip()
    # Compute MONGO_URI
    # Only recomputed when both user and password were resolved above; the
    # .get() defaults cover the remaining optional host/port/db keys.
    if "MONGO_USER" in final_env and "MONGO_PASSWORD" in final_env:
        mongo_uri = (
            f"mongodb://{final_env.get('MONGO_USER', 'alfred')}:"
            f"{final_env.get('MONGO_PASSWORD', '')}@"
            f"{final_env.get('MONGO_HOST', 'mongodb')}:"
            f"{final_env.get('MONGO_PORT', '27017')}/"
            f"{final_env.get('MONGO_DB_NAME', 'alfred')}?authSource=admin"
        )
        # Update MONGO_URI in new_lines
        for i, line in enumerate(new_lines):
            if line.startswith("MONGO_URI="):
                new_lines[i] = f"MONGO_URI={mongo_uri}\n"
                print(" ✓ Computed MONGO_URI")
                break
    # Compute POSTGRES_URI
    if "POSTGRES_USER" in final_env and "POSTGRES_PASSWORD" in final_env:
        postgres_uri = (
            f"postgresql://{final_env.get('POSTGRES_USER', 'alfred')}:"
            f"{final_env.get('POSTGRES_PASSWORD', '')}@"
            f"{final_env.get('POSTGRES_HOST', 'vectordb')}:"
            f"{final_env.get('POSTGRES_PORT', '5432')}/"
            f"{final_env.get('POSTGRES_DB_NAME', 'alfred')}"
        )
        # Update POSTGRES_URI in new_lines
        for i, line in enumerate(new_lines):
            if line.startswith("POSTGRES_URI="):
                new_lines[i] = f"POSTGRES_URI={postgres_uri}\n"
                print(" ✓ Computed POSTGRES_URI")
                break
    # Write .env file
    with open(env_path, "w", encoding="utf-8") as f:
        f.writelines(new_lines)
    print(f"\n{env_path.name} generated successfully.")
    # Generate .env.make for Makefile
    # NOTE(review): these settings_keys lookups raise KeyError if any of the
    # expected [tool.alfred.settings] entries is missing — presumably
    # intentional (fail fast on an incomplete pyproject.toml); confirm.
    env_make_path = base_dir / ".env.make"
    with open(env_make_path, "w", encoding="utf-8") as f:
        f.write("# Auto-generated from pyproject.toml by bootstrap.py\n")
        f.write(f"export ALFRED_VERSION={alfred_version}\n")
        f.write(f"export PYTHON_VERSION={python_version_full}\n")
        f.write(f"export PYTHON_VERSION_SHORT={python_version_short}\n")
        f.write(f"export RUNNER={settings_keys['runner']}\n")
        f.write(f"export IMAGE_NAME={settings_keys['image_name']}\n")
        f.write(f"export SERVICE_NAME={settings_keys['service_name']}\n")
        f.write(f"export LIBRECHAT_VERSION={settings_keys['librechat_version']}\n")
        f.write(f"export RAG_VERSION={settings_keys['rag_version']}\n")
    print(f"{env_make_path.name} generated for Makefile.")
    print("\n⚠️ Reminder: Please manually add your API keys to the .env file.")
if __name__ == "__main__":
    bootstrap()

View File

@@ -1,20 +1,24 @@
"""Pytest configuration and shared fixtures."""
import sys
from pathlib import Path
# TODO: Moved directory, should not be necessary anymore but need to check !!
# Ajouter le dossier parent (brain) au PYTHONPATH
# sys.path.insert(0, str(Path(__file__).parent.parent))
import shutil
import sys
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, Mock
import pytest
from infrastructure.persistence import Memory, set_memory
from alfred.infrastructure.persistence import Memory, set_memory
from alfred.settings import settings
@pytest.fixture
def mock_settings():
"""Create a mock Settings instance for testing."""
return settings
@pytest.fixture
@@ -25,6 +29,16 @@ def temp_dir():
shutil.rmtree(dirpath)
@pytest.fixture(autouse=True)
def mock_memory_storage_dir(monkeypatch):
"""Override MEMORY_STORAGE_DIR for all tests to use a temp directory."""
test_dir = tempfile.mkdtemp()
monkeypatch.setenv("MEMORY_STORAGE_DIR", test_dir)
yield
# Cleanup
shutil.rmtree(test_dir, ignore_errors=True)
@pytest.fixture
def memory(temp_dir):
"""Create a fresh Memory instance for testing."""
@@ -255,7 +269,6 @@ def mock_deepseek():
def test_something(mock_deepseek):
# Your test code here
"""
import sys
from unittest.mock import Mock
# Save the original module if it exists

View File

@@ -2,31 +2,31 @@
from unittest.mock import Mock
from agent.agent import Agent
from infrastructure.persistence import get_memory
from alfred.agent.agent import Agent
from alfred.infrastructure.persistence import get_memory
class TestAgentInit:
"""Tests for Agent initialization."""
def test_init(self, memory, mock_llm):
def test_init(self, memory, mock_settings, mock_llm):
"""Should initialize agent with LLM."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=10)
assert agent.llm is mock_llm
assert agent.tools is not None
assert agent.prompt_builder is not None
assert agent.max_tool_iterations == 5
assert agent.max_tool_iterations == 10
def test_init_custom_iterations(self, memory, mock_llm):
def test_init_custom_iterations(self, memory, mock_settings, mock_llm):
"""Should accept custom max iterations."""
agent = Agent(llm=mock_llm, max_tool_iterations=10)
agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=10)
assert agent.max_tool_iterations == 10
def test_tools_registered(self, memory, mock_llm):
def test_tools_registered(self, memory, mock_settings, mock_llm):
"""Should register all tools."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
expected_tools = [
"set_path_for_folder",
@@ -46,9 +46,9 @@ class TestAgentInit:
class TestExecuteToolCall:
"""Tests for _execute_tool_call method."""
def test_execute_known_tool(self, memory, mock_llm, real_folder):
def test_execute_known_tool(self, memory, mock_settings, mock_llm, real_folder):
"""Should execute known tool."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
tool_call = {
@@ -62,9 +62,9 @@ class TestExecuteToolCall:
assert result["status"] == "ok"
def test_execute_unknown_tool(self, memory, mock_llm):
def test_execute_unknown_tool(self, memory, mock_settings, mock_llm):
"""Should return error for unknown tool."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = {
"id": "call_123",
@@ -75,9 +75,9 @@ class TestExecuteToolCall:
assert result["error"] == "unknown_tool"
assert "available_tools" in result
def test_execute_with_bad_args(self, memory, mock_llm):
def test_execute_with_bad_args(self, memory, mock_settings, mock_llm):
"""Should return error for bad arguments."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = {
"id": "call_123",
@@ -87,9 +87,9 @@ class TestExecuteToolCall:
assert result["error"] == "bad_args"
def test_execute_tracks_errors(self, memory, mock_llm):
def test_execute_tracks_errors(self, memory, mock_settings, mock_llm):
"""Should track errors in episodic memory."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
# Use invalid arguments to trigger a TypeError
tool_call = {
@@ -104,9 +104,9 @@ class TestExecuteToolCall:
mem = get_memory()
assert len(mem.episodic.recent_errors) > 0
def test_execute_with_invalid_json(self, memory, mock_llm):
def test_execute_with_invalid_json(self, memory, mock_settings, mock_llm):
"""Should handle invalid JSON arguments."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
tool_call = {
"id": "call_123",
@@ -120,17 +120,17 @@ class TestExecuteToolCall:
class TestStep:
"""Tests for step method."""
def test_step_text_response(self, memory, mock_llm):
def test_step_text_response(self, memory, mock_settings, mock_llm):
"""Should return text response when no tool call."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
response = agent.step("Hello")
assert response == "I found what you're looking for!"
def test_step_saves_to_history(self, memory, mock_llm):
def test_step_saves_to_history(self, memory, mock_settings, mock_llm):
"""Should save conversation to STM history."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("Hi there")
@@ -141,11 +141,13 @@ class TestStep:
assert history[0]["content"] == "Hi there"
assert history[1]["role"] == "assistant"
def test_step_with_tool_call(self, memory, mock_llm_with_tool_call, real_folder):
def test_step_with_tool_call(
self, memory, mock_settings, mock_llm_with_tool_call, real_folder
):
"""Should execute tool and continue."""
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
agent = Agent(llm=mock_llm_with_tool_call)
agent = Agent(settings=mock_settings, llm=mock_llm_with_tool_call)
response = agent.step("List my downloads")
@@ -157,7 +159,7 @@ class TestStep:
assert first_call_args[1]["tools"] is not None, "Tools not passed to LLM!"
assert len(first_call_args[1]["tools"]) > 0, "Tools list is empty!"
def test_step_max_iterations(self, memory, mock_llm):
def test_step_max_iterations(self, memory, mock_settings, mock_llm):
"""Should stop after max iterations."""
call_count = [0]
@@ -185,15 +187,15 @@ class TestStep:
return {"role": "assistant", "content": "I couldn't complete the task."}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3)
agent = Agent(settings=mock_settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Do something")
assert call_count[0] == 4
def test_step_includes_history(self, memory_with_history, mock_llm):
def test_step_includes_history(self, memory_with_history, mock_settings, mock_llm):
"""Should include conversation history in prompt."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("New message")
@@ -201,10 +203,10 @@ class TestStep:
messages_content = [m.get("content", "") for m in call_args]
assert any("Hello" in str(c) for c in messages_content)
def test_step_includes_events(self, memory, mock_llm):
def test_step_includes_events(self, memory, mock_settings, mock_llm):
"""Should include unread events in prompt."""
memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"})
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("What's new?")
@@ -212,9 +214,9 @@ class TestStep:
messages_content = [m.get("content", "") for m in call_args]
assert any("download" in str(c).lower() for c in messages_content)
def test_step_saves_ltm(self, memory, mock_llm, temp_dir):
def test_step_saves_ltm(self, memory, mock_settings, mock_llm, temp_dir):
"""Should save LTM after step."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("Hello")
@@ -225,7 +227,7 @@ class TestStep:
class TestAgentIntegration:
"""Integration tests for Agent."""
def test_multiple_tool_calls(self, memory, mock_llm, real_folder):
def test_multiple_tool_calls(self, memory, mock_settings, mock_llm, real_folder):
"""Should handle multiple tool calls in sequence."""
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
memory.ltm.set_config("movie_folder", str(real_folder["movies"]))
@@ -276,7 +278,7 @@ class TestAgentIntegration:
}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=mock_settings, llm=mock_llm)
agent.step("List my downloads and movies")

View File

@@ -4,8 +4,9 @@ from unittest.mock import Mock
import pytest
from agent.agent import Agent
from infrastructure.persistence import get_memory
from alfred.agent.agent import Agent
from alfred.infrastructure.persistence import get_memory
from alfred.settings import settings
class TestExecuteToolCallEdgeCases:
@@ -13,10 +14,10 @@ class TestExecuteToolCallEdgeCases:
def test_tool_returns_none(self, memory, mock_llm):
"""Should handle tool returning None."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
# Mock a tool that returns None
from agent.registry import Tool
from alfred.agent.registry import Tool
agent.tools["test_tool"] = Tool(
name="test_tool", description="Test", func=lambda: None, parameters={}
@@ -32,9 +33,9 @@ class TestExecuteToolCallEdgeCases:
def test_tool_raises_keyboard_interrupt(self, memory, mock_llm):
"""Should propagate KeyboardInterrupt."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
from agent.registry import Tool
from alfred.agent.registry import Tool
def raise_interrupt():
raise KeyboardInterrupt()
@@ -53,7 +54,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_with_extra_args(self, memory, mock_llm, real_folder):
"""Should handle extra arguments gracefully."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
tool_call = {
@@ -70,7 +71,7 @@ class TestExecuteToolCallEdgeCases:
def test_tool_with_wrong_type_args(self, memory, mock_llm):
"""Should handle wrong argument types."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
tool_call = {
"id": "call_123",
@@ -90,7 +91,7 @@ class TestStepEdgeCases:
def test_step_with_empty_input(self, memory, mock_llm):
"""Should handle empty user input."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("")
@@ -98,7 +99,7 @@ class TestStepEdgeCases:
def test_step_with_very_long_input(self, memory, mock_llm):
"""Should handle very long user input."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
long_input = "x" * 100000
response = agent.step(long_input)
@@ -112,7 +113,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": "日本語の応答"}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("日本語の質問")
@@ -125,7 +126,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": ""}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("Hello")
@@ -134,7 +135,7 @@ class TestStepEdgeCases:
def test_step_llm_raises_exception(self, memory, mock_llm):
"""Should propagate LLM exceptions."""
mock_llm.complete.side_effect = Exception("LLM Error")
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
with pytest.raises(Exception, match="LLM Error"):
agent.step("Hello")
@@ -162,7 +163,7 @@ class TestStepEdgeCases:
return {"role": "assistant", "content": "Done looping"}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3)
agent = Agent(settings=settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Loop test")
@@ -170,7 +171,7 @@ class TestStepEdgeCases:
def test_step_preserves_history_order(self, memory, mock_llm):
"""Should preserve message order in history."""
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("First")
agent.step("Second")
@@ -189,7 +190,7 @@ class TestStepEdgeCases:
[{"index": 1, "label": "Option 1"}],
{},
)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello")
@@ -206,7 +207,7 @@ class TestStepEdgeCases:
"progress": 50,
}
)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello")
@@ -217,7 +218,7 @@ class TestStepEdgeCases:
def test_step_clears_events_after_notification(self, memory, mock_llm):
"""Should mark events as read after notification."""
memory.episodic.add_background_event("test_event", {"data": "test"})
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("Hello")
@@ -230,8 +231,8 @@ class TestAgentConcurrencyEdgeCases:
def test_multiple_agents_same_memory(self, memory, mock_llm):
"""Should handle multiple agents with same memory."""
agent1 = Agent(llm=mock_llm)
agent2 = Agent(llm=mock_llm)
agent1 = Agent(settings=settings, llm=mock_llm)
agent2 = Agent(settings=settings, llm=mock_llm)
agent1.step("From agent 1")
agent2.step("From agent 2")
@@ -266,7 +267,7 @@ class TestAgentConcurrencyEdgeCases:
return {"role": "assistant", "content": "Path set successfully."}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("Set movie folder")
@@ -300,7 +301,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "The folder is not configured."}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
response = agent.step("List downloads")
@@ -329,7 +330,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "Error occurred."}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm)
agent = Agent(settings=settings, llm=mock_llm)
agent.step("Set folder")
@@ -359,7 +360,7 @@ class TestAgentErrorRecovery:
return {"role": "assistant", "content": "All attempts failed."}
mock_llm.complete = Mock(side_effect=mock_complete)
agent = Agent(llm=mock_llm, max_tool_iterations=3)
agent = Agent(settings=settings, llm=mock_llm, max_tool_iterations=3)
agent.step("Try multiple times")

View File

@@ -10,7 +10,7 @@ class TestHealthEndpoint:
def test_health_check(self, memory):
"""Should return healthy status."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -25,7 +25,7 @@ class TestModelsEndpoint:
def test_list_models(self, memory):
"""Should return model list."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -43,7 +43,7 @@ class TestMemoryEndpoints:
def test_get_memory_state(self, memory):
"""Should return full memory state."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -57,7 +57,7 @@ class TestMemoryEndpoints:
def test_get_search_results_empty(self, memory):
"""Should return empty when no search results."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -69,7 +69,7 @@ class TestMemoryEndpoints:
def test_get_search_results_with_data(self, memory_with_search_results):
"""Should return search results when available."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -83,7 +83,7 @@ class TestMemoryEndpoints:
def test_clear_session(self, memory_with_search_results):
"""Should clear session memories."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -102,10 +102,10 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_success(self, memory):
"""Should return chat completion."""
from app import app
from alfred.app import app
# Patch the agent's step method directly
with patch("app.agent.step", return_value="Hello! How can I help?"):
with patch("alfred.app.agent.step", return_value="Hello! How can I help?"):
client = TestClient(app)
response = client.post(
@@ -123,7 +123,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_no_user_message(self, memory):
"""Should return error if no user message."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -146,7 +146,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_empty_messages(self, memory):
"""Should return error for empty messages."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -162,7 +162,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_invalid_json(self, memory):
"""Should return error for invalid JSON."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -176,9 +176,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_streaming(self, memory):
"""Should support streaming mode."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Streaming response"):
with patch("alfred.app.agent.step", return_value="Streaming response"):
client = TestClient(app)
response = client.post(
@@ -195,9 +195,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_extracts_last_user_message(self, memory):
"""Should use last user message."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Response") as mock_step:
with patch("alfred.app.agent.step", return_value="Response") as mock_step:
client = TestClient(app)
response = client.post(
@@ -218,9 +218,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_response_format(self, memory):
"""Should return OpenAI-compatible format."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Test response"):
with patch("alfred.app.agent.step", return_value="Test response"):
client = TestClient(app)
response = client.post(

View File

@@ -2,15 +2,18 @@
from unittest.mock import Mock, patch
import pytest
from fastapi.testclient import TestClient
class TestChatCompletionsEdgeCases:
"""Edge case tests for /v1/chat/completions endpoint."""
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_very_long_message(self, memory):
"""Should handle very long user message."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
# Patch the agent's LLM directly
mock_llm = Mock()
@@ -30,9 +33,11 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_unicode_message(self, memory):
"""Should handle unicode in message."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {
@@ -55,9 +60,11 @@ class TestChatCompletionsEdgeCases:
content = response.json()["choices"][0]["message"]["content"]
assert "日本語" in content or len(content) > 0
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_special_characters_in_message(self, memory):
"""Should handle special characters."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -78,12 +85,12 @@ class TestChatCompletionsEdgeCases:
def test_empty_content_in_message(self, memory):
"""Should handle empty content in message."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm.complete.return_value = "Response"
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -100,11 +107,11 @@ class TestChatCompletionsEdgeCases:
def test_null_content_in_message(self, memory):
"""Should handle null content in message."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -118,13 +125,14 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 422
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_missing_content_field(self, memory):
"""Should handle missing content field."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -141,11 +149,11 @@ class TestChatCompletionsEdgeCases:
def test_missing_role_field(self, memory):
"""Should handle missing role field."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -162,12 +170,12 @@ class TestChatCompletionsEdgeCases:
def test_invalid_role(self, memory):
"""Should handle invalid role."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm.complete.return_value = "Response"
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -182,9 +190,11 @@ class TestChatCompletionsEdgeCases:
# Should reject or ignore invalid role
assert response.status_code in [200, 400, 422]
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_many_messages(self, memory):
"""Should handle many messages in conversation."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -210,11 +220,11 @@ class TestChatCompletionsEdgeCases:
def test_only_system_messages(self, memory):
"""Should reject if only system messages."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -233,11 +243,11 @@ class TestChatCompletionsEdgeCases:
def test_only_assistant_messages(self, memory):
"""Should reject if only assistant messages."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -255,11 +265,11 @@ class TestChatCompletionsEdgeCases:
def test_messages_not_array(self, memory):
"""Should reject if messages is not array."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -276,11 +286,11 @@ class TestChatCompletionsEdgeCases:
def test_message_not_object(self, memory):
"""Should handle message that is not object."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -295,9 +305,11 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 422
# Pydantic validation error
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_extra_fields_in_request(self, memory):
"""Should ignore extra fields in request."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -320,8 +332,9 @@ class TestChatCompletionsEdgeCases:
def test_streaming_with_tool_call(self, memory, real_folder):
"""Should handle streaming with tool execution."""
from app import agent, app
from infrastructure.persistence import get_memory
from alfred.agent import agent
from alfred.app import app
from alfred.infrastructure.persistence import get_memory
mem = get_memory()
mem.ltm.set_config("download_folder", str(real_folder["downloads"]))
@@ -363,9 +376,11 @@ class TestChatCompletionsEdgeCases:
assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_concurrent_requests_simulation(self, memory):
"""Should handle rapid sequential requests."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -383,9 +398,11 @@ class TestChatCompletionsEdgeCases:
)
assert response.status_code == 200
@pytest.mark.skip(reason="502 - Local LLM not running yet")
def test_llm_returns_json_in_response(self, memory):
"""Should handle LLM returning JSON in text response."""
from app import agent, app
from alfred.agent import agent
from alfred.app import app
mock_llm = Mock()
mock_llm.complete.return_value = {
@@ -414,9 +431,9 @@ class TestMemoryEndpointsEdgeCases:
def test_memory_state_with_large_data(self, memory):
"""Should handle large memory state."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
# Add lots of data to memory
for i in range(100):
@@ -432,9 +449,9 @@ class TestMemoryEndpointsEdgeCases:
def test_memory_state_with_unicode(self, memory):
"""Should handle unicode in memory state."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.ltm.set_config("japanese", "日本語テスト")
memory.stm.add_message("user", "🎬 Movie request")
@@ -448,9 +465,9 @@ class TestMemoryEndpointsEdgeCases:
def test_search_results_with_special_chars(self, memory):
"""Should handle special characters in search results."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.episodic.store_search_results(
"Test <script>alert('xss')</script>",
@@ -467,9 +484,9 @@ class TestMemoryEndpointsEdgeCases:
def test_clear_session_idempotent(self, memory):
"""Should be idempotent - multiple clears should work."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -480,9 +497,9 @@ class TestMemoryEndpointsEdgeCases:
def test_clear_session_preserves_ltm(self, memory):
"""Should preserve LTM after clear."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.ltm.set_config("important", "data")
memory.stm.add_message("user", "Hello")
@@ -502,9 +519,9 @@ class TestHealthEndpointEdgeCases:
def test_health_returns_version(self, memory):
"""Should return version in health check."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -515,9 +532,9 @@ class TestHealthEndpointEdgeCases:
def test_health_with_query_params(self, memory):
"""Should ignore query parameters."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -531,9 +548,9 @@ class TestModelsEndpointEdgeCases:
def test_models_response_format(self, memory):
"""Should return OpenAI-compatible format."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)

View File

@@ -2,7 +2,7 @@
import pytest
from agent.config import ConfigurationError, Settings
from alfred.settings import ConfigurationError, Settings
class TestConfigValidation:
@@ -11,17 +11,17 @@ class TestConfigValidation:
def test_invalid_temperature_raises_error(self):
"""Verify invalid temperature is rejected."""
with pytest.raises(ConfigurationError, match="Temperature"):
Settings(temperature=3.0) # > 2.0
Settings(llm_temperature=3.0) # > 2.0
with pytest.raises(ConfigurationError, match="Temperature"):
Settings(temperature=-0.1) # < 0.0
Settings(llm_temperature=-0.1) # < 0.0
def test_valid_temperature_accepted(self):
"""Verify valid temperature is accepted."""
# Should not raise
Settings(temperature=0.0)
Settings(temperature=1.0)
Settings(temperature=2.0)
Settings(llm_temperature=0.0)
Settings(llm_temperature=1.0)
Settings(llm_temperature=2.0)
def test_invalid_max_iterations_raises_error(self):
"""Verify invalid max_iterations is rejected."""
@@ -126,7 +126,7 @@ class TestConfigDefaults:
"""Verify default temperature is reasonable."""
settings = Settings()
assert 0.0 <= settings.temperature <= 2.0
assert 0.0 <= settings.llm_temperature <= 2.0
def test_default_max_iterations(self):
"""Verify default max_iterations is reasonable."""
@@ -153,11 +153,11 @@ class TestConfigEnvironmentVariables:
def test_loads_temperature_from_env(self, monkeypatch):
"""Verify temperature is loaded from environment."""
monkeypatch.setenv("TEMPERATURE", "0.5")
monkeypatch.setenv("LLM_TEMPERATURE", "0.5")
settings = Settings()
assert settings.temperature == 0.5
assert settings.llm_temperature == 0.5
def test_loads_max_iterations_from_env(self, monkeypatch):
"""Verify max_iterations is loaded from environment."""
@@ -185,7 +185,7 @@ class TestConfigEnvironmentVariables:
def test_invalid_env_value_raises_error(self, monkeypatch):
"""Verify invalid environment value raises error."""
monkeypatch.setenv("TEMPERATURE", "invalid")
monkeypatch.setenv("LLM_TEMPERATURE", "invalid")
with pytest.raises(ValueError):
Settings()

View File

@@ -5,13 +5,13 @@ from unittest.mock import patch
import pytest
from agent.config import ConfigurationError, Settings
from agent.parameters import (
from alfred.agent.parameters import (
REQUIRED_PARAMETERS,
ParameterSchema,
format_parameters_for_prompt,
get_missing_required_parameters,
)
from alfred.settings import ConfigurationError, Settings
class TestSettingsEdgeCases:
@@ -22,31 +22,31 @@ class TestSettingsEdgeCases:
with patch.dict(os.environ, {}, clear=True):
settings = Settings()
assert settings.temperature == 0.2
assert settings.max_tool_iterations == 5
assert settings.llm_temperature == 0.2
assert settings.max_tool_iterations == 10
assert settings.request_timeout == 30
def test_temperature_boundary_low(self):
"""Should accept temperature at lower boundary."""
with patch.dict(os.environ, {"TEMPERATURE": "0.0"}, clear=True):
with patch.dict(os.environ, {"LLM_TEMPERATURE": "0.0"}, clear=True):
settings = Settings()
assert settings.temperature == 0.0
assert settings.llm_temperature == 0.0
def test_temperature_boundary_high(self):
"""Should accept temperature at upper boundary."""
with patch.dict(os.environ, {"TEMPERATURE": "2.0"}, clear=True):
with patch.dict(os.environ, {"LLM_TEMPERATURE": "2.0"}, clear=True):
settings = Settings()
assert settings.temperature == 2.0
assert settings.llm_temperature == 2.0
def test_temperature_below_boundary(self):
"""Should reject temperature below 0."""
with patch.dict(os.environ, {"TEMPERATURE": "-0.1"}, clear=True):
with patch.dict(os.environ, {"LLM_TEMPERATURE": "-0.1"}, clear=True):
with pytest.raises(ConfigurationError):
Settings()
def test_temperature_above_boundary(self):
"""Should reject temperature above 2."""
with patch.dict(os.environ, {"TEMPERATURE": "2.1"}, clear=True):
with patch.dict(os.environ, {"LLM_TEMPERATURE": "2.1"}, clear=True):
with pytest.raises(ConfigurationError):
Settings()
@@ -162,7 +162,7 @@ class TestSettingsEdgeCases:
def test_non_numeric_temperature(self):
"""Should handle non-numeric temperature."""
with patch.dict(os.environ, {"TEMPERATURE": "not-a-number"}, clear=True):
with patch.dict(os.environ, {"LLM_TEMPERATURE": "not-a-number"}, clear=True):
with pytest.raises((ConfigurationError, ValueError)):
Settings()

View File

@@ -4,14 +4,14 @@ from datetime import datetime
import pytest
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.exceptions import ValidationError
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.exceptions import ValidationError
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
class TestImdbIdEdgeCases:

View File

@@ -4,7 +4,7 @@ from datetime import datetime
import pytest
from infrastructure.persistence import (
from alfred.infrastructure.persistence import (
EpisodicMemory,
LongTermMemory,
Memory,
@@ -13,7 +13,7 @@ from infrastructure.persistence import (
has_memory,
init_memory,
)
from infrastructure.persistence.context import _memory_ctx
from alfred.infrastructure.persistence.context import _memory_ctx
def is_iso_format(s: str) -> bool:

View File

@@ -5,7 +5,7 @@ import os
import pytest
from infrastructure.persistence import (
from alfred.infrastructure.persistence import (
EpisodicMemory,
LongTermMemory,
Memory,
@@ -14,7 +14,7 @@ from infrastructure.persistence import (
init_memory,
set_memory,
)
from infrastructure.persistence.context import _memory_ctx
from alfred.infrastructure.persistence.context import _memory_ctx
class TestLongTermMemoryEdgeCases:

View File

@@ -1,7 +1,8 @@
"""Tests for PromptBuilder."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilder:
@@ -9,14 +10,14 @@ class TestPromptBuilder:
def test_init(self, memory):
"""Should initialize with tools."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
assert builder.tools is tools
def test_build_system_prompt(self, memory):
"""Should build a complete system prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -27,7 +28,7 @@ class TestPromptBuilder:
def test_includes_tools(self, memory):
"""Should include all tool descriptions."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -38,7 +39,7 @@ class TestPromptBuilder:
def test_includes_config(self, memory):
"""Should include current configuration."""
memory.ltm.set_config("download_folder", "/path/to/downloads")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -47,7 +48,7 @@ class TestPromptBuilder:
def test_includes_search_results(self, memory_with_search_results):
"""Should include search results summary."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -58,7 +59,7 @@ class TestPromptBuilder:
def test_includes_search_result_names(self, memory_with_search_results):
"""Should include search result names."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -74,7 +75,7 @@ class TestPromptBuilder:
"progress": 50,
}
)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -89,7 +90,7 @@ class TestPromptBuilder:
[{"index": 1, "label": "Option 1"}, {"index": 2, "label": "Option 2"}],
{},
)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -100,7 +101,7 @@ class TestPromptBuilder:
def test_includes_last_error(self, memory):
"""Should include last error."""
memory.episodic.add_error("find_torrent", "API timeout")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -111,7 +112,7 @@ class TestPromptBuilder:
def test_includes_workflow(self, memory):
"""Should include current workflow."""
memory.stm.start_workflow("download", {"title": "Inception"})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -122,7 +123,7 @@ class TestPromptBuilder:
def test_includes_topic(self, memory):
"""Should include current topic."""
memory.stm.set_topic("selecting_torrent")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -134,7 +135,7 @@ class TestPromptBuilder:
"""Should include extracted entities."""
memory.stm.set_entity("movie_title", "Inception")
memory.stm.set_entity("year", 2010)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -144,7 +145,7 @@ class TestPromptBuilder:
def test_includes_rules(self, memory):
"""Should include important rules."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -154,7 +155,7 @@ class TestPromptBuilder:
def test_includes_examples(self, memory):
"""Should include usage examples."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -164,7 +165,7 @@ class TestPromptBuilder:
def test_empty_context(self, memory):
"""Should handle empty context gracefully."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -179,7 +180,7 @@ class TestPromptBuilder:
results = [{"name": f"Torrent {i}", "seeders": i} for i in range(20)]
memory.episodic.store_search_results("test", results)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -198,7 +199,7 @@ class TestFormatToolsDescription:
def test_format_all_tools(self, memory):
"""Should format all tools."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
desc = builder._format_tools_description()
@@ -209,7 +210,7 @@ class TestFormatToolsDescription:
def test_includes_parameters(self, memory):
"""Should include parameter schemas."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
desc = builder._format_tools_description()
@@ -223,7 +224,7 @@ class TestFormatEpisodicContext:
def test_empty_episodic(self, memory):
"""Should return empty string for empty episodic."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory)
@@ -232,7 +233,7 @@ class TestFormatEpisodicContext:
def test_with_search_results(self, memory_with_search_results):
"""Should format search results."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory_with_search_results)
@@ -246,7 +247,7 @@ class TestFormatEpisodicContext:
memory.episodic.add_active_download({"task_id": "1", "name": "Download"})
memory.episodic.add_error("action", "error")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory)
@@ -261,7 +262,7 @@ class TestFormatStmContext:
def test_empty_stm(self, memory):
"""Should return language info even for empty STM."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)
@@ -273,7 +274,7 @@ class TestFormatStmContext:
"""Should format workflow."""
memory.stm.start_workflow("download", {"title": "Test"})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)
@@ -287,7 +288,7 @@ class TestFormatStmContext:
memory.stm.set_topic("searching")
memory.stm.set_entity("key", "value")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)

View File

@@ -1,7 +1,8 @@
"""Critical tests for prompt builder - Tests that would have caught bugs."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilderToolsInjection:
@@ -9,19 +10,19 @@ class TestPromptBuilderToolsInjection:
def test_system_prompt_includes_all_tools(self, memory):
"""CRITICAL: Verify all tools are mentioned in system prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
# Verify each tool is mentioned
for tool_name in tools.keys():
assert (
tool_name in prompt
), f"Tool {tool_name} not mentioned in system prompt"
assert tool_name in prompt, (
f"Tool {tool_name} not mentioned in system prompt"
)
def test_tools_spec_contains_all_registered_tools(self, memory):
"""CRITICAL: Verify build_tools_spec() returns all tools."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -32,7 +33,7 @@ class TestPromptBuilderToolsInjection:
def test_tools_spec_is_not_empty(self, memory):
"""CRITICAL: Verify tools spec is never empty."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -40,7 +41,7 @@ class TestPromptBuilderToolsInjection:
def test_tools_spec_format_matches_openai(self, memory):
"""CRITICAL: Verify tools spec format is OpenAI-compatible."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -58,7 +59,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_current_topic(self, memory):
"""Verify current topic is included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.stm.set_topic("test_topic")
@@ -68,7 +69,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_extracted_entities(self, memory):
"""Verify extracted entities are included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.stm.set_entity("test_key", "test_value")
@@ -78,7 +79,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_search_results(self, memory_with_search_results):
"""Verify search results are included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -88,7 +89,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_active_downloads(self, memory):
"""Verify active downloads are included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.episodic.add_active_download(
@@ -102,7 +103,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_recent_errors(self, memory):
"""Verify recent errors are included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.episodic.add_error("test_action", "test error message")
@@ -113,7 +114,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_configuration(self, memory):
"""Verify configuration is included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.ltm.set_config("download_folder", "/test/downloads")
@@ -124,7 +125,7 @@ class TestPromptBuilderMemoryContext:
def test_prompt_includes_language(self, memory):
"""Verify language is included in prompt."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.stm.set_language("fr")
@@ -139,7 +140,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_is_not_empty(self, memory):
"""Verify system prompt is never empty."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -148,7 +149,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_base_instruction(self, memory):
"""Verify system prompt includes base instruction."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -156,7 +157,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_rules(self, memory):
"""Verify system prompt includes important rules."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -164,7 +165,7 @@ class TestPromptBuilderStructure:
def test_system_prompt_includes_examples(self, memory):
"""Verify system prompt includes examples."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -172,7 +173,7 @@ class TestPromptBuilderStructure:
def test_tools_description_format(self, memory):
"""Verify tools are properly formatted in description."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
description = builder._format_tools_description()
@@ -185,7 +186,7 @@ class TestPromptBuilderStructure:
def test_episodic_context_format(self, memory_with_search_results):
"""Verify episodic context is properly formatted."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory_with_search_results)
@@ -195,7 +196,7 @@ class TestPromptBuilderStructure:
def test_stm_context_format(self, memory):
"""Verify STM context is properly formatted."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.stm.set_topic("test_topic")
@@ -208,7 +209,7 @@ class TestPromptBuilderStructure:
def test_config_context_format(self, memory):
"""Verify config context is properly formatted."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.ltm.set_config("test_key", "test_value")
@@ -224,7 +225,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_no_memory_context(self, memory):
"""Verify prompt works with empty memory."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
# Memory is empty
@@ -254,7 +255,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_unicode_in_memory(self, memory):
"""Verify prompt handles unicode in memory."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
memory.stm.set_entity("movie", "Amélie 🎬")
@@ -266,7 +267,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_long_search_results(self, memory):
"""Verify prompt handles many search results."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
# Add many results

View File

@@ -1,7 +1,8 @@
"""Edge case tests for PromptBuilder."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
from alfred.settings import settings
class TestPromptBuilderEdgeCases:
@@ -9,7 +10,7 @@ class TestPromptBuilderEdgeCases:
def test_prompt_with_empty_memory(self, memory):
"""Should build prompt with completely empty memory."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -22,7 +23,7 @@ class TestPromptBuilderEdgeCases:
memory.ltm.set_config("folder_日本語", "/path/to/日本語")
memory.ltm.set_config("emoji_folder", "/path/🎬")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -35,7 +36,7 @@ class TestPromptBuilderEdgeCases:
long_path = "/very/long/path/" + "x" * 1000
memory.ltm.set_config("download_folder", long_path)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -47,7 +48,7 @@ class TestPromptBuilderEdgeCases:
"""Should escape special characters in config."""
memory.ltm.set_config("path", '/path/with "quotes" and \\backslash')
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -60,7 +61,7 @@ class TestPromptBuilderEdgeCases:
results = [{"name": f"Torrent {i}", "seeders": i} for i in range(50)]
memory.episodic.store_search_results("test query", results)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -79,7 +80,7 @@ class TestPromptBuilderEdgeCases:
]
memory.episodic.store_search_results("test", results)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -98,7 +99,7 @@ class TestPromptBuilderEdgeCases:
}
)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -112,7 +113,7 @@ class TestPromptBuilderEdgeCases:
for i in range(10):
memory.episodic.add_error(f"action_{i}", f"Error {i}")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -125,7 +126,7 @@ class TestPromptBuilderEdgeCases:
options = [{"index": i, "label": f"Option {i}"} for i in range(20)]
memory.episodic.set_pending_question("Choose one:", options, {})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -146,7 +147,7 @@ class TestPromptBuilderEdgeCases:
)
memory.stm.update_workflow_stage("searching_torrents")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -160,7 +161,7 @@ class TestPromptBuilderEdgeCases:
for i in range(50):
memory.stm.set_entity(f"entity_{i}", f"value_{i}")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -174,7 +175,7 @@ class TestPromptBuilderEdgeCases:
memory.stm.set_entity("zero", 0)
memory.stm.set_entity("false", False)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -187,7 +188,7 @@ class TestPromptBuilderEdgeCases:
memory.episodic.add_background_event("download_complete", {"name": "Movie.mkv"})
memory.episodic.add_background_event("new_files", {"count": 5})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -223,7 +224,7 @@ class TestPromptBuilderEdgeCases:
# Events
memory.episodic.add_background_event("event", {})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -244,7 +245,7 @@ class TestPromptBuilderEdgeCases:
memory.ltm.set_config("key", {"nested": [1, 2, 3]})
memory.stm.set_entity("complex", {"a": {"b": {"c": "d"}}})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
prompt = builder.build_system_prompt()
@@ -266,7 +267,7 @@ class TestFormatToolsDescriptionEdgeCases:
def test_format_with_complex_parameters(self, memory):
"""Should format complex parameter schemas."""
from agent.registry import Tool
from alfred.agent.registry import Tool
tools = {
"complex_tool": Tool(
@@ -306,7 +307,7 @@ class TestFormatEpisodicContextEdgeCases:
"""Should handle empty search query."""
memory.episodic.store_search_results("", [{"name": "Result"}])
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory)
@@ -324,7 +325,7 @@ class TestFormatEpisodicContextEdgeCases:
],
)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory)
@@ -336,7 +337,7 @@ class TestFormatEpisodicContextEdgeCases:
"""Should handle download without progress."""
memory.episodic.add_active_download({"task_id": "1", "name": "Test"})
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_episodic_context(memory)
@@ -355,7 +356,7 @@ class TestFormatStmContextEdgeCases:
"stage": "started",
}
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)
@@ -366,7 +367,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle workflow with None target."""
memory.stm.start_workflow("download", None)
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
try:
@@ -380,7 +381,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle empty topic."""
memory.stm.set_topic("")
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)
@@ -392,7 +393,7 @@ class TestFormatStmContextEdgeCases:
"""Should handle entities containing JSON strings."""
memory.stm.set_entity("json_string", '{"key": "value"}')
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
context = builder._format_stm_context(memory)

View File

@@ -4,8 +4,9 @@ import inspect
import pytest
from agent.prompts import PromptBuilder
from agent.registry import Tool, _create_tool_from_function, make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import Tool, _create_tool_from_function, make_tools
from alfred.settings import settings
class TestToolSpecFormat:
@@ -13,7 +14,7 @@ class TestToolSpecFormat:
def test_tool_spec_format_is_openai_compatible(self):
"""CRITICAL: Verify tool specs are OpenAI-compatible."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -23,9 +24,9 @@ class TestToolSpecFormat:
for spec in specs:
# OpenAI format requires these fields
assert (
spec["type"] == "function"
), f"Tool type must be 'function', got {spec.get('type')}"
assert spec["type"] == "function", (
f"Tool type must be 'function', got {spec.get('type')}"
)
assert "function" in spec, "Tool spec missing 'function' key"
func = spec["function"]
@@ -56,13 +57,13 @@ class TestToolSpecFormat:
# Verify required vs optional
assert "name" in tool.parameters["required"], "name should be required"
assert "age" in tool.parameters["required"], "age should be required"
assert (
"active" not in tool.parameters["required"]
), "active has default, should not be required"
assert "active" not in tool.parameters["required"], (
"active has default, should not be required"
)
def test_all_registered_tools_are_callable(self):
"""CRITICAL: Verify all registered tools are actually callable."""
tools = make_tools()
tools = make_tools(settings)
assert len(tools) > 0, "No tools registered"
@@ -78,7 +79,7 @@ class TestToolSpecFormat:
def test_tools_spec_contains_all_registered_tools(self):
"""CRITICAL: Verify build_tools_spec() returns all registered tools."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -119,7 +120,7 @@ class TestToolSpecFormat:
def test_tool_parameters_have_descriptions(self):
"""Verify all tool parameters have descriptions."""
tools = make_tools()
tools = make_tools(settings)
builder = PromptBuilder(tools)
specs = builder.build_tools_spec()
@@ -128,9 +129,9 @@ class TestToolSpecFormat:
properties = params.get("properties", {})
for param_name, param_spec in properties.items():
assert (
"description" in param_spec
), f"Parameter {param_name} in {spec['function']['name']} missing description"
assert "description" in param_spec, (
f"Parameter {param_name} in {spec['function']['name']} missing description"
)
def test_required_parameters_are_marked_correctly(self):
"""Verify required parameters are correctly identified."""
@@ -150,28 +151,28 @@ class TestToolRegistry:
def test_make_tools_returns_dict(self):
"""Verify make_tools returns a dictionary."""
tools = make_tools()
tools = make_tools(settings)
assert isinstance(tools, dict)
assert len(tools) > 0
def test_all_tools_have_unique_names(self):
"""Verify all tool names are unique."""
tools = make_tools()
tools = make_tools(settings)
names = [tool.name for tool in tools.values()]
assert len(names) == len(set(names)), "Duplicate tool names found"
def test_tool_names_match_dict_keys(self):
"""Verify tool names match their dictionary keys."""
tools = make_tools()
tools = make_tools(settings)
for key, tool in tools.items():
assert key == tool.name, f"Key {key} doesn't match tool name {tool.name}"
def test_expected_tools_are_registered(self):
"""Verify all expected tools are registered."""
tools = make_tools()
tools = make_tools(settings)
expected_tools = [
"set_path_for_folder",
@@ -189,7 +190,7 @@ class TestToolRegistry:
def test_tool_functions_are_valid(self):
"""Verify all tool functions are properly structured."""
tools = make_tools()
tools = make_tools(settings)
# Verify structure without calling functions
# (calling would require full setup with memory, clients, etc.)

View File

@@ -2,7 +2,8 @@
import pytest
from agent.registry import Tool, make_tools
from alfred.agent.registry import Tool, make_tools
from alfred.settings import settings
class TestToolEdgeCases:
@@ -140,13 +141,13 @@ class TestMakeToolsEdgeCases:
def test_make_tools_returns_dict(self, memory):
"""Should return dictionary of tools."""
tools = make_tools()
tools = make_tools(settings)
assert isinstance(tools, dict)
def test_make_tools_all_tools_have_required_fields(self, memory):
"""Should have all required fields for each tool."""
tools = make_tools()
tools = make_tools(settings)
for name, tool in tools.items():
assert tool.name == name
@@ -157,14 +158,14 @@ class TestMakeToolsEdgeCases:
def test_make_tools_unique_names(self, memory):
"""Should have unique tool names."""
tools = make_tools()
tools = make_tools(settings)
names = list(tools.keys())
assert len(names) == len(set(names))
def test_make_tools_valid_parameter_schemas(self, memory):
"""Should have valid JSON Schema for parameters."""
tools = make_tools()
tools = make_tools(settings)
for tool in tools.values():
params = tool.parameters
@@ -176,33 +177,33 @@ class TestMakeToolsEdgeCases:
def test_make_tools_required_params_in_properties(self, memory):
"""Should have required params defined in properties."""
tools = make_tools()
tools = make_tools(settings)
for tool in tools.values():
params = tool.parameters
if "required" in params and "properties" in params:
for req in params["required"]:
assert (
req in params["properties"]
), f"Required param {req} not in properties for {tool.name}"
assert req in params["properties"], (
f"Required param {req} not in properties for {tool.name}"
)
def test_make_tools_descriptions_not_empty(self, memory):
"""Should have non-empty descriptions."""
tools = make_tools()
tools = make_tools(settings)
for tool in tools.values():
assert tool.description.strip() != ""
def test_make_tools_funcs_callable(self, memory):
"""Should have callable functions."""
tools = make_tools()
tools = make_tools(settings)
for tool in tools.values():
assert callable(tool.func)
def test_make_tools_expected_tools_present(self, memory):
"""Should have expected tools."""
tools = make_tools()
tools = make_tools(settings)
expected = [
"set_path_for_folder",
@@ -220,14 +221,14 @@ class TestMakeToolsEdgeCases:
def test_make_tools_idempotent(self, memory):
"""Should return same tools on multiple calls."""
tools1 = make_tools()
tools2 = make_tools()
tools1 = make_tools(settings)
tools2 = make_tools(settings)
assert set(tools1.keys()) == set(tools2.keys())
def test_make_tools_parameter_types(self, memory):
"""Should have valid parameter types."""
tools = make_tools()
tools = make_tools(settings)
valid_types = ["string", "integer", "number", "boolean", "array", "object"]
@@ -235,13 +236,13 @@ class TestMakeToolsEdgeCases:
if "properties" in tool.parameters:
for prop_name, prop_schema in tool.parameters["properties"].items():
if "type" in prop_schema:
assert (
prop_schema["type"] in valid_types
), f"Invalid type for {tool.name}.{prop_name}"
assert prop_schema["type"] in valid_types, (
f"Invalid type for {tool.name}.{prop_name}"
)
def test_make_tools_enum_values(self, memory):
"""Should have valid enum values."""
tools = make_tools()
tools = make_tools(settings)
for tool in tools.values():
if "properties" in tool.parameters:
@@ -256,7 +257,7 @@ class TestToolExecution:
def test_tool_returns_dict(self, memory, real_folder):
"""Should return dict from tool execution."""
tools = make_tools()
tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
result = tools["list_folder"].func(folder_type="download")
@@ -265,7 +266,7 @@ class TestToolExecution:
def test_tool_returns_status(self, memory, real_folder):
"""Should return status in result."""
tools = make_tools()
tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
result = tools["list_folder"].func(folder_type="download")
@@ -274,14 +275,14 @@ class TestToolExecution:
def test_tool_handles_missing_args(self, memory):
"""Should handle missing required arguments."""
tools = make_tools()
tools = make_tools(settings)
with pytest.raises(TypeError):
tools["set_path_for_folder"].func() # Missing required args
def test_tool_handles_wrong_type_args(self, memory):
"""Should handle wrong type arguments."""
tools = make_tools()
tools = make_tools(settings)
# Pass wrong type - should either work or raise
try:
@@ -293,7 +294,7 @@ class TestToolExecution:
def test_tool_handles_extra_args(self, memory, real_folder):
"""Should handle extra arguments."""
tools = make_tools()
tools = make_tools(settings)
memory.ltm.set_config("download_folder", str(real_folder["downloads"]))
# Extra args should raise TypeError

View File

@@ -1,13 +1,13 @@
"""Tests for JSON repositories."""
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence.json import (
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence.json import (
JsonMovieRepository,
JsonSubtitleRepository,
JsonTVShowRepository,
@@ -226,13 +226,13 @@ class TestJsonTVShowRepository:
[ShowStatus.ONGOING, ShowStatus.ENDED, ShowStatus.UNKNOWN]
):
show = TVShow(
imdb_id=ImdbId(f"tt{i+1000000:07d}"),
imdb_id=ImdbId(f"tt{i + 1000000:07d}"),
title=f"Show {status.value}",
seasons_count=1,
status=status,
)
repo.save(show)
loaded = repo.find_by_imdb_id(ImdbId(f"tt{i+1000000:07d}"))
loaded = repo.find_by_imdb_id(ImdbId(f"tt{i + 1000000:07d}"))
assert loaded.status == status

View File

@@ -2,14 +2,14 @@
from datetime import datetime
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence.json import (
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence.json import (
JsonMovieRepository,
JsonSubtitleRepository,
JsonTVShowRepository,

View File

@@ -2,8 +2,8 @@
from unittest.mock import Mock, patch
from agent.tools import api as api_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import api as api_tools
from alfred.infrastructure.persistence import get_memory
def create_mock_response(status_code, json_data=None, text=None):
@@ -21,7 +21,7 @@ def create_mock_response(status_code, json_data=None, text=None):
class TestFindMediaImdbId:
"""Tests for find_media_imdb_id tool."""
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_success(self, mock_get, memory):
"""Should return movie info on success."""
@@ -56,7 +56,7 @@ class TestFindMediaImdbId:
# Verify HTTP calls
assert mock_get.call_count == 2
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_stores_in_stm(self, mock_get, memory):
"""Should store result in STM on success."""
@@ -88,7 +88,7 @@ class TestFindMediaImdbId:
assert entity["title"] == "Inception"
assert mem.stm.current_topic == "searching_media"
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_not_found(self, mock_get, memory):
"""Should return error when not found."""
mock_get.return_value = create_mock_response(200, json_data={"results": []})
@@ -98,7 +98,7 @@ class TestFindMediaImdbId:
assert result["status"] == "error"
assert result["error"] == "not_found"
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_does_not_store_on_error(self, mock_get, memory):
"""Should not store in STM on error."""
mock_get.return_value = create_mock_response(200, json_data={"results": []})
@@ -112,7 +112,7 @@ class TestFindMediaImdbId:
class TestFindTorrent:
"""Tests for find_torrent tool."""
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_success(self, mock_post, memory):
"""Should return torrents on success."""
mock_post.return_value = create_mock_response(
@@ -146,7 +146,7 @@ class TestFindTorrent:
payload = mock_post.call_args[1]["json"]
assert payload["query"] == "Inception 1080p"
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_stores_in_episodic(self, mock_post, memory):
"""Should store results in episodic memory."""
mock_post.return_value = create_mock_response(
@@ -171,7 +171,7 @@ class TestFindTorrent:
assert mem.episodic.last_search_results["query"] == "Inception"
assert mem.stm.current_topic == "selecting_torrent"
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_results_have_indexes(self, mock_post, memory):
"""Should add indexes to results."""
mock_post.return_value = create_mock_response(
@@ -211,7 +211,7 @@ class TestFindTorrent:
assert results[1]["index"] == 2
assert results[2]["index"] == 3
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_not_found(self, mock_post, memory):
"""Should return error when no torrents found."""
mock_post.return_value = create_mock_response(200, json_data={"hits": []})
@@ -286,7 +286,7 @@ class TestAddTorrentToQbittorrent:
This is acceptable mocking because we're testing the TOOL logic, not the client.
"""
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_success(self, mock_client, memory):
"""Should add torrent successfully and update memory."""
mock_client.add_torrent.return_value = True
@@ -298,7 +298,7 @@ class TestAddTorrentToQbittorrent:
# Verify client was called correctly
mock_client.add_torrent.assert_called_once_with("magnet:?xt=urn:btih:abc123")
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_adds_to_active_downloads(self, mock_client, memory_with_search_results):
"""Should add to active downloads on success."""
mock_client.add_torrent.return_value = True
@@ -313,7 +313,7 @@ class TestAddTorrentToQbittorrent:
== "Inception.2010.1080p.BluRay.x264"
)
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_sets_topic_and_ends_workflow(self, mock_client, memory):
"""Should set topic and end workflow."""
mock_client.add_torrent.return_value = True
@@ -326,10 +326,10 @@ class TestAddTorrentToQbittorrent:
assert mem.stm.current_topic == "downloading"
assert mem.stm.current_workflow is None
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_error_handling(self, mock_client, memory):
"""Should handle client errors correctly."""
from infrastructure.api.qbittorrent.exceptions import QBittorrentAPIError
from alfred.infrastructure.api.qbittorrent.exceptions import QBittorrentAPIError
mock_client.add_torrent.side_effect = QBittorrentAPIError("Connection failed")
@@ -349,7 +349,7 @@ class TestAddTorrentByIndex:
- Error handling for edge cases
"""
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_success(self, mock_client, memory_with_search_results):
"""Should get torrent by index and add it."""
mock_client.add_torrent.return_value = True
@@ -362,7 +362,7 @@ class TestAddTorrentByIndex:
# Verify correct magnet was extracted and used
mock_client.add_torrent.assert_called_once_with("magnet:?xt=urn:btih:abc123")
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_uses_correct_magnet(self, mock_client, memory_with_search_results):
"""Should extract correct magnet from index."""
mock_client.add_torrent.return_value = True

View File

@@ -4,15 +4,15 @@ from unittest.mock import Mock, patch
import pytest
from agent.tools import api as api_tools
from agent.tools import filesystem as fs_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import api as api_tools
from alfred.agent.tools import filesystem as fs_tools
from alfred.infrastructure.persistence import get_memory
class TestFindTorrentEdgeCases:
"""Edge case tests for find_torrent."""
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_empty_query(self, mock_use_case_class, memory):
"""Should handle empty query."""
mock_response = Mock()
@@ -28,7 +28,7 @@ class TestFindTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_very_long_query(self, mock_use_case_class, memory):
"""Should handle very long query."""
mock_response = Mock()
@@ -47,7 +47,7 @@ class TestFindTorrentEdgeCases:
# Should not crash
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_special_characters_in_query(self, mock_use_case_class, memory):
"""Should handle special characters in query."""
mock_response = Mock()
@@ -65,7 +65,7 @@ class TestFindTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_unicode_query(self, mock_use_case_class, memory):
"""Should handle unicode in query."""
mock_response = Mock()
@@ -82,7 +82,7 @@ class TestFindTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_results_with_missing_fields(self, mock_use_case_class, memory):
"""Should handle results with missing fields."""
mock_response = Mock()
@@ -104,7 +104,7 @@ class TestFindTorrentEdgeCases:
mem = get_memory()
assert len(mem.episodic.last_search_results["results"]) == 2
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_api_timeout(self, mock_use_case_class, memory):
"""Should handle API timeout."""
mock_use_case = Mock()
@@ -157,7 +157,7 @@ class TestGetTorrentByIndexEdgeCases:
class TestAddTorrentEdgeCases:
"""Edge case tests for add_torrent functions."""
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_invalid_magnet_link(self, mock_use_case_class, memory):
"""Should handle invalid magnet link."""
mock_response = Mock()
@@ -173,7 +173,7 @@ class TestAddTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_empty_magnet_link(self, mock_use_case_class, memory):
"""Should handle empty magnet link."""
mock_response = Mock()
@@ -189,7 +189,7 @@ class TestAddTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_very_long_magnet_link(self, mock_use_case_class, memory):
"""Should handle very long magnet link."""
mock_response = Mock()
@@ -203,7 +203,7 @@ class TestAddTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_qbittorrent_connection_refused(self, mock_use_case_class, memory):
"""Should handle qBittorrent connection refused."""
mock_use_case = Mock()
@@ -391,7 +391,7 @@ class TestFilesystemEdgeCases:
class TestFindMediaImdbIdEdgeCases:
"""Edge case tests for find_media_imdb_id."""
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_movie_with_same_name_different_years(self, mock_use_case_class, memory):
"""Should handle movies with same name."""
mock_response = Mock()
@@ -409,7 +409,7 @@ class TestFindMediaImdbIdEdgeCases:
assert result["status"] == "ok"
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_movie_with_special_title(self, mock_use_case_class, memory):
"""Should handle movies with special characters in title."""
mock_response = Mock()
@@ -426,7 +426,7 @@ class TestFindMediaImdbIdEdgeCases:
assert result["status"] == "ok"
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_tv_show_vs_movie(self, mock_use_case_class, memory):
"""Should distinguish TV shows from movies."""
mock_response = Mock()

View File

@@ -4,8 +4,8 @@ from pathlib import Path
import pytest
from agent.tools import filesystem as fs_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import filesystem as fs_tools
from alfred.infrastructure.persistence import get_memory
class TestSetPathForFolder: