10 Commits

51 changed files with 776 additions and 730 deletions

View File

@@ -41,10 +41,6 @@ docs/
*.md
!README.md
# Tests
tests/
pytest.ini
# Data (will be mounted as volumes)
memory_data/
logs/

View File

@@ -2,12 +2,15 @@
# Deepseek API Key (for LLM in alfred)
DEEPSEEK_API_KEY=
OLLAMA_BASE_URL=
OLLAMA_MODEL=
# Alfred Configuration
# LLM Provider (deepseek or ollama)
LLM_PROVIDER=deepseek
# Memory storage directory (inside container)
MEMORY_STORAGE_DIR=/data/memory
MEMORY_STORAGE_DIR=data/memory
# External Services (Optional)
# TMDB API Key (for movie metadata)
@@ -21,3 +24,9 @@ QBITTORRENT_PASSWORD=adminadmin
# Debug Options
DEBUG_LOGGING=false
DEBUG_CONSOLE=false
# Required security keys
JWT_SECRET=
JWT_REFRESH_SECRET=
CREDS_KEY=
CREDS_IV=

6
.gitignore vendored
View File

@@ -59,3 +59,9 @@ Thumbs.db
# Backup files
*.backup
# Application data dir
data/*
# Application logs
logs/*

View File

@@ -59,12 +59,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \
uv pip install --system -e .[dev]; \
fi
COPY alfred/agent/ ./agent/
COPY alfred/application/ ./application/
COPY alfred/domain/ ./domain/
COPY alfred/infrastructure/ ./infrastructure/
COPY alfred/app.py .
COPY tests/ ./tests/
COPY alfred/ ./alfred
COPY tests/ ./tests
# ===========================================
# Stage 3: Runtime
@@ -96,18 +92,14 @@ RUN mkdir -p /data/memory /data/logs \
USER appuser
# Set working directory (owned by appuser)
WORKDIR /home/appuser/app
WORKDIR /home/appuser
# Copy Python packages from builder stage
COPY --from=builder /usr/local/lib/python${PYTHON_VERSION_SHORT}/site-packages /usr/local/lib/python${PYTHON_VERSION_SHORT}/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy application code (already owned by appuser)
COPY --chown=appuser:appuser alfred/agent/ ./agent/
COPY --chown=appuser:appuser alfred/application/ ./application/
COPY --chown=appuser:appuser alfred/domain/ ./domain/
COPY --chown=appuser:appuser alfred/infrastructure/ ./infrastructure/
COPY --chown=appuser:appuser alfred/app.py .
COPY --chown=appuser:appuser alfred/ ./alfred
# Create volumes for persistent data
VOLUME ["/data/memory", "/data/logs"]

404
Makefile
View File

@@ -1,248 +1,155 @@
.POSIX:
.SUFFIXES:
.DEFAULT_GOAL := help
# --- SETTINGS ---
CORE_DIR = alfred
IMAGE_NAME = alfred_media_organizer
# renovate: datasource=docker depName=python
PYTHON_VERSION = $(shell grep "python" $(CORE_DIR)/pyproject.toml | head -n 1 | sed -E 's/.*[=<>^~"]+ *([0-9]+\.[0-9]+(\.[0-9]+)?).*/\1/')
PYTHON_VERSION_SHORT = $(shell echo $(PYTHON_VERSION) | cut -d. -f1,2)
# Change to 'uv' when ready.
RUNNER ?= poetry
SERVICE_NAME = alfred
# --- Config ---
export IMAGE_NAME := alfred_media_organizer
export LIBRECHAT_VERSION := v0.8.1
export PYTHON_VERSION := 3.14.2
export PYTHON_VERSION_SHORT := 3.14
export RAG_VERSION := v0.7.0
export RUNNER := poetry
export SERVICE_NAME := alfred
export IMAGE_NAME
export PYTHON_VERSION
export PYTHON_VERSION_SHORT
export RUNNER
# --- ADAPTERS ---
# UV uses "sync", Poetry uses "install". Both install DEV deps by default.
INSTALL_CMD = $(if $(filter uv,$(RUNNER)),sync,install)
# --- MACROS ---
ARGS = $(filter-out $@,$(MAKECMDGOALS))
BUMP_CMD = cd $(CORE_DIR) && $(RUNNER) run bump-my-version bump
COMPOSE_CMD = docker-compose
DOCKER_CMD = docker build \
# --- Commands ---
CLI := python3 cli.py
DOCKER_COMPOSE := docker compose
DOCKER_BUILD := docker build \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
--build-arg RUNNER=$(RUNNER) \
-f $(CORE_DIR)/Dockerfile \
-t $(IMAGE_NAME):latest .
--build-arg RUNNER=$(RUNNER)
RUNNER_ADD = cd $(CORE_DIR) && $(RUNNER) add
RUNNER_HOOKS = cd $(CORE_DIR) && $(RUNNER) run pre-commit install -c ../.pre-commit-config.yaml
RUNNER_INSTALL = cd $(CORE_DIR) && $(RUNNER) $(INSTALL_CMD)
RUNNER_RUN = cd $(CORE_DIR) && $(RUNNER) run
RUNNER_UPDATE = cd $(CORE_DIR) && $(RUNNER) update
# --- Phony ---
.PHONY: setup status check
.PHONY: up down restart logs ps shell
.PHONY: build build-test
.PHONY: install update install-hooks
.PHONY: test coverage lint format clean prune
.PHONY: major minor patch
.PHONY: help
# --- STYLES ---
B = \033[1m
G = \033[32m
T = \033[36m
R = \033[0m
# --- Setup ---
setup:
@echo "Initializing environment..."
@$(CLI) setup \
&& echo "✓ Environment ready" \
|| (echo "✗ Setup failed" && exit 1)
# --- TARGETS ---
.PHONY: add build build-test check-docker check-runner clean coverage down format help init-dotenv install install-hooks lint logs major minor patch prune ps python-version restart run shell test up update _check_branch _ci-dump-config _ci-run-tests _push_tag
status:
@$(CLI) status
# Catch-all for args
%:
@:
check:
@$(CLI) check
add: check-runner
@echo "$(T) Adding dependency ($(RUNNER)): $(ARGS)$(R)"
$(RUNNER_ADD) $(ARGS)
# --- Docker ---
up: check
@echo "Starting containers..."
@$(DOCKER_COMPOSE) up -d --remove-orphans \
&& echo "✓ Containers started" \
|| (echo "✗ Failed to start containers" && exit 1)
build: check-docker
@echo "$(T)🐳 Building Docker image...$(R)"
$(DOCKER_CMD)
@echo "✅ Image $(IMAGE_NAME):latest ready."
down:
@echo "Stopping containers..."
@$(DOCKER_COMPOSE) down \
&& echo "✓ Containers stopped" \
|| (echo "✗ Failed to stop containers" && exit 1)
build-test: check-docker
@echo "$(T)🐳 Building test image (with dev deps)...$(R)"
docker build \
--build-arg RUNNER=$(RUNNER) \
--build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
--build-arg PYTHON_VERSION_SHORT=$(PYTHON_VERSION_SHORT) \
-f $(CORE_DIR)/Dockerfile \
--target test \
-t $(IMAGE_NAME):test .
@echo "✅ Test image $(IMAGE_NAME):test ready."
restart:
@echo "Restarting containers..."
@$(DOCKER_COMPOSE) restart \
&& echo "✓ Containers restarted" \
|| (echo "✗ Failed to restart containers" && exit 1)
check-docker:
@command -v docker >/dev/null 2>&1 || { echo "$(R)❌ Docker not installed$(R)"; exit 1; }
@docker info >/dev/null 2>&1 || { echo "$(R)❌ Docker daemon not running$(R)"; exit 1; }
logs:
@echo "Following logs (Ctrl+C to exit)..."
@$(DOCKER_COMPOSE) logs -f
check-runner:
@command -v $(RUNNER) >/dev/null 2>&1 || { echo "$(R)$(RUNNER) not installed$(R)"; exit 1; }
ps:
@echo "Container status:"
@$(DOCKER_COMPOSE) ps
shell:
@echo "Opening shell in $(SERVICE_NAME)..."
@$(DOCKER_COMPOSE) exec $(SERVICE_NAME) /bin/bash
# --- Build ---
build: check
@echo "Building image $(IMAGE_NAME):latest ..."
@$(DOCKER_BUILD) -t $(IMAGE_NAME):latest . \
&& echo "✓ Build complete" \
|| (echo "✗ Build failed" && exit 1)
build-test: check
@echo "Building test image $(IMAGE_NAME):test..."
@$(DOCKER_BUILD) --target test -t $(IMAGE_NAME):test . \
&& echo "✓ Test image built" \
|| (echo "✗ Build failed" && exit 1)
# --- Dependencies ---
install:
@echo "Installing dependencies with $(RUNNER)..."
@$(RUNNER) install \
&& echo "✓ Dependencies installed" \
|| (echo "✗ Installation failed" && exit 1)
update:
@echo "Updating dependencies with $(RUNNER)..."
@$(RUNNER) update \
&& echo "✓ Dependencies updated" \
|| (echo "✗ Update failed" && exit 1)
install-hooks:
@echo "Installing pre-commit hooks..."
@$(RUNNER) run pre-commit install \
&& echo "✓ Hooks installed" \
|| (echo "✗ Hook installation failed" && exit 1)
# --- Quality ---
test:
@echo "Running tests..."
@$(RUNNER) run pytest \
&& echo "✓ Tests passed" \
|| (echo "✗ Tests failed" && exit 1)
coverage:
@echo "Running tests with coverage..."
@$(RUNNER) run pytest --cov=. --cov-report=html --cov-report=term \
&& echo "✓ Coverage report generated" \
|| (echo "✗ Coverage failed" && exit 1)
lint:
@echo "Linting code..."
@$(RUNNER) run ruff check --fix . \
&& echo "✓ Linting complete" \
|| (echo "✗ Linting failed" && exit 1)
format:
@echo "Formatting code..."
@$(RUNNER) run ruff format . && $(RUNNER) run ruff check --fix . \
&& echo "✓ Code formatted" \
|| (echo "✗ Formatting failed" && exit 1)
clean:
@echo "$(T)🧹 Cleaning caches...$(R)"
cd $(CORE_DIR) && rm -rf .ruff_cache __pycache__ .pytest_cache
find $(CORE_DIR) -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
find $(CORE_DIR) -type d -name ".pytest_cache" -exec rm -rf {} + 2>/dev/null || true
find $(CORE_DIR) -type f -name "*.pyc" -delete 2>/dev/null || true
@echo "✅ Caches cleaned."
@echo "Cleaning build artifacts..."
@rm -rf .ruff_cache __pycache__ .pytest_cache htmlcov .coverage
@find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true
@echo "✓ Cleanup complete"
coverage: check-runner
@echo "$(T)📊 Running tests with coverage...$(R)"
$(RUNNER_RUN) pytest --cov=. --cov-report=html --cov-report=term $(ARGS)
@echo "✅ Report generated in htmlcov/"
prune:
@echo "Pruning Docker system..."
@docker system prune -af \
&& echo "✓ Docker pruned" \
|| (echo "✗ Prune failed" && exit 1)
down: check-docker
@echo "$(T)🛑 Stopping containers...$(R)"
$(COMPOSE_CMD) down
@echo "✅ System stopped."
# --- Versioning ---
major minor patch: _check-main
@echo "Bumping $@ version..."
@$(RUNNER) run bump-my-version bump $@ \
&& echo "✓ Version bumped" \
|| (echo "✗ Version bump failed" && exit 1)
format: check-runner
@echo "$(T)✨ Formatting with Ruff...$(R)"
$(RUNNER_RUN) ruff format .
$(RUNNER_RUN) ruff check --fix .
@echo "✅ Code cleaned."
help:
@echo "$(B)Available commands:$(R)"
@echo ""
@echo "$(G)Setup:$(R)"
@echo " $(T)check-docker $(R) Verify Docker is installed and running."
@echo " $(T)check-runner $(R) Verify package manager ($(RUNNER))."
@echo " $(T)init-dotenv $(R) Create .env from .env.example with generated secrets."
@echo " $(T)install $(R) Install ALL dependencies (Prod + Dev)."
@echo " $(T)install-hooks $(R) Install git pre-commit hooks."
@echo ""
@echo "$(G)Docker:$(R)"
@echo " $(T)build $(R) Build the docker image (production)."
@echo " $(T)build-test $(R) Build the docker image (with dev deps for testing)."
@echo " $(T)down $(R) Stop and remove containers."
@echo " $(T)logs $(R) Follow logs."
@echo " $(T)prune $(R) Clean Docker system."
@echo " $(T)ps $(R) Show container status."
@echo " $(T)restart $(R) Restart all containers."
@echo " $(T)shell $(R) Open shell in container."
@echo " $(T)up $(R) Start the agent."
@echo ""
@echo "$(G)Development:$(R)"
@echo " $(T)add ... $(R) Add dependency (use --group dev or --dev if needed)."
@echo " $(T)clean $(R) Clean caches."
@echo " $(T)coverage $(R) Run tests with coverage."
@echo " $(T)format $(R) Format code (Ruff)."
@echo " $(T)lint $(R) Lint code without fixing."
@echo " $(T)test ... $(R) Run tests (local with $(RUNNER))."
@echo " $(T)update $(R) Update dependencies."
@echo ""
@echo "$(G)Versioning:$(R)"
@echo " $(T)major/minor/patch $(R) Bump version and push tag (triggers CI/CD)."
init-dotenv:
@echo "$(T)🔑 Initializing .env file...$(R)"
@if [ -f .env ]; then \
echo "$(R)⚠️ .env already exists. Skipping.$(R)"; \
exit 0; \
fi
@if [ ! -f .env.example ]; then \
echo "$(R)❌ .env.example not found$(R)"; \
exit 1; \
fi
@if ! command -v openssl >/dev/null 2>&1; then \
echo "$(R)❌ openssl not found. Please install it first.$(R)"; \
exit 1; \
fi
@echo "$(T) → Copying .env.example...$(R)"
@cp .env.example .env
@echo "$(T) → Generating secrets...$(R)"
@sed -i.bak "s|JWT_SECRET=.*|JWT_SECRET=$$(openssl rand -base64 32)|" .env
@sed -i.bak "s|JWT_REFRESH_SECRET=.*|JWT_REFRESH_SECRET=$$(openssl rand -base64 32)|" .env
@sed -i.bak "s|CREDS_KEY=.*|CREDS_KEY=$$(openssl rand -hex 16)|" .env
@sed -i.bak "s|CREDS_IV=.*|CREDS_IV=$$(openssl rand -hex 8)|" .env
@sed -i.bak "s|MEILI_MASTER_KEY=.*|MEILI_MASTER_KEY=$$(openssl rand -base64 32)|" .env
@rm -f .env.bak
@echo "$(G)✅ .env created with generated secrets!$(R)"
@echo "$(T)⚠️ Don't forget to add your API keys:$(R)"
@echo " - OPENAI_API_KEY"
@echo " - DEEPSEEK_API_KEY"
@echo " - TMDB_API_KEY (optional)"
install: check-runner
@echo "$(T)📦 Installing FULL environment ($(RUNNER))...$(R)"
$(RUNNER_INSTALL)
@echo "✅ Environment ready (Prod + Dev)."
install-hooks: check-runner
@echo "$(T)🔧 Installing hooks...$(R)"
$(RUNNER_HOOKS)
@echo "✅ Hooks ready."
lint: check-runner
@echo "$(T)🔍 Linting code...$(R)"
$(RUNNER_RUN) ruff check .
logs: check-docker
@echo "$(T)📋 Following logs...$(R)"
$(COMPOSE_CMD) logs -f
major: _check_branch
@echo "$(T)💥 Bumping major...$(R)"
SKIP=all $(BUMP_CMD) major
@$(MAKE) -s _push_tag
minor: _check_branch
@echo "$(T)✨ Bumping minor...$(R)"
SKIP=all $(BUMP_CMD) minor
@$(MAKE) -s _push_tag
patch: _check_branch
@echo "$(T)🚀 Bumping patch...$(R)"
SKIP=all $(BUMP_CMD) patch
@$(MAKE) -s _push_tag
prune: check-docker
@echo "$(T)🗑️ Pruning Docker resources...$(R)"
docker system prune -af
@echo "✅ Docker cleaned."
ps: check-docker
@echo "$(T)📋 Container status:$(R)"
@$(COMPOSE_CMD) ps
python-version:
@echo "🔍 Reading pyproject.toml..."
@echo "✅ Python version : $(PYTHON_VERSION)"
@echo " Sera utilisé pour : FROM python:$(PYTHON_VERSION)-slim"
restart: check-docker
@echo "$(T)🔄 Restarting containers...$(R)"
$(COMPOSE_CMD) restart
@echo "✅ Containers restarted."
run: check-runner
$(RUNNER_RUN) $(ARGS)
shell: check-docker
@echo "$(T)🐚 Opening shell in $(SERVICE_NAME)...$(R)"
$(COMPOSE_CMD) exec $(SERVICE_NAME) /bin/sh
test: check-runner
@echo "$(T)🧪 Running tests...$(R)"
$(RUNNER_RUN) pytest $(ARGS)
up: check-docker
@echo "$(T)🚀 Starting Agent Media...$(R)"
$(COMPOSE_CMD) up -d
@echo "✅ System is up."
update: check-runner
@echo "$(T)🔄 Updating dependencies...$(R)"
$(RUNNER_UPDATE)
@echo "✅ All packages up to date."
_check_branch:
@curr=$$(git rev-parse --abbrev-ref HEAD); \
if [ "$$curr" != "main" ]; then \
echo "❌ Error: not on the main branch"; exit 1; \
fi
@echo "Pushing tags..."
@git push --tags \
&& echo "✓ Tags pushed" \
|| (echo "✗ Push failed" && exit 1)
_ci-dump-config:
@echo "image_name=$(IMAGE_NAME)"
@@ -251,15 +158,46 @@ _ci-dump-config:
@echo "runner=$(RUNNER)"
@echo "service_name=$(SERVICE_NAME)"
_ci-run-tests: build-test
@echo "$(T)🧪 Running tests in Docker...$(R)"
_ci-run-tests:
@echo "Running tests in Docker..."
docker run --rm \
-e DEEPSEEK_API_KEY \
-e TMDB_API_KEY \
-e QBITTORRENT_URL \
$(IMAGE_NAME):test pytest
@echo " Tests passed."
@echo " Tests passed."
_push_tag:
@echo "$(T)📦 Pushing tag...$(R)"
git push --tags
@echo "✅ Tag pushed. Check CI for build status."
_check-main:
@test "$$(git rev-parse --abbrev-ref HEAD)" = "main" \
|| (echo "✗ ERROR: Not on main branch" && exit 1)
# --- Help ---
help:
@echo "Usage: make [target]"
@echo ""
@echo "Setup:"
@echo " setup Initialize .env"
@echo " status Show project status"
@echo ""
@echo "Docker:"
@echo " up Start containers"
@echo " down Stop containers"
@echo " restart Restart containers"
@echo " logs Follow logs"
@echo " ps Container status"
@echo " shell Shell into container"
@echo " build Build image"
@echo ""
@echo "Dev:"
@echo " install Install dependencies"
@echo " update Update dependencies"
@echo " test Run tests"
@echo " coverage Run tests with coverage"
@echo " lint Lint code"
@echo " format Format code"
@echo " clean Clean artifacts"
@echo ""
@echo "Release:"
@echo " patch Bump patch version"
@echo " minor Bump minor version"
@echo " major Bump major version"

View File

@@ -5,7 +5,7 @@ import logging
from collections.abc import AsyncGenerator
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from .config import settings
from .prompts import PromptBuilder

View File

@@ -3,7 +3,7 @@
import json
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from .registry import Tool

View File

@@ -3,12 +3,12 @@
import logging
from typing import Any
from application.movies import SearchMovieUseCase
from application.torrents import AddTorrentUseCase, SearchTorrentsUseCase
from infrastructure.api.knaben import knaben_client
from infrastructure.api.qbittorrent import qbittorrent_client
from infrastructure.api.tmdb import tmdb_client
from infrastructure.persistence import get_memory
from alfred.application.movies import SearchMovieUseCase
from alfred.application.torrents import AddTorrentUseCase, SearchTorrentsUseCase
from alfred.infrastructure.api.knaben import knaben_client
from alfred.infrastructure.api.qbittorrent import qbittorrent_client
from alfred.infrastructure.api.tmdb import tmdb_client
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -2,8 +2,8 @@
from typing import Any
from application.filesystem import ListFolderUseCase, SetFolderPathUseCase
from infrastructure.filesystem import FileManager
from alfred.application.filesystem import ListFolderUseCase, SetFolderPathUseCase
from alfred.infrastructure.filesystem import FileManager
def set_path_for_folder(folder_name: str, path_value: str) -> dict[str, Any]:

View File

@@ -3,7 +3,7 @@
import logging
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -5,19 +5,18 @@ import logging
import os
import time
import uuid
from typing import Any
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field, validator
from typing import Any
from agent.agent import Agent
from agent.config import settings
from agent.llm.deepseek import DeepSeekClient
from agent.llm.exceptions import LLMAPIError, LLMConfigurationError
from agent.llm.ollama import OllamaClient
from infrastructure.persistence import get_memory, init_memory
from alfred.agent.agent import Agent
from alfred.agent.config import settings
from alfred.agent.llm.deepseek import DeepSeekClient
from alfred.agent.llm.exceptions import LLMAPIError, LLMConfigurationError
from alfred.agent.llm.ollama import OllamaClient
from alfred.infrastructure.persistence import get_memory, init_memory
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.filesystem import FileManager
from alfred.infrastructure.filesystem import FileManager
from .dto import ListFolderResponse

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.filesystem import FileManager
from alfred.infrastructure.filesystem import FileManager
from .dto import SetFolderPathResponse

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.api.tmdb import (
from alfred.infrastructure.api.tmdb import (
TMDBAPIError,
TMDBClient,
TMDBConfigurationError,

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.api.qbittorrent import (
from alfred.infrastructure.api.qbittorrent import (
QBittorrentAPIError,
QBittorrentAuthError,
QBittorrentClient,

View File

@@ -2,7 +2,7 @@
import logging
from infrastructure.api.knaben import KnabenAPIError, KnabenClient, KnabenNotFoundError
from alfred.infrastructure.api.knaben import KnabenAPIError, KnabenClient, KnabenNotFoundError
from .dto import SearchTorrentsResponse

View File

@@ -1,13 +1,12 @@
"""Knaben torrent search API client."""
import logging
from typing import Any
import logging
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from typing import Any
from agent.config import Settings, settings
from alfred.agent.config import Settings, settings
from .dto import TorrentResult
from .exceptions import KnabenAPIError, KnabenNotFoundError

View File

@@ -1,13 +1,11 @@
"""qBittorrent Web API client."""
import logging
from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from typing import Any
from agent.config import Settings, settings
from alfred.agent.config import Settings, settings
from .dto import TorrentInfo
from .exceptions import QBittorrentAPIError, QBittorrentAuthError

View File

@@ -1,13 +1,12 @@
"""TMDB (The Movie Database) API client."""
import logging
from typing import Any
import requests
from requests.exceptions import HTTPError, RequestException, Timeout
from typing import Any
from agent.config import Settings, settings
from alfred.agent.config import Settings, settings
from .dto import MediaResult
from .exceptions import (
TMDBAPIError,

View File

@@ -7,7 +7,7 @@ from enum import Enum
from pathlib import Path
from typing import Any
from infrastructure.persistence import get_memory
from alfred.infrastructure.persistence import get_memory
from .exceptions import PathTraversalError

View File

@@ -3,9 +3,9 @@
import logging
from pathlib import Path
from domain.movies.entities import Movie
from domain.tv_shows.entities import Episode, Season, TVShow
from domain.tv_shows.value_objects import SeasonNumber
from alfred.domain.movies.entities import Movie
from alfred.domain.tv_shows.entities import Episode, Season, TVShow
from alfred.domain.tv_shows.value_objects import SeasonNumber
logger = logging.getLogger(__name__)

View File

@@ -6,7 +6,7 @@ without passing it explicitly through all function calls.
Usage:
# At application startup
from infrastructure.persistence import init_memory, get_memory
from alfred.infrastructure.persistence import init_memory, get_memory
init_memory("memory_data")

View File

@@ -4,11 +4,11 @@ import logging
from datetime import datetime
from typing import Any
from domain.movies.entities import Movie
from domain.movies.repositories import MovieRepository
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from infrastructure.persistence import get_memory
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.repositories import MovieRepository
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -3,11 +3,11 @@
import logging
from typing import Any
from domain.shared.value_objects import FilePath, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.repositories import SubtitleRepository
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from infrastructure.persistence import get_memory
from alfred.domain.shared.value_objects import FilePath, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.repositories import SubtitleRepository
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

View File

@@ -4,11 +4,11 @@ import logging
from datetime import datetime
from typing import Any
from domain.shared.value_objects import ImdbId
from domain.tv_shows.entities import TVShow
from domain.tv_shows.repositories import TVShowRepository
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence import get_memory
from alfred.domain.shared.value_objects import ImdbId
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.repositories import TVShowRepository
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence import get_memory
logger = logging.getLogger(__name__)

232
cli.py Normal file
View File

@@ -0,0 +1,232 @@
#!/usr/bin/env python3
import os
import secrets
import shutil
import subprocess
import sys
from datetime import datetime
from enum import StrEnum
from pathlib import Path
from typing import NoReturn
# Environment variables the user must supply interactively when missing.
REQUIRED_VARS = ["DEEPSEEK_API_KEY", "TMDB_API_KEY", "QBITTORRENT_URL"]
# Size in bytes
# NOTE(review): secrets.token_hex(n) emits 2*n hex characters, so these
# entries produce 64/64/64/32-character values — confirm the consumers
# (e.g. LibreChat CREDS_KEY/CREDS_IV) expect those lengths.
KEYS_TO_GENERATE = {
    "JWT_SECRET": 32,
    "JWT_REFRESH_SECRET": 32,
    "CREDS_KEY": 32,
    "CREDS_IV": 16,
}
class Style(StrEnum):
    """ANSI escape codes for styling terminal output.

    Usage: f"{Style.RED}Error{Style.RESET}"
    """

    RESET = "\033[0m"
    BOLD = "\033[1m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    CYAN = "\033[36m"
    DIM = "\033[2m"
# Only for terminals and if not specified otherwise
USE_COLORS = sys.stdout.isatty() and "NO_COLOR" not in os.environ


def styled(text: str, color_code: str) -> str:
    """Wrap *text* in the given ANSI code when the terminal supports color."""
    if not USE_COLORS:
        return text
    return f"{color_code}{text}{Style.RESET}"
def log(msg: str, color: str | None = None, prefix="") -> None:
    """Print *msg*, optionally colorized, preceded by *prefix*."""
    if color:
        msg = styled(msg, color)
    print(prefix + msg)
def error_exit(msg: str) -> NoReturn:
    """Report *msg* in red and terminate the process with status 1."""
    log(msg, Style.RED)
    sys.exit(1)
def is_docker_running() -> bool:
    """Return True when the Docker CLI exists and the daemon answers `docker info`.

    Fixes two issues in the original: a mangled docstring, and the fact that a
    missing `docker` binary called error_exit() — terminating the whole process
    instead of reporting "not running", which made status()'s stopped branch
    unreachable for that case.
    """
    # Missing binary: the daemon cannot be running; report it as a boolean.
    if shutil.which("docker") is None:
        return False
    result = subprocess.run(
        ["docker", "info"],
        # Redirect stdout/stderr to keep output clean on success
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        # Prevent exception being raised; a non-zero exit just means "down"
        check=False,
    )
    return result.returncode == 0
def parse_env(content: str) -> dict[str, str]:
    """Extract KEY=VALUE pairs from dotenv text.

    Blank lines, comment lines and lines without '=' are skipped; keys and
    values are whitespace-trimmed. Only the first '=' splits key from value.
    """
    pairs: dict[str, str] = {}
    for raw in content.splitlines():
        stripped = raw.strip()
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            continue
        key, _, value = stripped.partition("=")
        pairs[key.strip()] = value.strip()
    return pairs
def dump_env(content: str, data: dict[str, str]) -> str:
    """Render *content* with values from *data* substituted in place.

    Non-assignment lines (blank, comment, no '=') pass through untouched, an
    inline " #comment" after a value is preserved, and any key from *data*
    not present in *content* is appended at the end. Always ends with '\\n'.
    """
    rendered: list[str] = []
    seen: set[str] = set()
    for raw in content.splitlines():
        stripped = raw.strip()
        # Pass-through: empty lines, comments, and lines without an assignment.
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            rendered.append(raw)
            continue
        left, right = raw.split("=", 1)
        name = left.strip()
        if name not in data:
            # Key is not being updated — keep the original line verbatim.
            rendered.append(raw)
            continue
        seen.add(name)
        value = data[name]
        if " #" in right:
            # Keep the trailing inline comment after the new value.
            _, trailing = right.split(" #", 1)
            rendered.append(f"{left}={value} #{trailing}")
        else:
            rendered.append(f"{left}={value}")
    # Append keys that never appeared in the original content.
    for name, value in data.items():
        if name not in seen:
            rendered.append(f"{name}={value}")
    return "\n".join(rendered) + "\n"
def ensure_env() -> None:
    """Manage .env lifecycle: creation, secret generation, prompts.

    Reads .env when present, otherwise seeds from .env.example (raises
    FileNotFoundError if neither exists). Generates any missing secret keys,
    interactively prompts for missing required API values, then — only if
    something changed — backs up a pre-existing .env and rewrites it.
    """
    env_path = Path(".env")
    env_example_path = Path(".env.example")
    updated: bool = False
    # Read .env if exists
    if env_path.exists():
        content: str = env_path.read_text(encoding="utf-8")
    else:
        # First run: start from the template file.
        content: str = env_example_path.read_text(encoding="utf-8")
    existing_vars: dict[str, str] = parse_env(content)
    # Generate missing secrets
    for key, length in KEYS_TO_GENERATE.items():
        if key not in existing_vars or not existing_vars[key]:
            log(f"Generating {key}...", Style.GREEN, prefix=" ")
            # token_hex(length) emits 2*length hex characters.
            existing_vars[key] = secrets.token_hex(length)
            updated = True
    log("Done", Style.GREEN, prefix=" ")
    # Prompt for missing mandatory keys
    color = Style.YELLOW if USE_COLORS else ""
    reset = Style.RESET if USE_COLORS else ""
    for key in REQUIRED_VARS:
        if key not in existing_vars or not existing_vars[key]:
            try:
                existing_vars[key] = input(
                    f" {color}Enter value for {key}: {reset}"
                ).strip()
                updated = True
            except KeyboardInterrupt:
                # Newline so the abort message starts on a clean line.
                print()
                error_exit("Aborted by user.")
    # Write to disk
    if updated:
        # But backup original first
        if env_path.exists():
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_path = Path(f"{env_path}.{timestamp}.bak")
            shutil.copy(env_path, backup_path)
            log(f"Backup created: {backup_path}", Style.DIM)
        new_content = dump_env(content, existing_vars)
        env_path.write_text(new_content, encoding="utf-8")
        log(".env updated successfully.", Style.GREEN)
    else:
        log("Configuration is up to date.", Style.GREEN)
def setup() -> None:
    """Orchestrate initialization: verify Docker is reachable, then ensure .env.

    The original discarded the boolean from is_docker_running(), so an
    unreachable daemon passed the check silently; now it aborts with a clear
    message before touching the environment file.
    """
    if not is_docker_running():
        error_exit("Docker daemon is not running.")
    ensure_env()
    # TODO: create data/log directories here if needed (original "#mkdir" note).
def status() -> None:
    """Display simple dashboard: Docker daemon state and .env presence."""
    # Apply bold styling to the title only when colors are enabled.
    bold = Style.BOLD if USE_COLORS else ""
    reset = Style.RESET if USE_COLORS else ""
    print(f"\n{bold}ALFRED STATUS{reset}")
    print(f"{bold}==============={reset}\n")
    # Docker Check
    docker_mark = (
        styled("✓ running", Style.GREEN)
        if is_docker_running()
        else styled("✗ stopped", Style.RED)
    )
    print(f" Docker: {docker_mark}")
    # Env Check
    env_mark = (
        styled("✓ present", Style.GREEN)
        if Path(".env").exists()
        else styled("✗ missing", Style.RED)
    )
    print(f" .env: {env_mark}")
    print("")
def check() -> None:
    """Silent check for prerequisites (used by 'make up').

    Currently an alias for setup(), which is not actually silent — it may
    generate secrets or prompt interactively. NOTE(review): confirm whether
    a truly non-interactive variant is intended here.
    """
    setup()
def main() -> None:
    """CLI entry point: dispatch on the first positional argument."""
    if len(sys.argv) < 2:
        print("Usage: python cli.py [setup|check|status]")
        sys.exit(1)
    command = sys.argv[1]
    # Table-driven dispatch instead of an if/elif chain.
    handlers = {"setup": setup, "check": check, "status": status}
    handler = handlers.get(command)
    if handler is None:
        error_exit(f"Unknown command: {command}")
    handler()


if __name__ == "__main__":
    main()

100
docker-compose.yaml Normal file
View File

@@ -0,0 +1,100 @@
services:
alfred:
container_name: alfred-core
build:
context: .
args:
PYTHON_VERSION: ${PYTHON_VERSION}
PYTHON_VERSION_SHORT: ${PYTHON_VERSION_SHORT}
RUNNER: ${RUNNER}
depends_on:
- librechat
restart: unless-stopped
env_file:
- .env
environment:
# LLM Configuration
LLM_PROVIDER: ${LLM_PROVIDER:-deepseek}
DEEPSEEK_API_KEY: ${DEEPSEEK_API_KEY:-}
# Memory storage
MEMORY_STORAGE_DIR: /data/memory
# External services
TMDB_API_KEY: ${TMDB_API_KEY:-}
QBITTORRENT_URL: ${QBITTORRENT_URL:-}
QBITTORRENT_USERNAME: ${QBITTORRENT_USERNAME:-}
QBITTORRENT_PASSWORD: ${QBITTORRENT_PASSWORD:-}
volumes:
- ./data/memory:/data/memory
- ./logs:/data/logs
# TODO: Development: mount code for hot reload (comment out in production)
# - ./alfred:/app/alfred
librechat:
container_name: alfred-librechat
image: ghcr.io/danny-avila/librechat:${LIBRECHAT_VERSION}
depends_on:
- mongodb
- meilisearch
- rag_api
restart: unless-stopped
env_file:
- .env
environment:
- HOST=0.0.0.0
- MONGO_URI=mongodb://mongodb:27017/LibreChat
- MEILI_HOST=http://meilisearch:7700
- RAG_PORT=${RAG_PORT:-8000}
- RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
ports:
- "${LIBRECHAT_PORT:-3080}:3080"
volumes:
- ./data/librechat/images:/app/client/public/images
- ./data/librechat/uploads:/app/client/uploads
- ./logs:/app/api/logs
# Mount custom endpoint
- ./librechat/manifests:/app/manifests:ro
- ./librechat/librechat.yaml:/app/librechat.yaml:ro
mongodb:
container_name: alfred-mongodb
image: mongo:latest
restart: unless-stopped
volumes:
- ./data/mongo:/data/db
command: mongod --noauth
meilisearch:
container_name: alfred-meilisearch
image: getmeili/meilisearch:v1.12.3
restart: unless-stopped
environment:
- MEILI_NO_ANALYTICS=true
volumes:
- ./data/meili:/meili_data
#profiles: ["meili", "full"]
rag_api:
container_name: alfred-rag
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:${RAG_VERSION}
restart: unless-stopped
environment:
- RAG_PORT=${RAG_PORT:-8000}
ports:
- "${RAG_PORT:-8000}:${RAG_PORT:-8000}"
#profiles: ["rag", "full"]
vectordb:
container_name: alfred-vectordb
image: pgvector/pgvector:0.8.0-pg16-bookworm
restart: unless-stopped
environment:
- POSTGRES_DB=${VECTOR_DB_NAME:-vectordb}
- POSTGRES_USER=${VECTOR_DB_USER:-postgres}
- POSTGRES_PASSWORD=${VECTOR_DB_PASSWORD:-postgres}
ports:
- "${VECTOR_DB_PORT:-5432}:5432"
volumes:
- ./data/vectordb:/var/lib/postgresql/data
#profiles: ["rag", "full"]

View File

@@ -1,202 +0,0 @@
# NOTE(review): the top-level `version` key is obsolete under Compose v2 and
# is ignored by current docker compose.
version: "3.4"
services:
  # Alfred agent backend, exposing an OpenAI-compatible API on :8000.
  alfred:
    build:
      context: .
      args:
        RUNNER: ${RUNNER} # Get it from Makefile
    container_name: alfred
    restart: unless-stopped
    env_file: .env
    ports:
      - "8000:8000"
    volumes:
      # Persistent data volumes (outside container /app)
      - agent-memory:/data/memory
      - agent-logs:/data/logs
      # Development: mount code for hot reload (comment out in production)
      # - ./alfred:/app
    environment:
      # LLM Configuration
      LLM_PROVIDER: ${LLM_PROVIDER:-deepseek}
      DEEPSEEK_API_KEY: ${DEEPSEEK_API_KEY:-}
      # Memory storage
      MEMORY_STORAGE_DIR: /data/memory
      # External services
      TMDB_API_KEY: ${TMDB_API_KEY:-}
      QBITTORRENT_URL: ${QBITTORRENT_URL:-}
      QBITTORRENT_USERNAME: ${QBITTORRENT_USERNAME:-}
      QBITTORRENT_PASSWORD: ${QBITTORRENT_PASSWORD:-}
    networks:
      - agent-network
  # Da face (LibreChat)
  # LibreChat web frontend for the alfred backend.
  librechat:
    image: ghcr.io/danny-avila/librechat-dev:latest  # NOTE(review): unpinned dev image
    container_name: librechat-frontend
    restart: unless-stopped
    ports:
      - "3080:3080"
    depends_on:
      - mongodb
      - meilisearch
      - rag_api
      - alfred
    env_file: .env
    environment:
      # MongoDB connection (no auth, matching LibreChat default)
      MONGO_URI: mongodb://mongodb:27017/LibreChat
      # App configuration
      HOST: 0.0.0.0
      PORT: 3080
      # Security
      # WARNING: the fallbacks below are publicly known placeholders — real
      # secrets must be provided via .env for any non-local deployment.
      JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-key-change-this-in-production}
      JWT_REFRESH_SECRET: ${JWT_REFRESH_SECRET:-your-super-secret-refresh-key-change-this-too}
      CREDS_KEY: ${CREDS_KEY:-your-32-character-secret-key-here}
      CREDS_IV: ${CREDS_IV:-your-16-character-iv-here}
      # Session
      SESSION_EXPIRY: ${SESSION_EXPIRY:-1000 * 60 * 15}
      REFRESH_TOKEN_EXPIRY: ${REFRESH_TOKEN_EXPIRY:-1000 * 60 * 60 * 24 * 7}
      # Domain
      DOMAIN_CLIENT: ${DOMAIN_CLIENT:-http://localhost:3080}
      DOMAIN_SERVER: ${DOMAIN_SERVER:-http://localhost:3080}
      # Meilisearch
      MEILI_HOST: http://meilisearch:7700
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFU}  # placeholder key — override in .env
      # RAG API
      RAG_API_URL: http://rag_api:8000
      # Endpoints
      ENDPOINTS: custom
      # Debug (optional)
      DEBUG_LOGGING: ${DEBUG_LOGGING:-false}
      DEBUG_CONSOLE: ${DEBUG_CONSOLE:-false}
    volumes:
      - ./librechat/librechat.yaml:/app/librechat.yaml:ro
      - librechat-images:/app/client/public/images
      - librechat-logs:/app/api/logs
    networks:
      - agent-network
  # MongoDB for LibreChat
  mongodb:
    image: mongo:latest  # NOTE(review): unpinned — consider pinning a major version
    container_name: librechat-mongodb
    restart: unless-stopped
    volumes:
      - mongodb-data:/data/db
    # --noauth disables authentication AND the port below is published to the
    # host, so anything on the host can reach this DB unauthenticated.
    command: mongod --noauth
    ports:
      - "27017:27017"
    networks:
      - agent-network
  # Meilisearch - Search engine for LibreChat
  #TODO: Follow currently used version on librechat's github
  meilisearch:
    image: getmeili/meilisearch:v1.12.3
    container_name: librechat-meilisearch
    restart: unless-stopped
    volumes:
      - meilisearch-data:/meili_data
    environment:
      MEILI_HOST: http://meilisearch:7700
      MEILI_HTTP_ADDR: meilisearch:7700
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFU}  # must match the key the librechat service uses
    ports:
      - "7700:7700"  # published to the host
    networks:
      - agent-network
  # PostgreSQL with pgvector for RAG API
  pgvector:
    # NOTE(review): ankane/pgvector:latest is unpinned — pin a pg major version
    # so on-disk data stays compatible across image pulls.
    image: ankane/pgvector:latest
    container_name: librechat-pgvector
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-librechat_rag}
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}  # insecure default — override in .env
    volumes:
      - pgvector-data:/var/lib/postgresql/data
    ports:
      - "5432:5432"  # published to the host — exposes the DB beyond the compose network
    networks:
      - agent-network
  # RAG API - Vector database for LibreChat
  rag_api:
    image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest  # NOTE(review): unpinned
    container_name: librechat-rag-api
    restart: unless-stopped
    depends_on:
      - pgvector
    environment:
      PORT: 8000
      HOST: 0.0.0.0
      # PostgreSQL connection (multiple variable names for compatibility)
      DB_HOST: pgvector
      DB_PORT: 5432
      DB_NAME: ${POSTGRES_DB:-librechat_rag}
      DB_USER: ${POSTGRES_USER:-postgres}
      DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: ${POSTGRES_DB:-librechat_rag}
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      # RAG configuration
      COLLECTION_NAME: ${RAG_COLLECTION_NAME:-testcollection}
      EMBEDDINGS_PROVIDER: ${RAG_EMBEDDINGS_PROVIDER:-openai}
      EMBEDDINGS_MODEL: ${RAG_EMBEDDINGS_MODEL:-text-embedding-3-small}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}  # required when EMBEDDINGS_PROVIDER=openai
      RAG_UPLOAD_DIR: /app/uploads
    volumes:
      - rag-uploads:/app/uploads
    ports:
      - "8001:8000"  # host 8001 to avoid clashing with alfred on 8000
    networks:
      - agent-network
# Named volumes for persistent data
# (`driver: local` is the default; kept explicit for clarity.)
volumes:
  # MongoDB data
  mongodb-data:
    driver: local
  # Meilisearch data
  meilisearch-data:
    driver: local
  # PostgreSQL pgvector data
  pgvector-data:
    driver: local
  # RAG API uploads
  rag-uploads:
    driver: local
  # LibreChat data
  librechat-images:
    driver: local
  librechat-logs:
    driver: local
  # Alfred data
  agent-memory:
    driver: local
  agent-logs:
    driver: local
# Network for inter-service communication
networks:
  agent-network:
    driver: bridge

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Script to generate secure keys for LibreChat
# Run this script to generate random secure keys for your .env file
#
# Output lines use the exact variable names LibreChat reads from .env.
# LibreChat decodes CREDS_KEY / CREDS_IV as hex for AES-256-CBC, so they must
# be 32 bytes (64 hex chars) and 16 bytes (32 hex chars) respectively.

set -euo pipefail  # fail loudly if openssl is missing or errors out

echo "==================================="
echo "LibreChat Security Keys Generator"
echo "==================================="
echo ""
echo "# MongoDB Password"
echo "MONGO_PASSWORD=$(openssl rand -base64 24)"
echo ""
echo "# JWT Secrets"
echo "JWT_SECRET=$(openssl rand -base64 32)"
echo "JWT_REFRESH_SECRET=$(openssl rand -base64 32)"
echo ""
echo "# Credentials Encryption Keys"
# FIX: previously generated with -hex 16 / -hex 8, i.e. a 16-byte key and an
# 8-byte IV — half the size LibreChat's AES-256-CBC credential crypto expects,
# which makes credential encryption fail with an invalid key/IV length.
echo "CREDS_KEY=$(openssl rand -hex 32)"   # 32 bytes -> 64 hex chars
echo "CREDS_IV=$(openssl rand -hex 16)"    # 16 bytes -> 32 hex chars
echo ""
echo "==================================="
echo "Copy these values to your .env file"
echo "==================================="

6
poetry.lock generated
View File

@@ -372,13 +372,13 @@ testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastapi"
version = "0.127.0"
version = "0.127.1"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
files = [
{file = "fastapi-0.127.0-py3-none-any.whl", hash = "sha256:725aa2bb904e2eff8031557cf4b9b77459bfedd63cae8427634744fd199f6a49"},
{file = "fastapi-0.127.0.tar.gz", hash = "sha256:5a9246e03dcd1fdb19f1396db30894867c1d630f5107dc167dcbc5ed1ea7d259"},
{file = "fastapi-0.127.1-py3-none-any.whl", hash = "sha256:31d670a4f9373cc6d7994420f98e4dc46ea693145207abc39696746c83a44430"},
{file = "fastapi-0.127.1.tar.gz", hash = "sha256:946a87ee5d931883b562b6bada787d6c8178becee2683cb3f9b980d593206359"},
]
[package.dependencies]

View File

@@ -6,6 +6,13 @@ authors = ["Francwa <francois.hodiaumont@gmail.com>"]
readme = "README.md"
package-mode = false
[tool.alfred]
image_name = "alfred_media_organizer"
librechat_version = "v0.8.1"
rag_version = "v0.7.0"
runner = "poetry"
service_name = "alfred"
[tool.poetry.dependencies]
python = "==3.14.2"
python-dotenv = "^1.0.0"
@@ -31,6 +38,8 @@ build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# Chemins où pytest cherche les tests
testpaths = ["tests"]
# Ajouter le répertoire racine au PYTHONPATH pour les imports
pythonpath = ["."]
# Patterns de fichiers/classes/fonctions à considérer comme tests
python_files = ["test_*.py"] # Fichiers commençant par "test_"

View File

@@ -1,20 +1,16 @@
"""Pytest configuration and shared fixtures."""
import sys
from pathlib import Path
# TODO: Moved directory, should not be necessary anymore but need to check !!
# Ajouter le dossier parent (brain) au PYTHONPATH
# sys.path.insert(0, str(Path(__file__).parent.parent))
import pytest
import shutil
import sys
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, Mock
import pytest
from infrastructure.persistence import Memory, set_memory
from alfred.infrastructure.persistence import Memory, set_memory
@pytest.fixture
@@ -25,6 +21,16 @@ def temp_dir():
shutil.rmtree(dirpath)
@pytest.fixture(autouse=True)
def mock_memory_storage_dir(monkeypatch):
"""Override MEMORY_STORAGE_DIR for all tests to use a temp directory."""
test_dir = tempfile.mkdtemp()
monkeypatch.setenv("MEMORY_STORAGE_DIR", test_dir)
yield
# Cleanup
shutil.rmtree(test_dir, ignore_errors=True)
@pytest.fixture
def memory(temp_dir):
"""Create a fresh Memory instance for testing."""
@@ -255,7 +261,6 @@ def mock_deepseek():
def test_something(mock_deepseek):
# Your test code here
"""
import sys
from unittest.mock import Mock
# Save the original module if it exists

View File

@@ -2,8 +2,8 @@
from unittest.mock import Mock
from agent.agent import Agent
from infrastructure.persistence import get_memory
from alfred.agent.agent import Agent
from alfred.infrastructure.persistence import get_memory
class TestAgentInit:

View File

@@ -1,11 +1,9 @@
"""Edge case tests for the Agent."""
import pytest
from unittest.mock import Mock
import pytest
from agent.agent import Agent
from infrastructure.persistence import get_memory
from alfred.agent.agent import Agent
from alfred.infrastructure.persistence import get_memory
class TestExecuteToolCallEdgeCases:
@@ -16,7 +14,7 @@ class TestExecuteToolCallEdgeCases:
agent = Agent(llm=mock_llm)
# Mock a tool that returns None
from agent.registry import Tool
from alfred.agent.registry import Tool
agent.tools["test_tool"] = Tool(
name="test_tool", description="Test", func=lambda: None, parameters={}
@@ -34,7 +32,7 @@ class TestExecuteToolCallEdgeCases:
"""Should propagate KeyboardInterrupt."""
agent = Agent(llm=mock_llm)
from agent.registry import Tool
from alfred.agent.registry import Tool
def raise_interrupt():
raise KeyboardInterrupt()

View File

@@ -10,7 +10,7 @@ class TestHealthEndpoint:
def test_health_check(self, memory):
"""Should return healthy status."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -25,7 +25,7 @@ class TestModelsEndpoint:
def test_list_models(self, memory):
"""Should return model list."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -43,7 +43,7 @@ class TestMemoryEndpoints:
def test_get_memory_state(self, memory):
"""Should return full memory state."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -57,7 +57,7 @@ class TestMemoryEndpoints:
def test_get_search_results_empty(self, memory):
"""Should return empty when no search results."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -69,7 +69,7 @@ class TestMemoryEndpoints:
def test_get_search_results_with_data(self, memory_with_search_results):
"""Should return search results when available."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -83,7 +83,7 @@ class TestMemoryEndpoints:
def test_clear_session(self, memory_with_search_results):
"""Should clear session memories."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -102,10 +102,10 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_success(self, memory):
"""Should return chat completion."""
from app import app
from alfred.app import app
# Patch the agent's step method directly
with patch("app.agent.step", return_value="Hello! How can I help?"):
with patch("alfred.app.agent.step", return_value="Hello! How can I help?"):
client = TestClient(app)
response = client.post(
@@ -123,7 +123,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_no_user_message(self, memory):
"""Should return error if no user message."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -146,7 +146,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_empty_messages(self, memory):
"""Should return error for empty messages."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -162,7 +162,7 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_invalid_json(self, memory):
"""Should return error for invalid JSON."""
from app import app
from alfred.app import app
client = TestClient(app)
@@ -176,9 +176,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_streaming(self, memory):
"""Should support streaming mode."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Streaming response"):
with patch("alfred.app.agent.step", return_value="Streaming response"):
client = TestClient(app)
response = client.post(
@@ -195,9 +195,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_extracts_last_user_message(self, memory):
"""Should use last user message."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Response") as mock_step:
with patch("alfred.app.agent.step", return_value="Response") as mock_step:
client = TestClient(app)
response = client.post(
@@ -218,9 +218,9 @@ class TestChatCompletionsEndpoint:
def test_chat_completion_response_format(self, memory):
"""Should return OpenAI-compatible format."""
from app import app
from alfred.app import app
with patch("app.agent.step", return_value="Test response"):
with patch("alfred.app.agent.step", return_value="Test response"):
client = TestClient(app)
response = client.post(

View File

@@ -10,7 +10,8 @@ class TestChatCompletionsEdgeCases:
def test_very_long_message(self, memory):
"""Should handle very long user message."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
# Patch the agent's LLM directly
mock_llm = Mock()
@@ -32,7 +33,8 @@ class TestChatCompletionsEdgeCases:
def test_unicode_message(self, memory):
"""Should handle unicode in message."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {
@@ -57,7 +59,8 @@ class TestChatCompletionsEdgeCases:
def test_special_characters_in_message(self, memory):
"""Should handle special characters."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -78,12 +81,12 @@ class TestChatCompletionsEdgeCases:
def test_empty_content_in_message(self, memory):
"""Should handle empty content in message."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm.complete.return_value = "Response"
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -100,11 +103,11 @@ class TestChatCompletionsEdgeCases:
def test_null_content_in_message(self, memory):
"""Should handle null content in message."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -120,11 +123,11 @@ class TestChatCompletionsEdgeCases:
def test_missing_content_field(self, memory):
"""Should handle missing content field."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -141,11 +144,11 @@ class TestChatCompletionsEdgeCases:
def test_missing_role_field(self, memory):
"""Should handle missing role field."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -162,12 +165,12 @@ class TestChatCompletionsEdgeCases:
def test_invalid_role(self, memory):
"""Should handle invalid role."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm.complete.return_value = "Response"
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -184,7 +187,8 @@ class TestChatCompletionsEdgeCases:
def test_many_messages(self, memory):
"""Should handle many messages in conversation."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -210,11 +214,11 @@ class TestChatCompletionsEdgeCases:
def test_only_system_messages(self, memory):
"""Should reject if only system messages."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -233,11 +237,11 @@ class TestChatCompletionsEdgeCases:
def test_only_assistant_messages(self, memory):
"""Should reject if only assistant messages."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -255,11 +259,11 @@ class TestChatCompletionsEdgeCases:
def test_messages_not_array(self, memory):
"""Should reject if messages is not array."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -276,11 +280,11 @@ class TestChatCompletionsEdgeCases:
def test_message_not_object(self, memory):
"""Should handle message that is not object."""
with patch("app.DeepSeekClient") as mock_llm_class:
with patch("alfred.app.DeepSeekClient") as mock_llm_class:
mock_llm = Mock()
mock_llm_class.return_value = mock_llm
from app import app
from alfred.app import app
client = TestClient(app)
@@ -297,7 +301,8 @@ class TestChatCompletionsEdgeCases:
def test_extra_fields_in_request(self, memory):
"""Should ignore extra fields in request."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -320,8 +325,9 @@ class TestChatCompletionsEdgeCases:
def test_streaming_with_tool_call(self, memory, real_folder):
"""Should handle streaming with tool execution."""
from app import agent, app
from infrastructure.persistence import get_memory
from alfred.app import app
from alfred.agent import agent
from alfred.infrastructure.persistence import get_memory
mem = get_memory()
mem.ltm.set_config("download_folder", str(real_folder["downloads"]))
@@ -365,7 +371,8 @@ class TestChatCompletionsEdgeCases:
def test_concurrent_requests_simulation(self, memory):
"""Should handle rapid sequential requests."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
@@ -385,7 +392,8 @@ class TestChatCompletionsEdgeCases:
def test_llm_returns_json_in_response(self, memory):
"""Should handle LLM returning JSON in text response."""
from app import agent, app
from alfred.app import app
from alfred.agent import agent
mock_llm = Mock()
mock_llm.complete.return_value = {
@@ -414,9 +422,9 @@ class TestMemoryEndpointsEdgeCases:
def test_memory_state_with_large_data(self, memory):
"""Should handle large memory state."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
# Add lots of data to memory
for i in range(100):
@@ -432,9 +440,9 @@ class TestMemoryEndpointsEdgeCases:
def test_memory_state_with_unicode(self, memory):
"""Should handle unicode in memory state."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.ltm.set_config("japanese", "日本語テスト")
memory.stm.add_message("user", "🎬 Movie request")
@@ -448,9 +456,9 @@ class TestMemoryEndpointsEdgeCases:
def test_search_results_with_special_chars(self, memory):
"""Should handle special characters in search results."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.episodic.store_search_results(
"Test <script>alert('xss')</script>",
@@ -467,9 +475,9 @@ class TestMemoryEndpointsEdgeCases:
def test_clear_session_idempotent(self, memory):
"""Should be idempotent - multiple clears should work."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -480,9 +488,9 @@ class TestMemoryEndpointsEdgeCases:
def test_clear_session_preserves_ltm(self, memory):
"""Should preserve LTM after clear."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
memory.ltm.set_config("important", "data")
memory.stm.add_message("user", "Hello")
@@ -502,9 +510,9 @@ class TestHealthEndpointEdgeCases:
def test_health_returns_version(self, memory):
"""Should return version in health check."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -515,9 +523,9 @@ class TestHealthEndpointEdgeCases:
def test_health_with_query_params(self, memory):
"""Should ignore query parameters."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)
@@ -531,9 +539,9 @@ class TestModelsEndpointEdgeCases:
def test_models_response_format(self, memory):
"""Should return OpenAI-compatible format."""
with patch("app.DeepSeekClient") as mock_llm:
with patch("alfred.app.DeepSeekClient") as mock_llm:
mock_llm.return_value = Mock()
from app import app
from alfred.app import app
client = TestClient(app)

View File

@@ -1,8 +1,7 @@
"""Critical tests for configuration validation."""
import pytest
from agent.config import ConfigurationError, Settings
from alfred.agent.config import ConfigurationError, Settings
class TestConfigValidation:

View File

@@ -1,12 +1,11 @@
"""Edge case tests for configuration and parameters."""
import os
import pytest
from unittest.mock import patch
import pytest
from agent.config import ConfigurationError, Settings
from agent.parameters import (
from alfred.agent.config import ConfigurationError, Settings
from alfred.agent.parameters import (
REQUIRED_PARAMETERS,
ParameterSchema,
format_parameters_for_prompt,

View File

@@ -1,17 +1,15 @@
"""Edge case tests for domain entities and value objects."""
import pytest
from datetime import datetime
import pytest
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.exceptions import ValidationError
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.exceptions import ValidationError
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
class TestImdbIdEdgeCases:

View File

@@ -1,10 +1,8 @@
"""Tests for the Memory system."""
import pytest
from datetime import datetime
import pytest
from infrastructure.persistence import (
from alfred.infrastructure.persistence import (
EpisodicMemory,
LongTermMemory,
Memory,
@@ -13,7 +11,7 @@ from infrastructure.persistence import (
has_memory,
init_memory,
)
from infrastructure.persistence.context import _memory_ctx
from alfred.infrastructure.persistence.context import _memory_ctx
def is_iso_format(s: str) -> bool:

View File

@@ -4,8 +4,7 @@ import json
import os
import pytest
from infrastructure.persistence import (
from alfred.infrastructure.persistence import (
EpisodicMemory,
LongTermMemory,
Memory,
@@ -14,7 +13,7 @@ from infrastructure.persistence import (
init_memory,
set_memory,
)
from infrastructure.persistence.context import _memory_ctx
from alfred.infrastructure.persistence.context import _memory_ctx
class TestLongTermMemoryEdgeCases:

View File

@@ -1,7 +1,7 @@
"""Tests for PromptBuilder."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
class TestPromptBuilder:

View File

@@ -1,7 +1,7 @@
"""Critical tests for prompt builder - Tests that would have caught bugs."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
class TestPromptBuilderToolsInjection:
@@ -15,9 +15,9 @@ class TestPromptBuilderToolsInjection:
# Verify each tool is mentioned
for tool_name in tools.keys():
assert (
tool_name in prompt
), f"Tool {tool_name} not mentioned in system prompt"
assert tool_name in prompt, (
f"Tool {tool_name} not mentioned in system prompt"
)
def test_tools_spec_contains_all_registered_tools(self, memory):
"""CRITICAL: Verify build_tools_spec() returns all tools."""

View File

@@ -1,7 +1,7 @@
"""Edge case tests for PromptBuilder."""
from agent.prompts import PromptBuilder
from agent.registry import make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import make_tools
class TestPromptBuilderEdgeCases:
@@ -266,7 +266,7 @@ class TestFormatToolsDescriptionEdgeCases:
def test_format_with_complex_parameters(self, memory):
"""Should format complex parameter schemas."""
from agent.registry import Tool
from alfred.agent.registry import Tool
tools = {
"complex_tool": Tool(

View File

@@ -3,9 +3,8 @@
import inspect
import pytest
from agent.prompts import PromptBuilder
from agent.registry import Tool, _create_tool_from_function, make_tools
from alfred.agent.prompts import PromptBuilder
from alfred.agent.registry import Tool, _create_tool_from_function, make_tools
class TestToolSpecFormat:
@@ -23,9 +22,9 @@ class TestToolSpecFormat:
for spec in specs:
# OpenAI format requires these fields
assert (
spec["type"] == "function"
), f"Tool type must be 'function', got {spec.get('type')}"
assert spec["type"] == "function", (
f"Tool type must be 'function', got {spec.get('type')}"
)
assert "function" in spec, "Tool spec missing 'function' key"
func = spec["function"]
@@ -56,9 +55,9 @@ class TestToolSpecFormat:
# Verify required vs optional
assert "name" in tool.parameters["required"], "name should be required"
assert "age" in tool.parameters["required"], "age should be required"
assert (
"active" not in tool.parameters["required"]
), "active has default, should not be required"
assert "active" not in tool.parameters["required"], (
"active has default, should not be required"
)
def test_all_registered_tools_are_callable(self):
"""CRITICAL: Verify all registered tools are actually callable."""
@@ -128,9 +127,9 @@ class TestToolSpecFormat:
properties = params.get("properties", {})
for param_name, param_spec in properties.items():
assert (
"description" in param_spec
), f"Parameter {param_name} in {spec['function']['name']} missing description"
assert "description" in param_spec, (
f"Parameter {param_name} in {spec['function']['name']} missing description"
)
def test_required_parameters_are_marked_correctly(self):
"""Verify required parameters are correctly identified."""

View File

@@ -1,8 +1,7 @@
"""Edge case tests for tool registry."""
import pytest
from agent.registry import Tool, make_tools
from alfred.agent.registry import Tool, make_tools
class TestToolEdgeCases:
@@ -182,9 +181,9 @@ class TestMakeToolsEdgeCases:
params = tool.parameters
if "required" in params and "properties" in params:
for req in params["required"]:
assert (
req in params["properties"]
), f"Required param {req} not in properties for {tool.name}"
assert req in params["properties"], (
f"Required param {req} not in properties for {tool.name}"
)
def test_make_tools_descriptions_not_empty(self, memory):
"""Should have non-empty descriptions."""
@@ -235,9 +234,9 @@ class TestMakeToolsEdgeCases:
if "properties" in tool.parameters:
for prop_name, prop_schema in tool.parameters["properties"].items():
if "type" in prop_schema:
assert (
prop_schema["type"] in valid_types
), f"Invalid type for {tool.name}.{prop_name}"
assert prop_schema["type"] in valid_types, (
f"Invalid type for {tool.name}.{prop_name}"
)
def test_make_tools_enum_values(self, memory):
"""Should have valid enum values."""

View File

@@ -1,13 +1,13 @@
"""Tests for JSON repositories."""
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence.json import (
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality, ReleaseYear
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence.json import (
JsonMovieRepository,
JsonSubtitleRepository,
JsonTVShowRepository,

View File

@@ -2,14 +2,14 @@
from datetime import datetime
from domain.movies.entities import Movie
from domain.movies.value_objects import MovieTitle, Quality
from domain.shared.value_objects import FilePath, FileSize, ImdbId
from domain.subtitles.entities import Subtitle
from domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from domain.tv_shows.entities import TVShow
from domain.tv_shows.value_objects import ShowStatus
from infrastructure.persistence.json import (
from alfred.domain.movies.entities import Movie
from alfred.domain.movies.value_objects import MovieTitle, Quality
from alfred.domain.shared.value_objects import FilePath, FileSize, ImdbId
from alfred.domain.subtitles.entities import Subtitle
from alfred.domain.subtitles.value_objects import Language, SubtitleFormat, TimingOffset
from alfred.domain.tv_shows.entities import TVShow
from alfred.domain.tv_shows.value_objects import ShowStatus
from alfred.infrastructure.persistence.json import (
JsonMovieRepository,
JsonSubtitleRepository,
JsonTVShowRepository,

View File

@@ -2,8 +2,8 @@
from unittest.mock import Mock, patch
from agent.tools import api as api_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import api as api_tools
from alfred.infrastructure.persistence import get_memory
def create_mock_response(status_code, json_data=None, text=None):
@@ -21,7 +21,7 @@ def create_mock_response(status_code, json_data=None, text=None):
class TestFindMediaImdbId:
"""Tests for find_media_imdb_id tool."""
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_success(self, mock_get, memory):
"""Should return movie info on success."""
@@ -56,7 +56,7 @@ class TestFindMediaImdbId:
# Verify HTTP calls
assert mock_get.call_count == 2
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_stores_in_stm(self, mock_get, memory):
"""Should store result in STM on success."""
@@ -88,7 +88,7 @@ class TestFindMediaImdbId:
assert entity["title"] == "Inception"
assert mem.stm.current_topic == "searching_media"
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_not_found(self, mock_get, memory):
"""Should return error when not found."""
mock_get.return_value = create_mock_response(200, json_data={"results": []})
@@ -98,7 +98,7 @@ class TestFindMediaImdbId:
assert result["status"] == "error"
assert result["error"] == "not_found"
@patch("infrastructure.api.tmdb.client.requests.get")
@patch("alfred.infrastructure.api.tmdb.client.requests.get")
def test_does_not_store_on_error(self, mock_get, memory):
"""Should not store in STM on error."""
mock_get.return_value = create_mock_response(200, json_data={"results": []})
@@ -112,7 +112,7 @@ class TestFindMediaImdbId:
class TestFindTorrent:
"""Tests for find_torrent tool."""
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_success(self, mock_post, memory):
"""Should return torrents on success."""
mock_post.return_value = create_mock_response(
@@ -146,7 +146,7 @@ class TestFindTorrent:
payload = mock_post.call_args[1]["json"]
assert payload["query"] == "Inception 1080p"
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_stores_in_episodic(self, mock_post, memory):
"""Should store results in episodic memory."""
mock_post.return_value = create_mock_response(
@@ -171,7 +171,7 @@ class TestFindTorrent:
assert mem.episodic.last_search_results["query"] == "Inception"
assert mem.stm.current_topic == "selecting_torrent"
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_results_have_indexes(self, mock_post, memory):
"""Should add indexes to results."""
mock_post.return_value = create_mock_response(
@@ -211,7 +211,7 @@ class TestFindTorrent:
assert results[1]["index"] == 2
assert results[2]["index"] == 3
@patch("infrastructure.api.knaben.client.requests.post")
@patch("alfred.infrastructure.api.knaben.client.requests.post")
def test_not_found(self, mock_post, memory):
"""Should return error when no torrents found."""
mock_post.return_value = create_mock_response(200, json_data={"hits": []})
@@ -286,7 +286,7 @@ class TestAddTorrentToQbittorrent:
This is acceptable mocking because we're testing the TOOL logic, not the client.
"""
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_success(self, mock_client, memory):
"""Should add torrent successfully and update memory."""
mock_client.add_torrent.return_value = True
@@ -298,7 +298,7 @@ class TestAddTorrentToQbittorrent:
# Verify client was called correctly
mock_client.add_torrent.assert_called_once_with("magnet:?xt=urn:btih:abc123")
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_adds_to_active_downloads(self, mock_client, memory_with_search_results):
"""Should add to active downloads on success."""
mock_client.add_torrent.return_value = True
@@ -313,7 +313,7 @@ class TestAddTorrentToQbittorrent:
== "Inception.2010.1080p.BluRay.x264"
)
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_sets_topic_and_ends_workflow(self, mock_client, memory):
"""Should set topic and end workflow."""
mock_client.add_torrent.return_value = True
@@ -326,10 +326,10 @@ class TestAddTorrentToQbittorrent:
assert mem.stm.current_topic == "downloading"
assert mem.stm.current_workflow is None
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_error_handling(self, mock_client, memory):
"""Should handle client errors correctly."""
from infrastructure.api.qbittorrent.exceptions import QBittorrentAPIError
from alfred.infrastructure.api.qbittorrent.exceptions import QBittorrentAPIError
mock_client.add_torrent.side_effect = QBittorrentAPIError("Connection failed")
@@ -349,7 +349,7 @@ class TestAddTorrentByIndex:
- Error handling for edge cases
"""
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_success(self, mock_client, memory_with_search_results):
"""Should get torrent by index and add it."""
mock_client.add_torrent.return_value = True
@@ -362,7 +362,7 @@ class TestAddTorrentByIndex:
# Verify correct magnet was extracted and used
mock_client.add_torrent.assert_called_once_with("magnet:?xt=urn:btih:abc123")
@patch("agent.tools.api.qbittorrent_client")
@patch("alfred.agent.tools.api.qbittorrent_client")
def test_uses_correct_magnet(self, mock_client, memory_with_search_results):
"""Should extract correct magnet from index."""
mock_client.add_torrent.return_value = True

View File

@@ -1,18 +1,16 @@
"""Edge case tests for tools."""
import pytest
from unittest.mock import Mock, patch
import pytest
from agent.tools import api as api_tools
from agent.tools import filesystem as fs_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import api as api_tools
from alfred.agent.tools import filesystem as fs_tools
from alfred.infrastructure.persistence import get_memory
class TestFindTorrentEdgeCases:
"""Edge case tests for find_torrent."""
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_empty_query(self, mock_use_case_class, memory):
"""Should handle empty query."""
mock_response = Mock()
@@ -28,7 +26,7 @@ class TestFindTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_very_long_query(self, mock_use_case_class, memory):
"""Should handle very long query."""
mock_response = Mock()
@@ -47,7 +45,7 @@ class TestFindTorrentEdgeCases:
# Should not crash
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_special_characters_in_query(self, mock_use_case_class, memory):
"""Should handle special characters in query."""
mock_response = Mock()
@@ -65,7 +63,7 @@ class TestFindTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_unicode_query(self, mock_use_case_class, memory):
"""Should handle unicode in query."""
mock_response = Mock()
@@ -82,7 +80,7 @@ class TestFindTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_results_with_missing_fields(self, mock_use_case_class, memory):
"""Should handle results with missing fields."""
mock_response = Mock()
@@ -104,7 +102,7 @@ class TestFindTorrentEdgeCases:
mem = get_memory()
assert len(mem.episodic.last_search_results["results"]) == 2
@patch("agent.tools.api.SearchTorrentsUseCase")
@patch("alfred.agent.tools.api.SearchTorrentsUseCase")
def test_api_timeout(self, mock_use_case_class, memory):
"""Should handle API timeout."""
mock_use_case = Mock()
@@ -157,7 +155,7 @@ class TestGetTorrentByIndexEdgeCases:
class TestAddTorrentEdgeCases:
"""Edge case tests for add_torrent functions."""
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_invalid_magnet_link(self, mock_use_case_class, memory):
"""Should handle invalid magnet link."""
mock_response = Mock()
@@ -173,7 +171,7 @@ class TestAddTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_empty_magnet_link(self, mock_use_case_class, memory):
"""Should handle empty magnet link."""
mock_response = Mock()
@@ -189,7 +187,7 @@ class TestAddTorrentEdgeCases:
assert result["status"] == "error"
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_very_long_magnet_link(self, mock_use_case_class, memory):
"""Should handle very long magnet link."""
mock_response = Mock()
@@ -203,7 +201,7 @@ class TestAddTorrentEdgeCases:
assert "status" in result
@patch("agent.tools.api.AddTorrentUseCase")
@patch("alfred.agent.tools.api.AddTorrentUseCase")
def test_qbittorrent_connection_refused(self, mock_use_case_class, memory):
"""Should handle qBittorrent connection refused."""
mock_use_case = Mock()
@@ -391,7 +389,7 @@ class TestFilesystemEdgeCases:
class TestFindMediaImdbIdEdgeCases:
"""Edge case tests for find_media_imdb_id."""
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_movie_with_same_name_different_years(self, mock_use_case_class, memory):
"""Should handle movies with same name."""
mock_response = Mock()
@@ -409,7 +407,7 @@ class TestFindMediaImdbIdEdgeCases:
assert result["status"] == "ok"
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_movie_with_special_title(self, mock_use_case_class, memory):
"""Should handle movies with special characters in title."""
mock_response = Mock()
@@ -426,7 +424,7 @@ class TestFindMediaImdbIdEdgeCases:
assert result["status"] == "ok"
@patch("agent.tools.api.SearchMovieUseCase")
@patch("alfred.agent.tools.api.SearchMovieUseCase")
def test_tv_show_vs_movie(self, mock_use_case_class, memory):
"""Should distinguish TV shows from movies."""
mock_response = Mock()

View File

@@ -1,11 +1,9 @@
"""Tests for filesystem tools."""
import pytest
from pathlib import Path
import pytest
from agent.tools import filesystem as fs_tools
from infrastructure.persistence import get_memory
from alfred.agent.tools import filesystem as fs_tools
from alfred.infrastructure.persistence import get_memory
class TestSetPathForFolder: