Compare commits
12 Commits
v0.1.7
...
tests/impr
| Author | SHA1 | Date | |
|---|---|---|---|
| fa1d50a45e | |||
| b05894d77d | |||
| 05b3d74103 | |||
| 806d9e97e1 | |||
| 97f48904e4 | |||
| 4ed0e8df78 | |||
| faaf1aafa7 | |||
| aa89a3fb00 | |||
| 64aeb5fc80 | |||
| 9540520dc4 | |||
| 300ed387f5 | |||
| dea81de5b5 |
@@ -40,7 +40,7 @@ MONGO_HOST=mongodb
|
|||||||
MONGO_PORT=27017
|
MONGO_PORT=27017
|
||||||
MONGO_USER=alfred
|
MONGO_USER=alfred
|
||||||
MONGO_PASSWORD=
|
MONGO_PASSWORD=
|
||||||
MONGO_DB_NAME=alfred
|
MONGO_DB_NAME=LibreChat
|
||||||
|
|
||||||
# PostgreSQL (Vector Database / RAG)
|
# PostgreSQL (Vector Database / RAG)
|
||||||
POSTGRES_URI=
|
POSTGRES_URI=
|
||||||
|
|||||||
@@ -34,6 +34,9 @@ jobs:
|
|||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Generate build variables
|
||||||
|
run: python scripts/generate_build_vars.py
|
||||||
|
|
||||||
- name: Load config from Makefile
|
- name: Load config from Makefile
|
||||||
id: config
|
id: config
|
||||||
run: make -s _ci-dump-config >> $GITHUB_OUTPUT
|
run: make -s _ci-dump-config >> $GITHUB_OUTPUT
|
||||||
|
|||||||
261
CONTRIBUTE.md
Normal file
261
CONTRIBUTE.md
Normal file
@@ -0,0 +1,261 @@
|
|||||||
|
# Contributing to Alfred
|
||||||
|
|
||||||
|
## Settings Management System
|
||||||
|
|
||||||
|
Alfred uses a **declarative, schema-based configuration system** that ensures type safety, validation, and maintainability.
|
||||||
|
|
||||||
|
### Architecture Overview
|
||||||
|
|
||||||
|
```
|
||||||
|
settings.toml # Schema definitions (single source of truth)
|
||||||
|
↓
|
||||||
|
settings_schema.py # Parser & validation
|
||||||
|
↓
|
||||||
|
settings_bootstrap.py # Generation & resolution
|
||||||
|
↓
|
||||||
|
.env # Runtime configuration
|
||||||
|
.env.make # Build variables for Makefile
|
||||||
|
↓
|
||||||
|
settings.py # Pydantic Settings (runtime validation)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key Files
|
||||||
|
|
||||||
|
- **`settings.toml`** — Declarative schema for all settings
|
||||||
|
- **`alfred/settings_schema.py`** — Schema parser and validation logic
|
||||||
|
- **`alfred/settings_bootstrap.py`** — Bootstrap logic (generates `.env` and `.env.make`)
|
||||||
|
- **`alfred/settings.py`** — Pydantic Settings class (runtime)
|
||||||
|
- **`.env`** — Generated configuration file (gitignored)
|
||||||
|
- **`.env.make`** — Build variables for Makefile (gitignored)
|
||||||
|
|
||||||
|
### Setting Sources
|
||||||
|
|
||||||
|
Settings can come from different sources:
|
||||||
|
|
||||||
|
| Source | Description | Example |
|
||||||
|
|--------|-------------|---------|
|
||||||
|
| `toml` | From `pyproject.toml` | Version numbers, build config |
|
||||||
|
| `env` | From `.env` file | User-provided values, API keys |
|
||||||
|
| `generated` | Auto-generated secrets | JWT secrets, passwords |
|
||||||
|
| `computed` | Calculated from other settings | Database URIs |
|
||||||
|
|
||||||
|
### How to Add a New Setting
|
||||||
|
|
||||||
|
#### 1. Define in `settings.toml`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.MY_NEW_SETTING]
|
||||||
|
type = "string" # string, integer, float, boolean, secret, computed
|
||||||
|
source = "env" # env, toml, generated, computed
|
||||||
|
default = "default_value" # Optional: default value
|
||||||
|
description = "Description here" # Required: clear description
|
||||||
|
category = "app" # app, api, database, security, build
|
||||||
|
required = true # Optional: default is true
|
||||||
|
validator = "range:1:100" # Optional: validation rule
|
||||||
|
export_to_env_make = false # Optional: export to .env.make for Makefile
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 2. Regenerate Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make bootstrap
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
- Read the schema from `settings.toml`
|
||||||
|
- Generate/update `.env` with the new setting
|
||||||
|
- Generate/update `.env.make` if `export_to_env_make = true`
|
||||||
|
- Preserve existing secrets
|
||||||
|
|
||||||
|
#### 3. Validate
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make validate
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures all settings are valid according to the schema.
|
||||||
|
|
||||||
|
#### 4. Use in Code
|
||||||
|
|
||||||
|
The setting is automatically available in `settings.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from alfred.settings import settings
|
||||||
|
|
||||||
|
print(settings.my_new_setting)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setting Types
|
||||||
|
|
||||||
|
#### String Setting
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.API_URL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "https://api.example.com"
|
||||||
|
description = "API base URL"
|
||||||
|
category = "api"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Integer Setting with Validation
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.MAX_RETRIES]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 3
|
||||||
|
description = "Maximum retry attempts"
|
||||||
|
category = "app"
|
||||||
|
validator = "range:1:10"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Secret (Auto-generated)
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.API_SECRET]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "32:b64" # 32 bytes, base64 encoded
|
||||||
|
description = "API secret key"
|
||||||
|
category = "security"
|
||||||
|
```
|
||||||
|
|
||||||
|
Secret rules:
|
||||||
|
- `"32:b64"` — 32 bytes, URL-safe base64
|
||||||
|
- `"16:hex"` — 16 bytes, hexadecimal
|
||||||
|
|
||||||
|
#### Computed Setting
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.DATABASE_URL]
|
||||||
|
type = "computed"
|
||||||
|
source = "computed"
|
||||||
|
compute_from = ["DB_HOST", "DB_PORT", "DB_NAME"]
|
||||||
|
compute_template = "postgresql://{DB_HOST}:{DB_PORT}/{DB_NAME}"
|
||||||
|
description = "Database connection URL"
|
||||||
|
category = "database"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### From TOML (Build Variables)
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings_schema.APP_VERSION]
|
||||||
|
type = "string"
|
||||||
|
source = "toml"
|
||||||
|
toml_path = "tool.poetry.version"
|
||||||
|
description = "Application version"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true # Available in Makefile
|
||||||
|
```
|
||||||
|
|
||||||
|
### Validators
|
||||||
|
|
||||||
|
Available validators:
|
||||||
|
|
||||||
|
- **`range:min:max`** — Numeric range validation
|
||||||
|
```toml
|
||||||
|
validator = "range:0.0:2.0" # For floats
|
||||||
|
validator = "range:1:100" # For integers
|
||||||
|
```
|
||||||
|
|
||||||
|
### Categories
|
||||||
|
|
||||||
|
Organize settings by category:
|
||||||
|
|
||||||
|
- **`app`** — Application settings
|
||||||
|
- **`api`** — API keys and external services
|
||||||
|
- **`database`** — Database configuration
|
||||||
|
- **`security`** — Secrets and security keys
|
||||||
|
- **`build`** — Build-time configuration
|
||||||
|
|
||||||
|
### Best Practices
|
||||||
|
|
||||||
|
1. **Always add a description** — Make it clear what the setting does
|
||||||
|
2. **Use appropriate types** — Don't use strings for numbers
|
||||||
|
3. **Add validation** — Use validators for numeric ranges
|
||||||
|
4. **Categorize properly** — Helps with organization
|
||||||
|
5. **Use computed settings** — For values derived from others (e.g., URIs)
|
||||||
|
6. **Mark secrets as generated** — Let the system handle secret generation
|
||||||
|
7. **Export build vars** — Set `export_to_env_make = true` for Makefile variables
|
||||||
|
|
||||||
|
### Workflow Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Edit settings.toml
|
||||||
|
vim settings.toml
|
||||||
|
|
||||||
|
# 2. Regenerate configuration
|
||||||
|
make bootstrap
|
||||||
|
|
||||||
|
# 3. Validate
|
||||||
|
make validate
|
||||||
|
|
||||||
|
# 4. Test
|
||||||
|
python -c "from alfred.settings import settings; print(settings.my_new_setting)"
|
||||||
|
|
||||||
|
# 5. Commit (settings.toml only, not .env)
|
||||||
|
git add settings.toml
|
||||||
|
git commit -m "Add MY_NEW_SETTING"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make bootstrap # Generate .env and .env.make from schema
|
||||||
|
make validate # Validate all settings against schema
|
||||||
|
make help # Show all available commands
|
||||||
|
```
|
||||||
|
|
||||||
|
### Troubleshooting
|
||||||
|
|
||||||
|
**Setting not found in schema:**
|
||||||
|
```
|
||||||
|
KeyError: Missing [tool.alfred.settings_schema] section
|
||||||
|
```
|
||||||
|
→ Check that `settings.toml` exists and has the correct structure
|
||||||
|
|
||||||
|
**Validation error:**
|
||||||
|
```
|
||||||
|
ValueError: MY_SETTING must be between 1 and 100, got 150
|
||||||
|
```
|
||||||
|
→ Check the validator in `settings.toml` and adjust the value in `.env`
|
||||||
|
|
||||||
|
**Secret not preserved:**
|
||||||
|
→ Secrets are automatically preserved during `make bootstrap`. If lost, they were never in `.env` (check `.env` exists before running bootstrap)
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
|
||||||
|
When adding a new setting, consider adding tests:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# tests/test_settings_schema.py
|
||||||
|
def test_my_new_setting(self, create_schema_file):
|
||||||
|
"""Test MY_NEW_SETTING definition."""
|
||||||
|
schema_toml = """
|
||||||
|
[tool.alfred.settings_schema.MY_NEW_SETTING]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "test"
|
||||||
|
"""
|
||||||
|
base_dir = create_schema_file(schema_toml)
|
||||||
|
schema = load_schema(base_dir)
|
||||||
|
|
||||||
|
definition = schema.get("MY_NEW_SETTING")
|
||||||
|
assert definition.default == "test"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Migration from Old System
|
||||||
|
|
||||||
|
If you're migrating from the old system:
|
||||||
|
|
||||||
|
1. Settings are now in `settings.toml` instead of scattered across files
|
||||||
|
2. No more `.env.example` — schema is the source of truth
|
||||||
|
3. Secrets are auto-generated and preserved
|
||||||
|
4. Validation happens at bootstrap time, not just runtime
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Questions?
|
||||||
|
|
||||||
|
Open an issue or check the existing settings in `settings.toml` for examples.
|
||||||
@@ -108,6 +108,7 @@ COPY --chown=appuser:appuser alfred/ ./alfred
|
|||||||
COPY --chown=appuser:appuser scripts/ ./scripts
|
COPY --chown=appuser:appuser scripts/ ./scripts
|
||||||
COPY --chown=appuser:appuser .env.example ./
|
COPY --chown=appuser:appuser .env.example ./
|
||||||
COPY --chown=appuser:appuser pyproject.toml ./
|
COPY --chown=appuser:appuser pyproject.toml ./
|
||||||
|
COPY --chown=appuser:appuser settings.toml ./
|
||||||
|
|
||||||
# Create volumes for persistent data
|
# Create volumes for persistent data
|
||||||
VOLUME ["/data", "/logs"]
|
VOLUME ["/data", "/logs"]
|
||||||
|
|||||||
30
Makefile
30
Makefile
@@ -5,7 +5,7 @@
|
|||||||
|
|
||||||
# --- Profiles management ---
|
# --- Profiles management ---
|
||||||
# Usage: make up p=rag,meili
|
# Usage: make up p=rag,meili
|
||||||
p ?= core
|
p ?= full
|
||||||
PROFILES_PARAM := COMPOSE_PROFILES=$(p)
|
PROFILES_PARAM := COMPOSE_PROFILES=$(p)
|
||||||
|
|
||||||
# --- Commands ---
|
# --- Commands ---
|
||||||
@@ -16,28 +16,37 @@ DOCKER_BUILD := docker build --no-cache \
|
|||||||
--build-arg RUNNER=$(RUNNER)
|
--build-arg RUNNER=$(RUNNER)
|
||||||
|
|
||||||
# --- Phony ---
|
# --- Phony ---
|
||||||
.PHONY: .env up down restart logs ps shell build build-test install update \
|
.PHONY: .env bootstrap validate up down restart logs ps shell build build-test install \
|
||||||
install-hooks test coverage lint format clean major minor patch help
|
update install-hooks test coverage lint format clean major minor patch help
|
||||||
|
|
||||||
# --- Setup ---
|
# --- Setup ---
|
||||||
.env .env.make:
|
.env:
|
||||||
@echo "Initializing environment..."
|
@echo "Initializing environment..."
|
||||||
@python scripts/bootstrap.py \
|
@python scripts/bootstrap.py \
|
||||||
&& echo "✓ Environment ready" \
|
&& echo "✓ Environment ready" \
|
||||||
|| (echo "✗ Environment setup failed" && exit 1)
|
|| (echo "✗ Environment setup failed" && exit 1)
|
||||||
|
|
||||||
bootstrap: .env .env.make
|
# .env.make is automatically generated by bootstrap.py when .env is created
|
||||||
|
.env.make: .env
|
||||||
|
|
||||||
|
bootstrap: .env
|
||||||
|
|
||||||
|
validate:
|
||||||
|
@echo "Validating settings..."
|
||||||
|
@python scripts/validate_settings.py \
|
||||||
|
&& echo "✓ Settings valid" \
|
||||||
|
|| (echo "✗ Settings validation failed" && exit 1)
|
||||||
|
|
||||||
# --- Docker ---
|
# --- Docker ---
|
||||||
up: .env
|
up: .env
|
||||||
@echo "Starting containers with profiles: [$(p)]..."
|
@echo "Starting containers with profiles: [full]..."
|
||||||
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) up -d --remove-orphans \
|
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) up -d --remove-orphans \
|
||||||
&& echo "✓ Containers started" \
|
&& echo "✓ Containers started" \
|
||||||
|| (echo "✗ Failed to start containers" && exit 1)
|
|| (echo "✗ Failed to start containers" && exit 1)
|
||||||
|
|
||||||
down:
|
down:
|
||||||
@echo "Stopping containers..."
|
@echo "Stopping containers..."
|
||||||
@$(DOCKER_COMPOSE) down \
|
@$(PROFILES_PARAM) $(DOCKER_COMPOSE) down \
|
||||||
&& echo "✓ Containers stopped" \
|
&& echo "✓ Containers stopped" \
|
||||||
|| (echo "✗ Failed to stop containers" && exit 1)
|
|| (echo "✗ Failed to stop containers" && exit 1)
|
||||||
|
|
||||||
@@ -161,8 +170,12 @@ help:
|
|||||||
@echo ""
|
@echo ""
|
||||||
@echo "Usage: make [target] [p=profile1,profile2]"
|
@echo "Usage: make [target] [p=profile1,profile2]"
|
||||||
@echo ""
|
@echo ""
|
||||||
|
@echo "Setup:"
|
||||||
|
@echo " bootstrap Generate .env and .env.make from schema"
|
||||||
|
@echo " validate Validate settings against schema"
|
||||||
|
@echo ""
|
||||||
@echo "Docker:"
|
@echo "Docker:"
|
||||||
@echo " up Start containers (default profile: core)"
|
@echo " up Start containers (default profile: full)"
|
||||||
@echo " Example: make up p=rag,meili"
|
@echo " Example: make up p=rag,meili"
|
||||||
@echo " down Stop all containers"
|
@echo " down Stop all containers"
|
||||||
@echo " restart Restart containers (supports p=...)"
|
@echo " restart Restart containers (supports p=...)"
|
||||||
@@ -172,7 +185,6 @@ help:
|
|||||||
@echo " build Build the production Docker image"
|
@echo " build Build the production Docker image"
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "Dev & Quality:"
|
@echo "Dev & Quality:"
|
||||||
@echo " setup Bootstrap .env and security keys"
|
|
||||||
@echo " install Install dependencies via $(RUNNER)"
|
@echo " install Install dependencies via $(RUNNER)"
|
||||||
@echo " test Run pytest suite"
|
@echo " test Run pytest suite"
|
||||||
@echo " coverage Run tests and generate HTML report"
|
@echo " coverage Run tests and generate HTML report"
|
||||||
|
|||||||
649
README.md
649
README.md
@@ -1,89 +1,277 @@
|
|||||||
# Agent Media 🎬
|
# Alfred Media Organizer 🎬
|
||||||
|
|
||||||
An AI-powered agent for managing your local media library with natural language. Search, download, and organize movies and TV shows effortlessly.
|
An AI-powered agent for managing your local media library with natural language. Search, download, and organize movies and TV shows effortlessly through a conversational interface.
|
||||||
|
|
||||||
## Features
|
[](https://www.python.org/downloads/)
|
||||||
|
[](https://python-poetry.org/)
|
||||||
|
[](https://opensource.org/licenses/MIT)
|
||||||
|
[](https://github.com/astral-sh/ruff)
|
||||||
|
|
||||||
- 🤖 **Natural Language Interface**: Talk to your media library in plain language
|
## ✨ Features
|
||||||
- 🔍 **Smart Search**: Find movies and TV shows via TMDB
|
|
||||||
- 📥 **Torrent Integration**: Search and download via qBittorrent
|
|
||||||
- 🧠 **Contextual Memory**: Remembers your preferences and conversation history
|
|
||||||
- 📁 **Auto-Organization**: Keeps your media library tidy
|
|
||||||
- 🌐 **API Compatible**: OpenAI-compatible API for easy integration
|
|
||||||
|
|
||||||
## Architecture
|
- 🤖 **Natural Language Interface** — Talk to your media library in plain language
|
||||||
|
- 🔍 **Smart Search** — Find movies and TV shows via TMDB with rich metadata
|
||||||
|
- 📥 **Torrent Integration** — Search and download via qBittorrent
|
||||||
|
- 🧠 **Contextual Memory** — Remembers your preferences and conversation history
|
||||||
|
- 📁 **Auto-Organization** — Keeps your media library tidy and well-structured
|
||||||
|
- 🌐 **OpenAI-Compatible API** — Works with any OpenAI-compatible client
|
||||||
|
- 🖥️ **LibreChat Frontend** — Beautiful web UI included out of the box
|
||||||
|
- 🔒 **Secure by Default** — Auto-generated secrets and encrypted credentials
|
||||||
|
|
||||||
Built with **Domain-Driven Design (DDD)** principles:
|
## 🏗️ Architecture
|
||||||
|
|
||||||
|
Built with **Domain-Driven Design (DDD)** principles for clean separation of concerns:
|
||||||
|
|
||||||
```
|
```
|
||||||
agent_media/
|
alfred/
|
||||||
├── agent/ # AI agent orchestration
|
├── agent/ # AI agent orchestration
|
||||||
├── application/ # Use cases & DTOs
|
│ ├── llm/ # LLM clients (Ollama, DeepSeek)
|
||||||
├── domain/ # Business logic & entities
|
│ └── tools/ # Tool implementations
|
||||||
└── infrastructure/ # External services & persistence
|
├── application/ # Use cases & DTOs
|
||||||
|
│ ├── movies/ # Movie search use cases
|
||||||
|
│ ├── torrents/ # Torrent management
|
||||||
|
│ └── filesystem/ # File operations
|
||||||
|
├── domain/ # Business logic & entities
|
||||||
|
│ ├── movies/ # Movie entities
|
||||||
|
│ ├── tv_shows/ # TV show entities
|
||||||
|
│ └── subtitles/ # Subtitle entities
|
||||||
|
└── infrastructure/ # External services & persistence
|
||||||
|
├── api/ # External API clients (TMDB, qBittorrent)
|
||||||
|
├── filesystem/ # File system operations
|
||||||
|
└── persistence/ # Memory & repositories
|
||||||
```
|
```
|
||||||
|
|
||||||
See [architecture_diagram.md](docs/architecture_diagram.md) for architectural details.
|
See [docs/architecture_diagram.md](docs/architecture_diagram.md) for detailed architectural diagrams.
|
||||||
|
|
||||||
## Quick Start
|
## 🚀 Quick Start
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
- Python 3.12+
|
- **Python 3.14+** (required)
|
||||||
- Poetry
|
- **Poetry** (dependency manager)
|
||||||
- qBittorrent (optional, for downloads)
|
- **Docker & Docker Compose** (recommended for full stack)
|
||||||
- API Keys:
|
- **API Keys:**
|
||||||
- DeepSeek API key (or Ollama for local LLM)
|
- TMDB API key ([get one here](https://www.themoviedb.org/settings/api))
|
||||||
- TMDB API key
|
- Optional: DeepSeek, OpenAI, Anthropic, or other LLM provider keys
|
||||||
|
|
||||||
### Installation
|
### Installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Clone the repository
|
# Clone the repository
|
||||||
git clone https://github.com/your-username/agent-media.git
|
git clone https://github.com/francwa/alfred_media_organizer.git
|
||||||
cd agent-media
|
cd alfred_media_organizer
|
||||||
|
|
||||||
# Install dependencies
|
# Install dependencies
|
||||||
poetry install
|
make install
|
||||||
|
|
||||||
# Copy environment template
|
# Bootstrap environment (generates .env with secure secrets)
|
||||||
cp .env.example .env
|
make bootstrap
|
||||||
|
|
||||||
# Edit .env with your API keys
|
# Edit .env with your API keys
|
||||||
nano .env
|
nano .env
|
||||||
```
|
```
|
||||||
|
|
||||||
### Configuration
|
### Running with Docker (Recommended)
|
||||||
|
|
||||||
Edit `.env`:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# LLM Provider (deepseek or ollama)
|
# Start all services (LibreChat + Alfred + MongoDB + Ollama)
|
||||||
LLM_PROVIDER=deepseek
|
make up
|
||||||
DEEPSEEK_API_KEY=your-api-key-here
|
|
||||||
|
|
||||||
# TMDB (for movie/TV show metadata)
|
# Or start with specific profiles
|
||||||
TMDB_API_KEY=your-tmdb-key-here
|
make up p=rag,meili # Include RAG and Meilisearch
|
||||||
|
make up p=qbittorrent # Include qBittorrent
|
||||||
|
make up p=full # Everything
|
||||||
|
|
||||||
# qBittorrent (optional)
|
# View logs
|
||||||
QBITTORRENT_HOST=http://localhost:8080
|
make logs
|
||||||
QBITTORRENT_USERNAME=admin
|
|
||||||
QBITTORRENT_PASSWORD=adminadmin
|
# Stop all services
|
||||||
|
make down
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run
|
The web interface will be available at **http://localhost:3080**
|
||||||
|
|
||||||
|
### Running Locally (Development)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# Install dependencies
|
||||||
|
poetry install
|
||||||
|
|
||||||
# Start the API server
|
# Start the API server
|
||||||
poetry run uvicorn app:app --reload
|
poetry run uvicorn alfred.app:app --reload --port 8000
|
||||||
|
|
||||||
# Or with Docker
|
|
||||||
docker-compose up
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The API will be available at `http://localhost:8000`
|
## ⚙️ Configuration
|
||||||
|
|
||||||
## Usage
|
### Environment Bootstrap
|
||||||
|
|
||||||
|
Alfred uses a smart bootstrap system that:
|
||||||
|
|
||||||
|
1. **Generates secure secrets** automatically (JWT tokens, database passwords, encryption keys)
|
||||||
|
2. **Syncs build variables** from `pyproject.toml` (versions, image names)
|
||||||
|
3. **Preserves existing secrets** when re-running (never overwrites your API keys)
|
||||||
|
4. **Computes database URIs** automatically from individual components
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# First time setup
|
||||||
|
make bootstrap
|
||||||
|
|
||||||
|
# Re-run after updating pyproject.toml (secrets are preserved)
|
||||||
|
make bootstrap
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration File (.env)
|
||||||
|
|
||||||
|
The `.env` file is generated from `.env.example` with secure defaults:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# --- CORE SETTINGS ---
|
||||||
|
HOST=0.0.0.0
|
||||||
|
PORT=3080
|
||||||
|
MAX_HISTORY_MESSAGES=10
|
||||||
|
MAX_TOOL_ITERATIONS=10
|
||||||
|
|
||||||
|
# --- LLM CONFIGURATION ---
|
||||||
|
# Providers: 'local' (Ollama), 'deepseek', 'openai', 'anthropic', 'google'
|
||||||
|
DEFAULT_LLM_PROVIDER=local
|
||||||
|
|
||||||
|
# Local LLM (Ollama - included in Docker stack)
|
||||||
|
OLLAMA_BASE_URL=http://ollama:11434
|
||||||
|
OLLAMA_MODEL=llama3.3:latest
|
||||||
|
LLM_TEMPERATURE=0.2
|
||||||
|
|
||||||
|
# --- API KEYS (fill only what you need) ---
|
||||||
|
TMDB_API_KEY=your-tmdb-key-here # Required for movie search
|
||||||
|
DEEPSEEK_API_KEY= # Optional
|
||||||
|
OPENAI_API_KEY= # Optional
|
||||||
|
ANTHROPIC_API_KEY= # Optional
|
||||||
|
|
||||||
|
# --- SECURITY (auto-generated, don't modify) ---
|
||||||
|
JWT_SECRET=<auto-generated>
|
||||||
|
JWT_REFRESH_SECRET=<auto-generated>
|
||||||
|
CREDS_KEY=<auto-generated>
|
||||||
|
CREDS_IV=<auto-generated>
|
||||||
|
|
||||||
|
# --- DATABASES (auto-generated passwords) ---
|
||||||
|
MONGO_PASSWORD=<auto-generated>
|
||||||
|
POSTGRES_PASSWORD=<auto-generated>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Security Keys
|
||||||
|
|
||||||
|
Security keys are defined in `pyproject.toml` and generated automatically:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tool.alfred.security]
|
||||||
|
jwt_secret = "32:b64" # 32 bytes, base64 URL-safe
|
||||||
|
jwt_refresh_secret = "32:b64"
|
||||||
|
creds_key = "32:hex" # 32 bytes, hexadecimal (AES-256)
|
||||||
|
creds_iv = "16:hex" # 16 bytes, hexadecimal (AES IV)
|
||||||
|
mongo_password = "16:hex"
|
||||||
|
postgres_password = "16:hex"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Formats:**
|
||||||
|
- `b64` — Base64 URL-safe (for JWT tokens)
|
||||||
|
- `hex` — Hexadecimal (for encryption keys, passwords)
|
||||||
|
|
||||||
|
## 🐳 Docker Services
|
||||||
|
|
||||||
|
### Service Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ alfred-net (bridge) │
|
||||||
|
├─────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||||
|
│ │ LibreChat │───▶│ Alfred │───▶│ MongoDB │ │
|
||||||
|
│ │ :3080 │ │ (core) │ │ :27017 │ │
|
||||||
|
│ └──────────────┘ └──────────────┘ └──────────────┘ │
|
||||||
|
│ │ │ │
|
||||||
|
│ │ ▼ │
|
||||||
|
│ │ ┌──────────────┐ │
|
||||||
|
│ │ │ Ollama │ │
|
||||||
|
│ │ │ (local) │ │
|
||||||
|
│ │ └──────────────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ┌──────┴───────────────────────────────────────────────┐ │
|
||||||
|
│ │ Optional Services (profiles) │ │
|
||||||
|
│ ├──────────────┬──────────────┬──────────────┬─────────┤ │
|
||||||
|
│ │ Meilisearch │ RAG API │ VectorDB │qBittor- │ │
|
||||||
|
│ │ :7700 │ :8000 │ :5432 │ rent │ │
|
||||||
|
│ │ [meili] │ [rag] │ [rag] │[qbit..] │ │
|
||||||
|
│ └──────────────┴──────────────┴──────────────┴─────────┘ │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker Profiles
|
||||||
|
|
||||||
|
| Profile | Services | Use Case |
|
||||||
|
|---------|----------|----------|
|
||||||
|
| (default) | LibreChat, Alfred, MongoDB, Ollama | Basic setup |
|
||||||
|
| `meili` | + Meilisearch | Fast search |
|
||||||
|
| `rag` | + RAG API, VectorDB | Document retrieval |
|
||||||
|
| `qbittorrent` | + qBittorrent | Torrent downloads |
|
||||||
|
| `full` | All services | Complete setup |
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start with specific profiles
|
||||||
|
make up p=rag,meili
|
||||||
|
make up p=full
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make up # Start containers (default profile)
|
||||||
|
make up p=full # Start with all services
|
||||||
|
make down # Stop all containers
|
||||||
|
make restart # Restart containers
|
||||||
|
make logs # Follow logs
|
||||||
|
make ps # Show container status
|
||||||
|
make shell # Open bash in Alfred container
|
||||||
|
make build # Build production image
|
||||||
|
make build-test # Build test image
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🛠️ Available Tools
|
||||||
|
|
||||||
|
The agent has access to these tools for interacting with your media library:
|
||||||
|
|
||||||
|
| Tool | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `find_media_imdb_id` | Search for movies/TV shows on TMDB by title |
|
||||||
|
| `find_torrent` | Search for torrents across multiple indexers |
|
||||||
|
| `get_torrent_by_index` | Get detailed info about a specific torrent result |
|
||||||
|
| `add_torrent_by_index` | Download a torrent by its index in search results |
|
||||||
|
| `add_torrent_to_qbittorrent` | Add a torrent via magnet link directly |
|
||||||
|
| `set_path_for_folder` | Configure folder paths for media organization |
|
||||||
|
| `list_folder` | List contents of a folder |
|
||||||
|
| `set_language` | Set preferred language for searches |
|
||||||
|
|
||||||
|
## 💬 Usage Examples
|
||||||
|
|
||||||
|
### Via Web Interface (LibreChat)
|
||||||
|
|
||||||
|
Navigate to **http://localhost:3080** and start chatting:
|
||||||
|
|
||||||
|
```
|
||||||
|
You: Find Inception in 1080p
|
||||||
|
Alfred: I found 3 torrents for Inception (2010):
|
||||||
|
1. Inception.2010.1080p.BluRay.x264 (150 seeders) - 2.1 GB
|
||||||
|
2. Inception.2010.1080p.WEB-DL.x265 (80 seeders) - 1.8 GB
|
||||||
|
3. Inception.2010.1080p.REMUX (45 seeders) - 25 GB
|
||||||
|
|
||||||
|
You: Download the first one
|
||||||
|
Alfred: ✓ Added to qBittorrent! Download started.
|
||||||
|
Saving to: /downloads/Movies/Inception (2010)/
|
||||||
|
|
||||||
|
You: What's downloading right now?
|
||||||
|
Alfred: You have 1 active download:
|
||||||
|
- Inception.2010.1080p.BluRay.x264 (45% complete, ETA: 12 min)
|
||||||
|
```
|
||||||
|
|
||||||
### Via API
|
### Via API
|
||||||
|
|
||||||
@@ -91,219 +279,177 @@ The API will be available at `http://localhost:8000`
|
|||||||
# Health check
|
# Health check
|
||||||
curl http://localhost:8000/health
|
curl http://localhost:8000/health
|
||||||
|
|
||||||
# Chat with the agent
|
# Chat with the agent (OpenAI-compatible)
|
||||||
curl -X POST http://localhost:8000/v1/chat/completions \
|
curl -X POST http://localhost:8000/v1/chat/completions \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
-d '{
|
-d '{
|
||||||
"model": "agent-media",
|
"model": "alfred",
|
||||||
"messages": [
|
"messages": [
|
||||||
{"role": "user", "content": "Find Inception 1080p"}
|
{"role": "user", "content": "Find The Matrix 4K"}
|
||||||
]
|
]
|
||||||
}'
|
}'
|
||||||
|
|
||||||
|
# List available models
|
||||||
|
curl http://localhost:8000/v1/models
|
||||||
|
|
||||||
|
# View memory state (debug)
|
||||||
|
curl http://localhost:8000/memory/state
|
||||||
|
|
||||||
|
# Clear session memory
|
||||||
|
curl -X POST http://localhost:8000/memory/clear-session
|
||||||
```
|
```
|
||||||
|
|
||||||
### Via OpenWebUI
|
### Via OpenWebUI or Other Clients
|
||||||
|
|
||||||
Agent Media is compatible with [OpenWebUI](https://github.com/open-webui/open-webui):
|
Alfred is compatible with any OpenAI-compatible client:
|
||||||
|
|
||||||
1. Add as OpenAI-compatible endpoint: `http://localhost:8000/v1`
|
1. Add as OpenAI-compatible endpoint: `http://localhost:8000/v1`
|
||||||
2. Model name: `agent-media`
|
2. Model name: `alfred`
|
||||||
3. Start chatting!
|
3. No API key required (or use any placeholder)
|
||||||
|
|
||||||
### Example Conversations
|
## 🧠 Memory System
|
||||||
|
|
||||||
```
|
Alfred uses a three-tier memory system for context management:
|
||||||
You: Find Inception in 1080p
|
|
||||||
Agent: I found 3 torrents for Inception:
|
|
||||||
1. Inception.2010.1080p.BluRay.x264 (150 seeders)
|
|
||||||
2. Inception.2010.1080p.WEB-DL.x265 (80 seeders)
|
|
||||||
3. Inception.2010.720p.BluRay (45 seeders)
|
|
||||||
|
|
||||||
You: Download the first one
|
|
||||||
Agent: Added to qBittorrent! Download started.
|
|
||||||
|
|
||||||
You: List my downloads
|
|
||||||
Agent: You have 1 active download:
|
|
||||||
- Inception.2010.1080p.BluRay.x264 (45% complete)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Available Tools
|
|
||||||
|
|
||||||
The agent has access to these tools:
|
|
||||||
|
|
||||||
| Tool | Description |
|
|
||||||
|------|-------------|
|
|
||||||
| `find_media_imdb_id` | Search for movies/TV shows on TMDB |
|
|
||||||
| `find_torrents` | Search for torrents |
|
|
||||||
| `get_torrent_by_index` | Get torrent details by index |
|
|
||||||
| `add_torrent_by_index` | Download torrent by index |
|
|
||||||
| `add_torrent_to_qbittorrent` | Add torrent via magnet link |
|
|
||||||
| `set_path_for_folder` | Configure folder paths |
|
|
||||||
| `list_folder` | List folder contents |
|
|
||||||
|
|
||||||
## Memory System
|
|
||||||
|
|
||||||
Agent Media uses a three-tier memory system:
|
|
||||||
|
|
||||||
### Long-Term Memory (LTM)
|
### Long-Term Memory (LTM)
|
||||||
- **Persistent** (saved to JSON)
|
- **Persistent** — Saved to JSON files
|
||||||
- Configuration, preferences, media library
|
- **Contents:** Configuration, user preferences, media library state
|
||||||
- Survives restarts
|
- **Survives:** Application restarts
|
||||||
|
|
||||||
### Short-Term Memory (STM)
|
### Short-Term Memory (STM)
|
||||||
- **Session-based** (RAM only)
|
- **Session-based** — Stored in RAM
|
||||||
- Conversation history, current workflow
|
- **Contents:** Conversation history, current workflow state
|
||||||
- Cleared on restart
|
- **Cleared:** On session end or restart
|
||||||
|
|
||||||
### Episodic Memory
|
### Episodic Memory
|
||||||
- **Transient** (RAM only)
|
- **Transient** — Stored in RAM
|
||||||
- Search results, active downloads, recent errors
|
- **Contents:** Search results, active downloads, recent errors
|
||||||
- Cleared frequently
|
- **Cleared:** Frequently, after task completion
|
||||||
|
|
||||||
## Development
|
## 🧪 Development
|
||||||
|
|
||||||
### Project Structure
|
### Project Setup
|
||||||
|
|
||||||
```
|
```bash
|
||||||
agent_media/
|
# Install all dependencies (including dev)
|
||||||
├── agent/
|
poetry install
|
||||||
│ ├── agent.py # Main agent orchestrator
|
|
||||||
│ ├── prompts.py # System prompt builder
|
# Install pre-commit hooks
|
||||||
│ ├── registry.py # Tool registration
|
make install-hooks
|
||||||
│ ├── tools/ # Tool implementations
|
|
||||||
│ └── llm/ # LLM clients (DeepSeek, Ollama)
|
# Run the development server
|
||||||
├── application/
|
poetry run uvicorn alfred.app:app --reload
|
||||||
│ ├── movies/ # Movie use cases
|
|
||||||
│ ├── torrents/ # Torrent use cases
|
|
||||||
│ └── filesystem/ # Filesystem use cases
|
|
||||||
├── domain/
|
|
||||||
│ ├── movies/ # Movie entities & value objects
|
|
||||||
│ ├── tv_shows/ # TV show entities
|
|
||||||
│ ├── subtitles/ # Subtitle entities
|
|
||||||
│ └── shared/ # Shared value objects
|
|
||||||
├── infrastructure/
|
|
||||||
│ ├── api/ # External API clients
|
|
||||||
│ │ ├── tmdb/ # TMDB client
|
|
||||||
│ │ ├── knaben/ # Torrent search
|
|
||||||
│ │ └── qbittorrent/ # qBittorrent client
|
|
||||||
│ ├── filesystem/ # File operations
|
|
||||||
│ └── persistence/ # Memory & repositories
|
|
||||||
├── tests/ # Test suite (~500 tests)
|
|
||||||
└── docs/ # Documentation
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Running Tests
|
### Running Tests
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run all tests
|
# Run all tests (parallel execution)
|
||||||
poetry run pytest
|
make test
|
||||||
|
|
||||||
# Run with coverage
|
# Run with coverage report
|
||||||
poetry run pytest --cov
|
make coverage
|
||||||
|
|
||||||
# Run specific test file
|
# Run specific test file
|
||||||
poetry run pytest tests/test_agent.py
|
poetry run pytest tests/test_agent.py -v
|
||||||
|
|
||||||
# Run specific test
|
# Run specific test
|
||||||
poetry run pytest tests/test_agent.py::TestAgent::test_step
|
poetry run pytest tests/test_config_loader.py::TestBootstrapEnv -v
|
||||||
```
|
```
|
||||||
|
|
||||||
### Code Quality
|
### Code Quality
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Linting
|
# Lint and auto-fix
|
||||||
poetry run ruff check .
|
make lint
|
||||||
|
|
||||||
# Formatting
|
# Format code
|
||||||
poetry run black .
|
make format
|
||||||
|
|
||||||
# Type checking (if mypy is installed)
|
# Clean build artifacts
|
||||||
poetry run mypy .
|
make clean
|
||||||
```
|
```
|
||||||
|
|
||||||
### Adding a New Tool
|
### Adding a New Tool
|
||||||
|
|
||||||
Quick example:
|
1. **Create the tool function** in `alfred/agent/tools/`:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
# 1. Create the tool function in agent/tools/api.py
|
# alfred/agent/tools/api.py
|
||||||
def my_new_tool(param: str) -> Dict[str, Any]:
|
def my_new_tool(param: str) -> dict[str, Any]:
|
||||||
"""Tool description."""
|
"""
|
||||||
|
Short description of what this tool does.
|
||||||
|
|
||||||
|
This will be shown to the LLM to help it decide when to use this tool.
|
||||||
|
"""
|
||||||
memory = get_memory()
|
memory = get_memory()
|
||||||
# Implementation
|
|
||||||
return {"status": "ok", "data": "result"}
|
|
||||||
|
|
||||||
# 2. Register in agent/registry.py
|
# Your implementation here
|
||||||
Tool(
|
result = do_something(param)
|
||||||
name="my_new_tool",
|
|
||||||
description="What this tool does",
|
return {
|
||||||
func=api_tools.my_new_tool,
|
"status": "success",
|
||||||
parameters={
|
"data": result
|
||||||
"type": "object",
|
}
|
||||||
"properties": {
|
|
||||||
"param": {"type": "string", "description": "Parameter description"},
|
|
||||||
},
|
|
||||||
"required": ["param"],
|
|
||||||
},
|
|
||||||
),
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Docker
|
2. **Register in the registry** (`alfred/agent/registry.py`):
|
||||||
|
|
||||||
### Build
|
```python
|
||||||
|
tool_functions = [
|
||||||
|
# ... existing tools ...
|
||||||
|
api_tools.my_new_tool, # Add your tool here
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
The tool will be automatically registered with its parameters extracted from the function signature.
|
||||||
|
|
||||||
|
### Version Management
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker build -t agent-media .
|
# Bump version (must be on main branch)
|
||||||
|
make patch # 0.1.7 -> 0.1.8
|
||||||
|
make minor # 0.1.7 -> 0.2.0
|
||||||
|
make major # 0.1.7 -> 1.0.0
|
||||||
```
|
```
|
||||||
|
|
||||||
### Run
|
## 📚 API Reference
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run -p 8000:8000 \
|
|
||||||
-e DEEPSEEK_API_KEY=your-key \
|
|
||||||
-e TMDB_API_KEY=your-key \
|
|
||||||
-v $(pwd)/memory_data:/app/memory_data \
|
|
||||||
agent-media
|
|
||||||
```
|
|
||||||
|
|
||||||
### Docker Compose
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start all services (agent + qBittorrent)
|
|
||||||
docker-compose up -d
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
docker-compose logs -f
|
|
||||||
|
|
||||||
# Stop
|
|
||||||
docker-compose down
|
|
||||||
```
|
|
||||||
|
|
||||||
## API Documentation
|
|
||||||
|
|
||||||
### Endpoints
|
### Endpoints
|
||||||
|
|
||||||
#### `GET /health`
|
#### `GET /health`
|
||||||
Health check endpoint.
|
Health check endpoint.
|
||||||
|
|
||||||
**Response:**
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"status": "healthy",
|
"status": "healthy",
|
||||||
"version": "0.2.0"
|
"version": "0.1.7"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `GET /v1/models`
|
#### `GET /v1/models`
|
||||||
List available models (OpenAI-compatible).
|
List available models (OpenAI-compatible).
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"object": "list",
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"id": "alfred",
|
||||||
|
"object": "model",
|
||||||
|
"owned_by": "alfred"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
#### `POST /v1/chat/completions`
|
#### `POST /v1/chat/completions`
|
||||||
Chat with the agent (OpenAI-compatible).
|
Chat with the agent (OpenAI-compatible).
|
||||||
|
|
||||||
**Request:**
|
**Request:**
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"model": "agent-media",
|
"model": "alfred",
|
||||||
"messages": [
|
"messages": [
|
||||||
{"role": "user", "content": "Find Inception"}
|
{"role": "user", "content": "Find Inception"}
|
||||||
],
|
],
|
||||||
@@ -317,7 +463,7 @@ Chat with the agent (OpenAI-compatible).
|
|||||||
"id": "chatcmpl-xxx",
|
"id": "chatcmpl-xxx",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"created": 1234567890,
|
"created": 1234567890,
|
||||||
"model": "agent-media",
|
"model": "alfred",
|
||||||
"choices": [{
|
"choices": [{
|
||||||
"index": 0,
|
"index": 0,
|
||||||
"message": {
|
"message": {
|
||||||
@@ -330,71 +476,120 @@ Chat with the agent (OpenAI-compatible).
|
|||||||
```
|
```
|
||||||
|
|
||||||
#### `GET /memory/state`
|
#### `GET /memory/state`
|
||||||
View full memory state (debug).
|
View full memory state (debug endpoint).
|
||||||
|
|
||||||
#### `POST /memory/clear-session`
|
#### `POST /memory/clear-session`
|
||||||
Clear session memories (STM + Episodic).
|
Clear session memories (STM + Episodic).
|
||||||
|
|
||||||
## Troubleshooting
|
## 🔧 Troubleshooting
|
||||||
|
|
||||||
### Agent doesn't respond
|
### Agent doesn't respond
|
||||||
- Check API keys in `.env`
|
|
||||||
- Verify LLM provider is running (Ollama) or accessible (DeepSeek)
|
1. Check API keys in `.env`
|
||||||
- Check logs: `docker-compose logs agent-media`
|
2. Verify LLM provider is running:
|
||||||
|
```bash
|
||||||
|
# For Ollama
|
||||||
|
docker logs alfred-ollama
|
||||||
|
|
||||||
|
# Check if model is pulled
|
||||||
|
docker exec alfred-ollama ollama list
|
||||||
|
```
|
||||||
|
3. Check Alfred logs: `docker logs alfred-core`
|
||||||
|
|
||||||
### qBittorrent connection failed
|
### qBittorrent connection failed
|
||||||
- Verify qBittorrent is running
|
|
||||||
- Check `QBITTORRENT_HOST` in `.env`
|
1. Verify qBittorrent is running: `docker ps | grep qbittorrent`
|
||||||
- Ensure Web UI is enabled in qBittorrent settings
|
2. Check Web UI is enabled in qBittorrent settings
|
||||||
|
3. Verify credentials in `.env`:
|
||||||
|
```bash
|
||||||
|
QBITTORRENT_URL=http://qbittorrent:16140
|
||||||
|
QBITTORRENT_USERNAME=admin
|
||||||
|
QBITTORRENT_PASSWORD=<check-your-env>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Database connection issues
|
||||||
|
|
||||||
|
1. Check MongoDB is healthy: `docker logs alfred-mongodb`
|
||||||
|
2. Verify credentials match in `.env`
|
||||||
|
3. Try restarting: `make restart`
|
||||||
|
|
||||||
### Memory not persisting
|
### Memory not persisting
|
||||||
- Check `memory_data/` directory exists and is writable
|
|
||||||
- Verify volume mounts in Docker
|
1. Check `data/` directory exists and is writable
|
||||||
|
2. Verify volume mounts in `docker-compose.yaml`
|
||||||
|
3. Check file permissions: `ls -la data/`
|
||||||
|
|
||||||
|
### Bootstrap fails
|
||||||
|
|
||||||
|
1. Ensure `.env.example` exists
|
||||||
|
2. Check `pyproject.toml` has required sections:
|
||||||
|
```toml
|
||||||
|
[tool.alfred.settings]
|
||||||
|
[tool.alfred.security]
|
||||||
|
```
|
||||||
|
3. Run manually: `python scripts/bootstrap.py`
|
||||||
|
|
||||||
### Tests failing
|
### Tests failing
|
||||||
- Run `poetry install` to ensure dependencies are up to date
|
|
||||||
- Check logs for specific error messages
|
|
||||||
|
|
||||||
## Contributing
|
1. Update dependencies: `poetry install`
|
||||||
|
2. Check Python version: `python --version` (needs 3.14+)
|
||||||
|
3. Run specific failing test with verbose output:
|
||||||
|
```bash
|
||||||
|
poetry run pytest tests/test_failing.py -v --tb=long
|
||||||
|
```
|
||||||
|
|
||||||
Contributions are welcome!
|
## 🤝 Contributing
|
||||||
|
|
||||||
### Development Workflow
|
Contributions are welcome! Please follow these steps:
|
||||||
|
|
||||||
1. Fork the repository
|
1. **Fork** the repository
|
||||||
2. Create a feature branch: `git checkout -b feature/my-feature`
|
2. **Create** a feature branch: `git checkout -b feature/my-feature`
|
||||||
3. Make your changes
|
3. **Make** your changes
|
||||||
4. Run tests: `poetry run pytest`
|
4. **Run** tests: `make test`
|
||||||
5. Run linting: `poetry run ruff check . && poetry run black .`
|
5. **Run** linting: `make lint && make format`
|
||||||
6. Commit: `git commit -m "Add my feature"`
|
6. **Commit**: `git commit -m "feat: add my feature"`
|
||||||
7. Push: `git push origin feature/my-feature`
|
7. **Push**: `git push origin feature/my-feature`
|
||||||
8. Create a Pull Request
|
8. **Create** a Pull Request
|
||||||
|
|
||||||
## Documentation
|
### Commit Convention
|
||||||
|
|
||||||
- [Architecture Diagram](docs/architecture_diagram.md) - System architecture overview
|
We use [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
- [Class Diagram](docs/class_diagram.md) - Class structure and relationships
|
|
||||||
- [Component Diagram](docs/component_diagram.md) - Component interactions
|
|
||||||
- [Sequence Diagram](docs/sequence_diagram.md) - Sequence flows
|
|
||||||
- [Flowchart](docs/flowchart.md) - System flowcharts
|
|
||||||
|
|
||||||
## License
|
- `feat:` New feature
|
||||||
|
- `fix:` Bug fix
|
||||||
|
- `docs:` Documentation
|
||||||
|
- `refactor:` Code refactoring
|
||||||
|
- `test:` Adding tests
|
||||||
|
- `chore:` Maintenance
|
||||||
|
|
||||||
MIT License - see [LICENSE](LICENSE) file for details.
|
## 📖 Documentation
|
||||||
|
|
||||||
## Acknowledgments
|
- [Architecture Diagram](docs/architecture_diagram.md) — System architecture overview
|
||||||
|
- [Class Diagram](docs/class_diagram.md) — Class structure and relationships
|
||||||
|
- [Component Diagram](docs/component_diagram.md) — Component interactions
|
||||||
|
- [Sequence Diagram](docs/sequence_diagram.md) — Sequence flows
|
||||||
|
- [Flowchart](docs/flowchart.md) — System flowcharts
|
||||||
|
|
||||||
- [DeepSeek](https://www.deepseek.com/) - LLM provider
|
## 📄 License
|
||||||
- [TMDB](https://www.themoviedb.org/) - Movie database
|
|
||||||
- [qBittorrent](https://www.qbittorrent.org/) - Torrent client
|
|
||||||
- [FastAPI](https://fastapi.tiangolo.com/) - Web framework
|
|
||||||
|
|
||||||
## Support
|
MIT License — see [LICENSE](LICENSE) file for details.
|
||||||
|
|
||||||
|
## 🙏 Acknowledgments
|
||||||
|
|
||||||
|
- [LibreChat](https://github.com/danny-avila/LibreChat) — Beautiful chat interface
|
||||||
|
- [Ollama](https://ollama.ai/) — Local LLM runtime
|
||||||
|
- [DeepSeek](https://www.deepseek.com/) — LLM provider
|
||||||
|
- [TMDB](https://www.themoviedb.org/) — Movie database
|
||||||
|
- [qBittorrent](https://www.qbittorrent.org/) — Torrent client
|
||||||
|
- [FastAPI](https://fastapi.tiangolo.com/) — Web framework
|
||||||
|
- [Pydantic](https://docs.pydantic.dev/) — Data validation
|
||||||
|
|
||||||
|
## 📬 Support
|
||||||
|
|
||||||
- 📧 Email: francois.hodiaumont@gmail.com
|
- 📧 Email: francois.hodiaumont@gmail.com
|
||||||
- 🐛 Issues: [GitHub Issues](https://github.com/your-username/agent-media/issues)
|
- 🐛 Issues: [GitHub Issues](https://github.com/francwa/alfred_media_organizer/issues)
|
||||||
- 💬 Discussions: [GitHub Discussions](https://github.com/your-username/agent-media/discussions)
|
- 💬 Discussions: [GitHub Discussions](https://github.com/francwa/alfred_media_organizer/discussions)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
Made with ❤️ by Francwa
|
<p align="center">Made with ❤️ by <a href="https://github.com/francwa">Francwa</a></p>
|
||||||
|
|||||||
@@ -1,14 +1,19 @@
|
|||||||
import secrets
|
"""
|
||||||
from pathlib import Path
|
Application settings using Pydantic Settings.
|
||||||
from typing import NamedTuple
|
|
||||||
|
Settings are loaded from .env file and validated against the schema
|
||||||
|
defined in pyproject.toml [tool.alfred.settings_schema].
|
||||||
|
"""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
import tomllib
|
|
||||||
from pydantic import Field, computed_field, field_validator
|
from pydantic import Field, computed_field, field_validator
|
||||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||||
|
|
||||||
|
from .settings_schema import SCHEMA
|
||||||
|
|
||||||
BASE_DIR = Path(__file__).resolve().parent.parent
|
BASE_DIR = Path(__file__).resolve().parent.parent
|
||||||
ENV_FILE_PATH = BASE_DIR / ".env"
|
ENV_FILE_PATH = BASE_DIR / ".env"
|
||||||
toml_path = BASE_DIR / "pyproject.toml"
|
|
||||||
|
|
||||||
|
|
||||||
class ConfigurationError(Exception):
|
class ConfigurationError(Exception):
|
||||||
@@ -17,154 +22,224 @@ class ConfigurationError(Exception):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class ProjectVersions(NamedTuple):
|
def _get_default_from_schema(setting_name: str):
|
||||||
"""
|
"""Get default value from schema for a setting."""
|
||||||
Immutable structure for project versions.
|
definition = SCHEMA.get(setting_name.upper())
|
||||||
Forces explicit naming and prevents accidental swaps.
|
return definition.default if definition else None
|
||||||
"""
|
|
||||||
|
|
||||||
librechat: str
|
|
||||||
rag: str
|
|
||||||
alfred: str
|
|
||||||
|
|
||||||
|
|
||||||
def get_versions_from_toml() -> ProjectVersions:
|
def _get_secret_factory(rule: str):
|
||||||
"""
|
"""Create a factory function for generating secrets."""
|
||||||
Reads versioning information from pyproject.toml.
|
|
||||||
Returns the default value if the file or key is missing.
|
|
||||||
"""
|
|
||||||
|
|
||||||
if not toml_path.exists():
|
def factory():
|
||||||
raise FileNotFoundError(f"pyproject.toml not found: {toml_path}")
|
from .settings_bootstrap import generate_secret # noqa: PLC0415
|
||||||
|
|
||||||
with open(toml_path, "rb") as f:
|
return generate_secret(rule)
|
||||||
data = tomllib.load(f)
|
|
||||||
try:
|
|
||||||
return ProjectVersions(
|
|
||||||
librechat=data["tool"]["alfred"]["settings"]["librechat_version"],
|
|
||||||
rag=data["tool"]["alfred"]["settings"]["rag_version"],
|
|
||||||
alfred=data["tool"]["poetry"]["version"],
|
|
||||||
)
|
|
||||||
except KeyError as e:
|
|
||||||
raise KeyError(f"Error: Missing key {e} in pyproject.toml") from e
|
|
||||||
|
|
||||||
|
return factory
|
||||||
# Load versions once
|
|
||||||
VERSIONS: ProjectVersions = get_versions_from_toml()
|
|
||||||
|
|
||||||
|
|
||||||
class Settings(BaseSettings):
|
class Settings(BaseSettings):
|
||||||
|
"""
|
||||||
|
Application settings.
|
||||||
|
|
||||||
|
Settings are loaded from .env and validated using the schema
|
||||||
|
defined in pyproject.toml.
|
||||||
|
"""
|
||||||
|
|
||||||
model_config = SettingsConfigDict(
|
model_config = SettingsConfigDict(
|
||||||
env_file=ENV_FILE_PATH,
|
env_file=ENV_FILE_PATH,
|
||||||
env_file_encoding="utf-8",
|
env_file_encoding="utf-8",
|
||||||
extra="ignore",
|
extra="ignore",
|
||||||
case_sensitive=False,
|
case_sensitive=False,
|
||||||
)
|
)
|
||||||
# --- GENERAL SETTINGS ---
|
|
||||||
host: str = "0.0.0.0"
|
# --- BUILD (from TOML) ---
|
||||||
port: int = 3080
|
alfred_version: str = Field(
|
||||||
|
default=_get_default_from_schema("ALFRED_VERSION"), description="Alfred version"
|
||||||
|
)
|
||||||
|
python_version: str = Field(
|
||||||
|
default=_get_default_from_schema("PYTHON_VERSION"), description="Python version"
|
||||||
|
)
|
||||||
|
python_version_short: str = Field(
|
||||||
|
default=_get_default_from_schema("PYTHON_VERSION_SHORT"),
|
||||||
|
description="Python version (short)",
|
||||||
|
)
|
||||||
|
runner: str = Field(
|
||||||
|
default=_get_default_from_schema("RUNNER"), description="Dependency manager"
|
||||||
|
)
|
||||||
|
image_name: str = Field(
|
||||||
|
default=_get_default_from_schema("IMAGE_NAME"), description="Docker image name"
|
||||||
|
)
|
||||||
|
service_name: str = Field(
|
||||||
|
default=_get_default_from_schema("SERVICE_NAME"),
|
||||||
|
description="Docker service name",
|
||||||
|
)
|
||||||
|
librechat_version: str = Field(
|
||||||
|
default=_get_default_from_schema("LIBRECHAT_VERSION"),
|
||||||
|
description="LibreChat version",
|
||||||
|
)
|
||||||
|
rag_version: str = Field(
|
||||||
|
default=_get_default_from_schema("RAG_VERSION"), description="RAG version"
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- APP SETTINGS ---
|
||||||
|
host: str = Field(
|
||||||
|
default=_get_default_from_schema("HOST"), description="Server host"
|
||||||
|
)
|
||||||
|
port: int = Field(
|
||||||
|
default=_get_default_from_schema("PORT"), description="Server port"
|
||||||
|
)
|
||||||
|
max_history_messages: int = Field(
|
||||||
|
default=_get_default_from_schema("MAX_HISTORY_MESSAGES"),
|
||||||
|
description="Maximum conversation history",
|
||||||
|
)
|
||||||
|
max_tool_iterations: int = Field(
|
||||||
|
default=_get_default_from_schema("MAX_TOOL_ITERATIONS"),
|
||||||
|
description="Maximum tool iterations",
|
||||||
|
)
|
||||||
|
request_timeout: int = Field(
|
||||||
|
default=_get_default_from_schema("REQUEST_TIMEOUT"),
|
||||||
|
description="Request timeout in seconds",
|
||||||
|
)
|
||||||
|
llm_temperature: float = Field(
|
||||||
|
default=_get_default_from_schema("LLM_TEMPERATURE"),
|
||||||
|
description="LLM temperature",
|
||||||
|
)
|
||||||
|
data_storage_dir: str = Field(
|
||||||
|
default=_get_default_from_schema("DATA_STORAGE_DIR"),
|
||||||
|
description="Data storage directory",
|
||||||
|
alias="DATA_STORAGE_DIR",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Legacy aliases
|
||||||
debug_logging: bool = False
|
debug_logging: bool = False
|
||||||
debug_console: bool = False
|
debug_console: bool = False
|
||||||
data_storage: str = "data"
|
data_storage: str = Field(default="data", exclude=True) # Deprecated
|
||||||
librechat_version: str = Field(VERSIONS.librechat, description="Librechat version")
|
|
||||||
rag_version: str = Field(VERSIONS.rag, description="RAG engine version")
|
|
||||||
alfred_version: str = Field(VERSIONS.alfred, description="Alfred version")
|
|
||||||
|
|
||||||
# --- CONTEXT SETTINGS ---
|
|
||||||
max_history_messages: int = 10
|
|
||||||
max_tool_iterations: int = 10
|
|
||||||
request_timeout: int = 30
|
|
||||||
|
|
||||||
# TODO: Finish
|
|
||||||
deepseek_base_url: str = "https://api.deepseek.com"
|
|
||||||
deepseek_model: str = "deepseek-chat"
|
|
||||||
|
|
||||||
# --- API KEYS ---
|
# --- API KEYS ---
|
||||||
anthropic_api_key: str | None = Field(None, description="Claude API key")
|
tmdb_api_key: str | None = Field(None, description="TMDB API key")
|
||||||
deepseek_api_key: str | None = Field(None, description="Deepseek API key")
|
deepseek_api_key: str | None = Field(None, description="DeepSeek API key")
|
||||||
google_api_key: str | None = Field(None, description="Gemini API key")
|
openai_api_key: str | None = Field(None, description="OpenAI API key")
|
||||||
|
anthropic_api_key: str | None = Field(None, description="Anthropic API key")
|
||||||
|
google_api_key: str | None = Field(None, description="Google API key")
|
||||||
kimi_api_key: str | None = Field(None, description="Kimi API key")
|
kimi_api_key: str | None = Field(None, description="Kimi API key")
|
||||||
openai_api_key: str | None = Field(None, description="ChatGPT API key")
|
|
||||||
|
|
||||||
# --- SECURITY KEYS ---
|
# --- SECURITY SECRETS ---
|
||||||
# Generated automatically if not in .env to ensure "Secure by Default"
|
jwt_secret: str = Field(
|
||||||
jwt_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
|
default_factory=_get_secret_factory("32:b64"), description="JWT signing secret"
|
||||||
jwt_refresh_secret: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
|
)
|
||||||
|
jwt_refresh_secret: str = Field(
|
||||||
# We keep these for encryption of keys in MongoDB (AES-256 Hex format)
|
default_factory=_get_secret_factory("32:b64"), description="JWT refresh secret"
|
||||||
creds_key: str = Field(default_factory=lambda: secrets.token_hex(32))
|
)
|
||||||
creds_iv: str = Field(default_factory=lambda: secrets.token_hex(16))
|
creds_key: str = Field(
|
||||||
|
default_factory=_get_secret_factory("32:hex"),
|
||||||
# --- SERVICES ---
|
description="Credentials encryption key",
|
||||||
qbittorrent_url: str = "http://qbittorrent:16140"
|
)
|
||||||
qbittorrent_username: str = "admin"
|
creds_iv: str = Field(
|
||||||
qbittorrent_password: str = Field(default_factory=lambda: secrets.token_urlsafe(16))
|
default_factory=_get_secret_factory("16:hex"),
|
||||||
|
description="Credentials encryption IV",
|
||||||
mongo_host: str = "mongodb"
|
)
|
||||||
mongo_user: str = "alfred"
|
meili_master_key: str = Field(
|
||||||
mongo_password: str = Field(
|
default_factory=_get_secret_factory("32:b64"),
|
||||||
default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
|
description="Meilisearch master key",
|
||||||
|
repr=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- DATABASE ---
|
||||||
|
mongo_host: str = Field(
|
||||||
|
default=_get_default_from_schema("MONGO_HOST"), description="MongoDB host"
|
||||||
|
)
|
||||||
|
mongo_port: int = Field(
|
||||||
|
default=_get_default_from_schema("MONGO_PORT"), description="MongoDB port"
|
||||||
|
)
|
||||||
|
mongo_user: str = Field(
|
||||||
|
default=_get_default_from_schema("MONGO_USER"), description="MongoDB user"
|
||||||
|
)
|
||||||
|
mongo_password: str = Field(
|
||||||
|
default_factory=_get_secret_factory("16:hex"),
|
||||||
|
description="MongoDB password",
|
||||||
|
repr=False,
|
||||||
|
exclude=True,
|
||||||
|
)
|
||||||
|
mongo_db_name: str = Field(
|
||||||
|
default=_get_default_from_schema("MONGO_DB_NAME"),
|
||||||
|
description="MongoDB database name",
|
||||||
)
|
)
|
||||||
mongo_port: int = 27017
|
|
||||||
mongo_db_name: str = "alfred"
|
|
||||||
|
|
||||||
@computed_field(repr=False)
|
@computed_field(repr=False)
|
||||||
@property
|
@property
|
||||||
def mongo_uri(self) -> str:
|
def mongo_uri(self) -> str:
|
||||||
|
"""MongoDB connection URI."""
|
||||||
return (
|
return (
|
||||||
f"mongodb://{self.mongo_user}:{self.mongo_password}"
|
f"mongodb://{self.mongo_user}:{self.mongo_password}"
|
||||||
f"@{self.mongo_host}:{self.mongo_port}/{self.mongo_db_name}"
|
f"@{self.mongo_host}:{self.mongo_port}/{self.mongo_db_name}"
|
||||||
f"?authSource=admin"
|
f"?authSource=admin"
|
||||||
)
|
)
|
||||||
|
|
||||||
postgres_host: str = "vectordb"
|
postgres_host: str = Field(
|
||||||
postgres_user: str = "alfred"
|
default=_get_default_from_schema("POSTGRES_HOST"), description="PostgreSQL host"
|
||||||
postgres_password: str = Field(
|
)
|
||||||
default_factory=lambda: secrets.token_urlsafe(24), repr=False, exclude=True
|
postgres_port: int = Field(
|
||||||
|
default=_get_default_from_schema("POSTGRES_PORT"), description="PostgreSQL port"
|
||||||
|
)
|
||||||
|
postgres_user: str = Field(
|
||||||
|
default=_get_default_from_schema("POSTGRES_USER"), description="PostgreSQL user"
|
||||||
|
)
|
||||||
|
postgres_password: str = Field(
|
||||||
|
default_factory=_get_secret_factory("16:hex"),
|
||||||
|
description="PostgreSQL password",
|
||||||
|
repr=False,
|
||||||
|
exclude=True,
|
||||||
|
)
|
||||||
|
postgres_db_name: str = Field(
|
||||||
|
default=_get_default_from_schema("POSTGRES_DB_NAME"),
|
||||||
|
description="PostgreSQL database name",
|
||||||
)
|
)
|
||||||
postgres_port: int = 5432
|
|
||||||
postgres_db_name: str = "alfred"
|
|
||||||
|
|
||||||
@computed_field(repr=False)
|
@computed_field(repr=False)
|
||||||
@property
|
@property
|
||||||
def postgres_uri(self) -> str:
|
def postgres_uri(self) -> str:
|
||||||
|
"""PostgreSQL connection URI."""
|
||||||
return (
|
return (
|
||||||
f"postgresql://{self.postgres_user}:{self.postgres_password}"
|
f"postgresql://{self.postgres_user}:{self.postgres_password}"
|
||||||
f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db_name}"
|
f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db_name}"
|
||||||
)
|
)
|
||||||
|
|
||||||
tmdb_api_key: str | None = Field(None, description="The Movie Database API key")
|
# --- EXTERNAL SERVICES ---
|
||||||
tmdb_base_url: str = "https://api.themoviedb.org/3"
|
tmdb_base_url: str = "https://api.themoviedb.org/3"
|
||||||
|
|
||||||
# --- LLM PICKER & CONFIG ---
|
qbittorrent_url: str = "http://qbittorrent:16140"
|
||||||
# Providers: 'local', 'deepseek', ...
|
qbittorrent_username: str = "admin"
|
||||||
|
qbittorrent_password: str = Field(
|
||||||
|
default_factory=_get_secret_factory("16:hex"),
|
||||||
|
description="qBittorrent password",
|
||||||
|
)
|
||||||
|
|
||||||
|
# --- LLM CONFIG ---
|
||||||
default_llm_provider: str = "local"
|
default_llm_provider: str = "local"
|
||||||
ollama_base_url: str = "http://ollama:11434"
|
ollama_base_url: str = "http://ollama:11434"
|
||||||
# Models: ...
|
|
||||||
ollama_model: str = "llama3.3:latest"
|
ollama_model: str = "llama3.3:latest"
|
||||||
llm_temperature: float = 0.2
|
|
||||||
|
deepseek_base_url: str = "https://api.deepseek.com"
|
||||||
|
deepseek_model: str = "deepseek-chat"
|
||||||
|
|
||||||
# --- RAG ENGINE ---
|
# --- RAG ENGINE ---
|
||||||
rag_enabled: bool = True # TODO: Handle False
|
rag_enabled: bool = True
|
||||||
rag_api_url: str = "http://rag_api:8000"
|
rag_api_url: str = "http://rag_api:8000"
|
||||||
embeddings_provider: str = "ollama"
|
embeddings_provider: str = "ollama"
|
||||||
# Models: ...
|
|
||||||
embeddings_model: str = "nomic-embed-text"
|
embeddings_model: str = "nomic-embed-text"
|
||||||
|
|
||||||
# --- MEILISEARCH ---
|
# --- MEILISEARCH ---
|
||||||
meili_enabled: bool = Field(True, description="Enable meili")
|
meili_enabled: bool = True
|
||||||
meili_no_analytics: bool = True
|
meili_no_analytics: bool = True
|
||||||
meili_host: str = "http://meilisearch:7700"
|
meili_host: str = "http://meilisearch:7700"
|
||||||
meili_master_key: str = Field(
|
|
||||||
default_factory=lambda: secrets.token_urlsafe(32),
|
|
||||||
description="Master key for Meilisearch",
|
|
||||||
repr=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
# --- VALIDATORS ---
|
# --- VALIDATORS (from schema) ---
|
||||||
@field_validator("llm_temperature")
|
@field_validator("llm_temperature")
|
||||||
@classmethod
|
@classmethod
|
||||||
def validate_temperature(cls, v: float) -> float:
|
def validate_temperature(cls, v: float) -> float:
|
||||||
|
"""Validate LLM temperature is in valid range."""
|
||||||
if not 0.0 <= v <= 2.0:
|
if not 0.0 <= v <= 2.0:
|
||||||
raise ConfigurationError(
|
raise ConfigurationError(
|
||||||
f"Temperature must be between 0.0 and 2.0, got {v}"
|
f"Temperature must be between 0.0 and 2.0, got {v}"
|
||||||
@@ -174,15 +249,17 @@ class Settings(BaseSettings):
|
|||||||
@field_validator("max_tool_iterations")
@classmethod
def validate_max_iterations(cls, v: int) -> int:
    """Validate max tool iterations is in valid range.

    Args:
        v: Candidate value for ``max_tool_iterations``.

    Returns:
        The validated value, unchanged.

    Raises:
        ConfigurationError: If ``v`` is outside [1, 20].
    """
    if not 1 <= v <= 20:
        # Message must match the actual bound (20) — an earlier version
        # claimed "between 1 and 50" while checking against 20.
        raise ConfigurationError(
            f"max_tool_iterations must be between 1 and 20, got {v}"
        )
    return v
||||||
|
|
||||||
@field_validator("request_timeout")
|
@field_validator("request_timeout")
|
||||||
@classmethod
|
@classmethod
|
||||||
def validate_timeout(cls, v: int) -> int:
|
def validate_timeout(cls, v: int) -> int:
|
||||||
|
"""Validate request timeout is in valid range."""
|
||||||
if not 1 <= v <= 300:
|
if not 1 <= v <= 300:
|
||||||
raise ConfigurationError(
|
raise ConfigurationError(
|
||||||
f"request_timeout must be between 1 and 300 seconds, got {v}"
|
f"request_timeout must be between 1 and 300 seconds, got {v}"
|
||||||
@@ -192,18 +269,24 @@ class Settings(BaseSettings):
|
|||||||
@field_validator("deepseek_base_url", "tmdb_base_url")
@classmethod
def validate_url(cls, v: str, info) -> str:
    """Ensure the configured base URL carries an http(s) scheme."""
    has_scheme = v.startswith(("http://", "https://"))
    if has_scheme:
        return v
    raise ConfigurationError(f"Invalid {info.field_name}: must be a valid URL")
||||||
|
|
||||||
def is_tmdb_configured(self) -> bool:
    """Return True when a TMDB API key has been set (non-empty)."""
    return True if self.tmdb_api_key else False
||||||
|
|
||||||
def is_deepseek_configured(self) -> bool:
    """Return True when a DeepSeek API key has been set (non-empty)."""
    return True if self.deepseek_api_key else False
||||||
|
|
||||||
def dump_safe(self) -> dict:
    """Serialize all settings to a plain dict (None values included).

    NOTE(review): despite the name, no redaction happens here —
    ``model_dump(exclude_none=False)`` includes every field, secrets
    included. Confirm whether sensitive fields should be excluded.
    """
    return self.model_dump(exclude_none=False)
||||||
|
|
||||||
|
|
||||||
|
# Global settings instance
|
||||||
settings = Settings()
|
settings = Settings()
|
||||||
|
|||||||
417
alfred/settings_bootstrap.py
Normal file
417
alfred/settings_bootstrap.py
Normal file
@@ -0,0 +1,417 @@
|
|||||||
|
"""
|
||||||
|
Settings bootstrap - Generate and validate configuration files.
|
||||||
|
|
||||||
|
This module uses the settings schema to generate .env and .env.make files
|
||||||
|
with proper validation and secret generation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
import secrets
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import tomllib
|
||||||
|
|
||||||
|
from .settings_schema import (
|
||||||
|
SCHEMA,
|
||||||
|
SettingDefinition,
|
||||||
|
SettingSource,
|
||||||
|
SettingsSchema,
|
||||||
|
SettingType,
|
||||||
|
validate_value,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ConfigSource:
|
||||||
|
"""Configuration source paths."""
|
||||||
|
|
||||||
|
base_dir: Path
|
||||||
|
toml_path: Path
|
||||||
|
env_path: Path
|
||||||
|
env_example_path: Path
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_base_dir(cls, base_dir: Path | None = None) -> "ConfigSource":
|
||||||
|
"""Create ConfigSource from base directory."""
|
||||||
|
if base_dir is None:
|
||||||
|
# Don't import settings.py to avoid Pydantic dependency in pre-commit
|
||||||
|
base_dir = Path(__file__).resolve().parent.parent
|
||||||
|
|
||||||
|
return cls(
|
||||||
|
base_dir=base_dir,
|
||||||
|
toml_path=base_dir / "pyproject.toml",
|
||||||
|
env_path=base_dir / ".env",
|
||||||
|
env_example_path=base_dir / ".env.example",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_python_version(version_string: str) -> tuple[str, str]:
    """Split a poetry-style version constraint into (full, major.minor).

    Examples:
        "==3.14.2" -> ("3.14.2", "3.14")
        "^3.14.2"  -> ("3.14.2", "3.14")

    Raises:
        ValueError: when fewer than two version components are present.
    """
    # Strip any leading constraint operators (==, ^, ~, >, <).
    cleaned = version_string.strip().lstrip("=^~><")
    components = cleaned.split(".")

    if len(components) < 2:
        raise ValueError(f"Invalid Python version format: {version_string}")

    return cleaned, f"{components[0]}.{components[1]}"
||||||
|
|
||||||
|
|
||||||
|
def generate_secret(rule: str) -> str:
    """Create a cryptographically secure token.

    Args:
        rule: Format "size:tech" — tech is "b64" (urlsafe base64) or "hex".

    Raises:
        ValueError: on a malformed rule or unknown technique.
    """
    pieces = rule.split(":")
    if len(pieces) != 2:
        raise ValueError(f"Invalid security rule format: {rule}")

    size = int(pieces[0])
    tech = pieces[1]

    if tech == "b64":
        return secrets.token_urlsafe(size)
    if tech == "hex":
        return secrets.token_hex(size)
    raise ValueError(f"Invalid security format: {tech}")
||||||
|
|
||||||
|
|
||||||
|
def get_nested_value(data: dict, path: str) -> Any:
    """Walk a nested dict following dot-separated keys.

    Example:
        get_nested_value({"a": {"b": {"c": 1}}}, "a.b.c") -> 1

    Raises:
        KeyError: when a segment is missing or an intermediate value
            is not a dict.
    """
    current = data
    for segment in path.split("."):
        if not isinstance(current, dict):
            raise KeyError(f"Cannot access {segment} in non-dict value")
        current = current[segment]
    return current
||||||
|
|
||||||
|
|
||||||
|
class SettingsBootstrap:
|
||||||
|
"""
|
||||||
|
Bootstrap settings from schema.
|
||||||
|
|
||||||
|
This class orchestrates the entire bootstrap process:
|
||||||
|
1. Load schema
|
||||||
|
2. Load sources (TOML, existing .env)
|
||||||
|
3. Resolve all settings
|
||||||
|
4. Validate
|
||||||
|
5. Write .env and .env.make
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, source: ConfigSource, schema: SettingsSchema | None = None):
|
||||||
|
"""
|
||||||
|
Initialize bootstrap.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
source: Configuration source paths
|
||||||
|
schema: Settings schema (uses global SCHEMA if None)
|
||||||
|
"""
|
||||||
|
self.source = source
|
||||||
|
self.schema = schema or SCHEMA
|
||||||
|
self.toml_data: dict | None = None
|
||||||
|
self.existing_env: dict[str, str] = {}
|
||||||
|
self.resolved_settings: dict[str, Any] = {}
|
||||||
|
|
||||||
|
def bootstrap(self) -> None:
|
||||||
|
"""
|
||||||
|
Run complete bootstrap process.
|
||||||
|
|
||||||
|
This is the main entry point that orchestrates everything.
|
||||||
|
"""
|
||||||
|
print("<EFBFBD><EFBFBD><EFBFBD><EFBFBD> Starting settings bootstrap...")
|
||||||
|
|
||||||
|
# 1. Load sources
|
||||||
|
self._load_sources()
|
||||||
|
|
||||||
|
# 2. Resolve all settings
|
||||||
|
self._resolve_settings()
|
||||||
|
|
||||||
|
# 3. Validate
|
||||||
|
self._validate_settings()
|
||||||
|
|
||||||
|
# 4. Write files
|
||||||
|
self._write_env()
|
||||||
|
self._write_env_make()
|
||||||
|
|
||||||
|
print("✅ Bootstrap complete!")
|
||||||
|
print("\n⚠️ Reminder: Add your API keys to .env if needed")
|
||||||
|
|
||||||
|
def _load_sources(self) -> None:
|
||||||
|
"""Load TOML and existing .env."""
|
||||||
|
# Load TOML
|
||||||
|
if not self.source.toml_path.exists():
|
||||||
|
raise FileNotFoundError(
|
||||||
|
f"pyproject.toml not found: {self.source.toml_path}"
|
||||||
|
)
|
||||||
|
|
||||||
|
with open(self.source.toml_path, "rb") as f:
|
||||||
|
self.toml_data = tomllib.load(f)
|
||||||
|
|
||||||
|
# Load existing .env
|
||||||
|
if self.source.env_path.exists():
|
||||||
|
print("🔄 Reading existing .env...")
|
||||||
|
with open(self.source.env_path) as f:
|
||||||
|
for line in f:
|
||||||
|
if "=" in line and not line.strip().startswith("#"):
|
||||||
|
key, value = line.split("=", 1)
|
||||||
|
self.existing_env[key.strip()] = value.strip()
|
||||||
|
print(f" Found {len(self.existing_env)} existing keys")
|
||||||
|
else:
|
||||||
|
print("🔧 Creating new .env file...")
|
||||||
|
|
||||||
|
def _resolve_settings(self) -> None:
|
||||||
|
"""Resolve all settings from their sources."""
|
||||||
|
print("📋 Resolving settings...")
|
||||||
|
|
||||||
|
# First pass: resolve non-computed settings
|
||||||
|
for definition in self.schema:
|
||||||
|
if definition.source != SettingSource.COMPUTED:
|
||||||
|
self.resolved_settings[definition.name] = self._resolve_setting(
|
||||||
|
definition
|
||||||
|
)
|
||||||
|
|
||||||
|
# Second pass: resolve computed settings (they may depend on others)
|
||||||
|
for definition in self.schema:
|
||||||
|
if definition.source == SettingSource.COMPUTED:
|
||||||
|
self.resolved_settings[definition.name] = self._resolve_setting(
|
||||||
|
definition
|
||||||
|
)
|
||||||
|
|
||||||
|
def _resolve_setting(self, definition: SettingDefinition) -> Any:
|
||||||
|
"""Resolve a single setting value."""
|
||||||
|
match definition.source:
|
||||||
|
case SettingSource.TOML:
|
||||||
|
return self._resolve_from_toml(definition)
|
||||||
|
case SettingSource.ENV:
|
||||||
|
return self._resolve_from_env(definition)
|
||||||
|
case SettingSource.GENERATED:
|
||||||
|
return self._resolve_generated(definition)
|
||||||
|
case SettingSource.COMPUTED:
|
||||||
|
return self._resolve_computed(definition)
|
||||||
|
|
||||||
|
def _resolve_from_toml(self, definition: SettingDefinition) -> Any:
|
||||||
|
"""Resolve setting from TOML."""
|
||||||
|
if not definition.toml_path:
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name}: toml_path is required for TOML source"
|
||||||
|
)
|
||||||
|
|
||||||
|
value = get_nested_value(self.toml_data, definition.toml_path)
|
||||||
|
|
||||||
|
# Apply transform if specified
|
||||||
|
if definition.transform:
|
||||||
|
match definition.transform:
|
||||||
|
case "extract_python_version_full":
|
||||||
|
value, _ = extract_python_version(value)
|
||||||
|
case "extract_python_version_short":
|
||||||
|
_, value = extract_python_version(value)
|
||||||
|
case _:
|
||||||
|
raise ValueError(f"Unknown transform: {definition.transform}")
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
def _resolve_from_env(self, definition: SettingDefinition) -> Any:
|
||||||
|
"""Resolve setting from .env."""
|
||||||
|
# Check existing .env first
|
||||||
|
if definition.name in self.existing_env:
|
||||||
|
value = self.existing_env[definition.name]
|
||||||
|
elif definition.default is not None:
|
||||||
|
value = definition.default
|
||||||
|
elif not definition.required:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
raise ValueError(f"{definition.name} is required but not found in .env")
|
||||||
|
|
||||||
|
# Convert type (only if value is a string from .env)
|
||||||
|
match definition.type:
|
||||||
|
case SettingType.INTEGER:
|
||||||
|
return int(value) if not isinstance(value, int) else value
|
||||||
|
case SettingType.FLOAT:
|
||||||
|
return float(value) if not isinstance(value, float) else value
|
||||||
|
case SettingType.BOOLEAN:
|
||||||
|
if isinstance(value, bool):
|
||||||
|
return value
|
||||||
|
return str(value).lower() in ("true", "1", "yes")
|
||||||
|
case _:
|
||||||
|
return str(value) if not isinstance(value, str) else value
|
||||||
|
|
||||||
|
def _resolve_generated(self, definition: SettingDefinition) -> str:
|
||||||
|
"""Resolve generated secret."""
|
||||||
|
# Preserve existing secret
|
||||||
|
if definition.name in self.existing_env:
|
||||||
|
print(f" ↻ Kept existing {definition.name}")
|
||||||
|
return self.existing_env[definition.name]
|
||||||
|
|
||||||
|
# Generate new secret
|
||||||
|
if not definition.secret_rule:
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name}: secret_rule is required for GENERATED source"
|
||||||
|
)
|
||||||
|
|
||||||
|
secret = generate_secret(definition.secret_rule)
|
||||||
|
print(f" + Generated {definition.name} ({definition.secret_rule})")
|
||||||
|
return secret
|
||||||
|
|
||||||
|
def _resolve_computed(self, definition: SettingDefinition) -> str:
|
||||||
|
"""Resolve computed setting."""
|
||||||
|
if not definition.compute_template:
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name}: compute_template is required for COMPUTED source"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build context from dependencies
|
||||||
|
context = {}
|
||||||
|
if definition.compute_from:
|
||||||
|
for dep in definition.compute_from:
|
||||||
|
if dep not in self.resolved_settings:
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name}: dependency {dep} not resolved yet"
|
||||||
|
)
|
||||||
|
context[dep] = self.resolved_settings[dep]
|
||||||
|
|
||||||
|
# Format template
|
||||||
|
return definition.compute_template.format(**context)
|
||||||
|
|
||||||
|
def _validate_settings(self) -> None:
|
||||||
|
"""Validate all resolved settings."""
|
||||||
|
print("✓ Validating settings...")
|
||||||
|
|
||||||
|
errors = []
|
||||||
|
for definition in self.schema:
|
||||||
|
value = self.resolved_settings.get(definition.name)
|
||||||
|
try:
|
||||||
|
validate_value(definition, value)
|
||||||
|
except ValueError as e:
|
||||||
|
errors.append(str(e))
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
raise ValueError(
|
||||||
|
"Validation errors:\n" + "\n".join(f" - {e}" for e in errors)
|
||||||
|
)
|
||||||
|
|
||||||
|
def _write_env(self) -> None:
|
||||||
|
"""
|
||||||
|
Write .env file using .env.example as template.
|
||||||
|
|
||||||
|
This preserves the structure, comments, and formatting of .env.example
|
||||||
|
while updating only the values of variables defined in the schema.
|
||||||
|
Custom variables from existing .env are appended at the end.
|
||||||
|
"""
|
||||||
|
print("📝 Writing .env...")
|
||||||
|
|
||||||
|
# Check if .env.example exists
|
||||||
|
if not self.source.env_example_path.exists():
|
||||||
|
raise FileNotFoundError(
|
||||||
|
f".env.example not found: {self.source.env_example_path}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Read .env.example as template
|
||||||
|
with open(self.source.env_example_path, encoding="utf-8") as f:
|
||||||
|
template_lines = f.readlines()
|
||||||
|
|
||||||
|
# Track which keys we've processed from .env.example
|
||||||
|
processed_keys = set()
|
||||||
|
|
||||||
|
# Process template line by line
|
||||||
|
output_lines = []
|
||||||
|
for line in template_lines:
|
||||||
|
stripped = line.strip()
|
||||||
|
|
||||||
|
# Keep comments and empty lines as-is
|
||||||
|
if not stripped or stripped.startswith("#"):
|
||||||
|
output_lines.append(line)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if line contains a variable assignment
|
||||||
|
if "=" in line:
|
||||||
|
key, _ = line.split("=", 1)
|
||||||
|
key = key.strip()
|
||||||
|
processed_keys.add(key)
|
||||||
|
|
||||||
|
# Check if this variable is in our schema
|
||||||
|
definition = self.schema.get(key)
|
||||||
|
|
||||||
|
if definition:
|
||||||
|
# Update with resolved value (including computed settings)
|
||||||
|
value = self.resolved_settings.get(key, "")
|
||||||
|
|
||||||
|
# Convert Python booleans to lowercase for .env compatibility
|
||||||
|
if isinstance(value, bool):
|
||||||
|
value = "true" if value else "false"
|
||||||
|
|
||||||
|
output_lines.append(f"{key}={value}\n")
|
||||||
|
# Variable not in schema
|
||||||
|
# If it exists in current .env, use that value, otherwise keep template
|
||||||
|
elif key in self.existing_env:
|
||||||
|
output_lines.append(f"{key}={self.existing_env[key]}\n")
|
||||||
|
else:
|
||||||
|
output_lines.append(line)
|
||||||
|
else:
|
||||||
|
# Keep any other lines as-is
|
||||||
|
output_lines.append(line)
|
||||||
|
|
||||||
|
# Append custom variables from existing .env that aren't in .env.example
|
||||||
|
custom_vars = {
|
||||||
|
k: v for k, v in self.existing_env.items() if k not in processed_keys
|
||||||
|
}
|
||||||
|
if custom_vars:
|
||||||
|
output_lines.append("\n# --- CUSTOM VARIABLES ---\n")
|
||||||
|
output_lines.append("# Variables added manually (not in .env.example)\n")
|
||||||
|
for key, value in sorted(custom_vars.items()):
|
||||||
|
output_lines.append(f"{key}={value}\n")
|
||||||
|
|
||||||
|
# Write updated .env
|
||||||
|
with open(self.source.env_path, "w", encoding="utf-8") as f:
|
||||||
|
f.writelines(output_lines)
|
||||||
|
|
||||||
|
print(f"✅ {self.source.env_path.name} written (preserving template structure)")
|
||||||
|
if custom_vars:
|
||||||
|
print(f" ℹ️ Preserved {len(custom_vars)} custom variable(s)")
|
||||||
|
|
||||||
|
def _write_env_make(self) -> None:
|
||||||
|
"""Write .env.make for Makefile."""
|
||||||
|
print("📝 Writing .env.make...")
|
||||||
|
|
||||||
|
lines = ["# Auto-generated from pyproject.toml\n"]
|
||||||
|
|
||||||
|
for definition in self.schema.get_for_env_make():
|
||||||
|
value = self.resolved_settings.get(definition.name, "")
|
||||||
|
lines.append(f"export {definition.name}={value}\n")
|
||||||
|
|
||||||
|
env_make_path = self.source.base_dir / ".env.make"
|
||||||
|
with open(env_make_path, "w", encoding="utf-8") as f:
|
||||||
|
f.writelines(lines)
|
||||||
|
|
||||||
|
print("✅ .env.make written")
|
||||||
|
|
||||||
|
|
||||||
|
def bootstrap_env(source: ConfigSource) -> None:
    """
    Bootstrap environment configuration.

    This is the main entry point for bootstrapping.

    Args:
        source: Configuration source paths
    """
    SettingsBootstrap(source).bootstrap()
|
||||||
291
alfred/settings_schema.py
Normal file
291
alfred/settings_schema.py
Normal file
@@ -0,0 +1,291 @@
|
|||||||
|
"""
|
||||||
|
Settings schema parser and definitions.
|
||||||
|
|
||||||
|
This module loads the settings schema from pyproject.toml and provides
|
||||||
|
type-safe access to setting definitions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import tomllib
|
||||||
|
|
||||||
|
BASE_DIR = Path(__file__).resolve().parent.parent
|
||||||
|
|
||||||
|
|
||||||
|
class SettingType(Enum):
    """Type of setting value.

    SECRET behaves like a string but marks the value as sensitive;
    COMPUTED marks values derived from other settings.
    """

    STRING = "string"
    INTEGER = "integer"
    FLOAT = "float"
    BOOLEAN = "boolean"
    SECRET = "secret"  # string, treated as sensitive
    COMPUTED = "computed"  # derived from other settings
||||||
|
|
||||||
|
|
||||||
|
class SettingSource(Enum):
    """Where a setting's value comes from."""

    ENV = "env"  # From .env file
    TOML = "toml"  # From pyproject.toml
    GENERATED = "generated"  # Auto-generated (secrets)
    COMPUTED = "computed"  # Computed from other settings
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class SettingDefinition:
    """
    Complete definition of a setting.

    This is the parsed representation of a setting from pyproject.toml
    (or settings.toml); see SettingsSchema._parse_schema for the mapping.
    """

    name: str
    type: SettingType
    source: SettingSource
    description: str = ""
    category: str = "general"
    required: bool = True
    default: str | int | float | bool | None = None

    # For TOML source
    toml_path: str | None = None  # dot-separated path into the TOML document
    transform: str | None = None  # Transform function name

    # For SECRET source
    secret_rule: str | None = None  # e.g., "32:b64", "16:hex"

    # For COMPUTED source
    compute_from: list[str] | None = None  # Dependencies
    compute_template: str | None = None  # Template string

    # For validation
    validator: str | None = None  # e.g., "range:0.0:2.0"

    # For export
    export_to_env_make: bool = False  # mirror into .env.make for the Makefile
|
||||||
|
|
||||||
|
|
||||||
|
class SettingsSchema:
    """
    Settings schema loaded from pyproject.toml.

    Provides access to all setting definitions and utilities for
    working with the schema.
    """

    def __init__(self, schema_dict: dict[str, dict[str, Any]]):
        """
        Initialize schema from parsed TOML.

        Args:
            schema_dict: Dictionary from [tool.alfred.settings_schema]
        """
        # name -> parsed definition, in insertion (file) order
        self.definitions: dict[str, SettingDefinition] = {}
        self._parse_schema(schema_dict)

    def _parse_schema(self, schema_dict: dict[str, dict[str, Any]]) -> None:
        """Parse schema dictionary into SettingDefinition objects."""
        for name, config in schema_dict.items():
            # Skip non-setting entries (scalar keys in the same table)
            if not isinstance(config, dict):
                continue

            # Parse type (defaults to "string")
            type_str = config.get("type", "string")
            setting_type = SettingType(type_str)

            # Parse source (defaults to "env")
            source_str = config.get("source", "env")
            source = SettingSource(source_str)

            # Coerce the declared default value to the declared type
            default = config.get("default")
            if default is not None:
                match setting_type:
                    case SettingType.INTEGER:
                        default = int(default)
                    case SettingType.FLOAT:
                        default = float(default)
                    case SettingType.BOOLEAN:
                        default = bool(default)
                    case _:
                        # NOTE(review): a falsy default ("" or 0) collapses to
                        # None here — confirm that is intended.
                        default = str(default) if default else None

            # Create definition
            definition = SettingDefinition(
                name=name,
                type=setting_type,
                source=source,
                description=config.get("description", ""),
                category=config.get("category", "general"),
                required=config.get("required", True),
                default=default,
                toml_path=config.get("toml_path"),
                transform=config.get("transform"),
                secret_rule=config.get("secret_rule"),
                compute_from=config.get("compute_from"),
                compute_template=config.get("compute_template"),
                validator=config.get("validator"),
                export_to_env_make=config.get("export_to_env_make", False),
            )

            self.definitions[name] = definition

    def get(self, name: str) -> SettingDefinition | None:
        """Get setting definition by name (None if absent)."""
        return self.definitions.get(name)

    def get_by_category(self, category: str) -> list[SettingDefinition]:
        """Get all settings in a category."""
        return [d for d in self.definitions.values() if d.category == category]

    def get_by_source(self, source: SettingSource) -> list[SettingDefinition]:
        """Get all settings from a specific source."""
        return [d for d in self.definitions.values() if d.source == source]

    def get_required(self) -> list[SettingDefinition]:
        """Get all required settings."""
        return [d for d in self.definitions.values() if d.required]

    def get_for_env_make(self) -> list[SettingDefinition]:
        """Get all settings that should be exported to .env.make."""
        return [d for d in self.definitions.values() if d.export_to_env_make]

    def __iter__(self):
        """Iterate over all setting definitions."""
        return iter(self.definitions.values())

    def __len__(self):
        """Number of settings in schema."""
        return len(self.definitions)
|
||||||
|
|
||||||
|
|
||||||
|
def load_schema(base_dir: Path | None = None) -> SettingsSchema:
    """
    Load settings schema from settings.toml or pyproject.toml.

    Priority:
        1. settings.toml (if exists)
        2. pyproject.toml [tool.alfred.settings_schema]

    Args:
        base_dir: Base directory containing config files

    Returns:
        SettingsSchema instance

    Raises:
        FileNotFoundError: If neither file exists
        KeyError: If settings_schema section is missing
    """
    if base_dir is None:
        base_dir = BASE_DIR

    # The two candidates carried duplicated load/extract logic before;
    # deduplicated into one loop. Note: if settings.toml exists but lacks
    # the section, we raise rather than fall back (same as before).
    for candidate in ("settings.toml", "pyproject.toml"):
        path = base_dir / candidate
        if not path.exists():
            continue

        with open(path, "rb") as f:
            data = tomllib.load(f)

        try:
            schema_dict = data["tool"]["alfred"]["settings_schema"]
        except KeyError as e:
            raise KeyError(
                f"Missing [tool.alfred.settings_schema] section in {candidate}"
            ) from e

        return SettingsSchema(schema_dict)

    raise FileNotFoundError(
        f"Neither settings.toml nor pyproject.toml found in {base_dir}"
    )
|
||||||
|
|
||||||
|
|
||||||
|
def validate_value(definition: SettingDefinition, value: Any) -> bool:
|
||||||
|
"""
|
||||||
|
Validate a value against a setting definition.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
definition: Setting definition with validation rules
|
||||||
|
value: Value to validate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if valid
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If validation fails
|
||||||
|
"""
|
||||||
|
if value is None:
|
||||||
|
if definition.required:
|
||||||
|
raise ValueError(f"{definition.name} is required but got None")
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Type validation
|
||||||
|
match definition.type:
|
||||||
|
case SettingType.INTEGER:
|
||||||
|
if not isinstance(value, int):
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name} must be integer, got {type(value).__name__}"
|
||||||
|
)
|
||||||
|
case SettingType.FLOAT:
|
||||||
|
if not isinstance(value, (int, float)):
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name} must be float, got {type(value).__name__}"
|
||||||
|
)
|
||||||
|
case SettingType.BOOLEAN:
|
||||||
|
if not isinstance(value, bool):
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name} must be boolean, got {type(value).__name__}"
|
||||||
|
)
|
||||||
|
case SettingType.STRING | SettingType.SECRET:
|
||||||
|
if not isinstance(value, str):
|
||||||
|
raise ValueError(
|
||||||
|
f"{definition.name} must be string, got {type(value).__name__}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Custom validator
|
||||||
|
if definition.validator:
|
||||||
|
_apply_validator(definition.name, definition.validator, value)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _apply_validator(name: str, validator: str, value: Any) -> None:
|
||||||
|
"""Apply custom validator to value."""
|
||||||
|
if validator.startswith("range:"):
|
||||||
|
# Parse range validator: "range:min:max"
|
||||||
|
parts = validator.split(":")
|
||||||
|
if len(parts) != 3:
|
||||||
|
raise ValueError(f"Invalid range validator format: {validator}")
|
||||||
|
|
||||||
|
min_val = float(parts[1])
|
||||||
|
max_val = float(parts[2])
|
||||||
|
|
||||||
|
if not (min_val <= value <= max_val):
|
||||||
|
raise ValueError(
|
||||||
|
f"{name} must be between {min_val} and {max_val}, got {value}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown validator: {validator}")
|
||||||
|
|
||||||
|
|
||||||
|
# Load schema once at module import
|
||||||
|
SCHEMA = load_schema()
|
||||||
231
cli.py
231
cli.py
@@ -1,231 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
import os
|
|
||||||
import secrets
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
from datetime import datetime
|
|
||||||
from enum import StrEnum
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import NoReturn
|
|
||||||
|
|
||||||
REQUIRED_VARS = ["DEEPSEEK_API_KEY", "TMDB_API_KEY", "QBITTORRENT_URL"]
|
|
||||||
|
|
||||||
# Size in bytes
|
|
||||||
KEYS_TO_GENERATE = {
|
|
||||||
"JWT_SECRET": 32,
|
|
||||||
"JWT_REFRESH_SECRET": 32,
|
|
||||||
"CREDS_KEY": 32,
|
|
||||||
"CREDS_IV": 16,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class Style(StrEnum):
    """ANSI escape codes for styling terminal output.

    Usage: f"{Style.RED}Error{Style.RESET}"
    """

    RESET = "\033[0m"
    BOLD = "\033[1m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    CYAN = "\033[36m"
    DIM = "\033[2m"
|
|
||||||
|
|
||||||
# Only for terminals and if not specified otherwise
|
|
||||||
USE_COLORS = sys.stdout.isatty() and "NO_COLOR" not in os.environ
|
|
||||||
|
|
||||||
|
|
||||||
def styled(text: str, color_code: str) -> str:
    """Wrap text in ANSI codes only when the terminal supports colors."""
    if not USE_COLORS:
        return text
    return f"{color_code}{text}{Style.RESET}"
|
|
||||||
|
|
||||||
|
|
||||||
def log(msg: str, color: str | None = None, prefix="") -> None:
    """Print a message, optionally colorized, with an optional prefix."""
    if color:
        msg = styled(msg, color)
    print(f"{prefix}{msg}")
|
|
||||||
|
|
||||||
|
|
||||||
def error_exit(msg: str) -> NoReturn:
    """Print an error message in red and exit with status 1."""
    log(f"❌ {msg}", Style.RED)
    sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def is_docker_running() -> bool:
    """Check if Docker is installed and the daemon is responsive.

    Exits the program (via error_exit) when the docker binary is missing.
    """
    if shutil.which("docker") is None:
        error_exit("Docker is not installed.")

    result = subprocess.run(
        ["docker", "info"],
        # Redirect stdout/stderr to keep output clean on success
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        # Prevent exception being raised on a non-zero exit code
        check=False,
    )
    return result.returncode == 0
|
|
||||||
|
|
||||||
|
|
||||||
def parse_env(content: str) -> dict[str, str]:
    """Extract KEY=VALUE pairs from dotenv-style text.

    Blank lines, comment lines, and lines without '=' are ignored.
    Keys and values are stripped of surrounding whitespace.
    """
    pairs: dict[str, str] = {}
    for raw in content.splitlines():
        stripped = raw.strip()
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            continue
        key, _, value = stripped.partition("=")
        pairs[key.strip()] = value.strip()
    return pairs
|
|
||||||
|
|
||||||
|
|
||||||
def dump_env(content: str, data: dict[str, str]) -> str:
    """Return *content* with values from *data* substituted in place.

    Assignment lines keep their original key spelling and any inline
    " #" comment; blanks, comments, and non-assignment lines pass through
    untouched. Keys from *data* absent from *content* are appended at the end.
    """
    lines_out: list[str] = []
    seen: set[str] = set()

    for raw_line in content.splitlines():
        stripped = raw_line.strip()

        # Pass-through: empty, comment, or not an assignment.
        if not stripped or stripped.startswith("#") or "=" not in stripped:
            lines_out.append(raw_line)
            continue

        key_chunk, value_chunk = raw_line.split("=", 1)
        key = key_chunk.strip()

        # Not scheduled for update: keep the original line verbatim.
        if key not in data:
            lines_out.append(raw_line)
            continue

        seen.add(key)
        replacement = data[key]

        if " #" in value_chunk:
            # Preserve the inline comment after the new value.
            _, comment = value_chunk.split(" #", 1)
            lines_out.append(f"{key_chunk}={replacement} #{comment}")
        else:
            lines_out.append(f"{key_chunk}={replacement}")

    # Append brand-new keys that never appeared in the original content.
    lines_out.extend(f"{k}={v}" for k, v in data.items() if k not in seen)

    return "\n".join(lines_out) + "\n"
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_env() -> None:
    """Manage .env lifecycle: creation, secret generation, prompts.

    Reads the existing .env (or falls back to the .env.example template),
    fills in missing generated secrets, prompts for missing mandatory
    values, and writes the result back — backing up a pre-existing .env
    first.
    """
    env_path = Path(".env")
    env_example_path = Path(".env.example")

    # BUGFIX: a missing .env must always be written out, even when the
    # template already contains every secret and required value. Previously
    # `updated` stayed False in that case and .env was never created.
    env_exists = env_path.exists()
    updated: bool = not env_exists

    # Read .env if exists, otherwise start from the template
    if env_exists:
        content: str = env_path.read_text(encoding="utf-8")
    else:
        content = env_example_path.read_text(encoding="utf-8")

    existing_vars: dict[str, str] = parse_env(content)

    # Generate missing secrets
    for key, length in KEYS_TO_GENERATE.items():
        if key not in existing_vars or not existing_vars[key]:
            log(f"Generating {key}...", Style.GREEN, prefix=" ")
            existing_vars[key] = secrets.token_hex(length)
            updated = True
    log("Done", Style.GREEN, prefix=" ")

    # Prompt for missing mandatory keys
    color = Style.YELLOW if USE_COLORS else ""
    reset = Style.RESET if USE_COLORS else ""
    for key in REQUIRED_VARS:
        if key not in existing_vars or not existing_vars[key]:
            try:
                existing_vars[key] = input(
                    f" {color}Enter value for {key}: {reset}"
                ).strip()
                updated = True
            except KeyboardInterrupt:
                print()
                error_exit("Aborted by user.")

    # Write to disk
    if updated:
        # But backup the original first
        if env_exists:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_path = Path(f"{env_path}.{timestamp}.bak")
            shutil.copy(env_path, backup_path)
            log(f"Backup created: {backup_path}", Style.DIM)

        new_content = dump_env(content, existing_vars)
        env_path.write_text(new_content, encoding="utf-8")
        log(".env updated successfully.", Style.GREEN)
    else:
        log("Configuration is up to date.", Style.GREEN)
|
|
||||||
|
|
||||||
|
|
||||||
def setup() -> None:
    """Orchestrate initialization.

    Verifies the Docker daemon is actually responsive before touching the
    .env file. Previously the boolean returned by is_docker_running() was
    silently discarded, so a stopped daemon passed the prerequisite check.
    """
    if not is_docker_running():
        error_exit("Docker daemon is not running.")
    ensure_env()
|
|
||||||
|
|
||||||
|
|
||||||
def status() -> None:
    """Display simple dashboard."""
    # Bold title only when colors are enabled
    bold = Style.BOLD if USE_COLORS else ""
    reset = Style.RESET if USE_COLORS else ""

    print(f"\n{bold}ALFRED STATUS{reset}")
    print(f"{bold}==============={reset}\n")

    # Docker check (single probe — the subprocess call is not free)
    if is_docker_running():
        docker_mark = styled("✓ running", Style.GREEN)
    else:
        docker_mark = styled("✗ stopped", Style.RED)
    print(f" Docker: {docker_mark}")

    # .env presence check
    if Path(".env").exists():
        env_mark = styled("✓ present", Style.GREEN)
    else:
        env_mark = styled("✗ missing", Style.RED)
    print(f" .env: {env_mark}")

    print("")
|
|
||||||
|
|
||||||
|
|
||||||
def check() -> None:
    """Prerequisite check used by 'make up' — delegates to setup()."""
    setup()
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
    """CLI entry point: dispatch on the first positional argument."""
    if len(sys.argv) < 2:
        print("Usage: python cli.py [setup|check|status]")
        sys.exit(1)

    # Table-driven dispatch keeps the command list in one place.
    handlers = {"setup": setup, "check": check, "status": status}

    cmd = sys.argv[1]
    handler = handlers.get(cmd)
    if handler is None:
        error_exit(f"Unknown command: {cmd}")
    handler()
|
|
||||||
|
|
||||||
|
|
||||||
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    main()
|
|
||||||
@@ -34,7 +34,9 @@ services:
|
|||||||
- ./data:/data
|
- ./data:/data
|
||||||
- ./logs:/logs
|
- ./logs:/logs
|
||||||
# TODO: Hot reload (comment out in production)
|
# TODO: Hot reload (comment out in production)
|
||||||
#- ./alfred:/home/appuser/alfred
|
- ./alfred:/home/appuser/alfred
|
||||||
|
command: >
|
||||||
|
sh -c "python -u -m uvicorn alfred.app:app --host 0.0.0.0 --port 8000 2>&1 | tee -a /logs/alfred.log"
|
||||||
networks:
|
networks:
|
||||||
- alfred-net
|
- alfred-net
|
||||||
|
|
||||||
@@ -84,12 +86,11 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "${MONGO_PORT}:${MONGO_PORT}"
|
- "${MONGO_PORT}:${MONGO_PORT}"
|
||||||
volumes:
|
volumes:
|
||||||
- ./data/mongo:/data/db
|
- ./data/mongodb:/data/db
|
||||||
command: mongod --quiet --setParameter logComponentVerbosity='{"network":{"verbosity":0}}'
|
- ./mongod.conf:/etc/mongod.conf:ro
|
||||||
|
command: ["mongod", "--config", "/etc/mongod.conf"]
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: |
|
test: mongosh --quiet -u "${MONGO_USER}" -p "${MONGO_PASSWORD}" --authenticationDatabase admin --eval "db.adminCommand('ping')"
|
||||||
mongosh --quiet --eval "db.adminCommand('ping')" || \
|
|
||||||
mongosh --quiet -u "${MONGO_USER}" -p "${MONGO_PASSWORD}" --authenticationDatabase admin --eval "db.adminCommand('ping')"
|
|
||||||
interval: 10s
|
interval: 10s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 5
|
retries: 5
|
||||||
@@ -168,12 +169,14 @@ services:
|
|||||||
- ./data/vectordb:/var/lib/postgresql/data
|
- ./data/vectordb:/var/lib/postgresql/data
|
||||||
profiles: ["rag", "full"]
|
profiles: ["rag", "full"]
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-alfred} -d ${POSTGRES_DB_NAME:-alfred}" ]
|
test: [ "CMD-SHELL", "pg_isready -U $${POSTGRES_USER:-alfred} -d $${POSTGRES_DB_NAME:-alfred}" ]
|
||||||
interval: 5s
|
interval: 5s
|
||||||
timeout: 5s
|
timeout: 5s
|
||||||
retries: 5
|
retries: 5
|
||||||
networks:
|
networks:
|
||||||
- alfred-net
|
alfred-net:
|
||||||
|
aliases:
|
||||||
|
- db
|
||||||
|
|
||||||
# --- QBITTORENT (Optional) ---
|
# --- QBITTORENT (Optional) ---
|
||||||
qbittorrent:
|
qbittorrent:
|
||||||
|
|||||||
49
mongod.conf
Normal file
49
mongod.conf
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# MongoDB Configuration File
|
||||||
|
|
||||||
|
# Network settings
|
||||||
|
net:
|
||||||
|
port: 27017
|
||||||
|
bindIp: 0.0.0.0
|
||||||
|
|
||||||
|
# Storage settings
|
||||||
|
storage:
|
||||||
|
dbPath: /data/db
|
||||||
|
|
||||||
|
# Security settings
|
||||||
|
security:
|
||||||
|
authorization: enabled
|
||||||
|
|
||||||
|
# System log settings
|
||||||
|
systemLog:
|
||||||
|
destination: file
|
||||||
|
path: /dev/stdout
|
||||||
|
logAppend: false
|
||||||
|
verbosity: 0
|
||||||
|
quiet: true
|
||||||
|
component:
|
||||||
|
accessControl:
|
||||||
|
verbosity: -1
|
||||||
|
command:
|
||||||
|
verbosity: 0
|
||||||
|
control:
|
||||||
|
verbosity: 0
|
||||||
|
ftdc:
|
||||||
|
verbosity: 0
|
||||||
|
geo:
|
||||||
|
verbosity: 0
|
||||||
|
index:
|
||||||
|
verbosity: 0
|
||||||
|
network:
|
||||||
|
verbosity: 0
|
||||||
|
query:
|
||||||
|
verbosity: 0
|
||||||
|
replication:
|
||||||
|
verbosity: 0
|
||||||
|
sharding:
|
||||||
|
verbosity: 0
|
||||||
|
storage:
|
||||||
|
verbosity: 0
|
||||||
|
write:
|
||||||
|
verbosity: 0
|
||||||
|
transaction:
|
||||||
|
verbosity: 0
|
||||||
@@ -6,23 +6,6 @@ authors = ["Francwa <francois.hodiaumont@gmail.com>"]
|
|||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
package-mode = false
|
package-mode = false
|
||||||
|
|
||||||
[tool.alfred.settings]
|
|
||||||
image_name = "alfred_media_organizer"
|
|
||||||
librechat_version = "v0.8.1"
|
|
||||||
rag_version = "v0.7.0"
|
|
||||||
runner = "poetry"
|
|
||||||
service_name = "alfred"
|
|
||||||
|
|
||||||
[tool.alfred.security]
|
|
||||||
jwt_secret = "32:b64"
|
|
||||||
jwt_refresh_secret = "32:b64"
|
|
||||||
creds_key = "32:b64"
|
|
||||||
creds_iv = "16:b64"
|
|
||||||
meili_master_key = "32:b64"
|
|
||||||
mongo_password = "16:hex"
|
|
||||||
postgres_password = "16:hex"
|
|
||||||
qbittorrent_password = "16:hex"
|
|
||||||
|
|
||||||
[tool.poetry.dependencies]
|
[tool.poetry.dependencies]
|
||||||
python = "==3.14.2"
|
python = "==3.14.2"
|
||||||
python-dotenv = "^1.0.0"
|
python-dotenv = "^1.0.0"
|
||||||
|
|||||||
@@ -1,245 +1,43 @@
|
|||||||
import re
|
#!/usr/bin/env python3
|
||||||
import secrets
|
"""Bootstrap script - generates .env and .env.make from pyproject.toml schema."""
|
||||||
|
|
||||||
|
import sys
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import tomllib
|
# Add parent directory to path to import from alfred package
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
||||||
|
|
||||||
|
from alfred.settings_bootstrap import ConfigSource, bootstrap_env
|
||||||
|
|
||||||
|
|
||||||
def generate_secret(rule: str) -> str:
|
def main():
|
||||||
"""
|
"""
|
||||||
Generates a cryptographically secure secret based on a spec string.
|
Initialize .env file from settings schema in pyproject.toml.
|
||||||
Example specs: '32:b64', '16:hex'.
|
|
||||||
|
- Reads schema from [tool.alfred.settings_schema]
|
||||||
|
- Generates secrets automatically
|
||||||
|
- Preserves existing secrets
|
||||||
|
- Validates all settings
|
||||||
|
- Writes .env and .env.make
|
||||||
"""
|
"""
|
||||||
chunks: list[str] = rule.split(":")
|
try:
|
||||||
size: int = int(chunks[0])
|
base_dir = Path(__file__).resolve().parent.parent
|
||||||
tech: str = chunks[1]
|
config_source = ConfigSource.from_base_dir(base_dir)
|
||||||
|
bootstrap_env(config_source)
|
||||||
|
except FileNotFoundError as e:
|
||||||
|
print(f"❌ {e}")
|
||||||
|
return 1
|
||||||
|
except ValueError as e:
|
||||||
|
print(f"❌ Validation error: {e}")
|
||||||
|
return 1
|
||||||
|
except Exception as e:
|
||||||
|
print(f"❌ Bootstrap failed: {e}")
|
||||||
|
import traceback # noqa: PLC0415
|
||||||
|
|
||||||
if tech == "b64":
|
traceback.print_exc()
|
||||||
return secrets.token_urlsafe(size)
|
return 1
|
||||||
elif tech == "hex":
|
return 0
|
||||||
return secrets.token_hex(size)
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Invalid security format: {tech}")
|
|
||||||
|
|
||||||
|
|
||||||
def extract_python_version(version_string: str) -> tuple[str, str]:
|
|
||||||
"""
|
|
||||||
Extract Python version from poetry dependency string.
|
|
||||||
Examples:
|
|
||||||
"==3.14.2" -> ("3.14.2", "3.14")
|
|
||||||
"^3.14.2" -> ("3.14.2", "3.14")
|
|
||||||
"~3.14.2" -> ("3.14.2", "3.14")
|
|
||||||
"3.14.2" -> ("3.14.2", "3.14")
|
|
||||||
"""
|
|
||||||
# Remove poetry version operators (==, ^, ~, >=, etc.)
|
|
||||||
clean_version = re.sub(r"^[=^~><]+", "", version_string.strip())
|
|
||||||
|
|
||||||
# Extract version parts
|
|
||||||
parts = clean_version.split(".")
|
|
||||||
|
|
||||||
if len(parts) >= 2:
|
|
||||||
full_version = clean_version
|
|
||||||
short_version = f"{parts[0]}.{parts[1]}"
|
|
||||||
return full_version, short_version
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Invalid Python version format: {version_string}")
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: Refactor
|
|
||||||
def bootstrap(): # noqa: PLR0912, PLR0915
|
|
||||||
"""
|
|
||||||
Initializes the .env file by merging .env.example with generated secrets
|
|
||||||
and build variables from pyproject.toml.
|
|
||||||
Also generates .env.make for Makefile.
|
|
||||||
|
|
||||||
ALWAYS preserves existing secrets!
|
|
||||||
"""
|
|
||||||
base_dir = Path(__file__).resolve().parent.parent
|
|
||||||
env_path = base_dir / ".env"
|
|
||||||
|
|
||||||
example_path = base_dir / ".env.example"
|
|
||||||
if not example_path.exists():
|
|
||||||
print(f"❌ {example_path.name} not found.")
|
|
||||||
return
|
|
||||||
|
|
||||||
toml_path = base_dir / "pyproject.toml"
|
|
||||||
if not toml_path.exists():
|
|
||||||
print(f"❌ {toml_path.name} not found.")
|
|
||||||
return
|
|
||||||
|
|
||||||
# ALWAYS load existing .env if it exists
|
|
||||||
existing_env = {}
|
|
||||||
if env_path.exists():
|
|
||||||
print("🔄 Reading existing .env...")
|
|
||||||
with open(env_path) as f:
|
|
||||||
for line in f:
|
|
||||||
if "=" in line and not line.strip().startswith("#"):
|
|
||||||
key, value = line.split("=", 1)
|
|
||||||
existing_env[key.strip()] = value.strip()
|
|
||||||
print(f" Found {len(existing_env)} existing keys")
|
|
||||||
print("🔧 Updating .env file (keeping secrets)...")
|
|
||||||
else:
|
|
||||||
print("🔧 Initializing: Creating secure .env file...")
|
|
||||||
|
|
||||||
# Load data from pyproject.toml
|
|
||||||
with open(toml_path, "rb") as f:
|
|
||||||
data = tomllib.load(f)
|
|
||||||
security_keys = data["tool"]["alfred"]["security"]
|
|
||||||
settings_keys = data["tool"]["alfred"]["settings"]
|
|
||||||
dependencies = data["tool"]["poetry"]["dependencies"]
|
|
||||||
alfred_version = data["tool"]["poetry"]["version"]
|
|
||||||
|
|
||||||
# Normalize TOML keys to UPPER_CASE for .env format (done once)
|
|
||||||
security_keys_upper = {k.upper(): v for k, v in security_keys.items()}
|
|
||||||
settings_keys_upper = {k.upper(): v for k, v in settings_keys.items()}
|
|
||||||
|
|
||||||
# Extract Python version
|
|
||||||
python_version_full, python_version_short = extract_python_version(
|
|
||||||
dependencies["python"]
|
|
||||||
)
|
|
||||||
|
|
||||||
# Read .env.example
|
|
||||||
with open(example_path) as f:
|
|
||||||
example_lines = f.readlines()
|
|
||||||
|
|
||||||
new_lines = []
|
|
||||||
# Process each line from .env.example
|
|
||||||
for raw_line in example_lines:
|
|
||||||
line = raw_line.strip()
|
|
||||||
|
|
||||||
if line and not line.startswith("#") and "=" in line:
|
|
||||||
key, value = line.split("=", 1)
|
|
||||||
key = key.strip()
|
|
||||||
|
|
||||||
# Check if key exists in current .env (update mode)
|
|
||||||
if key in existing_env:
|
|
||||||
# Keep existing value for secrets
|
|
||||||
if key in security_keys_upper:
|
|
||||||
new_lines.append(f"{key}={existing_env[key]}\n")
|
|
||||||
print(f" ↻ Kept existing {key}")
|
|
||||||
# Update build vars from pyproject.toml
|
|
||||||
elif key in settings_keys_upper:
|
|
||||||
new_value = settings_keys_upper[key]
|
|
||||||
if existing_env[key] != new_value:
|
|
||||||
new_lines.append(f"{key}={new_value}\n")
|
|
||||||
print(f" ↻ Updated {key}: {existing_env[key]} → {new_value}")
|
|
||||||
else:
|
|
||||||
new_lines.append(f"{key}={existing_env[key]}\n")
|
|
||||||
print(f" ↻ Kept {key}={existing_env[key]}")
|
|
||||||
# Update Python versions
|
|
||||||
elif key == "PYTHON_VERSION":
|
|
||||||
if existing_env[key] != python_version_full:
|
|
||||||
new_lines.append(f"{key}={python_version_full}\n")
|
|
||||||
print(
|
|
||||||
f" ↻ Updated Python: {existing_env[key]} → {python_version_full}"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
new_lines.append(f"{key}={existing_env[key]}\n")
|
|
||||||
print(f" ↻ Kept Python: {existing_env[key]}")
|
|
||||||
elif key == "PYTHON_VERSION_SHORT":
|
|
||||||
if existing_env[key] != python_version_short:
|
|
||||||
new_lines.append(f"{key}={python_version_short}\n")
|
|
||||||
print(
|
|
||||||
f" ↻ Updated Python (short): {existing_env[key]} → {python_version_short}"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
new_lines.append(f"{key}={existing_env[key]}\n")
|
|
||||||
print(f" ↻ Kept Python (short): {existing_env[key]}")
|
|
||||||
elif key == "ALFRED_VERSION":
|
|
||||||
if existing_env.get(key) != alfred_version:
|
|
||||||
new_lines.append(f"{key}={alfred_version}\n")
|
|
||||||
print(f" ↻ Updated Alfred version: {existing_env.get(key, 'N/A')} → {alfred_version}")
|
|
||||||
else:
|
|
||||||
new_lines.append(f"{key}={alfred_version}\n")
|
|
||||||
print(f" ↻ Kept Alfred version: {alfred_version}")
|
|
||||||
# Keep other existing values
|
|
||||||
else:
|
|
||||||
new_lines.append(f"{key}={existing_env[key]}\n")
|
|
||||||
# Key doesn't exist, generate/add it
|
|
||||||
elif key in security_keys_upper:
|
|
||||||
rule = security_keys_upper[key]
|
|
||||||
secret = generate_secret(rule)
|
|
||||||
new_lines.append(f"{key}={secret}\n")
|
|
||||||
print(f" + Secret generated for {key} ({rule})")
|
|
||||||
elif key in settings_keys_upper:
|
|
||||||
value = settings_keys_upper[key]
|
|
||||||
new_lines.append(f"{key}={value}\n")
|
|
||||||
print(f" + Setting added: {key}={value}")
|
|
||||||
elif key == "PYTHON_VERSION":
|
|
||||||
new_lines.append(f"{key}={python_version_full}\n")
|
|
||||||
print(f" + Python version: {python_version_full}")
|
|
||||||
elif key == "PYTHON_VERSION_SHORT":
|
|
||||||
new_lines.append(f"{key}={python_version_short}\n")
|
|
||||||
print(f" + Python version (short): {python_version_short}")
|
|
||||||
elif key == "ALFRED_VERSION":
|
|
||||||
new_lines.append(f"{key}={alfred_version}\n")
|
|
||||||
print(f" + Alfred version: {alfred_version}")
|
|
||||||
else:
|
|
||||||
new_lines.append(raw_line)
|
|
||||||
else:
|
|
||||||
# Keep comments and empty lines
|
|
||||||
new_lines.append(raw_line)
|
|
||||||
|
|
||||||
# Compute database URIs from the generated values
|
|
||||||
final_env = {}
|
|
||||||
for line in new_lines:
|
|
||||||
if "=" in line and not line.strip().startswith("#"):
|
|
||||||
key, value = line.split("=", 1)
|
|
||||||
final_env[key.strip()] = value.strip()
|
|
||||||
|
|
||||||
# Compute MONGO_URI
|
|
||||||
if "MONGO_USER" in final_env and "MONGO_PASSWORD" in final_env:
|
|
||||||
mongo_uri = (
|
|
||||||
f"mongodb://{final_env.get('MONGO_USER', 'alfred')}:"
|
|
||||||
f"{final_env.get('MONGO_PASSWORD', '')}@"
|
|
||||||
f"{final_env.get('MONGO_HOST', 'mongodb')}:"
|
|
||||||
f"{final_env.get('MONGO_PORT', '27017')}/"
|
|
||||||
f"{final_env.get('MONGO_DB_NAME', 'alfred')}?authSource=admin"
|
|
||||||
)
|
|
||||||
# Update MONGO_URI in new_lines
|
|
||||||
for i, line in enumerate(new_lines):
|
|
||||||
if line.startswith("MONGO_URI="):
|
|
||||||
new_lines[i] = f"MONGO_URI={mongo_uri}\n"
|
|
||||||
print(" ✓ Computed MONGO_URI")
|
|
||||||
break
|
|
||||||
|
|
||||||
# Compute POSTGRES_URI
|
|
||||||
if "POSTGRES_USER" in final_env and "POSTGRES_PASSWORD" in final_env:
|
|
||||||
postgres_uri = (
|
|
||||||
f"postgresql://{final_env.get('POSTGRES_USER', 'alfred')}:"
|
|
||||||
f"{final_env.get('POSTGRES_PASSWORD', '')}@"
|
|
||||||
f"{final_env.get('POSTGRES_HOST', 'vectordb')}:"
|
|
||||||
f"{final_env.get('POSTGRES_PORT', '5432')}/"
|
|
||||||
f"{final_env.get('POSTGRES_DB_NAME', 'alfred')}"
|
|
||||||
)
|
|
||||||
# Update POSTGRES_URI in new_lines
|
|
||||||
for i, line in enumerate(new_lines):
|
|
||||||
if line.startswith("POSTGRES_URI="):
|
|
||||||
new_lines[i] = f"POSTGRES_URI={postgres_uri}\n"
|
|
||||||
print(" ✓ Computed POSTGRES_URI")
|
|
||||||
break
|
|
||||||
|
|
||||||
# Write .env file
|
|
||||||
with open(env_path, "w", encoding="utf-8") as f:
|
|
||||||
f.writelines(new_lines)
|
|
||||||
print(f"\n✅ {env_path.name} generated successfully.")
|
|
||||||
|
|
||||||
# Generate .env.make for Makefile
|
|
||||||
env_make_path = base_dir / ".env.make"
|
|
||||||
with open(env_make_path, "w", encoding="utf-8") as f:
|
|
||||||
f.write("# Auto-generated from pyproject.toml by bootstrap.py\n")
|
|
||||||
f.write(f"export ALFRED_VERSION={alfred_version}\n")
|
|
||||||
f.write(f"export PYTHON_VERSION={python_version_full}\n")
|
|
||||||
f.write(f"export PYTHON_VERSION_SHORT={python_version_short}\n")
|
|
||||||
f.write(f"export RUNNER={settings_keys['runner']}\n")
|
|
||||||
f.write(f"export IMAGE_NAME={settings_keys['image_name']}\n")
|
|
||||||
f.write(f"export SERVICE_NAME={settings_keys['service_name']}\n")
|
|
||||||
f.write(f"export LIBRECHAT_VERSION={settings_keys['librechat_version']}\n")
|
|
||||||
f.write(f"export RAG_VERSION={settings_keys['rag_version']}\n")
|
|
||||||
|
|
||||||
print(f"✅ {env_make_path.name} generated for Makefile.")
|
|
||||||
print("\n⚠️ Reminder: Please manually add your API keys to the .env file.")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
bootstrap()
|
sys.exit(main())
|
||||||
|
|||||||
89
scripts/config_loader.py
Normal file
89
scripts/config_loader.py
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
"""Shared configuration loader for bootstrap and CI."""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import NamedTuple
|
||||||
|
|
||||||
|
import tomllib
|
||||||
|
|
||||||
|
|
||||||
|
class BuildConfig(NamedTuple):
    """Build configuration extracted from pyproject.toml.

    NOTE: field order matters — NamedTuple fields are positional, and the
    .env.make writer emits them in declaration order.
    """

    alfred_version: str  # tool.poetry.version
    python_version: str  # full version, e.g. "3.14.2"
    python_version_short: str  # major.minor only, e.g. "3.14"
    runner: str  # dependency manager command (e.g. "poetry")
    image_name: str  # Docker image name
    service_name: str  # Docker service name
    librechat_version: str  # pinned LibreChat version tag
    rag_version: str  # pinned RAG API version tag
|
||||||
|
|
||||||
|
|
||||||
|
def extract_python_version(version_string: str) -> tuple[str, str]:
    """
    Extract Python version from poetry dependency string.

    Returns a (full, short) pair, e.g. "==3.14.2" -> ("3.14.2", "3.14").
    Raises ValueError when fewer than two version components are present.
    """
    # Drop leading Poetry constraint operators (==, ^, ~, >=, <=, ...).
    bare = version_string.strip().lstrip("=^~><")
    components = bare.split(".")

    if len(components) < 2:
        raise ValueError(f"Invalid Python version format: {version_string}")

    return bare, f"{components[0]}.{components[1]}"
|
||||||
|
|
||||||
|
|
||||||
|
def load_build_config(base_dir: Path | None = None) -> BuildConfig:
    """Load build configuration from pyproject.toml.

    Raises FileNotFoundError when pyproject.toml is absent from *base_dir*
    (defaulting to the repository root, one level above this script).
    """
    root = base_dir if base_dir is not None else Path(__file__).resolve().parent.parent

    toml_path = root / "pyproject.toml"
    if not toml_path.exists():
        raise FileNotFoundError(f"pyproject.toml not found: {toml_path}")

    with open(toml_path, "rb") as fh:
        pyproject = tomllib.load(fh)

    # NOTE(review): this expects [tool.alfred.settings] to exist in
    # pyproject.toml — confirm it was not removed in favor of settings.toml.
    alfred_settings = pyproject["tool"]["alfred"]["settings"]
    poetry_section = pyproject["tool"]["poetry"]

    full_version, short_version = extract_python_version(
        poetry_section["dependencies"]["python"]
    )

    return BuildConfig(
        alfred_version=poetry_section["version"],
        python_version=full_version,
        python_version_short=short_version,
        runner=alfred_settings["runner"],
        image_name=alfred_settings["image_name"],
        service_name=alfred_settings["service_name"],
        librechat_version=alfred_settings["librechat_version"],
        rag_version=alfred_settings["rag_version"],
    )
|
||||||
|
|
||||||
|
|
||||||
|
def write_env_make(config: BuildConfig, base_dir: Path | None = None) -> None:
    """Write the .env.make file consumed by the Makefile.

    Each BuildConfig field becomes an exported UPPER_CASE variable, emitted
    in field declaration order.
    """
    root = base_dir if base_dir is not None else Path(__file__).resolve().parent.parent

    exports = "".join(
        f"export {field.upper()}={value}\n"
        for field, value in config._asdict().items()
    )

    target = root / ".env.make"
    target.write_text(
        "# Auto-generated from pyproject.toml\n" + exports, encoding="utf-8"
    )
|
||||||
66
scripts/validate_settings.py
Normal file
66
scripts/validate_settings.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Validate settings against schema."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent directory to path
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
|
||||||
|
|
||||||
|
from alfred.settings_bootstrap import ConfigSource, SettingsBootstrap
|
||||||
|
from alfred.settings_schema import SCHEMA
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """
    Validate settings from .env against schema.

    Returns:
        0 if valid, 1 if invalid
    """
    print("🔍 Validating settings...")

    try:
        project_root = Path(__file__).resolve().parent.parent
        source = ConfigSource.from_base_dir(project_root)

        # Bail out early when no .env has been generated yet.
        if not source.env_path.exists():
            print(f"❌ {source.env_path} not found")
            print(" Run 'make bootstrap' to generate it")
            return 1

        # NOTE(review): these are private SettingsBootstrap methods —
        # consider exposing a public validate() entry point instead.
        bootstrapper = SettingsBootstrap(source)
        bootstrapper._load_sources()
        bootstrapper._resolve_settings()
        bootstrapper._validate_settings()

        print(f"✅ All {len(SCHEMA)} settings are valid!")

        # Summarize how many settings each category contributes.
        print("\n📊 Settings summary:")
        categories: dict = {}
        for definition in SCHEMA:
            categories[definition.category] = categories.get(definition.category, 0) + 1

        for category, count in sorted(categories.items()):
            print(f" {category}: {count} settings")

        return 0

    except ValueError as e:
        print(f"❌ Validation failed: {e}")
        return 1
    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback  # noqa: PLC0415

        traceback.print_exc()
        return 1
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: propagate main()'s status code to the shell.
if __name__ == "__main__":
    sys.exit(main())
|
||||||
414
settings.toml
Normal file
414
settings.toml
Normal file
@@ -0,0 +1,414 @@
|
|||||||
|
[tool.alfred.settings_schema]
|
||||||
|
|
||||||
|
# Build variables (from pyproject.toml)
|
||||||
|
[tool.alfred.settings_schema.ALFRED_VERSION]
|
||||||
|
type = "string"
|
||||||
|
source = "toml"
|
||||||
|
toml_path = "tool.poetry.version"
|
||||||
|
description = "Alfred version"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.PYTHON_VERSION]
|
||||||
|
type = "string"
|
||||||
|
source = "toml"
|
||||||
|
toml_path = "tool.poetry.dependencies.python"
|
||||||
|
transform = "extract_python_version_full"
|
||||||
|
description = "Python version (full)"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.PYTHON_VERSION_SHORT]
|
||||||
|
type = "string"
|
||||||
|
source = "toml"
|
||||||
|
toml_path = "tool.poetry.dependencies.python"
|
||||||
|
transform = "extract_python_version_short"
|
||||||
|
description = "Python version (major.minor)"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.RUNNER]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "poetry"
|
||||||
|
description = "Dependency manager (poetry/uv)"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.IMAGE_NAME]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred_media_organizer"
|
||||||
|
description = "Docker image name"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.SERVICE_NAME]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred"
|
||||||
|
description = "Docker service name"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.LIBRECHAT_VERSION]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "v0.8.1"
|
||||||
|
description = "LibreChat version"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.RAG_VERSION]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "v0.7.0"
|
||||||
|
description = "RAG API version"
|
||||||
|
category = "build"
|
||||||
|
export_to_env_make = true
|
||||||
|
|
||||||
|
# Security secrets (generated)
|
||||||
|
[tool.alfred.settings_schema.JWT_SECRET]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "32:b64"
|
||||||
|
description = "JWT signing secret"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.JWT_REFRESH_SECRET]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "32:b64"
|
||||||
|
description = "JWT refresh token secret"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.CREDS_KEY]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "32:hex"
|
||||||
|
description = "Credentials encryption key (AES-256)"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.CREDS_IV]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "16:hex"
|
||||||
|
description = "Credentials encryption IV"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MEILI_MASTER_KEY]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "32:b64"
|
||||||
|
description = "Meilisearch master key"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MONGO_PASSWORD]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "16:hex"
|
||||||
|
description = "MongoDB password"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_PASSWORD]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "16:hex"
|
||||||
|
description = "PostgreSQL password"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.QBITTORRENT_PASSWORD]
|
||||||
|
type = "secret"
|
||||||
|
source = "generated"
|
||||||
|
secret_rule = "16:hex"
|
||||||
|
description = "qBittorrent password"
|
||||||
|
category = "security"
|
||||||
|
required = true
|
||||||
|
|
||||||
|
# Database configuration
|
||||||
|
[tool.alfred.settings_schema.MONGO_HOST]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "mongodb"
|
||||||
|
description = "MongoDB host"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MONGO_PORT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 27017
|
||||||
|
description = "MongoDB port"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MONGO_USER]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred"
|
||||||
|
description = "MongoDB user"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MONGO_DB_NAME]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred"
|
||||||
|
description = "MongoDB database name"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MONGO_URI]
|
||||||
|
type = "computed"
|
||||||
|
source = "computed"
|
||||||
|
compute_from = ["MONGO_USER", "MONGO_PASSWORD", "MONGO_HOST", "MONGO_PORT", "MONGO_DB_NAME"]
|
||||||
|
compute_template = "mongodb://{MONGO_USER}:{MONGO_PASSWORD}@{MONGO_HOST}:{MONGO_PORT}/{MONGO_DB_NAME}?authSource=admin"
|
||||||
|
description = "MongoDB connection URI"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_HOST]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "vectordb"
|
||||||
|
description = "PostgreSQL host"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_PORT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 5432
|
||||||
|
description = "PostgreSQL port"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_USER]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred"
|
||||||
|
description = "PostgreSQL user"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_DB_NAME]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "alfred"
|
||||||
|
description = "PostgreSQL database name"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.POSTGRES_URI]
|
||||||
|
type = "computed"
|
||||||
|
source = "computed"
|
||||||
|
compute_from = ["POSTGRES_USER", "POSTGRES_PASSWORD", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_DB_NAME"]
|
||||||
|
compute_template = "postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB_NAME}"
|
||||||
|
description = "PostgreSQL connection URI"
|
||||||
|
category = "database"
|
||||||
|
|
||||||
|
# API Keys (optional, from .env)
|
||||||
|
[tool.alfred.settings_schema.TMDB_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "The Movie Database API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.DEEPSEEK_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "DeepSeek API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.OPENAI_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "OpenAI API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.ANTHROPIC_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "Anthropic (Claude) API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.GOOGLE_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "Google (Gemini) API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.KIMI_API_KEY]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
required = false
|
||||||
|
description = "Kimi API key"
|
||||||
|
category = "api"
|
||||||
|
|
||||||
|
# Application settings
|
||||||
|
[tool.alfred.settings_schema.HOST]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "0.0.0.0"
|
||||||
|
description = "Server host"
|
||||||
|
category = "app"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.PORT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 3080
|
||||||
|
description = "Server port"
|
||||||
|
category = "app"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MAX_HISTORY_MESSAGES]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 10
|
||||||
|
description = "Maximum conversation history messages"
|
||||||
|
category = "app"
|
||||||
|
validator = "range:1:100"
|
||||||
|
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MAX_TOOL_ITERATIONS]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 10
|
||||||
|
description = "Maximum tool iterations per request"
|
||||||
|
category = "app"
|
||||||
|
validator = "range:1:20"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.REQUEST_TIMEOUT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 30
|
||||||
|
description = "Request timeout in seconds"
|
||||||
|
category = "app"
|
||||||
|
validator = "range:1:300"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.LLM_TEMPERATURE]
|
||||||
|
type = "float"
|
||||||
|
source = "env"
|
||||||
|
default = 0.2
|
||||||
|
description = "LLM temperature"
|
||||||
|
category = "app"
|
||||||
|
validator = "range:0.0:2.0"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.DATA_STORAGE_DIR]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "data"
|
||||||
|
description = "Data storage directory"
|
||||||
|
category = "app"
|
||||||
|
|
||||||
|
# TMDB Configuration
|
||||||
|
[tool.alfred.settings_schema.TMDB_BASE_URL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "https://api.themoviedb.org/3"
|
||||||
|
description = "TMDB API base URL"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
# qBittorrent Configuration
|
||||||
|
[tool.alfred.settings_schema.QBITTORRENT_URL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "http://qbittorrent:16140"
|
||||||
|
description = "qBittorrent web UI URL"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.QBITTORRENT_USERNAME]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "admin"
|
||||||
|
description = "qBittorrent username"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.QBITTORRENT_PORT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 16140
|
||||||
|
description = "qBittorrent port"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
# Meilisearch Configuration
|
||||||
|
[tool.alfred.settings_schema.MEILI_ENABLED]
|
||||||
|
type = "boolean"
|
||||||
|
source = "env"
|
||||||
|
default = false
|
||||||
|
description = "Enable Meilisearch"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MEILI_NO_ANALYTICS]
|
||||||
|
type = "boolean"
|
||||||
|
source = "env"
|
||||||
|
default = true
|
||||||
|
description = "Disable Meilisearch analytics"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.MEILI_HOST]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "http://meilisearch:7700"
|
||||||
|
description = "Meilisearch host URL"
|
||||||
|
category = "external_services"
|
||||||
|
|
||||||
|
# LLM Configuration
|
||||||
|
[tool.alfred.settings_schema.DEFAULT_LLM_PROVIDER]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "local"
|
||||||
|
description = "Default LLM provider (local/openai/anthropic/deepseek/google/kimi)"
|
||||||
|
category = "llm"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.OLLAMA_BASE_URL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "http://ollama:11434"
|
||||||
|
description = "Ollama API base URL"
|
||||||
|
category = "llm"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.OLLAMA_MODEL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "llama3.3:latest"
|
||||||
|
description = "Ollama model name"
|
||||||
|
category = "llm"
|
||||||
|
|
||||||
|
# RAG Configuration
|
||||||
|
[tool.alfred.settings_schema.RAG_ENABLED]
|
||||||
|
type = "boolean"
|
||||||
|
source = "env"
|
||||||
|
default = true
|
||||||
|
description = "Enable RAG system"
|
||||||
|
category = "rag"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.RAG_API_URL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "http://rag_api:8000"
|
||||||
|
description = "RAG API URL"
|
||||||
|
category = "rag"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.RAG_API_PORT]
|
||||||
|
type = "integer"
|
||||||
|
source = "env"
|
||||||
|
default = 8000
|
||||||
|
description = "RAG API port"
|
||||||
|
category = "rag"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.EMBEDDINGS_PROVIDER]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "ollama"
|
||||||
|
description = "Embeddings provider"
|
||||||
|
category = "rag"
|
||||||
|
|
||||||
|
[tool.alfred.settings_schema.EMBEDDINGS_MODEL]
|
||||||
|
type = "string"
|
||||||
|
source = "env"
|
||||||
|
default = "nomic-embed-text"
|
||||||
|
description = "Embeddings model name"
|
||||||
|
category = "rag"
|
||||||
410
tests/test_settings_bootstrap.py
Normal file
410
tests/test_settings_bootstrap.py
Normal file
@@ -0,0 +1,410 @@
|
|||||||
|
"""Tests for settings bootstrap."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from alfred.settings_bootstrap import (
|
||||||
|
ConfigSource,
|
||||||
|
SettingsBootstrap,
|
||||||
|
extract_python_version,
|
||||||
|
generate_secret,
|
||||||
|
get_nested_value,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def test_toml_content():
    """Return a pyproject.toml body exercising every schema source kind."""
    # One entry each for: toml-sourced, env-with-default, generated secret,
    # computed template, and toml-with-transform.
    toml_body = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.poetry.dependencies]
python = "==3.14.2"

[tool.alfred.settings]
runner = "poetry"
image_name = "test_image"

[tool.alfred.settings_schema.TEST_FROM_TOML]
type = "string"
source = "toml"
toml_path = "tool.poetry.version"
description = "Version from TOML"
category = "test"

[tool.alfred.settings_schema.TEST_FROM_ENV]
type = "string"
source = "env"
default = "default_value"
description = "Value from env"
category = "test"

[tool.alfred.settings_schema.TEST_SECRET]
type = "secret"
source = "generated"
secret_rule = "16:hex"
description = "Generated secret"
category = "security"

[tool.alfred.settings_schema.TEST_COMPUTED]
type = "computed"
source = "computed"
compute_from = ["TEST_FROM_TOML", "TEST_FROM_ENV"]
compute_template = "{TEST_FROM_TOML}-{TEST_FROM_ENV}"
description = "Computed value"
category = "test"

[tool.alfred.settings_schema.PYTHON_VERSION]
type = "string"
source = "toml"
toml_path = "tool.poetry.dependencies.python"
transform = "extract_python_version_full"
description = "Python version"
category = "build"
"""
    return toml_body
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def config_source(tmp_path):
    """Provide a ConfigSource rooted at the pytest temporary directory."""
    return ConfigSource.from_base_dir(tmp_path)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def create_test_env(tmp_path, test_toml_content):
    """Build a full bootstrap environment (pyproject.toml + .env.example)."""
    # Schema source file.
    (tmp_path / "pyproject.toml").write_text(test_toml_content)

    # Template with one empty slot per schema entry.
    (tmp_path / ".env.example").write_text("""
TEST_FROM_TOML=
TEST_FROM_ENV=
TEST_SECRET=
TEST_COMPUTED=
PYTHON_VERSION=
""")

    return ConfigSource.from_base_dir(tmp_path)
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtractPythonVersion:
    """Tests for extract_python_version()."""

    def test_exact_version(self):
        """An '==' pin yields both the full and the major.minor version."""
        result = extract_python_version("==3.14.2")
        assert result == ("3.14.2", "3.14")

    def test_caret_version(self):
        """A caret constraint is parsed the same way as an exact pin."""
        result = extract_python_version("^3.14.2")
        assert result == ("3.14.2", "3.14")

    def test_invalid_version(self):
        """A spec without major.minor raises ValueError."""
        with pytest.raises(ValueError, match="Invalid Python version"):
            extract_python_version("3")
|
||||||
|
|
||||||
|
|
||||||
|
class TestGenerateSecret:
    """Tests for generate_secret()."""

    def test_generate_b64(self):
        """A base64 rule produces a non-empty string."""
        value = generate_secret("32:b64")
        assert isinstance(value, str)
        assert value != ""

    def test_generate_hex(self):
        """A hex rule produces lowercase hex of twice the byte count."""
        value = generate_secret("16:hex")
        assert isinstance(value, str)
        assert len(value) == 32  # 16 bytes = 32 hex chars
        assert set(value) <= set("0123456789abcdef")

    def test_invalid_format(self):
        """An unknown encoding in the rule raises ValueError."""
        with pytest.raises(ValueError, match="Invalid security format"):
            generate_secret("32:invalid")

    def test_invalid_rule(self):
        """A rule missing the ':encoding' part raises ValueError."""
        with pytest.raises(ValueError, match="Invalid security rule format"):
            generate_secret("32")
|
||||||
|
|
||||||
|
|
||||||
|
class TestGetNestedValue:
    """Tests for get_nested_value()."""

    def test_simple_path(self):
        """A single-segment path reads a top-level key."""
        assert get_nested_value({"key": "value"}, "key") == "value"

    def test_nested_path(self):
        """Dotted paths descend through nested dicts."""
        assert get_nested_value({"a": {"b": {"c": "value"}}}, "a.b.c") == "value"

    def test_missing_key(self):
        """A missing segment raises KeyError."""
        with pytest.raises(KeyError):
            get_nested_value({"a": {"b": "value"}}, "a.c")

    def test_non_dict_value(self):
        """Descending into a non-dict value raises KeyError mentioning 'non-dict'."""
        with pytest.raises(KeyError, match="non-dict"):
            get_nested_value({"a": "not a dict"}, "a.b")
|
||||||
|
|
||||||
|
|
||||||
|
class TestSettingsBootstrap:
    """Test settings bootstrap.

    The schema-loading boilerplate that was repeated in almost every test
    (load the schema from the source dir, build a SettingsBootstrap) lives
    in the `_make_bootstrapper` helper.
    """

    @staticmethod
    def _make_bootstrapper(source):
        """Return a SettingsBootstrap wired to *source* with its schema loaded.

        The schema is loaded lazily (at call time) so tests that rewrite
        pyproject.toml before calling this see their modifications.
        """
        from alfred.settings_schema import load_schema

        return SettingsBootstrap(source, load_schema(source.base_dir))

    def test_load_sources(self, create_test_env):
        """Loading sources populates toml_data and existing_env."""
        # No schema argument here: _load_sources must work standalone.
        bootstrapper = SettingsBootstrap(create_test_env)
        bootstrapper._load_sources()

        assert bootstrapper.toml_data is not None
        assert "tool" in bootstrapper.toml_data
        assert isinstance(bootstrapper.existing_env, dict)

    def test_resolve_from_toml(self, create_test_env):
        """A toml-sourced setting is read from its toml_path."""
        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("TEST_FROM_TOML")
        value = bootstrapper._resolve_from_toml(definition)

        assert value == "1.0.0"  # From tool.poetry.version

    def test_resolve_from_env_with_default(self, create_test_env):
        """An env-sourced setting falls back to its schema default."""
        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("TEST_FROM_ENV")
        assert bootstrapper._resolve_from_env(definition) == "default_value"

    def test_resolve_from_env_existing(self, create_test_env):
        """A value already present in .env wins over the schema default."""
        create_test_env.env_path.write_text("TEST_FROM_ENV=existing_value\n")

        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("TEST_FROM_ENV")
        assert bootstrapper._resolve_from_env(definition) == "existing_value"

    def test_resolve_generated_new(self, create_test_env):
        """A generated secret is created when none exists."""
        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("TEST_SECRET")
        value = bootstrapper._resolve_generated(definition)

        assert isinstance(value, str)
        assert len(value) == 32  # 16 hex bytes = 32 chars

    def test_resolve_generated_preserve_existing(self, create_test_env):
        """An existing secret in .env is never regenerated."""
        create_test_env.env_path.write_text("TEST_SECRET=existing_secret\n")

        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("TEST_SECRET")
        assert bootstrapper._resolve_generated(definition) == "existing_secret"

    def test_resolve_computed(self, create_test_env):
        """A computed setting interpolates its already-resolved dependencies."""
        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        # Dependencies must be resolved before the template is evaluated.
        bootstrapper.resolved_settings["TEST_FROM_TOML"] = "1.0.0"
        bootstrapper.resolved_settings["TEST_FROM_ENV"] = "test"

        definition = bootstrapper.schema.get("TEST_COMPUTED")
        assert bootstrapper._resolve_computed(definition) == "1.0.0-test"

    def test_resolve_with_transform(self, create_test_env):
        """A transform function is applied to the raw TOML value."""
        bootstrapper = self._make_bootstrapper(create_test_env)
        bootstrapper._load_sources()

        definition = bootstrapper.schema.get("PYTHON_VERSION")
        value = bootstrapper._resolve_from_toml(definition)

        assert value == "3.14.2"  # Transformed from "==3.14.2"

    def test_full_bootstrap(self, create_test_env):
        """A full bootstrap writes .env and .env.make with resolved values."""
        self._make_bootstrapper(create_test_env).bootstrap()

        # Both output files are created.
        assert create_test_env.env_path.exists()
        assert (create_test_env.base_dir / ".env.make").exists()

        env_content = create_test_env.env_path.read_text()
        assert "TEST_FROM_TOML=1.0.0" in env_content
        assert "TEST_FROM_ENV=default_value" in env_content
        assert "TEST_SECRET=" in env_content
        assert "TEST_COMPUTED=1.0.0-default_value" in env_content

    def test_bootstrap_preserves_secrets(self, create_test_env):
        """Re-running bootstrap keeps secrets already stored in .env."""
        create_test_env.env_path.write_text("""
TEST_FROM_ENV=old_value
TEST_SECRET=my_secret_123
""")

        self._make_bootstrapper(create_test_env).bootstrap()

        env_content = create_test_env.env_path.read_text()
        assert "TEST_SECRET=my_secret_123" in env_content

    def test_validation_error(self, tmp_path, test_toml_content):
        """A default violating its own validator aborts the bootstrap."""
        # Default 150 is outside the declared range 1..100.
        toml_with_validation = (
            test_toml_content
            + """
[tool.alfred.settings_schema.TEST_VALIDATED]
type = "integer"
source = "env"
default = 150
validator = "range:1:100"
description = "Validated setting"
category = "test"
"""
        )
        (tmp_path / "pyproject.toml").write_text(toml_with_validation)
        (tmp_path / ".env.example").write_text("TEST_VALIDATED=\n")

        source = ConfigSource.from_base_dir(tmp_path)
        bootstrapper = self._make_bootstrapper(source)

        with pytest.raises(ValueError, match="Validation errors"):
            bootstrapper.bootstrap()

    def test_write_env_make_only_exports(self, create_test_env):
        """Only settings flagged export_to_env_make land in .env.make."""
        # Append a build-category setting flagged for .env.make export.
        toml_content = create_test_env.toml_path.read_text() + """
[tool.alfred.settings_schema.EXPORTED_VAR]
type = "string"
source = "env"
default = "exported"
export_to_env_make = true
category = "build"
"""
        create_test_env.toml_path.write_text(toml_content)

        # Helper re-loads the schema, picking up the appended entry.
        self._make_bootstrapper(create_test_env).bootstrap()

        env_make_content = (create_test_env.base_dir / ".env.make").read_text()
        assert "EXPORTED_VAR=exported" in env_make_content
        # Non-exported vars should not be in .env.make.
        assert "TEST_FROM_ENV" not in env_make_content
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfigSource:
    """Tests for the ConfigSource dataclass."""

    def test_from_base_dir(self, tmp_path):
        """All derived paths hang directly off the given base directory."""
        source = ConfigSource.from_base_dir(tmp_path)

        assert source.base_dir == tmp_path
        assert source.toml_path == tmp_path / "pyproject.toml"
        assert source.env_path == tmp_path / ".env"
        assert source.env_example_path == tmp_path / ".env.example"

    def test_from_base_dir_default(self):
        """Without an argument the base dir defaults to an existing path."""
        source = ConfigSource.from_base_dir()

        assert source.base_dir.exists()
        assert source.toml_path.name == "pyproject.toml"
|
||||||
741
tests/test_settings_bootstrap_advanced.py
Normal file
741
tests/test_settings_bootstrap_advanced.py
Normal file
@@ -0,0 +1,741 @@
|
|||||||
|
"""Advanced tests for settings bootstrap - template preservation and edge cases."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from alfred.settings_bootstrap import ConfigSource, SettingsBootstrap
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def test_toml_with_all_types(tmp_path):
    """Write a pyproject.toml covering every value type; return its dir."""
    # string / integer / float / boolean / secret / computed — one of each.
    (tmp_path / "pyproject.toml").write_text("""
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.poetry.dependencies]
python = "^3.14"

[tool.alfred.settings_schema.STRING_VAR]
type = "string"
source = "env"
default = "default_string"
description = "String variable"
category = "test"

[tool.alfred.settings_schema.INT_VAR]
type = "integer"
source = "env"
default = 42
description = "Integer variable"
category = "test"

[tool.alfred.settings_schema.FLOAT_VAR]
type = "float"
source = "env"
default = 3.14
description = "Float variable"
category = "test"

[tool.alfred.settings_schema.BOOL_VAR]
type = "boolean"
source = "env"
default = true
description = "Boolean variable"
category = "test"

[tool.alfred.settings_schema.SECRET_VAR]
type = "secret"
source = "generated"
secret_rule = "16:hex"
description = "Secret variable"
category = "security"

[tool.alfred.settings_schema.COMPUTED_VAR]
type = "computed"
source = "computed"
compute_from = ["STRING_VAR", "INT_VAR"]
compute_template = "{STRING_VAR}_{INT_VAR}"
description = "Computed variable"
category = "test"
""")
    return tmp_path
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def test_env_example(tmp_path):
    """Write a .env.example template with comments and sections; return its path."""
    # Comments, blank lines, and a non-schema CUSTOM_VAR exercise the
    # template-preservation behavior of the bootstrapper.
    env_example_path = tmp_path / ".env.example"
    env_example_path.write_text("""# Test configuration
STRING_VAR=
INT_VAR=
FLOAT_VAR=

# Boolean settings
BOOL_VAR=

# Security
SECRET_VAR=

# Computed values
COMPUTED_VAR=

# Custom section
CUSTOM_VAR=custom_value
""")
    return env_example_path
|
||||||
|
|
||||||
|
|
||||||
|
class TestTemplatePreservation:
    """Test that .env.example template structure is preserved.

    The three tests shared identical bootstrap boilerplate; it is
    factored into `_bootstrapped_env`.
    """

    @staticmethod
    def _bootstrapped_env(base_dir):
        """Run a full bootstrap rooted at *base_dir* and return the .env text."""
        from alfred.settings_schema import load_schema

        source = ConfigSource.from_base_dir(base_dir)
        SettingsBootstrap(source, load_schema(base_dir)).bootstrap()
        return source.env_path.read_text()

    def test_preserves_comments(self, test_toml_with_all_types, test_env_example):
        """Comments from .env.example survive the bootstrap."""
        env_content = self._bootstrapped_env(test_toml_with_all_types)

        for comment in (
            "# Test configuration",
            "# Boolean settings",
            "# Security",
            "# Computed values",
        ):
            assert comment in env_content

    def test_preserves_empty_lines(self, test_toml_with_all_types, test_env_example):
        """Blank separator lines from .env.example survive the bootstrap."""
        env_content = self._bootstrapped_env(test_toml_with_all_types)

        # At least one empty line means the section structure was kept.
        assert "" in env_content.split("\n")

    def test_preserves_variable_order(self, test_toml_with_all_types, test_env_example):
        """Variables appear in the same order as in .env.example."""
        env_content = self._bootstrapped_env(test_toml_with_all_types)

        positions = [
            env_content.find(f"{name}=")
            for name in ("STRING_VAR", "INT_VAR", "FLOAT_VAR", "BOOL_VAR")
        ]
        # Every variable must be present (find() never returns -1) ...
        assert all(pos >= 0 for pos in positions)
        # ... and strictly in template order.
        assert all(a < b for a, b in zip(positions, positions[1:]))
|
||||||
|
|
||||||
|
|
||||||
|
class TestSecretPreservation:
    """Test that secrets are never overwritten."""

    @staticmethod
    def _read_secret(env_content: str, name: str) -> str:
        """Return the value of *name* from rendered .env content.

        Splits on the first ``=`` only: generated base64 secrets may end in
        ``=`` padding, so ``line.split("=")[1]`` would silently truncate them.
        """
        for line in env_content.split("\n"):
            if line.startswith(f"{name}="):
                return line.split("=", 1)[1]
        raise AssertionError(f"{name} not found in .env content")

    def test_preserves_existing_secrets(
        self, test_toml_with_all_types, test_env_example
    ):
        """Test that existing secrets are preserved across multiple bootstraps."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        source = ConfigSource.from_base_dir(test_toml_with_all_types)

        # First bootstrap - generates secret
        bootstrapper1 = SettingsBootstrap(source, schema)
        bootstrapper1.bootstrap()

        secret_1 = self._read_secret(source.env_path.read_text(), "SECRET_VAR")

        # Second bootstrap - should preserve secret
        bootstrapper2 = SettingsBootstrap(source, schema)
        bootstrapper2.bootstrap()

        secret_2 = self._read_secret(source.env_path.read_text(), "SECRET_VAR")

        assert secret_1 == secret_2
        assert len(secret_1) == 32  # 16 hex bytes

    def test_multiple_secrets_preserved(self, tmp_path):
        """Test that multiple secrets (hex and b64 rules) are all preserved."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.SECRET_1]
type = "secret"
source = "generated"
secret_rule = "16:hex"
category = "security"

[tool.alfred.settings_schema.SECRET_2]
type = "secret"
source = "generated"
secret_rule = "32:b64"
category = "security"

[tool.alfred.settings_schema.SECRET_3]
type = "secret"
source = "generated"
secret_rule = "8:hex"
category = "security"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("SECRET_1=\nSECRET_2=\nSECRET_3=\n")

        from alfred.settings_schema import load_schema

        schema = load_schema(tmp_path)
        source = ConfigSource.from_base_dir(tmp_path)

        # First bootstrap
        bootstrapper1 = SettingsBootstrap(source, schema)
        bootstrapper1.bootstrap()

        env_content_1 = source.env_path.read_text()

        # Second bootstrap
        bootstrapper2 = SettingsBootstrap(source, schema)
        bootstrapper2.bootstrap()

        env_content_2 = source.env_path.read_text()

        # All secrets should be identical
        assert env_content_1 == env_content_2
class TestCustomVariables:
    """Custom variables (absent from the schema) must survive bootstrapping."""

    def test_preserves_custom_variables_from_env(
        self, test_toml_with_all_types, test_env_example
    ):
        """Variables appended manually to .env are kept across re-bootstraps."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        config = ConfigSource.from_base_dir(test_toml_with_all_types)

        # Initial bootstrap produces the managed .env file.
        SettingsBootstrap(config, schema).bootstrap()

        # Simulate a user hand-editing the file afterwards.
        with open(config.env_path, "a") as handle:
            handle.write("\nMY_CUSTOM_VAR=custom_value\n")
            handle.write("ANOTHER_CUSTOM=another_value\n")

        # Re-running the bootstrap must not discard the manual additions.
        SettingsBootstrap(config, schema).bootstrap()

        rendered = config.env_path.read_text()
        assert "MY_CUSTOM_VAR=custom_value" in rendered
        assert "ANOTHER_CUSTOM=another_value" in rendered

    def test_custom_variables_in_dedicated_section(
        self, test_toml_with_all_types, test_env_example
    ):
        """Out-of-schema variables are grouped under a dedicated header."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        config = ConfigSource.from_base_dir(test_toml_with_all_types)

        # Start from an .env that contains only an out-of-schema variable.
        config.env_path.write_text("MY_CUSTOM_VAR=test\n")

        SettingsBootstrap(config, schema).bootstrap()

        rendered = config.env_path.read_text()
        assert "# --- CUSTOM VARIABLES ---" in rendered
        assert "MY_CUSTOM_VAR=test" in rendered

    def test_preserves_custom_from_env_example(
        self, test_toml_with_all_types, test_env_example
    ):
        """Variables present only in .env.example are carried into .env."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        config = ConfigSource.from_base_dir(test_toml_with_all_types)

        SettingsBootstrap(config, schema).bootstrap()

        # CUSTOM_VAR is in .env.example but not in schema
        assert "CUSTOM_VAR=custom_value" in config.env_path.read_text()
class TestBooleanHandling:
    """Booleans must round-trip through .env as lowercase strings."""

    def test_booleans_written_as_lowercase(
        self, test_toml_with_all_types, test_env_example
    ):
        """A True default is serialized as 'true', never 'True' or 'TRUE'."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        config = ConfigSource.from_base_dir(test_toml_with_all_types)

        SettingsBootstrap(config, schema).bootstrap()

        rendered = config.env_path.read_text()
        assert "BOOL_VAR=true" in rendered
        assert "BOOL_VAR=True" not in rendered
        assert "BOOL_VAR=TRUE" not in rendered

    def test_false_boolean_written_as_lowercase(self, tmp_path):
        """A False default is serialized as 'false'."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.BOOL_FALSE]
type = "boolean"
source = "env"
default = false
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("BOOL_FALSE=\n")

        from alfred.settings_schema import load_schema

        config = ConfigSource.from_base_dir(tmp_path)
        SettingsBootstrap(config, load_schema(tmp_path)).bootstrap()

        rendered = config.env_path.read_text()
        assert "BOOL_FALSE=false" in rendered
        assert "BOOL_FALSE=False" not in rendered

    def test_boolean_parsing_from_env(self, tmp_path):
        """Common truthy/falsy spellings in .env all resolve to the right bool."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.BOOL_VAR]
type = "boolean"
source = "env"
default = false
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("BOOL_VAR=\n")

        from alfred.settings_schema import load_schema

        schema = load_schema(tmp_path)
        config = ConfigSource.from_base_dir(tmp_path)

        # Map of raw .env spelling -> expected parsed boolean.
        expectations = {
            "true": True,
            "TRUE": True,
            "True": True,
            "1": True,
            "yes": True,
            "false": False,
            "FALSE": False,
            "False": False,
            "0": False,
            "no": False,
        }

        for raw, expected in expectations.items():
            config.env_path.write_text(f"BOOL_VAR={raw}\n")

            bootstrapper = SettingsBootstrap(config, schema)
            bootstrapper._load_sources()
            bootstrapper._resolve_settings()

            assert bootstrapper.resolved_settings["BOOL_VAR"] == expected
class TestComputedVariables:
    """Computed variables must be derived from their inputs at bootstrap time."""

    def test_computed_variables_written_to_env(
        self, test_toml_with_all_types, test_env_example
    ):
        """A computed variable lands in .env with its computed value."""
        from alfred.settings_schema import load_schema

        schema = load_schema(test_toml_with_all_types)
        config = ConfigSource.from_base_dir(test_toml_with_all_types)

        SettingsBootstrap(config, schema).bootstrap()

        assert "COMPUTED_VAR=default_string_42" in config.env_path.read_text()

    def test_computed_uri_example(self, tmp_path):
        """A computed URI (like MONGO_URI) embeds the generated password."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.DB_HOST]
type = "string"
source = "env"
default = "localhost"
category = "database"

[tool.alfred.settings_schema.DB_PORT]
type = "integer"
source = "env"
default = 5432
category = "database"

[tool.alfred.settings_schema.DB_USER]
type = "string"
source = "env"
default = "user"
category = "database"

[tool.alfred.settings_schema.DB_PASSWORD]
type = "secret"
source = "generated"
secret_rule = "16:hex"
category = "security"

[tool.alfred.settings_schema.DB_URI]
type = "computed"
source = "computed"
compute_from = ["DB_USER", "DB_PASSWORD", "DB_HOST", "DB_PORT"]
compute_template = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/db"
category = "database"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text(
            "DB_HOST=\nDB_PORT=\nDB_USER=\nDB_PASSWORD=\nDB_URI=\n"
        )

        from alfred.settings_schema import load_schema

        config = ConfigSource.from_base_dir(tmp_path)
        SettingsBootstrap(config, load_schema(tmp_path)).bootstrap()

        rendered = config.env_path.read_text()

        # URI assembled from the template with defaults + generated password.
        assert "DB_URI=postgresql://user:" in rendered
        assert "@localhost:5432/db" in rendered

        # The password embedded in the URI must match DB_PASSWORD itself.
        import re

        in_uri = re.search(r"DB_URI=postgresql://user:([^@]+)@", rendered)
        standalone = re.search(r"DB_PASSWORD=([^\n]+)", rendered)

        assert in_uri and standalone
        assert in_uri.group(1) == standalone.group(1)
class TestEdgeCases:
    """Test edge cases and error conditions."""

    def test_missing_env_example(self, tmp_path):
        """Bootstrapping without an .env.example template must fail loudly."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.TEST_VAR]
type = "string"
source = "env"
default = "test"
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)

        from alfred.settings_schema import load_schema

        config = ConfigSource.from_base_dir(tmp_path)
        bootstrapper = SettingsBootstrap(config, load_schema(tmp_path))

        with pytest.raises(FileNotFoundError, match=".env.example not found"):
            bootstrapper.bootstrap()

    def test_empty_env_example(self, tmp_path):
        """An empty .env.example still yields a generated .env file."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.TEST_VAR]
type = "string"
source = "env"
default = "test"
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("")

        from alfred.settings_schema import load_schema

        config = ConfigSource.from_base_dir(tmp_path)
        SettingsBootstrap(config, load_schema(tmp_path)).bootstrap()

        # Should create .env even if .env.example is empty
        assert config.env_path.exists()

    def test_variable_with_equals_in_value(self, tmp_path):
        """Values containing '=' (e.g. query strings) round-trip unchanged."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.URL_VAR]
type = "string"
source = "env"
default = "http://example.com?key=value"
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("URL_VAR=\n")

        from alfred.settings_schema import load_schema

        config = ConfigSource.from_base_dir(tmp_path)
        SettingsBootstrap(config, load_schema(tmp_path)).bootstrap()

        assert "URL_VAR=http://example.com?key=value" in config.env_path.read_text()

    def test_preserves_existing_values_on_update(self, tmp_path):
        """User-edited values win over schema defaults on re-bootstrap."""
        toml_content = """
[tool.poetry]
name = "test"
version = "1.0.0"

[tool.alfred.settings_schema.VAR1]
type = "string"
source = "env"
default = "default1"
category = "test"

[tool.alfred.settings_schema.VAR2]
type = "string"
source = "env"
default = "default2"
category = "test"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)
        (tmp_path / ".env.example").write_text("VAR1=\nVAR2=\n")

        from alfred.settings_schema import load_schema

        schema = load_schema(tmp_path)
        config = ConfigSource.from_base_dir(tmp_path)

        # Generate the initial .env, then simulate user edits.
        SettingsBootstrap(config, schema).bootstrap()
        config.env_path.write_text("VAR1=custom1\nVAR2=custom2\n")

        # Re-bootstrap and verify the edits survive.
        SettingsBootstrap(config, schema).bootstrap()

        rendered = config.env_path.read_text()
        assert "VAR1=custom1" in rendered
        assert "VAR2=custom2" in rendered
class TestIntegration:
    """Integration tests with realistic scenarios."""

    def test_full_workflow_like_alfred(self, tmp_path):
        """Test a full workflow similar to Alfred's actual usage.

        Covers TOML-sourced, env-sourced, generated, and computed settings
        across three bootstrap runs: initial generation, idempotent re-run,
        and preservation of a hand-added custom variable.
        """
        toml_content = """
[tool.poetry]
name = "alfred"
version = "0.1.7"

[tool.poetry.dependencies]
python = "^3.14"

[tool.alfred.settings_schema.ALFRED_VERSION]
type = "string"
source = "toml"
toml_path = "tool.poetry.version"
category = "build"
export_to_env_make = true

[tool.alfred.settings_schema.HOST]
type = "string"
source = "env"
default = "0.0.0.0"
category = "app"

[tool.alfred.settings_schema.PORT]
type = "integer"
source = "env"
default = 3080
category = "app"

[tool.alfred.settings_schema.JWT_SECRET]
type = "secret"
source = "generated"
secret_rule = "32:hex"
category = "security"

[tool.alfred.settings_schema.MONGO_HOST]
type = "string"
source = "env"
default = "mongodb"
category = "database"

[tool.alfred.settings_schema.MONGO_PASSWORD]
type = "secret"
source = "generated"
secret_rule = "16:hex"
category = "security"

[tool.alfred.settings_schema.MONGO_URI]
type = "computed"
source = "computed"
compute_from = ["MONGO_HOST", "MONGO_PASSWORD"]
compute_template = "mongodb://user:{MONGO_PASSWORD}@{MONGO_HOST}:27017/db"
category = "database"

[tool.alfred.settings_schema.DEBUG_MODE]
type = "boolean"
source = "env"
default = false
category = "app"
"""
        (tmp_path / "pyproject.toml").write_text(toml_content)

        env_example_content = """# Application settings
HOST=0.0.0.0
PORT=3080
DEBUG_MODE=false

# Security
JWT_SECRET=

# Database
MONGO_HOST=mongodb
MONGO_PASSWORD=
MONGO_URI=

# Build info
ALFRED_VERSION=
"""
        (tmp_path / ".env.example").write_text(env_example_content)

        from alfred.settings_schema import load_schema

        schema = load_schema(tmp_path)
        source = ConfigSource.from_base_dir(tmp_path)

        # First bootstrap
        bootstrapper1 = SettingsBootstrap(source, schema)
        bootstrapper1.bootstrap()

        env_content_1 = source.env_path.read_text()

        # Verify structure
        assert "# Application settings" in env_content_1
        assert "HOST=0.0.0.0" in env_content_1
        assert "PORT=3080" in env_content_1
        assert "DEBUG_MODE=false" in env_content_1  # lowercase!
        assert "ALFRED_VERSION=0.1.7" in env_content_1

        # JWT_SECRET must be generated: match the exact line with startswith
        # (a bare substring match could hit unrelated or commented lines) and
        # check the secret value itself, not the whole line including prefix.
        jwt_line = next(
            (
                line
                for line in env_content_1.split("\n")
                if line.startswith("JWT_SECRET=")
            ),
            None,
        )
        assert jwt_line is not None
        assert len(jwt_line.split("=", 1)[1]) == 64  # 32 hex bytes

        assert "MONGO_URI=mongodb://user:" in env_content_1

        # Second bootstrap - should preserve everything
        bootstrapper2 = SettingsBootstrap(source, schema)
        bootstrapper2.bootstrap()

        env_content_2 = source.env_path.read_text()

        # Everything should be identical
        assert env_content_1 == env_content_2

        # Add custom variable
        with open(source.env_path, "a") as f:
            f.write("\nMY_CUSTOM_SETTING=test123\n")

        # Third bootstrap - should preserve custom
        bootstrapper3 = SettingsBootstrap(source, schema)
        bootstrapper3.bootstrap()

        env_content_3 = source.env_path.read_text()

        assert "MY_CUSTOM_SETTING=test123" in env_content_3
        assert "# --- CUSTOM VARIABLES ---" in env_content_3
332
tests/test_settings_schema.py
Normal file
332
tests/test_settings_schema.py
Normal file
@@ -0,0 +1,332 @@
|
|||||||
|
"""Tests for settings schema parser."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from alfred.settings_schema import (
|
||||||
|
SettingDefinition,
|
||||||
|
SettingSource,
|
||||||
|
SettingType,
|
||||||
|
load_schema,
|
||||||
|
validate_value,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def minimal_schema_toml():
    """Minimal valid schema TOML.

    Declares five settings covering the main parser paths: a plain string
    with a default, a range-validated integer, a generated secret, a value
    computed from two other settings, and an optional (non-required) string.
    """
    return """
[tool.alfred.settings_schema.TEST_STRING]
type = "string"
source = "env"
default = "test_value"
description = "Test string setting"
category = "test"

[tool.alfred.settings_schema.TEST_INTEGER]
type = "integer"
source = "env"
default = 42
description = "Test integer setting"
category = "test"
validator = "range:1:100"

[tool.alfred.settings_schema.TEST_SECRET]
type = "secret"
source = "generated"
secret_rule = "32:b64"
description = "Test secret"
category = "security"
required = true

[tool.alfred.settings_schema.TEST_COMPUTED]
type = "computed"
source = "computed"
compute_from = ["TEST_STRING", "TEST_INTEGER"]
compute_template = "{TEST_STRING}_{TEST_INTEGER}"
description = "Test computed"
category = "test"

[tool.alfred.settings_schema.TEST_OPTIONAL]
type = "string"
source = "env"
required = false
description = "Optional setting"
category = "test"
"""
@pytest.fixture
def create_schema_file(tmp_path):
    """Factory fixture: writes a pyproject.toml embedding the given schema."""

    def _write_schema(content: str):
        # Prepend the minimal [tool.poetry] header every schema needs.
        full_content = f"""
[tool.poetry]
name = "test"
version = "1.0.0"

{content}
"""
        (tmp_path / "pyproject.toml").write_text(full_content)
        return tmp_path

    return _write_schema
class TestSettingDefinition:
    """Test SettingDefinition dataclass."""

    def test_create_definition(self):
        """A definition keeps its fields and defaults `required` to True."""
        setting = SettingDefinition(
            name="TEST_SETTING",
            type=SettingType.STRING,
            source=SettingSource.ENV,
            description="Test setting",
            category="test",
            default="default_value",
        )

        assert setting.name == "TEST_SETTING"
        assert setting.type == SettingType.STRING
        assert setting.source == SettingSource.ENV
        assert setting.default == "default_value"
        assert setting.required is True  # Default
class TestSettingsSchema:
    """Test SettingsSchema parser."""

    def test_parse_schema(self, create_schema_file, minimal_schema_toml):
        """All five definitions from the TOML end up in the schema."""
        schema = load_schema(create_schema_file(minimal_schema_toml))

        assert len(schema) == 5
        for expected in (
            "TEST_STRING",
            "TEST_INTEGER",
            "TEST_SECRET",
            "TEST_COMPUTED",
            "TEST_OPTIONAL",
        ):
            assert expected in schema.definitions

    def test_get_definition(self, create_schema_file, minimal_schema_toml):
        """Lookup by name returns the fully-populated definition."""
        schema = load_schema(create_schema_file(minimal_schema_toml))

        found = schema.get("TEST_STRING")
        assert found is not None
        assert found.name == "TEST_STRING"
        assert found.type == SettingType.STRING
        assert found.default == "test_value"

    def test_get_by_category(self, create_schema_file, minimal_schema_toml):
        """Definitions can be filtered by their category label."""
        schema = load_schema(create_schema_file(minimal_schema_toml))

        assert len(schema.get_by_category("test")) == 4
        assert len(schema.get_by_category("security")) == 1

    def test_get_by_source(self, create_schema_file, minimal_schema_toml):
        """Definitions can be filtered by where their value comes from."""
        schema = load_schema(create_schema_file(minimal_schema_toml))

        assert len(schema.get_by_source(SettingSource.ENV)) == 3
        assert len(schema.get_by_source(SettingSource.GENERATED)) == 1
        assert len(schema.get_by_source(SettingSource.COMPUTED)) == 1

    def test_get_required(self, create_schema_file, minimal_schema_toml):
        """Only TEST_OPTIONAL is excluded from the required set."""
        schema = load_schema(create_schema_file(minimal_schema_toml))

        assert len(schema.get_required()) == 4

    def test_parse_types(self, create_schema_file):
        """Defaults keep their native TOML type after parsing."""
        schema_toml = """
[tool.alfred.settings_schema.STR_SETTING]
type = "string"
source = "env"
default = "text"

[tool.alfred.settings_schema.INT_SETTING]
type = "integer"
source = "env"
default = 42

[tool.alfred.settings_schema.FLOAT_SETTING]
type = "float"
source = "env"
default = 3.14

[tool.alfred.settings_schema.BOOL_SETTING]
type = "boolean"
source = "env"
default = true
"""
        schema = load_schema(create_schema_file(schema_toml))

        assert schema.get("STR_SETTING").default == "text"
        assert schema.get("INT_SETTING").default == 42
        assert schema.get("FLOAT_SETTING").default == 3.14
        assert schema.get("BOOL_SETTING").default is True

    def test_missing_schema_section(self, tmp_path):
        """A pyproject.toml without the schema table raises KeyError."""
        (tmp_path / "pyproject.toml").write_text("""
[tool.poetry]
name = "test"
version = "1.0.0"
""")

        with pytest.raises(KeyError, match="settings_schema"):
            load_schema(tmp_path)
class TestValidateValue:
    """Test value validation."""

    @staticmethod
    def _definition(**overrides):
        """Build a SettingDefinition with sensible defaults for these tests."""
        params = {
            "name": "TEST",
            "type": SettingType.STRING,
            "source": SettingSource.ENV,
        }
        params.update(overrides)
        return SettingDefinition(**params)

    def test_validate_string(self):
        """A str passes; any other type is rejected."""
        definition = self._definition(type=SettingType.STRING)

        assert validate_value(definition, "test") is True

        with pytest.raises(ValueError, match="must be string"):
            validate_value(definition, 123)

    def test_validate_integer(self):
        """An int passes; a non-int is rejected."""
        definition = self._definition(type=SettingType.INTEGER)

        assert validate_value(definition, 42) is True

        with pytest.raises(ValueError, match="must be integer"):
            validate_value(definition, "not an int")

    def test_validate_float(self):
        """Floats pass, ints are accepted for float settings, strings are not."""
        definition = self._definition(type=SettingType.FLOAT)

        assert validate_value(definition, 3.14) is True
        assert validate_value(definition, 42) is True  # int is ok for float

        with pytest.raises(ValueError, match="must be float"):
            validate_value(definition, "not a float")

    def test_validate_required(self):
        """None is rejected when the setting is required."""
        definition = self._definition(required=True)

        with pytest.raises(ValueError, match="is required"):
            validate_value(definition, None)

    def test_validate_optional(self):
        """None is accepted when the setting is optional."""
        definition = self._definition(required=False)

        assert validate_value(definition, None) is True

    def test_validate_range(self):
        """Integer range validator accepts the bounds, rejects outside values."""
        definition = self._definition(
            type=SettingType.INTEGER, validator="range:1:100"
        )

        for in_range in (50, 1, 100):
            assert validate_value(definition, in_range) is True

        for out_of_range in (0, 101):
            with pytest.raises(ValueError, match=r"must be between .* and .*, got"):
                validate_value(definition, out_of_range)

    def test_validate_float_range(self):
        """Float range validator accepts the bounds, rejects outside values."""
        definition = self._definition(
            type=SettingType.FLOAT, validator="range:0.0:2.0"
        )

        for in_range in (1.5, 0.0, 2.0):
            assert validate_value(definition, in_range) is True

        for out_of_range in (-0.1, 2.1):
            with pytest.raises(ValueError, match="must be between 0.0 and 2.0"):
                validate_value(definition, out_of_range)

    def test_invalid_validator(self):
        """An unrecognized validator spec raises a descriptive error."""
        definition = self._definition(validator="unknown:validator")

        with pytest.raises(ValueError, match="Unknown validator"):
            validate_value(definition, "test")
class TestSchemaIteration:
|
||||||
|
"""Test schema iteration."""
|
||||||
|
|
||||||
|
def test_iterate_schema(self, create_schema_file, minimal_schema_toml):
|
||||||
|
"""Test iterating over schema definitions."""
|
||||||
|
base_dir = create_schema_file(minimal_schema_toml)
|
||||||
|
schema = load_schema(base_dir)
|
||||||
|
|
||||||
|
definitions = list(schema)
|
||||||
|
assert len(definitions) == 5
|
||||||
|
|
||||||
|
names = [d.name for d in definitions]
|
||||||
|
assert "TEST_STRING" in names
|
||||||
|
assert "TEST_INTEGER" in names
|
||||||
Reference in New Issue
Block a user