# alfred/tests/test_api_edge_cases.py
"""Edge case tests for FastAPI endpoints."""
import pytest
from unittest.mock import Mock, patch
from fastapi.testclient import TestClient
class TestChatCompletionsEdgeCases:
    """Edge case tests for /v1/chat/completions endpoint.

    Covers oversized/unicode/special-character payloads, malformed message
    objects, role validation, streaming with tool calls, and rapid sequential
    requests.  Several tests are skipped until a local LLM backend is running.

    NOTE(review): tests that assign ``agent.llm = mock_llm`` never restore the
    original attribute, so the mock leaks into later tests — presumably a
    fixture resets the agent; confirm, or use ``patch.object`` instead.
    """

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_very_long_message(self, memory):
        """Should handle very long user message."""
        from alfred.agent import agent
        from alfred.app import app

        # Patch the agent's LLM directly
        mock_llm = Mock()
        mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
        agent.llm = mock_llm
        client = TestClient(app)
        long_message = "x" * 100000  # 100 KB of payload
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": long_message}],
            },
        )
        assert response.status_code == 200

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_unicode_message(self, memory):
        """Should handle unicode in message."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {
            "role": "assistant",
            "content": "日本語の応答",
        }
        agent.llm = mock_llm
        client = TestClient(app)
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "日本語のメッセージ 🎬"}],
            },
        )
        assert response.status_code == 200
        content = response.json()["choices"][0]["message"]["content"]
        # Either the mocked reply round-trips, or at minimum non-empty text.
        assert "日本語" in content or len(content) > 0

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_special_characters_in_message(self, memory):
        """Should handle special characters."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
        agent.llm = mock_llm
        client = TestClient(app)
        special_message = 'Test with "quotes" and \\backslash and \n newline'
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": special_message}],
            },
        )
        assert response.status_code == 200

    def test_empty_content_in_message(self, memory):
        """Should handle empty content in message."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm.complete.return_value = "Response"
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"role": "user", "content": ""}],
                },
            )
            # Empty content should be rejected
            assert response.status_code == 422

    def test_null_content_in_message(self, memory):
        """Should handle null content in message."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"role": "user", "content": None}],
                },
            )
            assert response.status_code == 422

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_missing_content_field(self, memory):
        """Should handle missing content field."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"role": "user"}],  # No content
                },
            )
            # May accept or reject depending on validation
            assert response.status_code in [200, 400, 422]

    def test_missing_role_field(self, memory):
        """Should handle missing role field."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"content": "Hello"}],  # No role
                },
            )
            # Should reject or accept depending on validation
            assert response.status_code in [200, 400, 422]

    def test_invalid_role(self, memory):
        """Should handle invalid role."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm.complete.return_value = "Response"
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"role": "invalid_role", "content": "Hello"}],
                },
            )
            # Should reject or ignore invalid role
            assert response.status_code in [200, 400, 422]

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_many_messages(self, memory):
        """Should handle many messages in conversation."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
        agent.llm = mock_llm
        client = TestClient(app)
        # 201 messages total: 100 user/assistant pairs plus a final user turn.
        messages = []
        for i in range(100):
            messages.append({"role": "user", "content": f"Message {i}"})
            messages.append({"role": "assistant", "content": f"Response {i}"})
        messages.append({"role": "user", "content": "Final message"})
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": messages,
            },
        )
        assert response.status_code == 200

    def test_only_system_messages(self, memory):
        """Should reject if only system messages."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [
                        {"role": "system", "content": "You are helpful"},
                        {"role": "system", "content": "Be concise"},
                    ],
                },
            )
            assert response.status_code == 422

    def test_only_assistant_messages(self, memory):
        """Should reject if only assistant messages."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [
                        {"role": "assistant", "content": "Hello"},
                    ],
                },
            )
            assert response.status_code == 422

    def test_messages_not_array(self, memory):
        """Should reject if messages is not array."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": "not an array",
                },
            )
            assert response.status_code == 422  # Pydantic validation error

    def test_message_not_object(self, memory):
        """Should handle message that is not object."""
        with patch("alfred.app.DeepSeekClient") as mock_llm_class:
            mock_llm = Mock()
            mock_llm_class.return_value = mock_llm
            from alfred.app import app

            client = TestClient(app)
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": ["not an object", 123, None],
                },
            )
            assert response.status_code == 422  # Pydantic validation error

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_extra_fields_in_request(self, memory):
        """Should ignore extra fields in request."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
        agent.llm = mock_llm
        client = TestClient(app)
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "Hello"}],
                "extra_field": "should be ignored",
                "temperature": 0.7,
                "max_tokens": 100,
            },
        )
        assert response.status_code == 200

    def test_streaming_with_tool_call(self, memory, real_folder):
        """Should handle streaming with tool execution."""
        from alfred.agent import agent
        from alfred.app import app
        from alfred.infrastructure.persistence import get_memory

        mem = get_memory()
        mem.ltm.set_config("download_folder", str(real_folder["downloads"]))

        # Mutable counter so the closure can track how many times the LLM
        # was invoked: first call returns a tool call, second a final answer.
        call_count = [0]

        def mock_complete(messages, tools=None):
            call_count[0] += 1
            if call_count[0] == 1:
                return {
                    "role": "assistant",
                    "content": None,
                    "tool_calls": [
                        {
                            "id": "call_1",
                            "function": {
                                "name": "list_folder",
                                "arguments": '{"folder_type": "download"}',
                            },
                        }
                    ],
                }
            return {"role": "assistant", "content": "Listed the folder."}

        mock_llm = Mock()
        mock_llm.complete = Mock(side_effect=mock_complete)
        agent.llm = mock_llm
        client = TestClient(app)
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "List downloads"}],
                "stream": True,
            },
        )
        assert response.status_code == 200

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_concurrent_requests_simulation(self, memory):
        """Should handle rapid sequential requests."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {"role": "assistant", "content": "Response"}
        agent.llm = mock_llm
        client = TestClient(app)
        for i in range(10):
            response = client.post(
                "/v1/chat/completions",
                json={
                    "model": "agent-media",
                    "messages": [{"role": "user", "content": f"Request {i}"}],
                },
            )
            assert response.status_code == 200

    @pytest.mark.skip(reason="502 - Local LLM not running yet")
    def test_llm_returns_json_in_response(self, memory):
        """Should handle LLM returning JSON in text response."""
        from alfred.agent import agent
        from alfred.app import app

        mock_llm = Mock()
        mock_llm.complete.return_value = {
            "role": "assistant",
            "content": '{"result": "some data", "count": 5}',
        }
        agent.llm = mock_llm
        client = TestClient(app)
        response = client.post(
            "/v1/chat/completions",
            json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "Give me JSON"}],
            },
        )
        assert response.status_code == 200
        content = response.json()["choices"][0]["message"]["content"]
        # JSON text should pass through as plain content, not be parsed away.
        assert "result" in content or len(content) > 0
class TestMemoryEndpointsEdgeCases:
    """Edge case tests for memory endpoints.

    Exercises /memory/state with large and unicode payloads, special
    characters in stored search results, and /memory/clear-session semantics
    (idempotency, LTM preservation).
    """

    def test_memory_state_with_large_data(self, memory):
        """Should handle large memory state."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            # Add lots of data to memory
            for i in range(100):
                memory.stm.add_message("user", f"Message {i}" * 100)
                memory.episodic.add_error("action", f"Error {i}")
            client = TestClient(app)
            response = client.get("/memory/state")
            assert response.status_code == 200
            data = response.json()
            assert "stm" in data

    def test_memory_state_with_unicode(self, memory):
        """Should handle unicode in memory state."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            memory.ltm.set_config("japanese", "日本語テスト")
            memory.stm.add_message("user", "🎬 Movie request")
            client = TestClient(app)
            response = client.get("/memory/state")
            assert response.status_code == 200
            data = response.json()
            # Unicode must survive the JSON round-trip intact.
            assert "日本語" in str(data)

    def test_search_results_with_special_chars(self, memory):
        """Should handle special characters in search results."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            memory.episodic.store_search_results(
                "Test <script>alert('xss')</script>",
                [{"name": "Result with \"quotes\" and 'apostrophes'"}],
            )
            client = TestClient(app)
            response = client.get("/memory/episodic/search-results")
            assert response.status_code == 200
            # Should be properly escaped in JSON
            data = response.json()
            assert "script" in data["query"]

    def test_clear_session_idempotent(self, memory):
        """Should be idempotent - multiple clears should work."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            client = TestClient(app)
            # Clear multiple times
            for _ in range(5):
                response = client.post("/memory/clear-session")
                assert response.status_code == 200

    def test_clear_session_preserves_ltm(self, memory):
        """Should preserve LTM after clear."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            memory.ltm.set_config("important", "data")
            memory.stm.add_message("user", "Hello")
            client = TestClient(app)
            client.post("/memory/clear-session")
            response = client.get("/memory/state")
            data = response.json()
            # LTM config survives; STM conversation is wiped.
            assert data["ltm"]["config"]["important"] == "data"
            assert data["stm"]["conversation_history"] == []
class TestHealthEndpointEdgeCases:
    """Edge case tests for health endpoint."""

    def test_health_returns_version(self, memory):
        """Should return version in health check."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            client = TestClient(app)
            response = client.get("/health")
            assert response.status_code == 200
            assert "version" in response.json()

    def test_health_with_query_params(self, memory):
        """Should ignore query parameters."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            client = TestClient(app)
            response = client.get("/health?extra=param&another=value")
            assert response.status_code == 200
class TestModelsEndpointEdgeCases:
    """Edge case tests for models endpoint."""

    def test_models_response_format(self, memory):
        """Should return OpenAI-compatible format."""
        with patch("alfred.app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from alfred.app import app

            client = TestClient(app)
            response = client.get("/v1/models")
            # Fix: assert success before parsing the body, so a failing
            # endpoint reports a clear status mismatch instead of a KeyError.
            assert response.status_code == 200
            data = response.json()
            assert data["object"] == "list"
            assert isinstance(data["data"], list)
            assert len(data["data"]) > 0
            # Each model entry must carry the OpenAI-compatible fields.
            assert "id" in data["data"][0]
            assert "object" in data["data"][0]
            assert "created" in data["data"][0]
            assert "owned_by" in data["data"][0]