"""Edge case tests for FastAPI endpoints."""
|
|
import pytest
|
|
import json
|
|
from unittest.mock import Mock, patch, MagicMock
|
|
from fastapi.testclient import TestClient
|
|
|
|
|
|
class TestChatCompletionsEdgeCases:
    """Edge case coverage for the /v1/chat/completions endpoint."""

    def test_very_long_message(self, memory):
        """A 100k-character user message is accepted."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            payload = {
                "model": "agent-media",
                "messages": [{"role": "user", "content": "x" * 100000}],
            }
            response = http.post("/v1/chat/completions", json=payload)

            assert response.status_code == 200

    def test_unicode_message(self, memory):
        """Non-ASCII text (Japanese, emoji) round-trips through the endpoint."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "日本語の応答"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            payload = {
                "model": "agent-media",
                "messages": [{"role": "user", "content": "日本語のメッセージ 🎬"}],
            }
            response = http.post("/v1/chat/completions", json=payload)

            assert response.status_code == 200
            content = response.json()["choices"][0]["message"]["content"]
            # Response may vary based on agent behavior
            assert "日本語" in content or len(content) > 0

    def test_special_characters_in_message(self, memory):
        """Quotes, backslashes, and newlines survive JSON transport."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            tricky = 'Test with "quotes" and \\backslash and \n newline'
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": tricky}],
            })

            assert response.status_code == 200

    def test_empty_content_in_message(self, memory):
        """An empty content string is rejected with 422."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": ""}],
            })

            # Empty content should be rejected
            assert response.status_code == 422

    def test_null_content_in_message(self, memory):
        """A null content value is rejected with 422."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": None}],
            })

            assert response.status_code == 422

    def test_missing_content_field(self, memory):
        """A message lacking its content key is handled without a crash."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user"}],  # No content
            })

            # May accept or reject depending on validation
            assert response.status_code in [200, 400, 422]

    def test_missing_role_field(self, memory):
        """A message lacking its role key is handled without a crash."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"content": "Hello"}],  # No role
            })

            # Should reject or accept depending on validation
            assert response.status_code in [200, 400, 422]

    def test_invalid_role(self, memory):
        """An unknown role value is either rejected or ignored."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "invalid_role", "content": "Hello"}],
            })

            # Should reject or ignore invalid role
            assert response.status_code in [200, 400, 422]

    def test_many_messages(self, memory):
        """A 201-message conversation history is accepted."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)

            history = []
            for turn in range(100):
                history.append({"role": "user", "content": f"Message {turn}"})
                history.append({"role": "assistant", "content": f"Response {turn}"})
            history.append({"role": "user", "content": "Final message"})

            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": history,
            })

            assert response.status_code == 200

    def test_only_system_messages(self, memory):
        """A request with no user message (system-only) is rejected."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [
                    {"role": "system", "content": "You are helpful"},
                    {"role": "system", "content": "Be concise"},
                ],
            })

            assert response.status_code == 422

    def test_only_assistant_messages(self, memory):
        """A request containing only assistant messages is rejected."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [
                    {"role": "assistant", "content": "Hello"},
                ],
            })

            assert response.status_code == 422

    def test_messages_not_array(self, memory):
        """A string where an array is expected triggers validation failure."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": "not an array",
            })

            # Pydantic validation error
            assert response.status_code == 422

    def test_message_not_object(self, memory):
        """Non-object entries in the messages array trigger validation failure."""
        with patch("app.DeepSeekClient") as llm_factory:
            llm_factory.return_value = Mock()

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": ["not an object", 123, None],
            })

            # Pydantic validation error
            assert response.status_code == 422

    def test_extra_fields_in_request(self, memory):
        """Unknown top-level fields (temperature, max_tokens, ...) are tolerated."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "Hello"}],
                "extra_field": "should be ignored",
                "temperature": 0.7,
                "max_tokens": 100,
            })

            assert response.status_code == 200

    def test_streaming_with_tool_call(self, memory, real_folder):
        """Streaming mode works when the agent executes a tool mid-turn."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            # First completion requests a tool call; second is the final answer.
            fake_llm.complete.side_effect = [
                '{"thought": "list", "action": {"name": "list_folder", "args": {"folder_type": "download"}}}',
                "Listed the folder.",
            ]
            llm_factory.return_value = fake_llm

            from app import app
            from infrastructure.persistence import get_memory

            mem = get_memory()
            mem.ltm.set_config("download_folder", str(real_folder["downloads"]))

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "List downloads"}],
                "stream": True,
            })

            assert response.status_code == 200

    def test_concurrent_requests_simulation(self, memory):
        """Ten rapid sequential requests all succeed."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            fake_llm.complete.return_value = "Response"
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            for n in range(10):
                response = http.post("/v1/chat/completions", json={
                    "model": "agent-media",
                    "messages": [{"role": "user", "content": f"Request {n}"}],
                })
                assert response.status_code == 200

    def test_llm_returns_json_in_response(self, memory):
        """A JSON-shaped LLM reply that is not a tool call is passed through."""
        with patch("app.DeepSeekClient") as llm_factory:
            fake_llm = Mock()
            # LLM returns JSON but not a tool call
            fake_llm.complete.return_value = '{"result": "some data", "count": 5}'
            llm_factory.return_value = fake_llm

            from app import app

            http = TestClient(app)
            response = http.post("/v1/chat/completions", json={
                "model": "agent-media",
                "messages": [{"role": "user", "content": "Give me JSON"}],
            })

            assert response.status_code == 200
            # Should return the JSON as-is since it's not a tool call
            content = response.json()["choices"][0]["message"]["content"]
            # May parse as tool call or return as text
            assert "result" in content or len(content) > 0
|
|
|
|
|
|
class TestMemoryEndpointsEdgeCases:
    """Edge case coverage for the /memory/* endpoints."""

    def test_memory_state_with_large_data(self, memory):
        """/memory/state stays healthy with hundreds of stored entries."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            # Bulk-load the memory stores before querying state.
            for idx in range(100):
                memory.stm.add_message("user", f"Message {idx}" * 100)
                memory.episodic.add_error("action", f"Error {idx}")

            http = TestClient(app)
            response = http.get("/memory/state")

            assert response.status_code == 200
            body = response.json()
            assert "stm" in body

    def test_memory_state_with_unicode(self, memory):
        """Unicode stored in memory survives JSON serialization."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            memory.ltm.set_config("japanese", "日本語テスト")
            memory.stm.add_message("user", "🎬 Movie request")

            http = TestClient(app)
            response = http.get("/memory/state")

            assert response.status_code == 200
            body = response.json()
            assert "日本語" in str(body)

    def test_search_results_with_special_chars(self, memory):
        """HTML/script fragments in stored results are safely JSON-escaped."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            memory.episodic.store_search_results(
                "Test <script>alert('xss')</script>",
                [{"name": "Result with \"quotes\" and 'apostrophes'"}],
            )

            http = TestClient(app)
            response = http.get("/memory/episodic/search-results")

            assert response.status_code == 200
            # Should be properly escaped in JSON
            body = response.json()
            assert "script" in body["query"]

    def test_clear_session_idempotent(self, memory):
        """Clearing the session repeatedly never fails."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            http = TestClient(app)
            for _ in range(5):
                response = http.post("/memory/clear-session")
                assert response.status_code == 200

    def test_clear_session_preserves_ltm(self, memory):
        """clear-session wipes STM conversation history but keeps LTM config."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            memory.ltm.set_config("important", "data")
            memory.stm.add_message("user", "Hello")

            http = TestClient(app)
            http.post("/memory/clear-session")

            body = http.get("/memory/state").json()

            assert body["ltm"]["config"]["important"] == "data"
            assert body["stm"]["conversation_history"] == []
|
|
|
|
|
|
class TestHealthEndpointEdgeCases:
    """Edge case coverage for the /health endpoint."""

    def test_health_returns_version(self, memory):
        """The health payload always carries a version field."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            http = TestClient(app)
            response = http.get("/health")

            assert response.status_code == 200
            assert "version" in response.json()

    def test_health_with_query_params(self, memory):
        """Stray query parameters do not affect the health check."""
        with patch("app.DeepSeekClient") as llm_patch:
            llm_patch.return_value = Mock()
            from app import app

            http = TestClient(app)
            response = http.get("/health?extra=param&another=value")

            assert response.status_code == 200
|
|
|
|
|
|
class TestModelsEndpointEdgeCases:
    """Edge case tests for the /v1/models endpoint."""

    def test_models_response_format(self, memory):
        """Should return an OpenAI-compatible model list.

        Verifies both the top-level envelope (``object: "list"`` with a
        non-empty ``data`` array) and the per-model schema fields that
        OpenAI clients rely on (id, object, created, owned_by).
        """
        with patch("app.DeepSeekClient") as mock_llm:
            mock_llm.return_value = Mock()
            from app import app
            client = TestClient(app)

            response = client.get("/v1/models")

            # Fix: assert the HTTP status before touching the body so an
            # endpoint failure reports a clear status mismatch instead of a
            # confusing KeyError from parsing an error payload.
            assert response.status_code == 200

            data = response.json()
            assert data["object"] == "list"
            assert isinstance(data["data"], list)
            assert len(data["data"]) > 0
            # Every OpenAI-compatible model entry must expose these fields.
            assert "id" in data["data"][0]
            assert "object" in data["data"][0]
            assert "created" in data["data"][0]
            assert "owned_by" in data["data"][0]