"""Tests for AI endpoints — OpenRouter HTTP calls are fully mocked."""
|
|
import os
|
|
import pytest
|
|
import pytest_asyncio
|
|
from unittest.mock import AsyncMock, patch
|
|
from httpx import AsyncClient, ASGITransport
|
|
|
|
from backend.app.main import app
|
|
from backend.app import db as db_module
|
|
|
|
os.environ.setdefault("JWT_SECRET", "test-secret-key-for-testing-only")
|
|
os.environ.setdefault("OPENROUTER_API_KEY", "test-key")
|
|
|
|
FAKE_MODELS = [
|
|
{"id": "openai/gpt-4o", "name": "GPT-4o", "context_length": 128000, "pricing": {"prompt": "0.000005"}},
|
|
{"id": "anthropic/claude-3-haiku", "name": "Claude 3 Haiku", "context_length": 200000, "pricing": {}},
|
|
]
|
|
|
|
FAKE_CHAT_RESPONSE = {
|
|
"id": "gen-abc123",
|
|
"model": "openai/gpt-4o",
|
|
"choices": [{"message": {"role": "assistant", "content": "Hello! How can I help?"}}],
|
|
"usage": {"prompt_tokens": 10, "completion_tokens": 8, "total_tokens": 18},
|
|
}
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def fresh_db():
    """Give every test a pristine in-memory database.

    Runs automatically for all tests: resets the module-level connection,
    initialises an in-memory SQLite DB, and tears it down afterwards.
    """
    db_module._conn = None              # forget any connection a prior test left behind
    db_module.init_db(":memory:")       # fresh schema, nothing on disk
    yield
    db_module.close_db()                # teardown: close and clear the handle
    db_module._conn = None
@pytest_asyncio.fixture
async def client(fresh_db):
    """Async HTTP client wired straight into the ASGI app — no real network."""
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://test"
    ) as http:
        yield http
async def _user_token(client):
    """Register a throwaway account and return a valid bearer token."""
    creds = {"email": "user@example.com", "password": "secret123"}
    await client.post("/auth/register", json=creds)
    login = await client.post("/auth/login", json=creds)
    return login.json()["access_token"]
# ---------------------------------------------------------------------------
# GET /ai/models
# ---------------------------------------------------------------------------
async def test_list_models(client):
    """An authenticated user receives the upstream model list."""
    token = await _user_token(client)
    target = "backend.app.routers.ai.openrouter.list_models"
    with patch(target, new_callable=AsyncMock, return_value=FAKE_MODELS):
        resp = await client.get(
            "/ai/models", headers={"Authorization": f"Bearer {token}"}
        )

    assert resp.status_code == 200
    models = resp.json()
    assert len(models) == 2
    assert models[0]["id"] == "openai/gpt-4o"
    assert models[1]["name"] == "Claude 3 Haiku"
async def test_list_models_unauthenticated(client):
    """Missing bearer token is rejected with 401."""
    resp = await client.get("/ai/models")
    assert resp.status_code == 401
async def test_list_models_upstream_error(client):
    """A failure reaching OpenRouter surfaces as 502 Bad Gateway."""
    token = await _user_token(client)
    target = "backend.app.routers.ai.openrouter.list_models"
    with patch(
        target,
        new_callable=AsyncMock,
        side_effect=Exception("Connection refused"),
    ):
        resp = await client.get(
            "/ai/models", headers={"Authorization": f"Bearer {token}"}
        )

    assert resp.status_code == 502
    assert "OpenRouter error" in resp.json()["detail"]
# ---------------------------------------------------------------------------
# POST /ai/chat
# ---------------------------------------------------------------------------
async def test_chat_success(client):
    """A chat request returns id, model, flattened content, and usage."""
    token = await _user_token(client)
    target = "backend.app.routers.ai.openrouter.chat_completion"
    with patch(target, new_callable=AsyncMock, return_value=FAKE_CHAT_RESPONSE):
        resp = await client.post(
            "/ai/chat",
            json={
                "model": "openai/gpt-4o",
                "messages": [{"role": "user", "content": "Hello"}],
            },
            headers={"Authorization": f"Bearer {token}"},
        )

    assert resp.status_code == 200
    body = resp.json()
    assert body["id"] == "gen-abc123"
    assert body["model"] == "openai/gpt-4o"
    assert body["content"] == "Hello! How can I help?"
    assert body["usage"]["total_tokens"] == 18
async def test_chat_passes_parameters(client):
    """The router forwards model, messages, and sampling params verbatim.

    Fix: a stray ``mock = AsyncMock(...)`` created before the ``with`` block
    was dead code — it was immediately shadowed by ``patch(...) as mock``.
    Only the patched mock is needed.
    """
    token = await _user_token(client)
    with patch(
        "backend.app.routers.ai.openrouter.chat_completion",
        new_callable=AsyncMock,
        return_value=FAKE_CHAT_RESPONSE,
    ) as mock:
        await client.post(
            "/ai/chat",
            json={
                "model": "anthropic/claude-3-haiku",
                "messages": [{"role": "user", "content": "Hi"}],
                "temperature": 0.3,
                "max_tokens": 512,
            },
            headers={"Authorization": f"Bearer {token}"},
        )

    # Every request field must reach the OpenRouter client unchanged.
    mock.assert_called_once_with(
        model="anthropic/claude-3-haiku",
        messages=[{"role": "user", "content": "Hi"}],
        temperature=0.3,
        max_tokens=512,
    )
async def test_chat_unauthenticated(client):
    """Chat requests without a bearer token are rejected with 401."""
    payload = {"model": "openai/gpt-4o", "messages": [{"role": "user", "content": "Hi"}]}
    resp = await client.post("/ai/chat", json=payload)
    assert resp.status_code == 401
async def test_chat_upstream_error(client):
    """An exception from the OpenRouter client maps to a 502 response."""
    token = await _user_token(client)
    target = "backend.app.routers.ai.openrouter.chat_completion"
    with patch(target, new_callable=AsyncMock, side_effect=Exception("timeout")):
        resp = await client.post(
            "/ai/chat",
            json={"model": "openai/gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
async def test_chat_malformed_upstream_response(client):
    """An upstream payload with no choices is treated as a gateway error."""
    token = await _user_token(client)
    target = "backend.app.routers.ai.openrouter.chat_completion"
    # Empty "choices" means there is no assistant message to extract.
    with patch(target, new_callable=AsyncMock, return_value={"id": "x", "choices": []}):
        resp = await client.post(
            "/ai/chat",
            json={"model": "openai/gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502