2c6fdc03a8
Co-authored-by: Copilot <copilot@github.com>
367 lines
13 KiB
Python
367 lines
13 KiB
Python
"""Tests for generate endpoints — all OpenRouter calls mocked."""
|
|
import os
|
|
import pytest
|
|
import pytest_asyncio
|
|
from unittest.mock import AsyncMock, patch
|
|
from httpx import AsyncClient, ASGITransport
|
|
|
|
from app.main import app
|
|
from app import db as db_module
|
|
|
|
# Safe test-only defaults so the app's configuration can resolve these vars.
# NOTE(review): these run *after* ``from app.main import app`` above — if the
# application reads JWT_SECRET / OPENROUTER_API_KEY at import time, these
# defaults arrive too late; confirm, or set them in conftest.py instead.
os.environ.setdefault("JWT_SECRET", "test-secret-key-for-testing-only")
os.environ.setdefault("OPENROUTER_API_KEY", "test-key")
|
|
|
|
# Canned OpenRouter chat-completion response used by the text-generation mocks.
FAKE_CHAT = {
    "id": "gen-text-1",
    "model": "openai/gpt-4o",
    "choices": [
        {"message": {"role": "assistant", "content": "Once upon a time..."}},
    ],
    "usage": {"prompt_tokens": 5, "completion_tokens": 10, "total_tokens": 15},
}
|
|
|
|
# Canned OpenRouter images-API response (dedicated image endpoint).
FAKE_IMAGE = {
    "id": "gen-img-1",
    "model": "openai/dall-e-3",
    "data": [
        {
            "url": "https://example.com/image.png",
            "revised_prompt": "A cat on the moon",
        },
    ],
}
|
|
|
|
# Canned video-generation response in the just-queued state (no URLs yet).
FAKE_VIDEO = {
    "id": "gen-vid-1",
    "polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1",
    "status": "queued",
}
|
|
|
|
# Canned video-generation response in the completed state, with result URLs.
FAKE_VIDEO_DONE = {
    "id": "gen-vid-2",
    "polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-2",
    "status": "completed",
    "unsigned_urls": ["https://example.com/video.mp4"],
}
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def fresh_db():
    """Give every test in this module a pristine in-memory SQLite database.

    autouse=True: no test can accidentally share state with a neighbour.
    """
    db_module._conn = None  # discard any connection left over from elsewhere
    db_module.init_db(":memory:")
    yield
    db_module.close_db()
    db_module._conn = None  # leave the module in its unopened state
|
|
|
|
|
|
@pytest_asyncio.fixture
async def client(fresh_db):
    """Async HTTP client wired straight to the ASGI app — no real server."""
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://test"
    ) as http_client:
        yield http_client
|
|
|
|
|
|
async def _user_token(client):
    """Register a throwaway user and return a valid JWT access token."""
    credentials = {"email": "user@example.com", "password": "secret123"}
    await client.post("/auth/register", json=credentials)
    login = await client.post("/auth/login", json=credentials)
    return login.json()["access_token"]
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# POST /generate/text
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def test_generate_text(client):
    """Happy path: mocked chat completion is surfaced through /generate/text."""
    token = await _user_token(client)
    # Patch via ``app.`` — this file imports the application as ``app.main``,
    # so a ``backend.app...`` target would bind the mock onto a different
    # module object and the endpoint would call the real OpenRouter client.
    with patch(
        "app.routers.generate.openrouter.chat_completion",
        new_callable=AsyncMock,
        return_value=FAKE_CHAT,
    ):
        resp = await client.post(
            "/generate/text",
            json={"model": "openai/gpt-4o", "prompt": "Tell me a story"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["content"] == "Once upon a time..."
    assert data["id"] == "gen-text-1"
    assert data["usage"]["total_tokens"] == 15
|
|
|
|
|
|
async def test_generate_text_with_system_prompt(client):
    """system_prompt must be forwarded as the leading system message."""
    token = await _user_token(client)
    mock = AsyncMock(return_value=FAKE_CHAT)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch("app.routers.generate.openrouter.chat_completion", mock):
        await client.post(
            "/generate/text",
            json={"model": "openai/gpt-4o", "prompt": "Hello",
                  "system_prompt": "Be concise."},
            headers={"Authorization": f"Bearer {token}"},
        )
    call_messages = mock.call_args.kwargs["messages"]
    assert call_messages[0] == {"role": "system", "content": "Be concise."}
    assert call_messages[1] == {"role": "user", "content": "Hello"}
|
|
|
|
|
|
async def test_generate_text_unauthenticated(client):
    """Missing Authorization header is rejected with 401."""
    payload = {"model": "openai/gpt-4o", "prompt": "Hi"}
    resp = await client.post("/generate/text", json=payload)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_generate_text_upstream_error(client):
    """Upstream failure during chat completion maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.chat_completion",
        new_callable=AsyncMock,
        side_effect=Exception("timeout"),
    ):
        resp = await client.post(
            "/generate/text",
            json={"model": "openai/gpt-4o", "prompt": "Hi"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# POST /generate/image
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def test_generate_image(client):
    """Happy path: images-API response is normalized into ``images``."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_image",
        new_callable=AsyncMock,
        return_value=FAKE_IMAGE,
    ):
        resp = await client.post(
            "/generate/image",
            json={"model": "openai/dall-e-3", "prompt": "A cat on the moon"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["id"] == "gen-img-1"
    assert len(data["images"]) == 1
    assert data["images"][0]["url"] == "https://example.com/image.png"
    assert data["images"][0]["revised_prompt"] == "A cat on the moon"
|
|
|
|
|
|
async def test_generate_image_unauthenticated(client):
    """Missing Authorization header is rejected with 401."""
    payload = {"model": "openai/dall-e-3", "prompt": "Hi"}
    resp = await client.post("/generate/image", json=payload)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_generate_image_upstream_error(client):
    """Upstream failure during image generation maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_image",
        new_callable=AsyncMock,
        side_effect=Exception("rate limit"),
    ):
        resp = await client.post(
            "/generate/image",
            json={"model": "openai/dall-e-3", "prompt": "Hi"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|
|
|
|
|
|
# --- Chat-based image generation (FLUX, GPT-5 Image Mini) ---
|
|
|
|
# Chat-completions-shaped image response (FLUX models return images inline
# on the assistant message rather than through the images API).
FAKE_IMAGE_CHAT_FLUX = {
    "id": "gen-img-chat-1",
    "model": "black-forest-labs/flux.2-klein-4b",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": "Here is your generated image.",
                "images": [
                    {
                        "type": "image_url",
                        "image_url": {"url": "data:image/png;base64,abc123"},
                    },
                ],
            },
        },
    ],
}
|
|
|
|
# Same chat-shaped image payload for the GPT-5 Image Mini model.
FAKE_IMAGE_CHAT_GPT5 = {
    "id": "gen-img-chat-2",
    "model": "openai/gpt-5-image-mini",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": "Generated image.",
                "images": [
                    {
                        "type": "image_url",
                        "image_url": {"url": "data:image/png;base64,xyz789"},
                    },
                ],
            },
        },
    ],
}
|
|
|
|
|
|
async def test_generate_image_chat_flux(client):
    """Chat-based image models (FLUX) route through generate_image_chat."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_image_chat",
        new_callable=AsyncMock,
        return_value=FAKE_IMAGE_CHAT_FLUX,
    ):
        resp = await client.post(
            "/generate/image",
            json={"model": "black-forest-labs/flux.2-klein-4b",
                  "prompt": "A sunset"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["id"] == "gen-img-chat-1"
    assert len(data["images"]) == 1
    assert data["images"][0]["url"] == "data:image/png;base64,abc123"
|
|
|
|
|
|
async def test_generate_image_chat_gpt5_image_mini(client):
    """GPT-5 Image Mini also uses the chat-based image path."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_image_chat",
        new_callable=AsyncMock,
        return_value=FAKE_IMAGE_CHAT_GPT5,
    ):
        resp = await client.post(
            "/generate/image",
            json={"model": "openai/gpt-5-image-mini", "prompt": "A cat"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["model"] == "openai/gpt-5-image-mini"
    assert len(data["images"]) == 1
|
|
|
|
|
|
async def test_generate_image_chat_with_image_config(client):
    """aspect_ratio / image_size are forwarded inside ``image_config``."""
    token = await _user_token(client)
    mock = AsyncMock(return_value=FAKE_IMAGE_CHAT_FLUX)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch("app.routers.generate.openrouter.generate_image_chat", mock):
        await client.post(
            "/generate/image",
            json={
                "model": "black-forest-labs/flux.2-klein-4b",
                "prompt": "A landscape",
                "aspect_ratio": "16:9",
                "image_size": "2K",
            },
            headers={"Authorization": f"Bearer {token}"},
        )
    call_kwargs = mock.call_args.kwargs
    assert call_kwargs["image_config"]["aspect_ratio"] == "16:9"
    assert call_kwargs["image_config"]["image_size"] == "2K"
    assert call_kwargs["modalities"] == ["image"]
|
|
|
|
|
|
async def test_generate_image_chat_unauthenticated(client):
    """Missing Authorization header is rejected with 401."""
    payload = {"model": "flux.2-klein-4b", "prompt": "Hi"}
    resp = await client.post("/generate/image", json=payload)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_generate_image_chat_upstream_error(client):
    """Upstream failure on the chat image path maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_image_chat",
        new_callable=AsyncMock,
        side_effect=Exception("timeout"),
    ):
        resp = await client.post(
            "/generate/image",
            json={"model": "black-forest-labs/flux.2-klein-4b", "prompt": "Hi"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# POST /generate/video
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def test_generate_video(client):
    """A queued video job is returned with its polling URL and no video_url."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_video",
        new_callable=AsyncMock,
        return_value=FAKE_VIDEO,
    ):
        resp = await client.post(
            "/generate/video",
            json={"model": "stability/stable-video",
                  "prompt": "Ocean waves at sunset"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["id"] == "gen-vid-1"
    assert data["status"] == "queued"
    assert data["polling_url"] == "https://openrouter.ai/api/v1/videos/gen-vid-1"
    assert data["video_url"] is None
|
|
|
|
|
|
async def test_generate_video_unauthenticated(client):
    """Missing Authorization header is rejected with 401."""
    payload = {"model": "m", "prompt": "p"}
    resp = await client.post("/generate/video", json=payload)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_generate_video_upstream_error(client):
    """Upstream failure during video generation maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_video",
        new_callable=AsyncMock,
        side_effect=Exception("503"),
    ):
        resp = await client.post(
            "/generate/video",
            json={"model": "stability/stable-video", "prompt": "Hi"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# POST /generate/video/from-image
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def test_generate_video_from_image(client):
    """Completed image-to-video job exposes both video_url and video_urls."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_video_from_image",
        new_callable=AsyncMock,
        return_value=FAKE_VIDEO_DONE,
    ):
        resp = await client.post(
            "/generate/video/from-image",
            json={
                "model": "runway/gen-3",
                "image_url": "https://example.com/cat.jpg",
                "prompt": "Cat runs across the room",
            },
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["status"] == "completed"
    assert data["video_url"] == "https://example.com/video.mp4"
    assert data["video_urls"] == ["https://example.com/video.mp4"]
|
|
|
|
|
|
async def test_poll_video_status(client):
    """Polling a finished job surfaces the first unsigned URL as video_url."""
    token = await _user_token(client)
    mock_result = {
        "id": "gen-vid-1",
        "status": "completed",
        "unsigned_urls": ["https://example.com/video.mp4"],
    }
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.poll_video_status",
        new_callable=AsyncMock,
        return_value=mock_result,
    ):
        resp = await client.get(
            "/generate/video/status",
            params={"polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 200
    data = resp.json()
    assert data["status"] == "completed"
    assert data["video_url"] == "https://example.com/video.mp4"
|
|
|
|
|
|
async def test_poll_video_status_unauthenticated(client):
    """Status polling without a token is rejected with 401."""
    query = {"polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1"}
    resp = await client.get("/generate/video/status", params=query)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_poll_video_status_upstream_error(client):
    """Upstream failure while polling maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.poll_video_status",
        new_callable=AsyncMock,
        side_effect=Exception("timeout"),
    ):
        resp = await client.get(
            "/generate/video/status",
            params={"polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|
|
|
|
|
|
async def test_generate_video_from_image_unauthenticated(client):
    """Image-to-video without a token is rejected with 401."""
    payload = {"model": "m", "image_url": "https://example.com/img.jpg", "prompt": "p"}
    resp = await client.post("/generate/video/from-image", json=payload)
    assert resp.status_code == 401
|
|
|
|
|
|
async def test_generate_video_from_image_upstream_error(client):
    """Upstream failure on image-to-video maps to HTTP 502."""
    token = await _user_token(client)
    # Patch via ``app.`` to match this file's imports (not ``backend.app``).
    with patch(
        "app.routers.generate.openrouter.generate_video_from_image",
        new_callable=AsyncMock,
        side_effect=Exception("error"),
    ):
        resp = await client.post(
            "/generate/video/from-image",
            json={"model": "runway/gen-3",
                  "image_url": "https://example.com/img.jpg", "prompt": "p"},
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 502
|