Implement chat interface with message history and system prompt support; update frontend and tests accordingly

Co-authored-by: Copilot <copilot@github.com>
2026-04-29 14:39:38 +02:00
parent 78b76dc331
commit 3d32e6df74
7 changed files with 428 additions and 79 deletions
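
The tests below exercise a /generate/text endpoint that now accepts either a single prompt string or a messages array, plus an optional system_prompt. The router itself is not part of this excerpt; the following is only a minimal sketch of the message-building behaviour the new assertions imply (the field names come from the test payloads, while the schema and helper names are hypothetical).

# Hedged sketch only: reconstructs the message-building rules asserted by the
# tests below. Schema/helper names are hypothetical, not the actual router code.
from typing import Optional
from pydantic import BaseModel


class ChatMessage(BaseModel):
    role: str      # "system" | "user" | "assistant"
    content: str


class GenerateTextRequest(BaseModel):
    model: str
    prompt: Optional[str] = None                   # single-turn convenience field
    messages: Optional[list[ChatMessage]] = None   # multi-turn history
    system_prompt: Optional[str] = None


def build_messages(req: GenerateTextRequest) -> list[dict]:
    """Assemble the messages list handed to openrouter.chat_completion."""
    if req.messages:
        # messages takes precedence over prompt for multi-turn chat
        msgs = [m.model_dump() for m in req.messages]
    else:
        msgs = [{"role": "user", "content": req.prompt}]
    # system_prompt is prepended only when no system message is already present
    if req.system_prompt and not any(m["role"] == "system" for m in msgs):
        msgs.insert(0, {"role": "system", "content": req.system_prompt})
    return msgs

Under these rules, test_generate_text_with_system_prompt sees the system message at index 0 and the user prompt at index 1, and test_generate_text_with_messages_array sees the three-turn history passed through unchanged.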
@@ -69,7 +69,7 @@ async def _user_token(client):
 async def test_generate_text(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.chat_completion", new_callable=AsyncMock, return_value=FAKE_CHAT):
+    with patch("app.routers.generate.openrouter.chat_completion", new_callable=AsyncMock, return_value=FAKE_CHAT):
         resp = await client.post(
             "/generate/text",
             json={"model": "openai/gpt-4o", "prompt": "Tell me a story"},
@@ -85,7 +85,7 @@ async def test_generate_text(client):
 async def test_generate_text_with_system_prompt(client):
     token = await _user_token(client)
     mock = AsyncMock(return_value=FAKE_CHAT)
-    with patch("backend.app.routers.generate.openrouter.chat_completion", mock):
+    with patch("app.routers.generate.openrouter.chat_completion", mock):
         await client.post(
             "/generate/text",
             json={"model": "openai/gpt-4o", "prompt": "Hello",
@@ -97,6 +97,44 @@ async def test_generate_text_with_system_prompt(client):
     assert call_messages[1] == {"role": "user", "content": "Hello"}


+async def test_generate_text_with_messages_array(client):
+    """messages field takes precedence over prompt for multi-turn chat."""
+    token = await _user_token(client)
+    mock = AsyncMock(return_value=FAKE_CHAT)
+    messages = [
+        {"role": "user", "content": "First message"},
+        {"role": "assistant", "content": "Reply"},
+        {"role": "user", "content": "Follow up"},
+    ]
+    with patch("app.routers.generate.openrouter.chat_completion", mock):
+        resp = await client.post(
+            "/generate/text",
+            json={"model": "openai/gpt-4o", "messages": messages},
+            headers={"Authorization": f"Bearer {token}"},
+        )
+    assert resp.status_code == 200
+    call_messages = mock.call_args.kwargs["messages"]
+    assert len(call_messages) == 3
+    assert call_messages[2]["content"] == "Follow up"
+
+
+async def test_generate_text_messages_with_system_prompt(client):
+    """system_prompt prepended when messages provided and no system msg present."""
+    token = await _user_token(client)
+    mock = AsyncMock(return_value=FAKE_CHAT)
+    messages = [{"role": "user", "content": "Hi"}]
+    with patch("app.routers.generate.openrouter.chat_completion", mock):
+        await client.post(
+            "/generate/text",
+            json={"model": "openai/gpt-4o", "messages": messages,
+                  "system_prompt": "Be brief."},
+            headers={"Authorization": f"Bearer {token}"},
+        )
+    call_messages = mock.call_args.kwargs["messages"]
+    assert call_messages[0] == {"role": "system", "content": "Be brief."}
+    assert call_messages[1] == {"role": "user", "content": "Hi"}
+
+
 async def test_generate_text_unauthenticated(client):
     resp = await client.post("/generate/text", json={"model": "openai/gpt-4o", "prompt": "Hi"})
     assert resp.status_code == 401
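
The new tests check the upstream payload by patching openrouter.chat_completion with an AsyncMock and reading call_args.kwargs after the request returns. A self-contained illustration of that unittest.mock pattern (independent of the app; the fake return value is arbitrary):

# Standalone illustration of the AsyncMock call-inspection pattern used above.
import asyncio
from unittest.mock import AsyncMock


async def main() -> None:
    chat_completion = AsyncMock(return_value={"id": "fake"})
    # The code under test would await the patched coroutine with keyword args.
    await chat_completion(model="openai/gpt-4o",
                          messages=[{"role": "user", "content": "Hi"}])
    # call_args.kwargs (Python 3.8+) exposes the kwargs of the most recent call.
    assert chat_completion.call_args.kwargs["messages"][0]["content"] == "Hi"


asyncio.run(main())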
@@ -104,7 +142,7 @@ async def test_generate_text_unauthenticated(client):
 async def test_generate_text_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.chat_completion", new_callable=AsyncMock, side_effect=Exception("timeout")):
+    with patch("app.routers.generate.openrouter.chat_completion", new_callable=AsyncMock, side_effect=Exception("timeout")):
         resp = await client.post(
             "/generate/text",
             json={"model": "openai/gpt-4o", "prompt": "Hi"},
@@ -119,7 +157,7 @@ async def test_generate_text_upstream_error(client):
 async def test_generate_image(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_image", new_callable=AsyncMock, return_value=FAKE_IMAGE):
+    with patch("app.routers.generate.openrouter.generate_image", new_callable=AsyncMock, return_value=FAKE_IMAGE):
         resp = await client.post(
             "/generate/image",
             json={"model": "openai/dall-e-3", "prompt": "A cat on the moon"},
@@ -140,7 +178,7 @@ async def test_generate_image_unauthenticated(client):
 async def test_generate_image_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_image", new_callable=AsyncMock, side_effect=Exception("rate limit")):
+    with patch("app.routers.generate.openrouter.generate_image", new_callable=AsyncMock, side_effect=Exception("rate limit")):
         resp = await client.post(
             "/generate/image",
             json={"model": "openai/dall-e-3", "prompt": "Hi"},
@@ -184,7 +222,7 @@ FAKE_IMAGE_CHAT_GPT5 = {
 async def test_generate_image_chat_flux(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, return_value=FAKE_IMAGE_CHAT_FLUX):
+    with patch("app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, return_value=FAKE_IMAGE_CHAT_FLUX):
         resp = await client.post(
             "/generate/image",
             json={"model": "black-forest-labs/flux.2-klein-4b",
@@ -200,7 +238,7 @@ async def test_generate_image_chat_flux(client):
 async def test_generate_image_chat_gpt5_image_mini(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, return_value=FAKE_IMAGE_CHAT_GPT5):
+    with patch("app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, return_value=FAKE_IMAGE_CHAT_GPT5):
         resp = await client.post(
             "/generate/image",
             json={"model": "openai/gpt-5-image-mini", "prompt": "A cat"},
@@ -215,7 +253,7 @@ async def test_generate_image_chat_gpt5_image_mini(client):
 async def test_generate_image_chat_with_image_config(client):
     token = await _user_token(client)
     mock = AsyncMock(return_value=FAKE_IMAGE_CHAT_FLUX)
-    with patch("backend.app.routers.generate.openrouter.generate_image_chat", mock):
+    with patch("app.routers.generate.openrouter.generate_image_chat", mock):
         await client.post(
             "/generate/image",
             json={
@@ -239,7 +277,7 @@ async def test_generate_image_chat_unauthenticated(client):
 async def test_generate_image_chat_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, side_effect=Exception("timeout")):
+    with patch("app.routers.generate.openrouter.generate_image_chat", new_callable=AsyncMock, side_effect=Exception("timeout")):
         resp = await client.post(
             "/generate/image",
             json={"model": "black-forest-labs/flux.2-klein-4b", "prompt": "Hi"},
@@ -254,7 +292,7 @@ async def test_generate_image_chat_upstream_error(client):
 async def test_generate_video(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_video", new_callable=AsyncMock, return_value=FAKE_VIDEO):
+    with patch("app.routers.generate.openrouter.generate_video", new_callable=AsyncMock, return_value=FAKE_VIDEO):
         resp = await client.post(
             "/generate/video",
             json={"model": "stability/stable-video",
@@ -276,7 +314,7 @@ async def test_generate_video_unauthenticated(client):
 async def test_generate_video_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_video", new_callable=AsyncMock, side_effect=Exception("503")):
+    with patch("app.routers.generate.openrouter.generate_video", new_callable=AsyncMock, side_effect=Exception("503")):
         resp = await client.post(
             "/generate/video",
             json={"model": "stability/stable-video", "prompt": "Hi"},
@@ -291,7 +329,7 @@ async def test_generate_video_upstream_error(client):
 async def test_generate_video_from_image(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_video_from_image", new_callable=AsyncMock, return_value=FAKE_VIDEO_DONE):
+    with patch("app.routers.generate.openrouter.generate_video_from_image", new_callable=AsyncMock, return_value=FAKE_VIDEO_DONE):
         resp = await client.post(
             "/generate/video/from-image",
             json={
@@ -315,7 +353,7 @@ async def test_poll_video_status(client):
         "status": "completed",
         "unsigned_urls": ["https://example.com/video.mp4"],
     }
-    with patch("backend.app.routers.generate.openrouter.poll_video_status", new_callable=AsyncMock, return_value=mock_result):
+    with patch("app.routers.generate.openrouter.poll_video_status", new_callable=AsyncMock, return_value=mock_result):
         resp = await client.get(
             "/generate/video/status",
             params={"polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1"},
@@ -337,7 +375,7 @@ async def test_poll_video_status_unauthenticated(client):
 async def test_poll_video_status_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.poll_video_status", new_callable=AsyncMock, side_effect=Exception("timeout")):
+    with patch("app.routers.generate.openrouter.poll_video_status", new_callable=AsyncMock, side_effect=Exception("timeout")):
         resp = await client.get(
             "/generate/video/status",
             params={"polling_url": "https://openrouter.ai/api/v1/videos/gen-vid-1"},
@@ -356,7 +394,7 @@ async def test_generate_video_from_image_unauthenticated(client):
 async def test_generate_video_from_image_upstream_error(client):
     token = await _user_token(client)
-    with patch("backend.app.routers.generate.openrouter.generate_video_from_image", new_callable=AsyncMock, side_effect=Exception("error")):
+    with patch("app.routers.generate.openrouter.generate_video_from_image", new_callable=AsyncMock, side_effect=Exception("error")):
         resp = await client.post(
             "/generate/video/from-image",
             json={"model": "runway/gen-3",