Implement chat interface with message history and system prompt support; update frontend and tests accordingly

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in: (branch list omitted in capture)
Commit date: 2026-04-29 14:39:38 +02:00
parent 78b76dc331
commit 3d32e6df74
7 changed files with 428 additions and 79 deletions
+104 -6
View File
@@ -172,7 +172,7 @@ def test_generate_text_page_renders(client):
_set_auth(client)
resp = client.get("/generate/text")
assert resp.status_code == 200
assert b"Text Generation" in resp.data
assert b"Text Chat" in resp.data
def test_generate_text_requires_login(client):
@@ -183,13 +183,108 @@ def test_generate_text_requires_login(client):
def test_generate_text_success(client):
_set_auth(client)
mock = _mock_response(
gen_mock = _mock_response(
200, {"id": "g1", "model": "openai/gpt-4o", "content": "Hello world", "usage": None})
with patch("frontend.app.main.httpx.request", return_value=mock):
models_mock = _mock_response(200, [
{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}
])
with patch("frontend.app.main.httpx.request", side_effect=[gen_mock, models_mock]):
resp = client.post(
"/generate/text", data={"model": "openai/gpt-4o", "prompt": "Say hello"})
"/generate/text",
data={"model": "openai/gpt-4o", "prompt": "Say hello", "action": "send"})
assert resp.status_code == 200
assert b"Hello world" in resp.data
assert b"chat-bubble--assistant" in resp.data
def test_generate_text_page_shows_optional_system_prompt(client):
_set_auth(client)
models_mock = _mock_response(200, [])
with patch("frontend.app.main.httpx.request", return_value=models_mock):
resp = client.get("/generate/text")
assert resp.status_code == 200
assert b"System prompt (optional)" in resp.data
assert b'name="system_prompt"' in resp.data
def test_generate_text_forwards_system_prompt(client):
_set_auth(client)
gen_mock = _mock_response(
200, {"id": "g1", "model": "openai/gpt-4o", "content": "Hello world", "usage": None})
models_mock = _mock_response(200, [
{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}
])
with patch("frontend.app.main.httpx.request", side_effect=[gen_mock, models_mock]) as mock_request:
resp = client.post(
"/generate/text",
data={
"model": "openai/gpt-4o",
"prompt": "Say hello",
"system_prompt": "You are concise.",
"action": "send",
},
)
assert resp.status_code == 200
first_call_kwargs = mock_request.call_args_list[0].kwargs
assert first_call_kwargs["json"]["system_prompt"] == "You are concise."
# Messages array sent (not bare prompt)
assert "messages" in first_call_kwargs["json"]
def test_generate_text_chat_history_accumulates(client):
"""Second message includes prior user+assistant turns in messages array."""
_set_auth(client)
turn1_gen = _mock_response(
200, {"id": "g1", "model": "openai/gpt-4o", "content": "Turn 1 reply", "usage": None})
turn1_models = _mock_response(
200, [{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}])
turn2_gen = _mock_response(
200, {"id": "g2", "model": "openai/gpt-4o", "content": "Turn 2 reply", "usage": None})
turn2_models = _mock_response(
200, [{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}])
with patch("frontend.app.main.httpx.request", side_effect=[turn1_gen, turn1_models]):
client.post(
"/generate/text", data={"model": "openai/gpt-4o", "prompt": "First", "action": "send"})
with patch("frontend.app.main.httpx.request", side_effect=[turn2_gen, turn2_models]) as mock_req:
resp = client.post(
"/generate/text", data={"model": "openai/gpt-4o", "prompt": "Second", "action": "send"})
assert resp.status_code == 200
assert b"Turn 1 reply" in resp.data
assert b"Turn 2 reply" in resp.data
# Backend received 3 messages: First(user), Turn1(assistant), Second(user)
sent_messages = mock_req.call_args_list[0].kwargs["json"]["messages"]
assert len(sent_messages) == 3
assert sent_messages[0]["role"] == "user" and sent_messages[0]["content"] == "First"
assert sent_messages[1]["role"] == "assistant"
assert sent_messages[2]["role"] == "user" and sent_messages[2]["content"] == "Second"
def test_generate_text_clear_resets_history(client):
"""Clear action removes session history and redirects."""
_set_auth(client)
gen_mock = _mock_response(
200, {"id": "g1", "model": "openai/gpt-4o", "content": "Reply", "usage": None})
models_mock = _mock_response(
200, [{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}])
with patch("frontend.app.main.httpx.request", side_effect=[gen_mock, models_mock]):
client.post(
"/generate/text", data={"model": "openai/gpt-4o", "prompt": "Hi", "action": "send"})
clear_resp = client.post("/generate/text", data={"action": "clear"})
assert clear_resp.status_code == 302
models_mock2 = _mock_response(
200, [{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}])
with patch("frontend.app.main.httpx.request", return_value=models_mock2):
get_resp = client.get("/generate/text")
assert b"No messages yet" in get_resp.data
def test_generate_image_page_renders(client):
@@ -251,8 +346,11 @@ def test_generate_video_image_mode(client):
def test_generate_upstream_error_shows_message(client):
_set_auth(client)
mock = _mock_response(502, {"detail": "OpenRouter error: timeout"})
with patch("frontend.app.main.httpx.request", return_value=mock):
gen_mock = _mock_response(502, {"detail": "OpenRouter error: timeout"})
models_mock = _mock_response(200, [
{"id": "openai/gpt-4o", "name": "GPT-4o", "modality": "text"}
])
with patch("frontend.app.main.httpx.request", side_effect=[gen_mock, models_mock]):
resp = client.post(
"/generate/text", data={"model": "openai/gpt-4o", "prompt": "Hi"})
assert resp.status_code == 200