feat: enhance model caching and output modalities handling
- Updated `refresh_models_cache` to include output modalities in the models cache.
- Added `get_model_output_modalities` function to retrieve output modalities for a specific model.
- Modified tests to cover new functionality for output modalities.
- Updated OpenRouter video generation functions to support audio generation and improved error handling.
- Enhanced dashboard to display generated images and videos.
- Refactored frontend templates to accommodate new data structures for generated content.
- Adjusted tests to validate changes in model handling and dashboard rendering.

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in:
@@ -53,7 +53,7 @@ async def _user_token(client):
|
||||
async def test_list_models(client):
|
||||
token = await _user_token(client)
|
||||
with patch(
|
||||
"backend.app.routers.ai.openrouter.list_models",
|
||||
"app.routers.ai.openrouter.list_models",
|
||||
new_callable=AsyncMock,
|
||||
return_value=FAKE_MODELS,
|
||||
):
|
||||
@@ -74,7 +74,7 @@ async def test_list_models_unauthenticated(client):
|
||||
async def test_list_models_upstream_error(client):
|
||||
token = await _user_token(client)
|
||||
with patch(
|
||||
"backend.app.routers.ai.openrouter.list_models",
|
||||
"app.routers.ai.openrouter.list_models",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("Connection refused"),
|
||||
):
|
||||
@@ -91,7 +91,7 @@ async def test_list_models_upstream_error(client):
|
||||
async def test_chat_success(client):
|
||||
token = await _user_token(client)
|
||||
with patch(
|
||||
"backend.app.routers.ai.openrouter.chat_completion",
|
||||
"app.routers.ai.openrouter.chat_completion",
|
||||
new_callable=AsyncMock,
|
||||
return_value=FAKE_CHAT_RESPONSE,
|
||||
):
|
||||
@@ -115,7 +115,7 @@ async def test_chat_success(client):
|
||||
async def test_chat_passes_parameters(client):
|
||||
token = await _user_token(client)
|
||||
mock = AsyncMock(return_value=FAKE_CHAT_RESPONSE)
|
||||
with patch("backend.app.routers.ai.openrouter.chat_completion", new_callable=AsyncMock, return_value=FAKE_CHAT_RESPONSE) as mock:
|
||||
with patch("app.routers.ai.openrouter.chat_completion", new_callable=AsyncMock, return_value=FAKE_CHAT_RESPONSE) as mock:
|
||||
await client.post(
|
||||
"/ai/chat",
|
||||
json={
|
||||
@@ -145,7 +145,7 @@ async def test_chat_unauthenticated(client):
|
||||
async def test_chat_upstream_error(client):
|
||||
token = await _user_token(client)
|
||||
with patch(
|
||||
"backend.app.routers.ai.openrouter.chat_completion",
|
||||
"app.routers.ai.openrouter.chat_completion",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=Exception("timeout"),
|
||||
):
|
||||
@@ -160,7 +160,7 @@ async def test_chat_upstream_error(client):
|
||||
async def test_chat_malformed_upstream_response(client):
|
||||
token = await _user_token(client)
|
||||
with patch(
|
||||
"backend.app.routers.ai.openrouter.chat_completion",
|
||||
"app.routers.ai.openrouter.chat_completion",
|
||||
new_callable=AsyncMock,
|
||||
return_value={"id": "x", "choices": []}, # empty choices
|
||||
):
|
||||
|
||||
Reference in New Issue
Block a user