add AI and generation routers, models, and OpenRouter service integration with tests

Co-authored-by: Copilot <copilot@github.com>
This commit is contained in:
2026-04-27 18:12:53 +02:00
parent 3ee4ed7e7f
commit 05309f26b4
7 changed files with 826 additions and 0 deletions
+4
View File
@@ -1,6 +1,8 @@
from backend.app.routers import auth as auth_router
from backend.app.routers import users as users_router
from backend.app.routers import admin as admin_router
from backend.app.routers import ai as ai_router
from backend.app.routers import generate as generate_router
from backend.app.db import close_db, init_db
import os
from contextlib import asynccontextmanager
@@ -37,6 +39,8 @@ app.add_middleware(
# Mount all feature routers; each router object declares its own URL
# prefix and OpenAPI tags (e.g. /ai, /generate).
app.include_router(auth_router.router)
app.include_router(users_router.router)
app.include_router(admin_router.router)
app.include_router(ai_router.router)
app.include_router(generate_router.router)
@app.get("/health", tags=["health"])
+92
View File
@@ -0,0 +1,92 @@
"""Pydantic schemas for AI generation endpoints."""
from typing import Any
from pydantic import BaseModel
class ChatMessage(BaseModel):
    """A single message in a chat conversation."""
    role: str  # "user" | "assistant" | "system"
    content: str  # message text
class ChatRequest(BaseModel):
    """Request body for POST /ai/chat."""
    model: str  # OpenRouter model identifier
    messages: list[ChatMessage]  # full conversation history
    temperature: float = 0.7  # sampling temperature, forwarded verbatim
    max_tokens: int = 1024  # cap on generated tokens
class ChatResponse(BaseModel):
    """Response body for POST /ai/chat."""
    id: str  # upstream completion id
    model: str  # model that actually served the request
    content: str  # assistant reply text
    usage: dict[str, Any] | None = None  # raw token-usage block, if provided
class ModelInfo(BaseModel):
    """Summary of one model from the OpenRouter catalog (GET /ai/models)."""
    id: str
    name: str
    context_length: int | None = None  # max context window, when reported
    pricing: dict[str, Any] | None = None  # raw pricing block from OpenRouter
# --- Text generation ---
class TextRequest(BaseModel):
    """Request body for POST /generate/text."""
    model: str
    prompt: str  # user prompt
    system_prompt: str | None = None  # optional system message prepended to the chat
    temperature: float = 0.7
    max_tokens: int = 1024
class TextResponse(BaseModel):
    """Response body for POST /generate/text."""
    id: str  # upstream completion id
    model: str  # model that actually served the request
    content: str  # generated text
    usage: dict[str, Any] | None = None  # raw token-usage block, if provided
# --- Image generation ---
class ImageRequest(BaseModel):
    """Request body for POST /generate/image."""
    model: str
    prompt: str
    n: int = 1  # number of images to generate
    size: str = "1024x1024"  # "WIDTHxHEIGHT" pixel-size string
class ImageResult(BaseModel):
    """One generated image: either a URL or inline base64 data."""
    url: str | None = None  # hosted image URL, if the provider returns one
    b64_json: str | None = None  # base64-encoded image bytes, if inline
    revised_prompt: str | None = None  # prompt as rewritten by the provider, if any
class ImageResponse(BaseModel):
    """Response body for POST /generate/image."""
    id: str
    model: str
    images: list[ImageResult]  # one entry per requested image
# --- Video generation ---
class VideoRequest(BaseModel):
    """Request body for POST /generate/video (text-to-video)."""
    model: str
    prompt: str
    duration_seconds: int | None = None  # omitted from the upstream payload when None
    aspect_ratio: str = "16:9"
class VideoFromImageRequest(BaseModel):
    """Request body for POST /generate/video/from-image (image-to-video)."""
    model: str
    image_url: str  # source image to animate
    prompt: str
    duration_seconds: int | None = None  # omitted from the upstream payload when None
    aspect_ratio: str = "16:9"
class VideoResponse(BaseModel):
    """Response body for the video-generation endpoints."""
    status: str  # "queued" | "processing" | "completed"
    id: str
    model: str
    video_url: str | None = None  # populated once the video is ready
    metadata: dict[str, Any] | None = None  # provider-specific extra fields
+63
View File
@@ -0,0 +1,63 @@
"""AI router: model listing and chat completions via OpenRouter."""
from fastapi import APIRouter, Depends, HTTPException, status
from backend.app.dependencies import get_current_user
from backend.app.models.ai import ChatRequest, ChatResponse, ModelInfo
from backend.app.services import openrouter
router = APIRouter(prefix="/ai", tags=["ai"])
@router.get("/models", response_model=list[ModelInfo])
async def get_models(_: dict = Depends(get_current_user)) -> list[ModelInfo]:
    """List available AI models from OpenRouter.

    Requires an authenticated user; the user object itself is unused.

    Raises:
        HTTPException: 502 when the upstream OpenRouter call fails.
    """
    try:
        raw = await openrouter.list_models()
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives in
        # tracebacks/logs (PEP 3134; flake8-bugbear B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    return [
        ModelInfo(
            id=m.get("id", ""),
            name=m.get("name", m.get("id", "")),
            context_length=m.get("context_length"),
            pricing=m.get("pricing"),
        )
        for m in raw
    ]
@router.post("/chat", response_model=ChatResponse)
async def chat(
    body: ChatRequest,
    _: dict = Depends(get_current_user),
) -> ChatResponse:
    """Send a chat completion request through OpenRouter.

    Raises:
        HTTPException: 502 when the upstream call fails or the response
            does not have the expected OpenAI-compatible shape.
    """
    try:
        result = await openrouter.chat_completion(
            model=body.model,
            messages=[m.model_dump() for m in body.messages],
            temperature=body.temperature,
            max_tokens=body.max_tokens,
        )
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives (B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    try:
        choice = result["choices"][0]
        return ChatResponse(
            id=result["id"],
            model=result.get("model", body.model),
            content=choice["message"]["content"],
            usage=result.get("usage"),
        )
    except (KeyError, IndexError) as exc:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Unexpected response format from OpenRouter: {exc}",
        ) from exc
+141
View File
@@ -0,0 +1,141 @@
"""Generate router: text, image, video, and image-to-video generation."""
from fastapi import APIRouter, Depends, HTTPException, status
from backend.app.dependencies import get_current_user
from backend.app.models.ai import (
ImageRequest,
ImageResponse,
ImageResult,
TextRequest,
TextResponse,
VideoFromImageRequest,
VideoRequest,
VideoResponse,
)
from backend.app.services import openrouter
router = APIRouter(prefix="/generate", tags=["generate"])
@router.post("/text", response_model=TextResponse)
async def generate_text(
    body: TextRequest,
    _: dict = Depends(get_current_user),
) -> TextResponse:
    """Generate text from a prompt using a chat model.

    Builds a one-shot chat (optional system message + user prompt) and
    forwards it to OpenRouter's chat completion endpoint.

    Raises:
        HTTPException: 502 on upstream failure or malformed response.
    """
    messages = []
    if body.system_prompt:
        messages.append({"role": "system", "content": body.system_prompt})
    messages.append({"role": "user", "content": body.prompt})
    try:
        result = await openrouter.chat_completion(
            model=body.model,
            messages=messages,
            temperature=body.temperature,
            max_tokens=body.max_tokens,
        )
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives (B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    try:
        choice = result["choices"][0]
        return TextResponse(
            id=result["id"],
            model=result.get("model", body.model),
            content=choice["message"]["content"],
            usage=result.get("usage"),
        )
    except (KeyError, IndexError) as exc:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Unexpected response format: {exc}",
        ) from exc
@router.post("/image", response_model=ImageResponse)
async def generate_image(
    body: ImageRequest,
    _: dict = Depends(get_current_user),
) -> ImageResponse:
    """Generate images from a text prompt.

    Raises:
        HTTPException: 502 on upstream failure or malformed response.
    """
    try:
        result = await openrouter.generate_image(
            model=body.model,
            prompt=body.prompt,
            n=body.n,
            size=body.size,
        )
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives (B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    try:
        images = [
            ImageResult(
                url=item.get("url"),
                b64_json=item.get("b64_json"),
                revised_prompt=item.get("revised_prompt"),
            )
            for item in result.get("data", [])
        ]
        return ImageResponse(
            id=result.get("id", ""),
            model=result.get("model", body.model),
            images=images,
        )
    except (KeyError, TypeError) as exc:
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"Unexpected response format: {exc}",
        ) from exc
@router.post("/video", response_model=VideoResponse)
async def generate_video(
    body: VideoRequest,
    _: dict = Depends(get_current_user),
) -> VideoResponse:
    """Generate a video from a text prompt.

    The upstream call is asynchronous on the provider side, so the
    response may report "queued"/"processing" with no video_url yet.

    Raises:
        HTTPException: 502 when the upstream call fails.
    """
    try:
        result = await openrouter.generate_video(
            model=body.model,
            prompt=body.prompt,
            duration_seconds=body.duration_seconds,
            aspect_ratio=body.aspect_ratio,
        )
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives (B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    return VideoResponse(
        id=result.get("id", ""),
        model=result.get("model", body.model),
        status=result.get("status", "queued"),
        video_url=result.get("video_url"),
        metadata=result.get("metadata"),
    )
@router.post("/video/from-image", response_model=VideoResponse)
async def generate_video_from_image(
    body: VideoFromImageRequest,
    _: dict = Depends(get_current_user),
) -> VideoResponse:
    """Generate a video from an image and a text prompt.

    Raises:
        HTTPException: 502 when the upstream call fails.
    """
    try:
        result = await openrouter.generate_video_from_image(
            model=body.model,
            image_url=body.image_url,
            prompt=body.prompt,
            duration_seconds=body.duration_seconds,
            aspect_ratio=body.aspect_ratio,
        )
    except Exception as exc:
        # Chain with `from exc` so the upstream cause survives (B904).
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=f"OpenRouter error: {exc}",
        ) from exc
    return VideoResponse(
        id=result.get("id", ""),
        model=result.get("model", body.model),
        status=result.get("status", "queued"),
        video_url=result.get("video_url"),
        metadata=result.get("metadata"),
    )
+123
View File
@@ -0,0 +1,123 @@
"""OpenRouter API client (OpenAI-compatible interface)."""
import os
from typing import Any
import httpx
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
def _api_key() -> str:
key = os.getenv("OPENROUTER_API_KEY")
if not key:
raise RuntimeError(
"OPENROUTER_API_KEY environment variable is not set.")
return key
def _headers() -> dict[str, str]:
    """Build the standard request headers for OpenRouter calls.

    HTTP-Referer and X-Title are OpenRouter attribution headers; both can
    be overridden via the APP_URL / APP_NAME environment variables.
    """
    headers = {
        "Authorization": f"Bearer {_api_key()}",
        "Content-Type": "application/json",
    }
    headers["HTTP-Referer"] = os.getenv("APP_URL", "https://ai.allucanget.biz")
    headers["X-Title"] = os.getenv("APP_NAME", "AI Allucanget")
    return headers
async def list_models() -> list[dict[str, Any]]:
    """Return available models from OpenRouter."""
    base_url = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    async with httpx.AsyncClient(timeout=15) as client:
        # client.get builds and sends the request in one step.
        response = await client.get(f"{base_url}/models", headers=_headers())
        response.raise_for_status()
        payload = response.json()
    return payload.get("data", [])
async def chat_completion(
    model: str,
    messages: list[dict[str, str]],
    temperature: float = 0.7,
    max_tokens: int = 1024,
) -> dict[str, Any]:
    """Send a chat completion request to OpenRouter.

    Args:
        model: OpenRouter model identifier.
        messages: Chat history as [{"role": ..., "content": ...}, ...].
        temperature: Sampling temperature, forwarded verbatim.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The decoded JSON response body (OpenAI-compatible schema).

    Raises:
        httpx.HTTPStatusError: On a non-2xx upstream response.
        RuntimeError: If OPENROUTER_API_KEY is not configured.
    """
    base_url = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    payload = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    async with httpx.AsyncClient(timeout=60) as client:
        # BUG FIX: the original sent an undefined `resp` variable — the
        # request was never constructed, so every call raised NameError.
        # Build and send the POST to the OpenAI-compatible endpoint.
        response = await client.post(
            f"{base_url}/chat/completions", headers=_headers(), json=payload
        )
        response.raise_for_status()
        return response.json()
async def generate_image(
    model: str,
    prompt: str,
    n: int = 1,
    size: str = "1024x1024",
) -> dict[str, Any]:
    """Request image generation via OpenRouter /images/generations."""
    base_url = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body = {"model": model, "prompt": prompt, "n": n, "size": size}
    async with httpx.AsyncClient(timeout=120) as client:
        # client.post builds and sends the request in one step.
        response = await client.post(
            f"{base_url}/images/generations", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def generate_video(
    model: str,
    prompt: str,
    duration_seconds: int | None = None,
    aspect_ratio: str = "16:9",
) -> dict[str, Any]:
    """Request text-to-video generation via OpenRouter."""
    base_url = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body: dict[str, Any] = {
        "model": model,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
    }
    # Only forward a duration when the caller specified one.
    if duration_seconds is not None:
        body["duration_seconds"] = duration_seconds
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{base_url}/video/generations", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def generate_video_from_image(
    model: str,
    image_url: str,
    prompt: str,
    duration_seconds: int | None = None,
    aspect_ratio: str = "16:9",
) -> dict[str, Any]:
    """Request image-to-video generation via OpenRouter."""
    base_url = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body: dict[str, Any] = {
        "model": model,
        "image_url": image_url,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
    }
    # Only forward a duration when the caller specified one.
    if duration_seconds is not None:
        body["duration_seconds"] = duration_seconds
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{base_url}/video/generations/from-image",
            headers=_headers(),
            json=body,
        )
        response.raise_for_status()
        return response.json()