# Source: ai.allucanget.biz/backend/app/services/openrouter.py
# (173 lines, 5.7 KiB, Python)
"""OpenRouter API client (OpenAI-compatible interface)."""
import os
from typing import Any
import httpx
# Default API root for OpenRouter's OpenAI-compatible REST interface.
# Each request helper below allows overriding this via the
# OPENROUTER_BASE_URL environment variable.
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
def _api_key() -> str:
key = os.getenv("OPENROUTER_API_KEY")
if not key:
raise RuntimeError(
"OPENROUTER_API_KEY environment variable is not set.")
return key
def _headers() -> dict[str, str]:
    """Build the header set sent with every OpenRouter request.

    Includes the bearer token plus OpenRouter's attribution headers,
    which fall back to hard-coded defaults when the APP_URL / APP_NAME
    environment variables are absent.
    """
    headers = {
        "Authorization": f"Bearer {_api_key()}",
        "Content-Type": "application/json",
    }
    # OpenRouter attribution headers (ranking / analytics).
    headers["HTTP-Referer"] = os.getenv("APP_URL", "https://ai.allucanget.biz")
    headers["X-Title"] = os.getenv("APP_NAME", "AI Allucanget")
    return headers
async def list_models() -> list[dict[str, Any]]:
    """Return available models from OpenRouter.

    Returns:
        The ``data`` list from the /models response (empty if absent).

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    async with httpx.AsyncClient(timeout=15) as client:
        response = await client.get(f"{root}/models", headers=_headers())
        response.raise_for_status()
        payload = response.json()
    return payload.get("data", [])
async def chat_completion(
    model: str,
    messages: list[dict[str, str]],
    temperature: float = 0.7,
    max_tokens: int = 1024,
) -> dict[str, Any]:
    """Send a chat completion request to OpenRouter.

    Args:
        model: OpenRouter model identifier.
        messages: Conversation history as role/content dicts.
        temperature: Sampling temperature, forwarded verbatim.
        max_tokens: Upper bound on tokens in the completion.

    Returns:
        The parsed JSON response body.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    async with httpx.AsyncClient(timeout=60) as client:
        response = await client.post(
            f"{root}/chat/completions", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def generate_image(
    model: str,
    prompt: str,
    n: int = 1,
    size: str = "1024x1024",
) -> dict[str, Any]:
    """Request image generation via OpenRouter /images/generations.

    Args:
        model: OpenRouter model identifier.
        prompt: Text description of the desired image.
        n: Number of images to generate.
        size: Requested image dimensions, e.g. "1024x1024".

    Returns:
        The parsed JSON response body.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body = {"model": model, "prompt": prompt, "n": n, "size": size}
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{root}/images/generations", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def generate_video(
    model: str,
    prompt: str,
    duration_seconds: int | None = None,
    aspect_ratio: str = "16:9",
    resolution: str | None = None,
) -> dict[str, Any]:
    """Request text-to-video generation via OpenRouter.

    Args:
        model: OpenRouter model identifier.
        prompt: Text description of the desired video.
        duration_seconds: Clip length; omitted from the payload when None.
        aspect_ratio: Output aspect ratio, e.g. "16:9".
        resolution: Output resolution; omitted from the payload when None.

    Returns:
        The parsed JSON response body (typically includes a polling URL).

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body: dict[str, Any] = {
        "model": model,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
    }
    # Forward only the optional knobs the caller actually supplied.
    for field, value in (
        ("duration_seconds", duration_seconds),
        ("resolution", resolution),
    ):
        if value is not None:
            body[field] = value
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{root}/videos", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def generate_video_from_image(
    model: str,
    image_url: str,
    prompt: str,
    duration_seconds: int | None = None,
    aspect_ratio: str = "16:9",
    resolution: str | None = None,
) -> dict[str, Any]:
    """Request image-to-video generation via OpenRouter.

    Args:
        model: OpenRouter model identifier.
        image_url: URL of the source image to animate.
        prompt: Text description guiding the animation.
        duration_seconds: Clip length; omitted from the payload when None.
        aspect_ratio: Output aspect ratio, e.g. "16:9".
        resolution: Output resolution; omitted from the payload when None.

    Returns:
        The parsed JSON response body (typically includes a polling URL).

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    body: dict[str, Any] = {
        "model": model,
        "image_url": image_url,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
    }
    # Forward only the optional knobs the caller actually supplied.
    for field, value in (
        ("duration_seconds", duration_seconds),
        ("resolution", resolution),
    ):
        if value is not None:
            body[field] = value
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{root}/videos", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()
async def poll_video_status(polling_url: str) -> dict[str, Any]:
    """Check the status of a video generation job via its polling_url.

    Args:
        polling_url: Absolute status URL returned by a generation request.

    Returns:
        The parsed JSON status body.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    async with httpx.AsyncClient(timeout=15) as client:
        response = await client.get(polling_url, headers=_headers())
        response.raise_for_status()
        return response.json()
async def generate_image_chat(
    model: str,
    prompt: str,
    modalities: list[str] | None = None,
    image_config: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Request image generation via Chat Completions with modalities.

    Used by models like FLUX.2 Klein 4B and GPT-5 Image Mini that output
    images through the chat completions endpoint rather than /images/generations.

    Args:
        model: OpenRouter model identifier.
        prompt: Text prompt sent as a single user message.
        modalities: Output modalities; defaults to ["image"] when None.
        image_config: Extra image options; omitted from payload when falsy.

    Returns:
        The parsed JSON chat-completions response body.

    Raises:
        httpx.HTTPStatusError: If the API responds with an error status.
    """
    root = os.getenv("OPENROUTER_BASE_URL", OPENROUTER_BASE_URL)
    # Image-only models (FLUX) vs multimodal (GPT-5 Image Mini)
    requested = ["image"] if modalities is None else modalities
    body: dict[str, Any] = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "modalities": requested,
    }
    if image_config:
        body["image_config"] = image_config
    async with httpx.AsyncClient(timeout=120) as client:
        response = await client.post(
            f"{root}/chat/completions", headers=_headers(), json=body
        )
        response.raise_for_status()
        return response.json()