# Source: ai.allucanget.biz/backend/app/models/ai.py
"""Pydantic schemas for AI generation endpoints."""
from typing import Any
from pydantic import BaseModel
class ChatMessage(BaseModel):
    """A single message in a chat conversation."""

    # Expected values: "user" | "assistant" | "system"
    role: str
    content: str
class ChatRequest(BaseModel):
    """Request body for the chat completion endpoint."""

    model: str
    messages: list[ChatMessage]
    temperature: float = 0.7
    max_tokens: int = 1024
class ChatResponse(BaseModel):
    """Response body for the chat completion endpoint."""

    id: str
    model: str
    content: str
    # Token-usage accounting as returned by the provider, if any.
    usage: dict[str, Any] | None = None
class ModelInfo(BaseModel):
    """Metadata describing an available AI model."""

    id: str
    name: str
    context_length: int | None = None
    # Provider-specific pricing details; shape not normalized here.
    pricing: dict[str, Any] | None = None
# --- Text generation ---
class TextRequest(BaseModel):
    """Request body for the single-prompt text generation endpoint."""

    model: str
    prompt: str
    system_prompt: str | None = None
    temperature: float = 0.7
    max_tokens: int = 1024
class TextResponse(BaseModel):
    """Response body for the text generation endpoint."""

    id: str
    model: str
    content: str
    # Token-usage accounting as returned by the provider, if any.
    usage: dict[str, Any] | None = None
# --- Image generation ---
class ImageRequest(BaseModel):
    """Request body for the image generation endpoint."""

    model: str
    prompt: str
    # Number of images to generate.
    n: int = 1
    # "WIDTHxHEIGHT" string, e.g. "1024x1024".
    size: str = "1024x1024"
class ImageResult(BaseModel):
    """One generated image; either a URL or inline base64 data is set."""

    url: str | None = None
    b64_json: str | None = None
    # The prompt as rewritten by the provider, if it did so.
    revised_prompt: str | None = None
class ImageResponse(BaseModel):
    """Response body for the image generation endpoint."""

    id: str
    model: str
    images: list[ImageResult]
# --- Video generation ---
class VideoRequest(BaseModel):
    """Request body for text-to-video generation."""

    model: str
    prompt: str
    duration_seconds: int | None = None
    aspect_ratio: str = "16:9"
class VideoFromImageRequest(BaseModel):
    """Request body for image-to-video generation."""

    model: str
    # Source image the video is generated from.
    image_url: str
    prompt: str
    duration_seconds: int | None = None
    aspect_ratio: str = "16:9"
class VideoResponse(BaseModel):
    """Response body for video generation; jobs may complete asynchronously."""

    id: str
    model: str
    # Expected values: "queued" | "processing" | "completed" | "failed"
    status: str
    # URL to poll for job status while the video is still rendering.
    polling_url: str | None = None
    video_urls: list[str] | None = None
    # First entry of video_urls, duplicated for caller convenience.
    video_url: str | None = None
    error: str | None = None
    metadata: dict[str, Any] | None = None