Compare commits

...

4 Commits

18 changed files with 636 additions and 415 deletions
+7
View File
@@ -114,6 +114,13 @@ def _run_migrations(conn: duckdb.DuckDBPyConnection) -> None:
conn.execute(""" conn.execute("""
ALTER TABLE models_cache ADD COLUMN IF NOT EXISTS output_modalities VARCHAR ALTER TABLE models_cache ADD COLUMN IF NOT EXISTS output_modalities VARCHAR
""") """)
# Migration: add video job request params + generation type
conn.execute("""
ALTER TABLE generated_videos ADD COLUMN IF NOT EXISTS request_params VARCHAR
""")
conn.execute("""
ALTER TABLE generated_videos ADD COLUMN IF NOT EXISTS generation_type VARCHAR DEFAULT 'text_to_video'
""")
_seed_admin(conn) _seed_admin(conn)
+9 -1
View File
@@ -5,7 +5,9 @@ from .routers import ai
from .routers import generate from .routers import generate
from .routers import images from .routers import images
from .routers import models from .routers import models
from .db import close_db, init_db from .db import close_db, get_conn, get_write_lock, init_db
from .services.video_worker import run_worker
import asyncio
import os import os
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
@@ -19,7 +21,13 @@ load_dotenv()
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(app: FastAPI):
init_db() init_db()
worker_task = asyncio.create_task(run_worker(get_conn(), get_write_lock()))
yield yield
worker_task.cancel()
try:
await worker_task
except asyncio.CancelledError:
pass
close_db() close_db()
+40
View File
@@ -185,3 +185,43 @@ async def admin_mark_timed_out(_: dict = Depends(require_admin)) -> dict[str, in
conn = get_conn() conn = get_conn()
count = mark_timed_out_video_jobs(conn, timeout_minutes=120) count = mark_timed_out_video_jobs(conn, timeout_minutes=120)
return {"timed_out": count} return {"timed_out": count}
@router.post("/videos/{job_id}/retry", status_code=200)
async def admin_retry_video_job(job_id: str, _: dict = Depends(require_admin)) -> dict[str, str]:
"""Reset a failed or cancelled video job back to 'queued' for reprocessing."""
conn = get_conn()
lock = get_write_lock()
now = datetime.now(timezone.utc)
async with lock:
row = conn.execute(
"SELECT status FROM generated_videos WHERE id = ?", [job_id]
).fetchone()
if row is None:
from fastapi import HTTPException
raise HTTPException(status_code=404, detail="Job not found")
if row[0] not in ("failed", "cancelled"):
from fastapi import HTTPException
raise HTTPException(
status_code=400, detail=f"Cannot retry job with status '{row[0]}'")
conn.execute(
"UPDATE generated_videos SET status = 'queued', updated_at = ? WHERE id = ?",
[now, job_id],
)
return {"status": "ok", "job_id": job_id}
@router.delete("/videos/{job_id}", status_code=200)
async def admin_delete_video_job(job_id: str, _: dict = Depends(require_admin)) -> dict[str, str]:
"""Permanently delete a video job record."""
conn = get_conn()
lock = get_write_lock()
async with lock:
row = conn.execute(
"SELECT id FROM generated_videos WHERE id = ?", [job_id]
).fetchone()
if row is None:
from fastapi import HTTPException
raise HTTPException(status_code=404, detail="Job not found")
conn.execute("DELETE FROM generated_videos WHERE id = ?", [job_id])
return {"status": "ok", "job_id": job_id}
+30 -73
View File
@@ -1,4 +1,5 @@
"""Generate router: text, image, video, and image-to-video generation.""" """Generate router: text, image, video, and image-to-video generation."""
import json
from datetime import datetime, timezone from datetime import datetime, timezone
import httpx import httpx
@@ -209,54 +210,32 @@ async def generate_video(
body: VideoRequest, body: VideoRequest,
current_user: dict = Depends(get_current_user), current_user: dict = Depends(get_current_user),
) -> VideoResponse: ) -> VideoResponse:
"""Generate a video from a text prompt.""" """Queue a text-to-video generation job for background processing."""
try:
result = await openrouter.generate_video(
model=body.model,
prompt=body.prompt,
duration_seconds=body.duration_seconds,
aspect_ratio=body.aspect_ratio,
resolution=body.resolution,
)
except httpx.HTTPStatusError as exc:
detail = (
f"OpenRouter API error: {exc.response.status_code} - {exc.response.text}"
)
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY, detail=detail)
except Exception as exc:
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY, detail=f"OpenRouter error: {exc}"
)
user_id = current_user.get("id") or current_user.get("sub") user_id = current_user.get("id") or current_user.get("sub")
job_id = result.get("id", "")
polling_url = result.get("polling_url")
job_status = result.get("status", "pending")
now = datetime.now(timezone.utc).replace(tzinfo=None) now = datetime.now(timezone.utc).replace(tzinfo=None)
request_params = json.dumps({
"model": body.model,
"prompt": body.prompt,
"duration_seconds": body.duration_seconds,
"aspect_ratio": body.aspect_ratio,
"resolution": body.resolution,
})
db_id = None db_id = None
async with get_write_lock(): async with get_write_lock():
conn = get_conn() conn = get_conn()
row = conn.execute( row = conn.execute(
"""INSERT INTO generated_videos (user_id, job_id, model_id, prompt, polling_url, status, created_at, updated_at) """INSERT INTO generated_videos
VALUES (?, ?, ?, ?, ?, ?, ?, ?) RETURNING id""", (user_id, job_id, model_id, prompt, status, request_params, generation_type, created_at, updated_at)
[user_id, job_id, body.model, body.prompt, VALUES (?, ?, ?, ?, 'queued', ?, 'text_to_video', ?, ?) RETURNING id""",
polling_url, job_status, now, now], [user_id, "", body.model, body.prompt, request_params, now, now],
).fetchone() ).fetchone()
if row: if row:
db_id = str(row[0]) db_id = str(row[0])
urls = result.get("unsigned_urls") or result.get("video_urls")
return VideoResponse( return VideoResponse(
id=job_id, id="",
db_id=db_id, db_id=db_id,
model=body.model, model=body.model,
status=job_status, status="queued",
polling_url=polling_url,
video_urls=urls,
video_url=(urls or [None])[0],
error=result.get("error"),
metadata=result.get("metadata"),
) )
@@ -265,55 +244,33 @@ async def generate_video_from_image(
body: VideoFromImageRequest, body: VideoFromImageRequest,
current_user: dict = Depends(get_current_user), current_user: dict = Depends(get_current_user),
) -> VideoResponse: ) -> VideoResponse:
"""Generate a video from an image and a text prompt.""" """Queue an image-to-video generation job for background processing."""
try:
result = await openrouter.generate_video_from_image(
model=body.model,
image_url=body.image_url,
prompt=body.prompt,
duration_seconds=body.duration_seconds,
aspect_ratio=body.aspect_ratio,
resolution=body.resolution,
)
except httpx.HTTPStatusError as exc:
detail = (
f"OpenRouter API error: {exc.response.status_code} - {exc.response.text}"
)
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY, detail=detail)
except Exception as exc:
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY, detail=f"OpenRouter error: {exc}"
)
user_id = current_user.get("id") or current_user.get("sub") user_id = current_user.get("id") or current_user.get("sub")
job_id = result.get("id", "")
polling_url = result.get("polling_url")
job_status = result.get("status", "pending")
now = datetime.now(timezone.utc).replace(tzinfo=None) now = datetime.now(timezone.utc).replace(tzinfo=None)
request_params = json.dumps({
"model": body.model,
"image_url": body.image_url,
"prompt": body.prompt,
"duration_seconds": body.duration_seconds,
"aspect_ratio": body.aspect_ratio,
"resolution": body.resolution,
})
db_id = None db_id = None
async with get_write_lock(): async with get_write_lock():
conn = get_conn() conn = get_conn()
row = conn.execute( row = conn.execute(
"""INSERT INTO generated_videos (user_id, job_id, model_id, prompt, polling_url, status, created_at, updated_at) """INSERT INTO generated_videos
VALUES (?, ?, ?, ?, ?, ?, ?, ?) RETURNING id""", (user_id, job_id, model_id, prompt, status, request_params, generation_type, created_at, updated_at)
[user_id, job_id, body.model, body.prompt, VALUES (?, ?, ?, ?, 'queued', ?, 'image_to_video', ?, ?) RETURNING id""",
polling_url, job_status, now, now], [user_id, "", body.model, body.prompt, request_params, now, now],
).fetchone() ).fetchone()
if row: if row:
db_id = str(row[0]) db_id = str(row[0])
urls = result.get("unsigned_urls") or result.get("video_urls")
return VideoResponse( return VideoResponse(
id=job_id, id="",
db_id=db_id, db_id=db_id,
model=body.model, model=body.model,
status=job_status, status="queued",
polling_url=polling_url,
video_urls=urls,
video_url=(urls or [None])[0],
error=result.get("error"),
metadata=result.get("metadata"),
) )
+158
View File
@@ -0,0 +1,158 @@
"""Background worker: processes queued/processing video generation jobs."""
import asyncio
import json
import logging
from datetime import datetime, timezone
import duckdb
from . import openrouter
from .models import mark_timed_out_video_jobs
logger = logging.getLogger(__name__)
# Interval between worker ticks (seconds)
WORKER_INTERVAL = 15
# Jobs to process per tick (prevents unbounded bursts)
BATCH_SIZE = 5
async def process_queued_jobs(conn: duckdb.DuckDBPyConnection, lock: asyncio.Lock) -> int:
    """Submit queued jobs to OpenRouter and transition them to 'processing'.

    Reads up to BATCH_SIZE 'queued' rows (oldest first), replays the stored
    ``request_params`` against the matching OpenRouter endpoint, and records
    the provider job id / polling URL. Jobs whose submission raises are
    marked 'failed'.

    Returns:
        Number of jobs successfully submitted this tick.
    """
    # SELECT without the write lock: reads don't need it (per the project's
    # DuckDB concurrency notes, MVCC gives consistent read snapshots).
    rows = conn.execute(
        """SELECT id, generation_type, request_params
        FROM generated_videos
        WHERE status = 'queued' AND request_params IS NOT NULL
        ORDER BY created_at ASC
        LIMIT ?""",
        [BATCH_SIZE],
    ).fetchall()
    processed = 0
    for row in rows:
        db_id, generation_type, raw_params = str(row[0]), row[1], row[2]
        try:
            params = json.loads(raw_params)
        except (json.JSONDecodeError, TypeError):
            # NOTE(review): job stays 'queued' and is re-read every tick,
            # logging this error repeatedly — consider marking it 'failed'.
            logger.error("Bad request_params for video job %s", db_id)
            continue
        try:
            if generation_type == "image_to_video":
                result = await openrouter.generate_video_from_image(
                    model=params["model"],
                    image_url=params.get("image_url", ""),
                    prompt=params.get("prompt", ""),
                    duration_seconds=params.get("duration_seconds"),
                    aspect_ratio=params.get("aspect_ratio", "16:9"),
                    resolution=params.get("resolution"),
                )
            else:
                result = await openrouter.generate_video(
                    model=params["model"],
                    prompt=params.get("prompt", ""),
                    duration_seconds=params.get("duration_seconds"),
                    aspect_ratio=params.get("aspect_ratio", "16:9"),
                    resolution=params.get("resolution"),
                )
        except Exception as exc:
            # Submission failed (network error, bad model, missing "model"
            # key, ...): mark the job failed so it is not retried forever.
            logger.warning("OpenRouter call failed for job %s: %s", db_id, exc)
            now = datetime.now(timezone.utc).replace(tzinfo=None)
            async with lock:
                conn.execute(
                    "UPDATE generated_videos SET status = 'failed', updated_at = ? WHERE id = ?",
                    [now, db_id],
                )
            continue
        job_id = result.get("id", "")
        polling_url = result.get("polling_url")
        new_status = result.get("status", "processing")
        # Normalise terminal statuses returned immediately (rare but possible)
        if new_status not in ("queued", "processing", "completed", "failed", "cancelled"):
            new_status = "processing"
        urls = result.get("unsigned_urls") or result.get("video_urls")
        video_url = (urls or [None])[0]
        now = datetime.now(timezone.utc).replace(tzinfo=None)
        async with lock:
            conn.execute(
                """UPDATE generated_videos
                SET job_id = ?, polling_url = ?, status = ?, video_url = ?, updated_at = ?
                WHERE id = ?""",
                [job_id, polling_url, new_status, video_url, now, db_id],
            )
        processed += 1
        # Fix: the format string had two adjacent %s placeholders with no
        # separator, producing logs like "Video job 42processing".
        logger.info("Video job %s -> %s (provider id: %s)",
                    db_id, new_status, job_id)
    return processed
async def process_processing_jobs(conn: duckdb.DuckDBPyConnection, lock: asyncio.Lock) -> int:
    """Poll in-progress jobs and update to 'completed' or 'failed'.

    Polls up to BATCH_SIZE 'processing' rows (least recently updated first)
    via their stored polling URL. Rows whose provider status is not yet
    terminal are left untouched and re-checked next tick.

    Returns:
        Number of jobs transitioned to a terminal status this tick.
    """
    rows = conn.execute(
        """SELECT id, polling_url
        FROM generated_videos
        WHERE status = 'processing' AND polling_url IS NOT NULL
        ORDER BY updated_at ASC
        LIMIT ?""",
        [BATCH_SIZE],
    ).fetchall()
    updated = 0
    for row in rows:
        db_id, polling_url = str(row[0]), row[1]
        try:
            result = await openrouter.poll_video_status(polling_url)
        except Exception as exc:
            # Transient polling failure: keep the job 'processing' and retry
            # next tick (timeouts are handled by mark_timed_out_video_jobs).
            logger.warning("Polling failed for job %s: %s", db_id, exc)
            continue
        job_status = result.get("status", "processing")
        if job_status not in ("completed", "failed"):
            continue  # still in-progress — check again next tick
        urls = result.get("unsigned_urls") or result.get("video_urls")
        video_url = (urls or [None])[0]
        now = datetime.now(timezone.utc).replace(tzinfo=None)
        async with lock:
            conn.execute(
                """UPDATE generated_videos
                SET status = ?, video_url = ?, updated_at = ?
                WHERE id = ?""",
                [job_status, video_url, now, db_id],
            )
        updated += 1
        # Fix: format string previously had no separator between the two
        # placeholders ("Video job %s%s"), producing "Video job 42completed".
        logger.info("Video job %s -> %s", db_id, job_status)
    return updated
async def worker_tick(conn: duckdb.DuckDBPyConnection, lock: asyncio.Lock) -> None:
    """Single worker tick: submit queued, poll processing, expire timed-out."""
    submitted = await process_queued_jobs(conn, lock)
    checked = await process_processing_jobs(conn, lock)
    # Expire jobs stuck past the 120-minute deadline; this writes, so it
    # runs under the shared write lock.
    async with lock:
        expired = mark_timed_out_video_jobs(conn, timeout_minutes=120)
    # Only log ticks that actually did something, to keep logs quiet.
    if submitted or checked or expired:
        logger.info(
            "Worker tick: submitted=%d polled=%d timed_out=%d",
            submitted, checked, expired,
        )
async def run_worker(conn: duckdb.DuckDBPyConnection, lock: asyncio.Lock) -> None:
    """Infinite loop: run a worker tick every WORKER_INTERVAL seconds.

    Exits cleanly (logging a stop message) when the surrounding task is
    cancelled, whether cancellation lands during a tick or during the sleep.
    """
    logger.info("Video worker started (interval=%ds)", WORKER_INTERVAL)
    while True:
        try:
            await worker_tick(conn, lock)
            # Sleep inside the try: the task spends nearly all its time here,
            # so cancellation during the sleep must also reach the clean-exit
            # path below (previously it escaped as an uncaught CancelledError).
            await asyncio.sleep(WORKER_INTERVAL)
        except asyncio.CancelledError:
            logger.info("Video worker stopped.")
            return
        except Exception as exc:
            logger.exception("Unexpected error in video worker: %s", exc)
            # Back off for one interval after an unexpected error, matching
            # the original cadence of one tick per WORKER_INTERVAL.
            await asyncio.sleep(WORKER_INTERVAL)
+4 -1
View File
@@ -4,7 +4,8 @@ Describes the relevant requirements and the driving forces that software archite
## Requirements Overview ## Requirements Overview
**Project name**: All You Can GET AI Biz **Project name**: All You Can GET AI
**URL**: [https://ai.allucanget.biz](https://ai.allucanget.biz)
**Purpose**: Provide AI-powered text, image, and video generation services via a web application. **Purpose**: Provide AI-powered text, image, and video generation services via a web application.
Users can choose between different AI models for: Users can choose between different AI models for:
@@ -14,6 +15,8 @@ Users can choose between different AI models for:
- Text-to-video generation - Text-to-video generation
- Image-to-video generation - Image-to-video generation
Users can create accounts, log in, and view their generation history in a gallery. An admin dashboard allows managing users, models, and video generation jobs.
## Quality Goals ## Quality Goals
| Priority | Quality Goal | Scenario | | Priority | Quality Goal | Scenario |
+6 -6
View File
@@ -5,21 +5,21 @@ Static decomposition of the system into building blocks (modules, components, su
## Level 1 Whitebox Overall System ## Level 1 Whitebox Overall System
```text ```text
┌───────────────────────┐ ┌───────────────────────
│ Frontend (Flask) │ │ Frontend (Flask) │
└───────┬───────────────┘ └───────┬───────────────
│ REST API calls │ REST API calls
┌───────▼───────────────┐ ┌───────▼───────────────
│ FastAPI Backend │ │ FastAPI Backend │
│ ├─ Auth Service │ │ ├─ Auth Service │
│ ├─ User Service │ │ ├─ User Service │
│ ├─ AI Service │ │ ├─ AI Service │
│ └─ DB Service (DuckDB)│ │ └─ DB Service (DuckDB)│
└───────┬───────────────┘ └───────┬───────────────
│ DB access │ DB access
┌───────▼───────────────┐ ┌───────▼───────────────
│ DuckDB Database │ │ DuckDB Database │
└───────────────────────┘ └───────────────────────
``` ```
**Motivation:** Separating the UI (Flask) from the API (FastAPI) allows independent scaling and testing of each layer. **Motivation:** Separating the UI (Flask) from the API (FastAPI) allows independent scaling and testing of each layer.
+54 -46
View File
@@ -5,34 +5,45 @@ Describes:
1. Technical infrastructure used to execute your system, with infrastructure elements like geographical locations, environments, computers, processors, channels and net topologies. 1. Technical infrastructure used to execute your system, with infrastructure elements like geographical locations, environments, computers, processors, channels and net topologies.
2. Mapping of (software) building blocks to that infrastructure elements. 2. Mapping of (software) building blocks to that infrastructure elements.
**See**: [Coolify Deployment Guide](./deployment/coolify.md) for detailed instructions.
## Infrastructure Level 1 ## Infrastructure Level 1
```text Hosted on a single VM running docker containers, deployed via Coolify with Nixpacks to 192.168.88.18 for production.
┌────────────────────────────────────────────┐
│ Host / VM │ Containers run behind nginx at 192.168.88.11 which handles TLS termination and reverse proxying to the frontend on port 12016 and backend on port 12015. The database is a file on the host filesystem at `data/app.db` accessed by the backend service.
│ ┌─────────────┐ ┌────────────────────┐ │
│ │ frontend │ │ backend │ │ ```mermaid
│ │ (Flask) │ │ (FastAPI) │ │ graph TD
│ │ :12016 │ │ :12015 │ │ Users[Users / Internet]
│ └──────┬──────┘ └─────────┬──────────┘ │ Nginx[nginx reverse proxy\nTLS termination]
│ │ │ │ Users -->|HTTPS| Nginx
│ └────────┬──────────┘ │
│ │ │ subgraph Coolify Server
│ ┌───────▼────────┐ │ direction TB
│ │ db (DuckDB) │ │ subgraph AI Frontend
│ │ data/app.db │ │ AI_Frontend[AI Frontend\nFlask\nServes HTML/CSS/JS UI]
│ └────────────────┘ │ end
└────────────────────────────────────────────┘ subgraph AI Backend
AI_Backend[AI Backend\nFastAPI\nCommunicates with openrouter.ai API]
db[(DuckDB Database\nFile: data/app.db)]
AI_Backend --> db
end
AI_Frontend -->|BACKEND_URL:12015| AI_Backend
end
Nginx -->|12016| AI_Frontend
``` ```
**Motivation:** All three components run on a single VM (or as Docker containers) for simplicity and low operational overhead. **Motivation:** All three components run as Docker containers for simplicity and low operational overhead.
**Quality and/or Performance Features:** The frontend and backend are stateless; DuckDB persists data on the host filesystem. **Quality and/or Performance Features:** The frontend and backend are stateless; DuckDB persists data on the host filesystem.
**Mapping of Building Blocks to Infrastructure:** **Mapping of Building Blocks to Infrastructure:**
| Building Block | Container / Process | Port | | Building Block | Container / Process | Port |
| --------------- | ---------------------------- | ----- | | --------------- | ---------------------------- | --------------- |
| Nginx | `nginx` | 80/443 (public) |
| Coolify Server | `coolify` | — |
| Flask frontend | `frontend` | 12016 | | Flask frontend | `frontend` | 12016 |
| FastAPI backend | `backend` | 12015 | | FastAPI backend | `backend` | 12015 |
| DuckDB | File on host (`data/app.db`) | — | | DuckDB | File on host (`data/app.db`) | — |
@@ -41,35 +52,32 @@ Describes:
### Coolify with Nixpacks (Production) ### Coolify with Nixpacks (Production)
Both services are deployed as separate Nixpacks resources in Coolify: Both services are deployed as separate Nixpacks resources in Coolify, which results in two separate containers running on the same host. The database is a file on the host filesystem, mounted as a volume in the backend container.
```text #### Frontend
┌──────────────────────────────────────────────────────────┐
│ Coolify Server │ ```mermaid
│ ┌────────────────────────────┐ │ graph TD
│ │ Backend Service (FastAPI) │ │ subgraph Coolify Server
│ │ - Base Dir: /backend │ │ direction TB
│ │ - Port: 12015 │ │ subgraph AI Frontend
│ │ - Volume: /app/data │ │ AI_Frontend[AI Frontend\nNixpacks\nBase Dir: /frontend]
│ ├────────────────────────────┤ │ end
│ │ Frontend Service (Flask) │ │ end
│ │ - Base Dir: /frontend │ │ Users[Users / Internet] -->|HTTPS| AI_Frontend
│ │ - Port: 12016 (public) │ │
│ │ - BACKEND_URL: :12015 │ │
│ └────────────────────────────┘ │
│ ▲ │
│ Coolify reverse proxy (TLS termination) │
└──────────────────────────────────────────────────────────┘
Users / Internet
``` ```
**Deployment Steps:** #### Backend
1. Create backend Nixpacks service in Coolify with Base Directory `/backend` ```mermaid
2. Create frontend Nixpacks service with Base Directory `/frontend` graph TD
3. Set environment variables per service subgraph Coolify Server
4. Attach domain to frontend on port `12016` direction TB
5. Enable Auto HTTPS in Coolify subgraph AI Backend
AI_Backend[AI Backend\nNixpacks\nBase Dir: /backend]
**See**: [Coolify Deployment Guide](./deployment/coolify.md) for detailed instructions. db[(DuckDB Database\nVolume: /app/data)]
AI_Backend --> db
end
end
Frontend[Frontend Container] -->|BACKEND_URL:12015| AI_Backend
```
+8 -69
View File
@@ -4,6 +4,14 @@ Describes crosscutting concepts (practices, patterns, regulations or solution id
> Pick **only** the most-needed topics for your system. > Pick **only** the most-needed topics for your system.
## OpenRouter API Integration
see [docs/8.1-openrouter.md](./8.1-openrouter.md) for details on how the backend integrates with OpenRouter for multi-modal AI generation, including image and video generation flows.
## DuckDB Concurrency and Storage
See [docs/8.2-duckdb.md](./8.2-duckdb.md) for details on how the backend handles concurrent access to DuckDB and manages the database file on the host filesystem.
## Security ## Security
- All API endpoints (except `/auth/login`) require a valid JWT in the `Authorization: Bearer` header. - All API endpoints (except `/auth/login`) require a valid JWT in the `Authorization: Bearer` header.
@@ -25,72 +33,3 @@ Describes crosscutting concepts (practices, patterns, regulations or solution id
- All secrets (API keys, DB path, JWT secret) loaded from environment variables or `.env` file. - All secrets (API keys, DB path, JWT secret) loaded from environment variables or `.env` file.
- No secrets committed to source control. - No secrets committed to source control.
## DuckDB Concurrency and Storage
### Single Writer Per Process
DuckDB allows only one process to open the database file in read-write mode at a time. The FastAPI backend must be run with a single worker (`uvicorn --workers 1`). Running multiple workers against the same DuckDB file will cause startup errors.
### asyncio.Lock for Writes
All database write operations (`INSERT`, `UPDATE`, `DELETE`) in the FastAPI async context are wrapped in a single `asyncio.Lock` (`get_write_lock()` from `backend/app/db.py`). This prevents concurrent coroutines from issuing overlapping writes within the single process, which would otherwise raise DuckDB optimistic concurrency errors.
Read operations (`SELECT`) do not require the lock — DuckDB's MVCC provides consistent read snapshots.
### Schema
```sql
CREATE TABLE users (
id UUID DEFAULT uuid() PRIMARY KEY,
email VARCHAR NOT NULL UNIQUE,
password_hash VARCHAR NOT NULL,
role VARCHAR DEFAULT 'user',
created_at TIMESTAMP DEFAULT now(),
updated_at TIMESTAMP DEFAULT now()
);
CREATE TABLE refresh_tokens (
jti UUID DEFAULT uuid() PRIMARY KEY,
user_id UUID NOT NULL, -- soft FK to users.id
issued_at TIMESTAMP DEFAULT now(),
expires_at TIMESTAMP NOT NULL,
revoked BOOLEAN DEFAULT false
);
```
> The `REFERENCES users(id)` foreign key is intentionally omitted from `refresh_tokens`. DuckDB fires FK checks on `UPDATE` of the parent table (including email changes), causing false constraint violations. Referential integrity is enforced manually: deleting a user also deletes their refresh tokens in the same write transaction.
### Access Tokens
Access tokens are **stateless** JWTs — not stored in the database. They are validated by signature and expiry claim only. The short TTL (15 minutes) limits the blast radius if a token is leaked.
### Refresh Tokens
Refresh tokens store a JTI (JWT ID) UUID in the `refresh_tokens` table. On each use the old JTI is revoked and a new one issued (rotation). On logout the JTI is immediately revoked. Expired and revoked tokens can be purged via `POST /admin/tokens/purge`.
### Future: AI Generation History
AI generation metadata (model, prompt, cost, result URLs) can be stored as JSON columns in a future `generation_history` table in DuckDB, enabling per-user analytics and usage dashboards at zero extra infrastructure cost.
## OpenRouter API Integration
### Image Generation
Image generation uses two different OpenRouter endpoints depending on the model:
- **Legacy endpoint** (`/images/generations`): Used by DALL-E 3 and similar models. Returns `data[].url` and `data[].b64_json`.
- **Chat completions** (`/chat/completions` with `modalities: ["image"]`): Used by FLUX.2 Klein 4B and GPT-5 Image Mini. Returns `choices[0].message.images[].image_url.url` as base64 data URLs.
The router auto-detects the model type and routes accordingly. Image configuration (`aspect_ratio`, `image_size`) is passed via `image_config` for chat-based models.
### Video Generation
Video generation uses OpenRouter's `/api/v1/videos` endpoint with a **submit-and-poll** pattern:
1. `POST /api/v1/videos` with `model`, `prompt`, `aspect_ratio`, `resolution`, `duration_seconds`
2. Response: `{"id": "job_id", "polling_url": "https://..."}` with `status: "queued"`
3. Poll `GET polling_url` every 5 seconds until `status` is `"completed"` or `"failed"`
4. Completed response includes `unsigned_urls: [str]` array with video download URLs
Supported models: `openai/sora-2-pro`, `google/veo-3.1-fast`. Both text-to-video and image-to-video use the same `/api/v1/videos` endpoint (image-to-video includes `image_url` in the request body).
+26
View File
@@ -0,0 +1,26 @@
# OpenRouter API Integration
## Text Generation
> [!warning]
> TODO: Add more details on how the backend integrates with OpenRouter for text generation, including chat completions and single-prompt generation flows.
## Image Generation
Image generation uses two different OpenRouter endpoints depending on the model:
- **Legacy endpoint** (`/images/generations`): Used by DALL-E 3 and similar models. Returns `data[].url` and `data[].b64_json`.
- **Chat completions** (`/chat/completions` with `modalities: ["image"]`): Used by FLUX.2 Klein 4B and GPT-5 Image Mini. Returns `choices[0].message.images[].image_url.url` as base64 data URLs.
The router auto-detects the model type and routes accordingly. Image configuration (`aspect_ratio`, `image_size`) is passed via `image_config` for chat-based models.
## Video Generation
Video generation uses OpenRouter's `/api/v1/videos` endpoint with a **submit-and-poll** pattern:
1. `POST /api/v1/videos` with `model`, `prompt`, `aspect_ratio`, `resolution`, `duration_seconds`
2. Response: `{"id": "job_id", "polling_url": "https://..."}` with `status: "queued"`
3. Poll `GET polling_url` every 5 seconds until `status` is `"completed"` or `"failed"`
4. Completed response includes `unsigned_urls: [str]` array with video download URLs
Supported models: `openai/sora-2-pro`, `google/veo-3.1-fast`. Both text-to-video and image-to-video use the same `/api/v1/videos` endpoint (image-to-video includes `image_url` in the request body).
+46
View File
@@ -0,0 +1,46 @@
# DuckDB Concurrency and Storage
## Single Writer Per Process
DuckDB allows only one process to open the database file in read-write mode at a time. The FastAPI backend must be run with a single worker (`uvicorn --workers 1`). Running multiple workers against the same DuckDB file will cause startup errors.
## asyncio.Lock for Writes
All database write operations (`INSERT`, `UPDATE`, `DELETE`) in the FastAPI async context are wrapped in a single `asyncio.Lock` (`get_write_lock()` from `backend/app/db.py`). This prevents concurrent coroutines from issuing overlapping writes within the single process, which would otherwise raise DuckDB optimistic concurrency errors.
Read operations (`SELECT`) do not require the lock — DuckDB's MVCC provides consistent read snapshots.
## Schema
```sql
CREATE TABLE users (
id UUID DEFAULT uuid() PRIMARY KEY,
email VARCHAR NOT NULL UNIQUE,
password_hash VARCHAR NOT NULL,
role VARCHAR DEFAULT 'user',
created_at TIMESTAMP DEFAULT now(),
updated_at TIMESTAMP DEFAULT now()
);
CREATE TABLE refresh_tokens (
jti UUID DEFAULT uuid() PRIMARY KEY,
user_id UUID NOT NULL, -- soft FK to users.id
issued_at TIMESTAMP DEFAULT now(),
expires_at TIMESTAMP NOT NULL,
revoked BOOLEAN DEFAULT false
);
```
> The `REFERENCES users(id)` foreign key is intentionally omitted from `refresh_tokens`. DuckDB fires FK checks on `UPDATE` of the parent table (including email changes), causing false constraint violations. Referential integrity is enforced manually: deleting a user also deletes their refresh tokens in the same write transaction.
## Access Tokens
Access tokens are **stateless** JWTs — not stored in the database. They are validated by signature and expiry claim only. The short TTL (15 minutes) limits the blast radius if a token is leaked.
## Refresh Tokens
Refresh tokens store a JTI (JWT ID) UUID in the `refresh_tokens` table. On each use the old JTI is revoked and a new one issued (rotation). On logout the JTI is immediately revoked. Expired and revoked tokens can be purged via `POST /admin/tokens/purge`.
## Future: AI Generation History
AI generation metadata (model, prompt, cost, result URLs) can be stored as JSON columns in a future `generation_history` table in DuckDB, enabling per-user analytics and usage dashboards at zero extra infrastructure cost.
-185
View File
@@ -173,188 +173,3 @@ All required environment variables:
- [ ] Domain names configured
- [ ] Health checks passing
- [ ] Logs reviewed for errors
1. In Coolify, click **Add Resource** → **Deploy a new resource** → **Git**
2. Connect your Git repository (`git.allucanget.biz`)
3. Select the `ai.allucanget.biz` repository
4. Choose the `main` branch
5. Set **Build Pack** to `nixpacks`
6. **CRITICAL: Set Base Directory to `/backend`** — this tells Nixpacks to look in the `backend/` subdirectory for `requirements.txt` and the Python application
7. Set **Ports Exposed** to `12015`
8. Set **Start Command** to:
```txt
uvicorn app.main:app --host 0.0.0.0 --port 12015
```
9. Click **Create Resource**
> **Important:** Nixpacks copies the **contents** of the Base Directory to `/app/` in the container. When Base Directory is `/backend`, the `backend/` folder wrapper is removed — only `app/`, `tests/`, and `requirements.txt` are copied. Therefore the start command uses `app.main:app` (not `backend.app.main:app`).
### Backend Environment Variables
Add these as **Runtime** environment variables in Coolify:
| Variable | Description | Example |
| -------------------- | ------------------------------------ | ------------------------------------ |
| `OPENROUTER_API_KEY` | OpenRouter API key for AI generation | `sk-or-v1-...` |
| `JWT_SECRET` | Secret key for JWT token signing | Generate with `openssl rand -hex 32` |
| `APP_URL` | Public URL of the backend | `https://api.ai.allucanget.biz` |
| `APP_NAME` | Application name | `All You Can GET AI` |
| `CORS_ORIGINS` | Comma-separated allowed origins | `https://ai.allucanget.biz` |
## Step 2: Create Frontend Service
1. In Coolify, click **Add Resource** → **Deploy a new resource** → **Git**
2. Select the same repository
3. Choose the `main` branch
4. Set **Build Pack** to `nixpacks`
5. **CRITICAL: Set Base Directory to `/frontend`** — this tells Nixpacks to look in the `frontend/` subdirectory for `requirements.txt` and the Python application
6. Set **Ports Exposed** to `12016`
7. Set **Start Command** to:
```txt
gunicorn app.main:app --bind 0.0.0.0:12016 --workers 2 --timeout 120
```
8. Click **Create Resource**
> **Note:** The frontend uses `requirements.txt` for production dependencies and `requirements-dev.txt` for development dependencies (like pytest). Nixpacks will automatically detect and install only the production dependencies.
> **Important:** Nixpacks copies the **contents** of the Base Directory to `/app/` in the container. When Base Directory is `/frontend`, the `frontend/` folder wrapper is removed — only `app/`, `tests/`, and `requirements.txt` are copied. Therefore the start command uses `app.main:app` (not `frontend.app.main:app`).
### Frontend Environment Variables
Add these as **Runtime** environment variables in Coolify:
| Variable | Description | Example |
| ------------------ | ----------------------------------------- | --------------------------------------------------------------- |
| `FLASK_SECRET_KEY` | Flask session cookie signing key | Generate with `openssl rand -hex 32` |
| `BACKEND_URL` | Internal URL to reach the backend service | `http://localhost:12015` (or use Coolify's internal networking) |
## Step 3: Configure Reverse Proxy
Coolify provides a built-in reverse proxy. Configure routing rules:
### Backend Proxy Rules
- **Domain**: `api.ai.allucanget.biz` (or subdomain of your choice)
- **Port**: `12015`
- **Path**: `/api/*` → forward to backend
### Frontend Proxy Rules
- **Domain**: `ai.allucanget.biz`
- **Port**: `12016`
- **Path**: `/` → forward to frontend
### Nginx Configuration (Optional)
If you need custom Nginx configuration, create `nginx/coolify.conf`:
```nginx
# Reverse proxy configuration for Coolify
# This file is for reference — Coolify's built-in proxy handles routing
# Backend API proxy
location /api/ {
proxy_pass http://backend:12015;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Frontend proxy
location / {
proxy_pass http://frontend:12016;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
```
## Step 4: SSL/TLS
Enable HTTPS in Coolify for both services:
1. Go to each service's settings
2. Enable **Auto HTTPS** (Let's Encrypt)
3. Configure domain names
4. Coolify automatically handles certificate renewal
## Step 5: Persistent Storage (Optional)
If you want to persist DuckDB data:
1. In Coolify, go to the **Backend** service
2. Navigate to **Persistent Storage**
3. Add a volume mount:
- **Host Path**: `/data` (or any path on the host)
- **Container Path**: `/app/data`
- **Type**: `Bind Mount` or `Volume`
## Troubleshooting
### Docker Compose deployment fails in Coolify
- Verify Coolify uses `docker-compose.coolify.yml`, not local `docker-compose.yml`
- Verify public domain points to `frontend` service on port `12016`
- Do not add `nginx` to the Coolify stack — bind-mounting a local config file will fail since the file doesn't exist on the Coolify server
### Backend healthcheck stays unhealthy
- Check backend logs in Coolify
- Verify `OPENROUTER_API_KEY` and `JWT_SECRET` are set
- Verify volume mount at `/app/data` is writable
### Backend won't start
- Check that `OPENROUTER_API_KEY` is set
- Verify `JWT_SECRET` is a sufficiently long random string
- Check logs in Coolify's **Logs** tab
### Frontend can't reach backend
- Ensure `BACKEND_URL` points to the correct internal URL
- If both services are on the same Coolify server, use `http://localhost:12015`
- Check that the backend service is running and healthy
### CORS errors
- Set `CORS_ORIGINS` to include your frontend domain
- Example: `https://ai.allucanget.biz`
### Nixpacks build fails
- Verify the base directory is correct (`/backend` or `/frontend`)
- Check that `requirements.txt` exists in the base directory
- Review build logs in Coolify
## Environment Variable Summary
All required environment variables:
| Variable | Service | Required |
| -------------------- | -------- | ------------------------------------- |
| `OPENROUTER_API_KEY` | Backend | Yes |
| `JWT_SECRET` | Backend | Yes |
| `APP_URL` | Backend | Yes |
| `APP_NAME` | Backend | No (defaults to "All You Can GET AI") |
| `CORS_ORIGINS` | Backend | Yes |
| `FLASK_SECRET_KEY` | Frontend | Yes |
| `BACKEND_URL` | Frontend | Yes |
## Deployment Checklist
- [ ] Repository pushed to Git
- [ ] For Docker Compose: Coolify resource uses `docker-compose.coolify.yml`
- [ ] For Docker Compose: domain points to `frontend` service on port `12016`
- [ ] Backend service created with correct base directory (`/backend`)
- [ ] Backend environment variables configured
- [ ] Frontend service created with correct base directory (`/frontend`)
- [ ] Frontend environment variables configured
- [ ] SSL certificates enabled
- [ ] Domain names configured
- [ ] Health checks passing
- [ ] Logs reviewed for errors
+9
View File
@@ -469,6 +469,15 @@ def generate_video_status():
return jsonify(resp.json()), resp.status_code return jsonify(resp.json()), resp.status_code
@app.get("/generate/video/<video_id>/status")
@login_required
def generate_video_db_status(video_id: str):
"""Return current DB status for a video job (polled by frontend JS)."""
resp = _api(
"GET", f"/generate/videos/{video_id}", token=session["access_token"])
return jsonify(resp.json()), resp.status_code
# ── Admin ───────────────────────────────────────────────────────────────── # ── Admin ─────────────────────────────────────────────────────────────────
@app.get("/admin") @app.get("/admin")
+11 -9
View File
@@ -63,15 +63,14 @@ document.addEventListener("DOMContentLoaded", () => {
// ── Video status polling ─────────────────────────────── // ── Video status polling ───────────────────────────────
const pollDiv = document.getElementById("video-poll-status"); const pollDiv = document.getElementById("video-poll-status");
if (pollDiv) { if (pollDiv) {
const pollingUrl = pollDiv.dataset.pollingUrl; const videoId = pollDiv.dataset.videoId;
const statusText = document.getElementById("poll-status-text"); const statusText = document.getElementById("poll-status-text");
const videoContainer = document.getElementById("poll-video-container"); const videoContainer = document.getElementById("poll-video-container");
const interval = setInterval(async () => { const interval = setInterval(async () => {
try { try {
const resp = await fetch( const resp = await fetch(
"/generate/video/status?polling_url=" + "/generate/video/" + encodeURIComponent(videoId) + "/status",
encodeURIComponent(pollingUrl),
); );
if (!resp.ok) return; if (!resp.ok) return;
const data = await resp.json(); const data = await resp.json();
@@ -82,7 +81,8 @@ document.addEventListener("DOMContentLoaded", () => {
if (data.status === "completed") { if (data.status === "completed") {
clearInterval(interval); clearInterval(interval);
if (data.video_url && videoContainer) { if (data.video_url) {
if (videoContainer) {
const vid = document.createElement("video"); const vid = document.createElement("video");
vid.src = data.video_url; vid.src = data.video_url;
vid.controls = true; vid.controls = true;
@@ -90,17 +90,19 @@ document.addEventListener("DOMContentLoaded", () => {
videoContainer.appendChild(vid); videoContainer.appendChild(vid);
const msg = pollDiv.querySelector("p"); const msg = pollDiv.querySelector("p");
if (msg) msg.textContent = "Video ready!"; if (msg) msg.textContent = "Video ready!";
} else {
// video_detail page: reload to show the video element
window.location.reload();
} }
} else if (data.status === "failed") { }
} else if (data.status === "failed" || data.status === "cancelled") {
clearInterval(interval); clearInterval(interval);
pollDiv.innerHTML = pollDiv.innerHTML =
'<div class="alert alert-error">Generation failed: ' + '<div class="alert alert-error">Generation failed or was cancelled.</div>';
(data.error || "Unknown error") +
"</div>";
} }
} catch (e) { } catch (e) {
console.error("Video polling error:", e); console.error("Video polling error:", e);
} }
}, 12016); }, 5000);
} }
}); });
+203
View File
@@ -76,5 +76,208 @@
</tbody> </tbody>
</table> </table>
</div> </div>
<!-- ── Video Jobs ──────────────────────────────────────────────── -->
<h2 class="section-title" style="margin-top: 2rem">Video Jobs</h2>
<div
style="
display: flex;
gap: 1rem;
align-items: center;
flex-wrap: wrap;
margin-bottom: 1rem;
"
>
<label for="vj-status-filter" style="font-weight: 600"
>Filter by status:</label
>
<select id="vj-status-filter" class="form-control" style="width: auto">
<option value="">All</option>
<option value="queued">Queued</option>
<option value="processing">Processing</option>
<option value="completed">Completed</option>
<option value="failed">Failed</option>
<option value="cancelled">Cancelled</option>
</select>
<label for="vj-sort" style="font-weight: 600">Sort:</label>
<select id="vj-sort" class="form-control" style="width: auto">
<option value="created_desc">Created (newest first)</option>
<option value="created_asc">Created (oldest first)</option>
<option value="updated_desc">Updated (newest first)</option>
<option value="status_asc">Status (A–Z)</option>
<option value="model_asc">Model (A–Z)</option>
</select>
<button id="vj-refresh" class="btn btn-sm">Refresh</button>
<span
id="vj-count"
style="color: var(--text-muted, #888); font-size: 0.9em"
></span>
</div> </div>
<div class="table-wrap">
<table id="vj-table">
<thead>
<tr>
<th>User</th>
<th>Status</th>
<th>Model</th>
<th>Prompt</th>
<th>Created</th>
<th>Updated</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="vj-tbody">
<tr>
<td colspan="7" class="text-muted">Loading…</td>
</tr>
</tbody>
</table>
</div>
</div>
<script>
(function () {
const BACKEND = "{{ config['BACKEND_URL'] }}";
const TOKEN = "{{ session['access_token'] }}";
const headers = { Authorization: "Bearer " + TOKEN };
let allJobs = [];
async function loadJobs() {
document.getElementById("vj-tbody").innerHTML =
'<tr><td colspan="7" class="text-muted">Loading…</td></tr>';
try {
const r = await fetch(BACKEND + "/admin/videos", { headers });
if (!r.ok) throw new Error(await r.text());
allJobs = await r.json();
renderJobs();
} catch (e) {
document.getElementById("vj-tbody").innerHTML =
`<tr><td colspan="7" style="color:red;">Error: ${e.message}</td></tr>`;
}
}
function renderJobs() {
const statusFilter = document.getElementById("vj-status-filter").value;
const sort = document.getElementById("vj-sort").value;
let jobs = statusFilter
? allJobs.filter((j) => j.status === statusFilter)
: [...allJobs];
jobs.sort((a, b) => {
if (sort === "created_asc")
return new Date(a.created_at) - new Date(b.created_at);
if (sort === "updated_desc")
return new Date(b.updated_at) - new Date(a.updated_at);
if (sort === "status_asc") return a.status.localeCompare(b.status);
if (sort === "model_asc") return a.model_id.localeCompare(b.model_id);
return new Date(b.created_at) - new Date(a.created_at); // created_desc default
});
document.getElementById("vj-count").textContent =
`${jobs.length} job${jobs.length !== 1 ? "s" : ""}`;
const tbody = document.getElementById("vj-tbody");
if (jobs.length === 0) {
tbody.innerHTML =
'<tr><td colspan="7" class="text-muted">No jobs found.</td></tr>';
return;
}
const statusColor = {
completed: "color:var(--success-color,#4caf50)",
failed: "color:var(--danger-color,#e53935)",
cancelled: "color:var(--danger-color,#e53935)",
processing: "color:var(--warning-color,#fb8c00)",
queued: "color:var(--warning-color,#fb8c00)",
};
tbody.innerHTML = jobs
.map((job) => {
const sc = statusColor[job.status] || "";
const canRetry =
job.status === "failed" || job.status === "cancelled";
const canCancel =
job.status === "queued" || job.status === "processing";
const actions = [
canRetry
? `<button class="btn btn-sm vj-retry" data-id="${job.id}">Retry</button>`
: "",
canCancel
? `<button class="btn btn-sm vj-cancel" data-id="${job.id}">Cancel</button>`
: "",
`<button class="btn btn-sm btn-danger vj-delete" data-id="${job.id}">Delete</button>`,
].join(" ");
const prompt =
job.prompt.length > 60 ? job.prompt.slice(0, 57) + "…" : job.prompt;
const created = job.created_at
? new Date(job.created_at).toLocaleString()
: "—";
const updated = job.updated_at
? new Date(job.updated_at).toLocaleString()
: "—";
return `<tr>
<td>${job.user_email || "—"}</td>
<td style="${sc};font-weight:600;">${job.status}</td>
<td style="font-size:.85em;">${job.model_id}</td>
<td title="${job.prompt.replace(/"/g, "&quot;")}">${prompt}</td>
<td style="white-space:nowrap;">${created}</td>
<td style="white-space:nowrap;">${updated}</td>
<td style="white-space:nowrap;">${actions}</td>
</tr>`;
})
.join("");
}
async function apiPost(path) {
const r = await fetch(BACKEND + path, { method: "POST", headers });
if (!r.ok) {
const d = await r.json().catch(() => ({}));
throw new Error(d.detail || r.statusText);
}
return r.json();
}
async function apiDelete(path) {
const r = await fetch(BACKEND + path, { method: "DELETE", headers });
if (!r.ok) {
const d = await r.json().catch(() => ({}));
throw new Error(d.detail || r.statusText);
}
return r.json();
}
document
.getElementById("vj-tbody")
.addEventListener("click", async function (e) {
const btn = e.target.closest("button");
if (!btn) return;
const id = btn.dataset.id;
try {
if (btn.classList.contains("vj-retry"))
await apiPost(`/admin/videos/${id}/retry`);
if (btn.classList.contains("vj-cancel"))
await apiPost(`/admin/videos/${id}/cancel`);
if (btn.classList.contains("vj-delete")) {
if (!confirm("Permanently delete this video job?")) return;
await apiDelete(`/admin/videos/${id}`);
}
await loadJobs();
} catch (err) {
alert("Error: " + err.message);
}
});
document
.getElementById("vj-status-filter")
.addEventListener("change", renderJobs);
document.getElementById("vj-sort").addEventListener("change", renderJobs);
document.getElementById("vj-refresh").addEventListener("click", loadJobs);
loadJobs();
})();
</script>
{% endblock %} {% endblock %}
+3 -3
View File
@@ -192,8 +192,7 @@ content %}
<div id="loading-indicator" class="flex justify-center py-8 hidden"> <div id="loading-indicator" class="flex justify-center py-8 hidden">
<div class="spinner"></div> <div class="spinner"></div>
</div> </div>
{% endblock %} {% block scripts %}
{% block scripts %}
<script> <script>
document.addEventListener("DOMContentLoaded", function () { document.addEventListener("DOMContentLoaded", function () {
const galleryContainers = document.querySelectorAll(".grid[data-grid]"); const galleryContainers = document.querySelectorAll(".grid[data-grid]");
@@ -219,7 +218,8 @@ content %}
if (scrollPosition >= bottomThreshold) { if (scrollPosition >= bottomThreshold) {
isLoading = true; isLoading = true;
loadingIndicator.classList.remove("hidden"); loadingIndicator.classList.remove("hidden");
// TODO: Implement actual fetching of next page of results and appending to the correct grid(s)
// For demo purposes, we'll just simulate a delay and then hide the loading indicator
// Simulate API call for next page // Simulate API call for next page
// In real implementation, replace with actual backend fetch // In real implementation, replace with actual backend fetch
setTimeout(() => { setTimeout(() => {
+3 -3
View File
@@ -155,9 +155,9 @@ AI{% endblock %} {% block content %}
{% endif %} {% if result %} {% endif %} {% if result %}
<div class="result"> <div class="result">
<h2>Video job</h2> <h2>Video job</h2>
<p>Job ID: <code>{{ result.id }}</code></p> <p>Job ID: <code>{{ result.db_id or result.id }}</code></p>
{% if result.status in ('queued', 'processing') and result.polling_url %} {% if result.status in ('queued', 'processing') and result.db_id %}
<div id="video-poll-status" data-polling-url="{{ result.polling_url }}"> <div id="video-poll-status" data-video-id="{{ result.db_id }}">
<p> <p>
<span id="poll-status-text" <span id="poll-status-text"
>Status: <strong>{{ result.status }}</strong></span >Status: <strong>{{ result.status }}</strong></span
+2 -2
View File
@@ -12,11 +12,11 @@ block content %}
<div class="bg-gray-800 rounded-lg shadow-lg overflow-hidden"> <div class="bg-gray-800 rounded-lg shadow-lg overflow-hidden">
{% if video.status == 'completed' and video.video_url %} {% if video.status == 'completed' and video.video_url %}
<video src="{{ video.video_url }}" controls class="w-full"></video> <video src="{{ video.video_url }}" controls class="w-full"></video>
{% elif video.status in ('queued', 'processing') and video.polling_url %} {% elif video.status in ('queued', 'processing') %}
<div <div
class="w-full bg-black aspect-video flex flex-col items-center justify-center p-6 text-center" class="w-full bg-black aspect-video flex flex-col items-center justify-center p-6 text-center"
id="video-poll-status" id="video-poll-status"
data-polling-url="{{ video.polling_url }}" data-video-id="{{ video.id }}"
> >
<p class="text-xl font-semibold"> <p class="text-xl font-semibold">
Status: <strong id="poll-status-text">{{ video.status }}</strong> Status: <strong id="poll-status-text">{{ video.status }}</strong>