Compare commits
2 Commits
58c2cb4490
...
e5b413c79d
| Author | SHA1 | Date | |
|---|---|---|---|
| e5b413c79d | |||
| 1c96ae17fc |
@@ -0,0 +1,13 @@
|
|||||||
|
# Nixpacks configuration for the FastAPI backend
|
||||||
|
|
||||||
|
[phases.setup]
|
||||||
|
nixpkgsArchive = "88a9d1386465831607986442fd9c8c0e7a1b2f5"
|
||||||
|
aptPkgs = ["git"]
|
||||||
|
|
||||||
|
[phases.install]
|
||||||
|
# Nixpacks auto-detects Python and runs pip install -r requirements.txt
|
||||||
|
|
||||||
|
[build]
|
||||||
|
|
||||||
|
[deploy]
|
||||||
|
startCommand = "uvicorn backend.app.main:app --host 0.0.0.0 --port 8000"
|
||||||
@@ -76,13 +76,18 @@ Operational endpoints for application management.
|
|||||||
Model listing and multi-modal generation via openrouter.ai.
|
Model listing and multi-modal generation via openrouter.ai.
|
||||||
|
|
||||||
| Method | Path | Auth required | Description |
|
| Method | Path | Auth required | Description |
|
||||||
| ------ | ---------------------------- | ------------- | ------------------------------------------------------ |
|
| ------ | ---------------------------- | ------------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||||
| GET | `/ai/models` | ✓ | List available OpenRouter models |
|
| GET | `/ai/models` | ✓ | List available OpenRouter models |
|
||||||
| POST | `/ai/chat` | ✓ | Multi-turn chat completion |
|
| POST | `/ai/chat` | ✓ | Multi-turn chat completion |
|
||||||
| POST | `/generate/text` | ✓ | Single-prompt text generation (optional system prompt) |
|
| POST | `/generate/text` | ✓ | Single-prompt text generation (optional system prompt) |
|
||||||
| POST | `/generate/image` | ✓ | Text-to-image generation |
|
| POST | `/generate/image` | ✓ | Text-to-image (DALL-E via `/images/generations` or FLUX/GPT-5 Image Mini via `/chat/completions` with `modalities`) |
|
||||||
| POST | `/generate/video` | ✓ | Text-to-video generation |
|
| POST | `/generate/video` | ✓ | Text-to-video (Sora 2 Pro, Veo 3.1 Fast) — returns `polling_url` |
|
||||||
| POST | `/generate/video/from-image` | ✓ | Image-to-video generation |
|
| POST | `/generate/video/from-image` | ✓ | Image-to-video — returns `polling_url` |
|
||||||
|
| GET | `/generate/video/status` | ✓ | Poll video generation status via `polling_url` |
|
||||||
|
|
||||||
|
**Video generation flow:** The `/generate/video` and `/generate/video/from-image` endpoints submit a job to OpenRouter's `/api/v1/videos` endpoint and return immediately with `status: "queued"` and a `polling_url`. Clients poll `/generate/video/status?polling_url=...` every 5 seconds until `status` is `"completed"` (returns `unsigned_urls`) or `"failed"`.
|
||||||
|
|
||||||
|
**Image generation routing:** The router auto-detects the model type — models containing `"flux"` or `"gpt-5-image-mini"` are routed to `/chat/completions` with `modalities: ["image"]`, while others (e.g. DALL-E 3) use the legacy `/images/generations` endpoint.
|
||||||
|
|
||||||
### White Box DB Service (`db.py`)
|
### White Box DB Service (`db.py`)
|
||||||
|
|
||||||
|
|||||||
+36
-16
@@ -28,30 +28,42 @@ Describes concrete behavior and interactions of the system's building blocks in
|
|||||||
|
|
||||||
## Scenario 3: Image Generation
|
## Scenario 3: Image Generation
|
||||||
|
|
||||||
1. User submits image generation form
|
1. User submits image generation form with prompt, model, size, aspect ratio, and resolution
|
||||||
2. Flask POSTs to `POST /generate/image`
|
2. Flask POSTs to `POST /generate/image` with JWT header
|
||||||
3. AI Service calls openrouter.ai image model
|
3. Router auto-detects model type:
|
||||||
4. Image URL returned to Flask
|
- **FLUX / GPT-5 Image Mini**: calls `/chat/completions` with `modalities: ["image"]` and `image_config`
|
||||||
5. Flask renders page with generated image
|
- **DALL-E 3**: calls `/images/generations` with `size` and `n`
|
||||||
|
4. Image URL (base64 data URL or hosted URL) returned to Flask
|
||||||
|
5. Flask renders page with generated image(s)
|
||||||
|
|
||||||
|
## Scenario 3a: Image Generation with Aspect Ratio & Resolution
|
||||||
|
|
||||||
|
1. User selects aspect ratio (e.g. `16:9`) and resolution (`2K`) on the image generation form
|
||||||
|
2. Flask POSTs `aspect_ratio` and `image_size` to `POST /generate/image`
|
||||||
|
3. Backend passes these as `image_config` to the chat completions endpoint (for FLUX/GPT-5 Image Mini)
|
||||||
|
4. Generated image respects the requested aspect ratio and resolution
|
||||||
|
|
||||||
## Scenario 4: Video Generation (Text-to-Video)
|
## Scenario 4: Video Generation (Text-to-Video)
|
||||||
|
|
||||||
1. User submits video generation form with prompt and model selection
|
1. User submits video generation form with prompt, model, aspect ratio, resolution, and duration
|
||||||
2. Flask POSTs to `POST /generate/video` with JWT header
|
2. Flask POSTs to `POST /generate/video` with JWT header
|
||||||
3. Auth Service validates JWT
|
3. Auth Service validates JWT
|
||||||
4. AI Service calls OpenRouter `/video/generations`
|
4. Backend calls OpenRouter `POST /api/v1/videos` with model, prompt, aspect_ratio, resolution, duration_seconds
|
||||||
5. OpenRouter returns a job response (`status: "queued"` or `"completed"`)
|
5. OpenRouter returns `{"id": "...", "polling_url": "..."}` with `status: "queued"`
|
||||||
6. FastAPI returns `VideoResponse` to Flask
|
6. FastAPI returns `VideoResponse` with `polling_url` to Flask
|
||||||
7. Flask renders result page; if status is `queued`, the UI may poll or notify asynchronously
|
7. Flask renders result page with polling UI
|
||||||
|
8. Frontend JavaScript polls `GET /generate/video/status?polling_url=...` every 5 seconds
|
||||||
|
9. When `status` becomes `"completed"`, the response includes `unsigned_urls` — the video is displayed in a `<video>` element
|
||||||
|
10. If `status` becomes `"failed"`, an error message is shown
|
||||||
|
|
||||||
## Scenario 5: Image-to-Video Generation
|
## Scenario 4a: Video Generation (Image-to-Video)
|
||||||
|
|
||||||
1. User uploads or provides an image URL and a text prompt
|
1. User provides an image URL, motion prompt, model, aspect ratio, resolution, and duration
|
||||||
2. Flask POSTs to `POST /generate/video/from-image` with JWT header
|
2. Flask POSTs to `POST /generate/video/from-image` with JWT header
|
||||||
3. AI Service calls OpenRouter `/video/generations/from-image`
|
3. Backend calls OpenRouter `POST /api/v1/videos` with `image_url`, prompt, and parameters
|
||||||
4. Returns `VideoResponse` with `video_url` when completed
|
4. Same polling flow as Scenario 4
|
||||||
|
|
||||||
## Scenario 6: Token Refresh
|
## Scenario 5: Token Refresh
|
||||||
|
|
||||||
1. Access token expires (TTL 15 min)
|
1. Access token expires (TTL 15 min)
|
||||||
2. Client POSTs current refresh token to `POST /auth/refresh`
|
2. Client POSTs current refresh token to `POST /auth/refresh`
|
||||||
@@ -59,9 +71,17 @@ Describes concrete behavior and interactions of the system's building blocks in
|
|||||||
4. Old JTI is revoked; new JTI inserted into `refresh_tokens`
|
4. Old JTI is revoked; new JTI inserted into `refresh_tokens`
|
||||||
5. New access token + new refresh token returned to client
|
5. New access token + new refresh token returned to client
|
||||||
|
|
||||||
## Scenario 7: Admin User Management
|
## Scenario 6: Admin User Management
|
||||||
|
|
||||||
1. Admin logs in and receives access token with `role: admin`
|
1. Admin logs in and receives access token with `role: admin`
|
||||||
2. Admin GETs `/admin/stats` to view user and token counts
|
2. Admin GETs `/admin/stats` to view user and token counts
|
||||||
3. Admin DELETEs `/users/{id}` to remove a user — refresh tokens for that user are cascade-deleted
|
3. Admin DELETEs `/users/{id}` to remove a user — refresh tokens for that user are cascade-deleted
|
||||||
4. Admin PUTs `/users/{id}/role` to promote a user to admin or demote to user
|
4. Admin PUTs `/users/{id}/role` to promote a user to admin or demote to user
|
||||||
|
|
||||||
|
## Scenario 7: User Profile Update
|
||||||
|
|
||||||
|
1. Authenticated user navigates to `/users/profile`
|
||||||
|
2. User submits updated email and/or new password
|
||||||
|
3. Flask POSTs to `PUT /users/me` with JWT header
|
||||||
|
4. Auth Service validates credentials and updates user record in DuckDB
|
||||||
|
5. Session `user_email` is updated; user sees success message
|
||||||
|
|||||||
@@ -72,3 +72,25 @@ Refresh tokens store a JTI (JWT ID) UUID in the `refresh_tokens` table. On each
|
|||||||
### Future: AI Generation History
|
### Future: AI Generation History
|
||||||
|
|
||||||
AI generation metadata (model, prompt, cost, result URLs) can be stored as JSON columns in a future `generation_history` table in DuckDB, enabling per-user analytics and usage dashboards at zero extra infrastructure cost.
|
AI generation metadata (model, prompt, cost, result URLs) can be stored as JSON columns in a future `generation_history` table in DuckDB, enabling per-user analytics and usage dashboards at zero extra infrastructure cost.
|
||||||
|
|
||||||
|
## OpenRouter API Integration
|
||||||
|
|
||||||
|
### Image Generation
|
||||||
|
|
||||||
|
Image generation uses two different OpenRouter endpoints depending on the model:
|
||||||
|
|
||||||
|
- **Legacy endpoint** (`/images/generations`): Used by DALL-E 3 and similar models. Returns `data[].url` and `data[].b64_json`.
|
||||||
|
- **Chat completions** (`/chat/completions` with `modalities: ["image"]`): Used by FLUX.2 Klein 4B and GPT-5 Image Mini. Returns `choices[0].message.images[].image_url.url` as base64 data URLs.
|
||||||
|
|
||||||
|
The router auto-detects the model type and routes accordingly. Image configuration (`aspect_ratio`, `image_size`) is passed via `image_config` for chat-based models.
|
||||||
|
|
||||||
|
### Video Generation
|
||||||
|
|
||||||
|
Video generation uses OpenRouter's `/api/v1/videos` endpoint with a **submit-and-poll** pattern:
|
||||||
|
|
||||||
|
1. `POST /api/v1/videos` with `model`, `prompt`, `aspect_ratio`, `resolution`, `duration_seconds`
|
||||||
|
2. Response: `{"id": "job_id", "polling_url": "https://..."}` with `status: "queued"`
|
||||||
|
3. Poll `GET polling_url` every 5 seconds until `status` is `"completed"` or `"failed"`
|
||||||
|
4. Completed response includes `unsigned_urls: [str]` array with video download URLs
|
||||||
|
|
||||||
|
Supported models: `openai/sora-2-pro`, `google/veo-3.1-fast`. Both text-to-video and image-to-video use the same `/api/v1/videos` endpoint (image-to-video includes `image_url` in the request body).
|
||||||
|
|||||||
@@ -63,3 +63,51 @@ Refer to section 4 (Solution Strategy) where the most important decisions are al
|
|||||||
**Decision:** Route all AI generation requests through the [OpenRouter](https://openrouter.ai) API, which exposes an OpenAI-compatible REST interface for hundreds of models.
|
**Decision:** Route all AI generation requests through the [OpenRouter](https://openrouter.ai) API, which exposes an OpenAI-compatible REST interface for hundreds of models.
|
||||||
|
|
||||||
**Consequences:** Single API key and base URL for all model providers. Model switching requires only a change to the `model` field in the request payload. If OpenRouter is unavailable, all generation endpoints return `502 Bad Gateway`. Pricing and rate limits are governed by OpenRouter's policies per model.
|
**Consequences:** Single API key and base URL for all model providers. Model switching requires only a change to the `model` field in the request payload. If OpenRouter is unavailable, all generation endpoints return `502 Bad Gateway`. Pricing and rate limits are governed by OpenRouter's policies per model.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ADR-006: Use submit-and-poll pattern for video generation
|
||||||
|
|
||||||
|
**Status:** accepted
|
||||||
|
|
||||||
|
**Context:** OpenRouter's video generation models (Sora 2 Pro, Veo 3.1 Fast) do not return video URLs immediately. Video generation is a long-running operation (typically 30-120 seconds) that requires polling.
|
||||||
|
|
||||||
|
**Decision:** Use the `/api/v1/videos` endpoint with a two-step pattern: (1) `POST` to submit the job and receive a `polling_url`, (2) `GET` the `polling_url` every 5 seconds until `status` is `"completed"` or `"failed"`. The Flask frontend proxies polling requests via `GET /generate/video/status?polling_url=...` and the frontend JavaScript polls this endpoint automatically.
|
||||||
|
|
||||||
|
**Consequences:** The video generation endpoint returns immediately with `status: "queued"` and a `polling_url`. The frontend displays a "Processing..." message and polls for updates. When complete, the video is displayed in a `<video>` element. This adds complexity to the frontend but is necessary for long-running operations. If OpenRouter's polling endpoint is unavailable, the frontend shows an error after a timeout.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ADR-007: Auto-detect image generation model type
|
||||||
|
|
||||||
|
**Status:** accepted
|
||||||
|
|
||||||
|
**Context:** OpenRouter supports image generation through two different endpoints: the legacy `/images/generations` endpoint (DALL-E 3) and the chat completions endpoint with `modalities: ["image"]` (FLUX.2 Klein 4B, GPT-5 Image Mini). These endpoints have different request/response formats.
|
||||||
|
|
||||||
|
**Decision:** The `/generate/image` router auto-detects the model type by checking if the model slug contains `"flux"` or `"gpt-5-image-mini"`. If so, it routes to `/chat/completions` with `modalities: ["image"]` and `image_config` (aspect_ratio, image_size). Otherwise, it uses `/images/generations` with `size` and `n`.
|
||||||
|
|
||||||
|
**Consequences:** Users can specify any image generation model in the form without needing to know which endpoint it uses. The router handles the routing transparently. Adding new image models requires only updating the detection logic if they use a different endpoint.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ADR-008: Flask session-based auth with role caching
|
||||||
|
|
||||||
|
**Status:** accepted
|
||||||
|
|
||||||
|
**Context:** The Flask frontend needs to know the user's authentication state and role for route protection (`@login_required`, `@admin_required`) without making an extra API call on every request.
|
||||||
|
|
||||||
|
**Decision:** Store the JWT access token, refresh token, user email, and user role in the Flask server-side session cookie after login. The `@login_required` decorator checks for `access_token` in the session. The `@admin_required` decorator checks `session["user_role"] == "admin"`. This avoids an extra API call to `/users/me` on every request.
|
||||||
|
|
||||||
|
**Consequences:** The user role is cached in the session and may become stale if an admin changes a user's role while the user is logged in. The user must log out and log back in to see the updated role. This is acceptable for the expected usage pattern. The session cookie is signed (Flask's default) to prevent tampering.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ADR-009: Separate generation pages in frontend
|
||||||
|
|
||||||
|
**Status:** accepted
|
||||||
|
|
||||||
|
**Context:** The original `/generate` page handled text, image, and video generation in a single form, which became unwieldy as more generation types were added.
|
||||||
|
|
||||||
|
**Decision:** Create separate Flask routes and Jinja2 templates for each generation type: `/generate/text`, `/generate/image`, `/generate/video`. The `/generate` route redirects to `/generate/text`. The navigation bar includes a "Generate" dropdown with links to each sub-page. The video page uses tabs for text-to-video and image-to-video.
|
||||||
|
|
||||||
|
**Consequences:** Each generation type has its own URL, making it bookmarkable and shareable. The navigation is clearer with a dropdown menu. Adding new generation types (e.g., audio) follows the same pattern. The `/generate` redirect provides a sensible default entry point.
|
||||||
|
|||||||
@@ -0,0 +1,197 @@
|
|||||||
|
# Coolify Deployment Guide
|
||||||
|
|
||||||
|
This guide covers deploying `ai.allucanget.biz` using [Coolify](https://coolify.io) from the repository `https://git.allucanget.biz/allucanget/ai.allucanget.biz.git`.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
The application consists of two Python services:
|
||||||
|
|
||||||
|
| Service | Framework | Port | Description |
|
||||||
|
| -------- | ----------------- | ---- | ------------------------------------------ |
|
||||||
|
| Backend | FastAPI + uvicorn | 8000 | REST API, auth, AI generation, DuckDB |
|
||||||
|
| Frontend | Flask + gunicorn | 5000 | SSR web UI, session auth, proxy to backend |
|
||||||
|
|
||||||
|
Coolify's built-in reverse proxy routes traffic:
|
||||||
|
|
||||||
|
- `/api/*` → Backend (port 8000)
|
||||||
|
- `/` → Frontend (port 5000)
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- A Coolify instance (self-hosted or Cloud)
|
||||||
|
- Git repository pushed to `https://git.allucanget.biz/allucanget/ai.allucanget.biz.git`
|
||||||
|
- Domain configured to point to your Coolify server
|
||||||
|
|
||||||
|
## Step 1: Create Backend Service
|
||||||
|
|
||||||
|
1. In Coolify, click **Add Resource** → **Deploy a new resource** → **Git**
|
||||||
|
2. Connect your Git repository (`git.allucanget.biz`)
|
||||||
|
3. Select the `ai.allucanget.biz` repository
|
||||||
|
4. Choose the `main` branch
|
||||||
|
5. Set **Build Pack** to `nixpacks`
|
||||||
|
6. Set **Base Directory** to `/backend`
|
||||||
|
7. Set **Ports Exposed** to `8000`
|
||||||
|
8. Set **Start Command** to:
|
||||||
|
```txt
|
||||||
|
uvicorn backend.app.main:app --host 0.0.0.0 --port 8000
|
||||||
|
```
|
||||||
|
9. Click **Create Resource**
|
||||||
|
|
||||||
|
### Backend Environment Variables
|
||||||
|
|
||||||
|
Add these as **Runtime** environment variables in Coolify:
|
||||||
|
|
||||||
|
| Variable | Description | Example |
|
||||||
|
| -------------------- | ------------------------------------ | ------------------------------------ |
|
||||||
|
| `OPENROUTER_API_KEY` | OpenRouter API key for AI generation | `sk-or-v1-...` |
|
||||||
|
| `JWT_SECRET` | Secret key for JWT token signing | Generate with `openssl rand -hex 32` |
|
||||||
|
| `APP_URL` | Public URL of the backend | `https://api.ai.allucanget.biz` |
|
||||||
|
| `APP_NAME` | Application name | `AI Allucanget` |
|
||||||
|
| `CORS_ORIGINS` | Comma-separated allowed origins | `https://ai.allucanget.biz` |
|
||||||
|
|
||||||
|
## Step 2: Create Frontend Service
|
||||||
|
|
||||||
|
1. In Coolify, click **Add Resource** → **Deploy a new resource** → **Git**
|
||||||
|
2. Select the same repository
|
||||||
|
3. Choose the `main` branch
|
||||||
|
4. Set **Build Pack** to `nixpacks`
|
||||||
|
5. Set **Base Directory** to `/frontend`
|
||||||
|
6. Set **Ports Exposed** to `5000`
|
||||||
|
7. Set **Start Command** to:
|
||||||
|
```txt
|
||||||
|
gunicorn frontend.app.main:app --bind 0.0.0.0:5000 --workers 2 --timeout 120
|
||||||
|
```
|
||||||
|
8. Click **Create Resource**
|
||||||
|
|
||||||
|
### Frontend Environment Variables
|
||||||
|
|
||||||
|
Add these as **Runtime** environment variables in Coolify:
|
||||||
|
|
||||||
|
| Variable | Description | Example |
|
||||||
|
| ------------------ | ----------------------------------------- | -------------------------------------------------------------- |
|
||||||
|
| `FLASK_SECRET_KEY` | Flask session cookie signing key | Generate with `openssl rand -hex 32` |
|
||||||
|
| `BACKEND_URL` | Internal URL to reach the backend service | `http://<backend-service-name>:8000` via Coolify's internal networking (use `http://localhost:8000` only if both services share the host network) |
|
||||||
|
|
||||||
|
## Step 3: Configure Reverse Proxy
|
||||||
|
|
||||||
|
Coolify provides a built-in reverse proxy. Configure routing rules:
|
||||||
|
|
||||||
|
### Backend Proxy Rules
|
||||||
|
|
||||||
|
- **Domain**: `api.ai.allucanget.biz` (or subdomain of your choice)
|
||||||
|
- **Port**: `8000`
|
||||||
|
- **Path**: `/api/*` → forward to backend
|
||||||
|
|
||||||
|
### Frontend Proxy Rules
|
||||||
|
|
||||||
|
- **Domain**: `ai.allucanget.biz`
|
||||||
|
- **Port**: `5000`
|
||||||
|
- **Path**: `/` → forward to frontend
|
||||||
|
|
||||||
|
### Nginx Configuration (Optional)
|
||||||
|
|
||||||
|
If you need custom Nginx configuration, create `nginx/coolify.conf`:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# Reverse proxy configuration for Coolify
|
||||||
|
# This file is for reference — Coolify's built-in proxy handles routing
|
||||||
|
|
||||||
|
# Backend API proxy
|
||||||
|
location /api/ {
|
||||||
|
proxy_pass http://backend:8000;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Frontend proxy
|
||||||
|
location / {
|
||||||
|
proxy_pass http://frontend:5000;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Step 4: SSL/TLS
|
||||||
|
|
||||||
|
Enable HTTPS in Coolify for both services:
|
||||||
|
|
||||||
|
1. Go to each service's settings
|
||||||
|
2. Enable **Auto HTTPS** (Let's Encrypt)
|
||||||
|
3. Configure domain names
|
||||||
|
4. Coolify automatically handles certificate renewal
|
||||||
|
|
||||||
|
## Step 5: Persistent Storage (Optional)
|
||||||
|
|
||||||
|
If you want to persist DuckDB data:
|
||||||
|
|
||||||
|
1. In Coolify, go to the **Backend** service
|
||||||
|
2. Navigate to **Persistent Storage**
|
||||||
|
3. Add a volume mount:
|
||||||
|
- **Host Path**: `/data` (or any path on the host)
|
||||||
|
- **Container Path**: `/app/data`
|
||||||
|
- **Type**: `Bind Mount` or `Volume`
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Backend won't start
|
||||||
|
|
||||||
|
- Check that `OPENROUTER_API_KEY` is set
|
||||||
|
- Verify `JWT_SECRET` is a sufficiently long random string
|
||||||
|
- Check logs in Coolify's **Logs** tab
|
||||||
|
|
||||||
|
### Frontend can't reach backend
|
||||||
|
|
||||||
|
- Ensure `BACKEND_URL` points to the correct internal URL
|
||||||
|
- If both services run as containers on the same Coolify server, use the backend's internal service name (e.g. `http://<backend-service-name>:8000`) — `http://localhost:8000` only resolves to the backend when both services share the host network
|
||||||
|
- Check that the backend service is running and healthy
|
||||||
|
|
||||||
|
### CORS errors
|
||||||
|
|
||||||
|
- Set `CORS_ORIGINS` to include your frontend domain
|
||||||
|
- Example: `https://ai.allucanget.biz`
|
||||||
|
|
||||||
|
### Nixpacks build fails
|
||||||
|
|
||||||
|
- Verify the base directory is correct (`/backend` or `/frontend`)
|
||||||
|
- Check that `requirements.txt` exists in the base directory
|
||||||
|
- Review build logs in Coolify
|
||||||
|
|
||||||
|
## Environment Variable Summary
|
||||||
|
|
||||||
|
All required environment variables:
|
||||||
|
|
||||||
|
| Variable | Service | Required |
|
||||||
|
| -------------------- | -------- | -------------------------------- |
|
||||||
|
| `OPENROUTER_API_KEY` | Backend | Yes |
|
||||||
|
| `JWT_SECRET` | Backend | Yes |
|
||||||
|
| `APP_URL` | Backend | Yes |
|
||||||
|
| `APP_NAME` | Backend | No (defaults to "AI Allucanget") |
|
||||||
|
| `CORS_ORIGINS` | Backend | Yes |
|
||||||
|
| `FLASK_SECRET_KEY` | Frontend | Yes |
|
||||||
|
| `BACKEND_URL` | Frontend | Yes |
|
||||||
|
|
||||||
|
## Deployment Checklist
|
||||||
|
|
||||||
|
- [ ] Repository pushed to Git
|
||||||
|
- [ ] Backend service created with correct base directory (`/backend`)
|
||||||
|
- [ ] Backend environment variables configured
|
||||||
|
- [ ] Frontend service created with correct base directory (`/frontend`)
|
||||||
|
- [ ] Frontend environment variables configured
|
||||||
|
- [ ] SSL certificates enabled
|
||||||
|
- [ ] Domain names configured
|
||||||
|
- [ ] Health checks passing
|
||||||
|
- [ ] Logs reviewed for errors
|
||||||
|
|
||||||
|
## Nixpacks Configuration
|
||||||
|
|
||||||
|
The project includes Nixpacks configuration files for both services:
|
||||||
|
|
||||||
|
- `nixpacks.toml` — Shared configuration (Python version, packages)
|
||||||
|
- `backend/nixpacks.toml` — Backend-specific (uvicorn, port 8000)
|
||||||
|
- `frontend/nixpacks.toml` — Frontend-specific (gunicorn, port 5000)
|
||||||
|
|
||||||
|
Nixpacks automatically detects Python projects and installs dependencies from `requirements.txt`. No additional configuration is needed for basic deployments.
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
# Nixpacks configuration for the Flask frontend
|
||||||
|
|
||||||
|
[phases.setup]
|
||||||
|
nixpkgsArchive = "88a9d1386465831607986442fd9c8c0e7a1b2f5"
|
||||||
|
aptPkgs = ["git"]
|
||||||
|
|
||||||
|
[phases.install]
|
||||||
|
# Nixpacks auto-detects Python and runs pip install -r requirements.txt
|
||||||
|
|
||||||
|
[build]
|
||||||
|
|
||||||
|
[deploy]
|
||||||
|
startCommand = "gunicorn frontend.app.main:app --bind 0.0.0.0:5000 --workers 2 --timeout 120"
|
||||||
@@ -0,0 +1,73 @@
|
|||||||
|
# Nginx reverse proxy configuration for Coolify deployment
|
||||||
|
# Place this in /etc/nginx/conf.d/ai.allucanget.biz.conf or use Coolify's built-in proxy
|
||||||
|
|
||||||
|
# Backend API proxy
|
||||||
|
upstream backend {
|
||||||
|
server 127.0.0.1:8000;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Frontend proxy
|
||||||
|
upstream frontend {
|
||||||
|
server 127.0.0.1:5000;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name ai.allucanget.biz www.ai.allucanget.biz;
|
||||||
|
|
||||||
|
# Redirect HTTP to HTTPS
|
||||||
|
return 301 https://$host$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
server_name ai.allucanget.biz www.ai.allucanget.biz;
|
||||||
|
|
||||||
|
# SSL configuration (managed by Let's Encrypt / Certbot)
|
||||||
|
ssl_certificate /etc/letsencrypt/live/ai.allucanget.biz/fullchain.pem;
|
||||||
|
ssl_certificate_key /etc/letsencrypt/live/ai.allucanget.biz/privkey.pem;
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||||
|
|
||||||
|
# Security headers
|
||||||
|
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||||
|
add_header X-Content-Type-Options "nosniff" always;
|
||||||
|
add_header X-XSS-Protection "1; mode=block" always;
|
||||||
|
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||||
|
|
||||||
|
# Backend API proxy
|
||||||
|
location /api/ {
|
||||||
|
proxy_pass http://backend;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
# WebSocket support (if needed in future)
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "upgrade";
|
||||||
|
}
|
||||||
|
|
||||||
|
# Frontend proxy
|
||||||
|
location / {
|
||||||
|
proxy_pass http://frontend;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
# Static files caching
|
||||||
|
location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
|
||||||
|
proxy_pass http://frontend;
|
||||||
|
expires 30d;
|
||||||
|
add_header Cache-Control "public, immutable";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Health check endpoint
|
||||||
|
location /health {
|
||||||
|
proxy_pass http://backend;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
# Nixpacks configuration for ai.allucanget.biz
|
||||||
|
# Shared settings for both backend and frontend services
|
||||||
|
|
||||||
|
[phases.setup]
|
||||||
|
nixpkgsArchive = "88a9d1386465831607986442fd9c8c0e7a1b2f5"
|
||||||
|
aptPkgs = ["git"]
|
||||||
|
|
||||||
|
[phases.install]
|
||||||
|
# Nixpacks auto-detects Python and runs pip install -r requirements.txt
|
||||||
|
# No custom commands needed here
|
||||||
|
|
||||||
|
[build]
|
||||||
|
|
||||||
|
[deploy]
|
||||||
Reference in New Issue
Block a user