From e8a86b15e4374962aa021f694daf61a1c12648ab Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 08:54:11 +0100 Subject: [PATCH 01/31] feat: Enhance CI workflows by adding linting step, updating documentation, and configuring development dependencies --- .gitea/workflows/test.yml | 4 +- .prettierrc | 8 + .../07_deployment/07_01_testing_ci.md | 80 ++++----- .../07_03_gitea_action_runner.md | 152 ++++++++++++++++++ docs/architecture/07_deployment_view.md | 40 ++++- docs/quickstart.md | 28 ++-- docs/staging_environment_setup.md | 25 ++- pyproject.toml | 16 ++ requirements-dev.txt | 1 + requirements-test.txt | 3 +- 10 files changed, 279 insertions(+), 78 deletions(-) create mode 100644 .prettierrc create mode 100644 docs/architecture/07_deployment/07_03_gitea_action_runner.md create mode 100644 pyproject.toml create mode 100644 requirements-dev.txt diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 53def95..b10f005 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -20,7 +20,7 @@ jobs: strategy: fail-fast: false matrix: - target: [unit, e2e] + target: [unit, e2e, lint] services: postgres: image: postgres:16-alpine @@ -44,6 +44,8 @@ jobs: run: | if [ "${{ matrix.target }}" = "unit" ]; then pytest tests/unit + elif [ "${{ matrix.target }}" = "lint" ]; then + ruff check . else pytest tests/e2e fi diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000..0ca3806 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,8 @@ +{ + "semi": true, + "singleQuote": true, + "trailingComma": "es5", + "printWidth": 80, + "tabWidth": 2, + "useTabs": false +} diff --git a/docs/architecture/07_deployment/07_01_testing_ci.md b/docs/architecture/07_deployment/07_01_testing_ci.md index 3e8035e..0da13c2 100644 --- a/docs/architecture/07_deployment/07_01_testing_ci.md +++ b/docs/architecture/07_deployment/07_01_testing_ci.md @@ -117,46 +117,6 @@ pytest tests/e2e/ --headed When adding new workflows, mirror this structure to ensure secrets, caching, and deployment steps remain aligned with the production environment. -## CI Owner Coordination Notes - -### Key Findings - -- Self-hosted runner: ASUS System Product Name chassis with AMD Ryzen 7 7700X (8 physical cores / 16 threads) and 63.2 GB usable RAM; `act_runner` configuration not overridden, so only one workflow job runs concurrently today. -- Unit test matrix job: completes 117 pytest cases in roughly 4.1 seconds after Postgres spins up; Docker services consume ~150 MB for `postgres:16-alpine`, with minimal sustained CPU load once tests begin. -- End-to-end matrix job: `pytest tests/e2e` averages 21‑22 seconds of execution, but a cold run downloads ~179 MB of apt packages plus ~470 MB of Playwright browser bundles (Chromium, Firefox, WebKit, FFmpeg), exceeding 650 MB network transfer and adding several gigabytes of disk writes if caches are absent. -- Both jobs reuse existing Python package caches when available; absent a shared cache service, repeated Playwright installs remain the dominant cost driver for cold executions. - -### Open Questions - -- Can we raise the runner concurrency above the default single job, or provision an additional runner, so the test matrix can execute without serializing queued workflows? -- Is there a central cache or artifact service available for Python wheels and Playwright browser bundles to avoid ~650 MB downloads on cold starts? -- Are we permitted to bake Playwright browsers into the base runner image, or should we pursue a shared cache/proxy solution instead? 
- -### Outreach Draft - -```text -Subject: CalMiner CI parallelization support - -Hi , - -We recently updated the CalMiner test workflow to fan out unit and Playwright E2E suites in parallel. While validating the change, we gathered the following: - -- Runner host: ASUS System Product Name with AMD Ryzen 7 7700X (8 cores / 16 threads), ~63 GB RAM, default `act_runner` concurrency (1 job at a time). -- Unit job finishes in ~4.1 s once Postgres is ready; light CPU and network usage. -- E2E job finishes in ~22 s, but a cold run pulls ~179 MB of apt packages plus ~470 MB of Playwright browser payloads (>650 MB download, several GB disk writes) because we do not have a shared cache yet. - -To move forward, could you help with the following? - -1. Confirm whether we can raise the runner concurrency limit or provision an additional runner so parallel jobs do not queue behind one another. -2. Let us know if a central cache (Artifactory, Nexus, etc.) is available for Python wheels and Playwright browser bundles, or if we should consider baking the browsers into the runner image instead. -3. Share any guidance on preferred caching or proxy solutions for large binary installs on self-hosted runners. - -Once we have clarity, we can finalize the parallel rollout and update the documentation accordingly. - -Thanks, - -``` - ## Workflow Optimization Opportunities ### `test.yml` @@ -216,3 +176,43 @@ Thanks, - Benefits: centralizes proxy logic and dependency installs, reduces duplication across matrix jobs, and keeps future lint/type-check jobs lightweight by disabling database setup. - Implementation status: action available at `.gitea/actions/setup-python-env` and consumed by `test.yml`; extend to additional workflows as they adopt the shared routine. - Obsolete steps removed: individual apt proxy, dependency install, Playwright, and database setup commands pruned from `test.yml` once the composite action was integrated. + +## CI Owner Coordination Notes + +### Key Findings + +- Self-hosted runner: ASUS System Product Name chassis with AMD Ryzen 7 7700X (8 physical cores / 16 threads) and 63.2 GB usable RAM; `act_runner` configuration not overridden, so only one workflow job runs concurrently today. +- Unit test matrix job: completes 117 pytest cases in roughly 4.1 seconds after Postgres spins up; Docker services consume ~150 MB for `postgres:16-alpine`, with minimal sustained CPU load once tests begin. +- End-to-end matrix job: `pytest tests/e2e` averages 21‑22 seconds of execution, but a cold run downloads ~179 MB of apt packages plus ~470 MB of Playwright browser bundles (Chromium, Firefox, WebKit, FFmpeg), exceeding 650 MB network transfer and adding several gigabytes of disk writes if caches are absent. +- Both jobs reuse existing Python package caches when available; absent a shared cache service, repeated Playwright installs remain the dominant cost driver for cold executions. + +### Open Questions + +- Can we raise the runner concurrency above the default single job, or provision an additional runner, so the test matrix can execute without serializing queued workflows? +- Is there a central cache or artifact service available for Python wheels and Playwright browser bundles to avoid ~650 MB downloads on cold starts? +- Are we permitted to bake Playwright browsers into the base runner image, or should we pursue a shared cache/proxy solution instead? 
+ +### Outreach Draft + +```text +Subject: CalMiner CI parallelization support + +Hi , + +We recently updated the CalMiner test workflow to fan out unit and Playwright E2E suites in parallel. While validating the change, we gathered the following: + +- Runner host: ASUS System Product Name with AMD Ryzen 7 7700X (8 cores / 16 threads), ~63 GB RAM, default `act_runner` concurrency (1 job at a time). +- Unit job finishes in ~4.1 s once Postgres is ready; light CPU and network usage. +- E2E job finishes in ~22 s, but a cold run pulls ~179 MB of apt packages plus ~470 MB of Playwright browser payloads (>650 MB download, several GB disk writes) because we do not have a shared cache yet. + +To move forward, could you help with the following? + +1. Confirm whether we can raise the runner concurrency limit or provision an additional runner so parallel jobs do not queue behind one another. +2. Let us know if a central cache (Artifactory, Nexus, etc.) is available for Python wheels and Playwright browser bundles, or if we should consider baking the browsers into the runner image instead. +3. Share any guidance on preferred caching or proxy solutions for large binary installs on self-hosted runners. + +Once we have clarity, we can finalize the parallel rollout and update the documentation accordingly. + +Thanks, + +``` diff --git a/docs/architecture/07_deployment/07_03_gitea_action_runner.md b/docs/architecture/07_deployment/07_03_gitea_action_runner.md new file mode 100644 index 0000000..2304d7f --- /dev/null +++ b/docs/architecture/07_deployment/07_03_gitea_action_runner.md @@ -0,0 +1,152 @@ +# Gitea Action Runner Setup + +This guide describes how to provision, configure, and maintain self-hosted runners for CalMiner's Gitea-based CI/CD pipelines. + +## 1. Purpose and Scope + +- Explain the role runners play in executing GitHub Actions–compatible workflows inside our private Gitea instance. +- Define supported environments (Windows hosts running Docker for Linux containers today, Alpine or other Linux variants as future additions). +- Provide repeatable steps so additional runners can be brought online quickly and consistently. + +## 2. Prerequisites + +- **Hardware**: Minimum 8 vCPU, 16 GB RAM, and 50 GB free disk. For Playwright-heavy suites, plan for ≥60 GB free to absorb browser caches. +- **Operating system**: Current runner uses Windows 11 Pro (10.0.26100, 64-bit). Linux instructions mirror the same flow; see section 7 for Alpine specifics. +- **Container engine**: Docker Desktop (Windows) or Docker Engine (Linux) with pull access to `docker.gitea.com/runner-images` and `postgres:16-alpine`. +- **Dependencies**: `curl`, `tar`, PowerShell 7+ (Windows), or standard GNU utilities (Linux) to unpack releases. +- **Gitea access**: Repository admin or site admin token with permission to register self-hosted runners (`Settings → Runners → New Runner`). + +### Current Runner Inventory (October 2025) + +- Hostname `DESKTOP-GLB3A15`; ASUS System Product Name chassis with AMD Ryzen 7 7700X (8C/16T) and ~63 GB usable RAM. +- Windows 11 Pro 10.0.26100 (64-bit) hosting Docker containers for Ubuntu-based job images. +- `act_runner` version `v0.2.13`; no `act_runner.yaml` present, so defaults apply (single concurrency, no custom labels beyond registration). 
+- Registered against `http://192.168.88.30:3000` with labels: + - `ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest` + - `ubuntu-24.04:docker://docker.gitea.com/runner-images:ubuntu-24.04` + - `ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04` +- Runner metadata stored in `.runner`; removing this file forces re-registration and should only be done intentionally. + +## 3. Runner Installation + +### 3.1 Download and Extract + +```powershell +$runnerVersion = "v0.2.13" +$downloadUrl = "https://gitea.com/gitea/act_runner/releases/download/$runnerVersion/act_runner_${runnerVersion}_windows_amd64.zip" +Invoke-WebRequest -Uri $downloadUrl -OutFile act_runner.zip +Expand-Archive act_runner.zip -DestinationPath C:\Tools\act-runner -Force +``` + +For Linux, download the `linux_amd64.tar.gz` artifact and extract with `tar -xzf` into `/opt/act-runner`. + +### 3.2 Configure Working Directory + +```powershell +Set-Location C:\Tools\act-runner +New-Item -ItemType Directory -Path logs -Force | Out-Null +``` + +Ensure the directory is writable by the service account that will execute the runner. + +### 3.3 Register With Gitea + +1. In Gitea, navigate to the repository or organization **Settings → Runners → New Runner**. +2. Copy the registration token and instance URL. +3. Execute the registration wizard: + +```powershell +.\act_runner.exe register --instance http://192.168.88.30:3000 --token --labels "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest" "ubuntu-24.04:docker://docker.gitea.com/runner-images:ubuntu-24.04" "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04" +``` + +Linux syntax is identical using `./act_runner register`. + +This command populates `.runner` with the runner ID, UUID, and labels. + +## 4. Service Configuration + +### 4.1 Windows Service + +Act Runner provides a built-in service helper: + +```powershell +.\act_runner.exe install +.\act_runner.exe start +``` + +The service runs under `LocalSystem` by default. Use `.\act_runner.exe install --user --password ` if isolation is required. + +### 4.2 Linux systemd Unit + +Create `/etc/systemd/system/act-runner.service`: + +```ini +[Unit] +Description=Gitea Act Runner +After=docker.service +Requires=docker.service + +[Service] +WorkingDirectory=/opt/act-runner +ExecStart=/opt/act-runner/act_runner daemon +Restart=always +RestartSec=10 +Environment="HTTP_PROXY=http://apt-cacher:3142" "HTTPS_PROXY=http://apt-cacher:3142" + +[Install] +WantedBy=multi-user.target +``` + +Enable and start: + +```bash +sudo systemctl daemon-reload +sudo systemctl enable --now act-runner.service +``` + +### 4.3 Environment Variables and Proxy Settings + +- Configure `HTTP_PROXY`, `HTTPS_PROXY`, and their lowercase variants to leverage the shared apt cache (`http://apt-cacher:3142`). +- Persist Docker registry credentials (for `docker.gitea.com`) in the service user profile using `docker login`; workflows rely on cached authentication for builds. +- To expose pip caching once infrastructure is available, set `PIP_INDEX_URL` and `PIP_EXTRA_INDEX_URL` at the service level. + +### 4.4 Logging + +- Windows services write to `%ProgramData%\act-runner\logs`. Redirect or forward to centralized logging if required. +- Linux installations can leverage `journalctl -u act-runner` and logrotate rules for `/opt/act-runner/logs`. + +## 5. Network and Security + +- **Outbound**: Allow HTTPS traffic to the Gitea instance, Docker Hub, docker.gitea.com, npm (for Playwright), PyPI, and the apt cache proxy. 
+- **Inbound**: No inbound ports are required; block unsolicited traffic on internet-facing hosts. +- **Credentials**: Store deployment SSH keys and registry credentials in Gitea secrets, not on the runner host. +- **Least privilege**: Run the service under a dedicated account with access only to Docker and required directories. + +## 6. Maintenance and Upgrades + +- **Version checks**: Monitor `https://gitea.com/gitea/act_runner/releases` and schedule upgrades quarterly or when security fixes drop. +- **Upgrade procedure**: Stop the service, replace `act_runner` binary, restart. Re-registration is not required as long as `.runner` remains intact. +- **Health checks**: Periodically validate connectivity with `act_runner exec --detect-event -W .gitea/workflows/test.yml` and inspect workflow durations to catch regressions. +- **Cleanup**: Purge Docker images and volumes monthly (`docker system prune -af`) to reclaim disk space. +- **Troubleshooting**: Use `act_runner diagnose` (if available in newer versions) or review logs for repeated failures; reset by stopping the service, deleting stale job containers (`docker ps -a`), and restarting. + +## 7. Alpine-based Runner Notes + +- Install baseline packages: `apk add docker bash curl coreutils nodejs npm python3 py3-pip libstdc++`. +- Playwright requirements: add `apk add chromium nss freetype harfbuzz ca-certificates mesa-gl` or install Playwright browsers via `npx playwright install --with-deps` using the Alpine bundle. +- Musl vs glibc: When workflows require glibc (e.g., certain Python wheels), include `apk add gcompat` or base images on `frolvlad/alpine-glibc`. +- Systemd alternative: Use `rc-service` or `supervisord` to manage `act_runner daemon` on Alpine since systemd is absent. +- Storage: Mount `/var/lib/docker` to persistent storage if running inside a VM, ensuring browser downloads and layer caches survive restarts. + +## 8. Appendix + +- **Troubleshooting checklist**: + - Verify Docker daemon is healthy (`docker info`). + - Confirm `.runner` file exists and lists expected labels. + - Re-run `act_runner register` if the runner no longer appears in Gitea. + - Check proxy endpoints are reachable before jobs start downloading dependencies. + +- **Related documentation**: + - `docs/architecture/07_deployment/07_01_testing_ci.md` (workflow architecture and CI owner coordination). + - `docs/ci-cache-troubleshooting.md` (pip caching status and known issues). + - `.gitea/actions/setup-python-env/action.yml` (shared job preparation logic referenced in workflows). 
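The troubleshooting checklist above can be scripted for unattended hosts. Below is a minimal preflight sketch, assuming Python 3 is available on the runner host; the paths, Gitea URL, proxy endpoint, and the `.runner` JSON layout are taken from the sections above, and the script itself is illustrative rather than part of the repository:

```python
#!/usr/bin/env python3
"""Preflight checks for an act_runner host (illustrative sketch)."""
import json
import shutil
import subprocess
import sys
import urllib.error
import urllib.request
from pathlib import Path

RUNNER_DIR = Path("/opt/act-runner")  # C:\Tools\act-runner on Windows hosts
GITEA_URL = "http://192.168.88.30:3000"
PROXY_URL = "http://apt-cacher:3142"


def docker_healthy() -> bool:
    # Mirrors the manual `docker info` check from the checklist.
    if shutil.which("docker") is None:
        return False
    result = subprocess.run(["docker", "info"], capture_output=True)
    return result.returncode == 0


def runner_registered() -> bool:
    # `.runner` is JSON written by `act_runner register`; a `labels` key is assumed.
    runner_file = RUNNER_DIR / ".runner"
    try:
        return bool(json.loads(runner_file.read_text()).get("labels"))
    except (OSError, json.JSONDecodeError):
        return False


def reachable(url: str) -> bool:
    # Any HTTP response, even an error status, proves the endpoint is up.
    try:
        urllib.request.urlopen(url, timeout=5)
        return True
    except urllib.error.HTTPError:
        return True
    except OSError:
        return False


if __name__ == "__main__":
    checks = {
        "Docker daemon healthy": docker_healthy(),
        ".runner present with labels": runner_registered(),
        "Gitea instance reachable": reachable(GITEA_URL),
        "apt cache proxy reachable": reachable(PROXY_URL),
    }
    for name, ok in checks.items():
        print(f"[{'OK' if ok else 'FAIL'}] {name}")
    sys.exit(0 if all(checks.values()) else 1)
```

Running it before (re)starting the service surfaces which prerequisite to fix first via a non-zero exit code.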
diff --git a/docs/architecture/07_deployment_view.md b/docs/architecture/07_deployment_view.md index 9619741..e3455a3 100644 --- a/docs/architecture/07_deployment_view.md +++ b/docs/architecture/07_deployment_view.md @@ -41,16 +41,42 @@ The infrastructure components for the application include: ```mermaid graph TD + G[Git Repository] --> C[CI/CD Pipeline] + C --> GAW[Gitea Action Workflows] + GAW --> GAR[Gitea Action Runners] + GAR --> T[Testing] + GAR --> CI[Continuous Integration] + T --> G + CI --> G + W[Web Server] --> DB[Database Server] + RP[Reverse Proxy] --> W + I((Internet)) <--> RP + PO[Containerization] --> W + C[CI/CD Pipeline] --> PO W --> S[Static File Server] - P[Reverse Proxy] --> W - C[CI/CD Pipeline] --> W - F[Containerization] --> W + S --> RP + PO --> DB + PO --> S ``` ## Environments -The application can be deployed in multiple environments to support development, testing, and production: +The application can be deployed in multiple environments to support development, testing, and production. + +```mermaid +graph TD + R[Repository] --> DEV[Development Environment] + R[Repository] --> TEST[Testing Environment] + R[Repository] --> PROD[Production Environment] + + DEV --> W_DEV[Web Server - Dev] + DEV --> DB_DEV[Database Server - Dev] + TEST --> W_TEST[Web Server - Test] + TEST --> DB_TEST[Database Server - Test] + PROD --> W_PROD[Web Server - Prod] + PROD --> DB_PROD[Database Server - Prod] +``` ### Development Environment @@ -73,7 +99,7 @@ The production environment is set up for serving live traffic and includes: - Production PostgreSQL instance - FastAPI server running in production mode -- Load balancer (e.g., Nginx) for distributing incoming requests +- Load balancer (Traefik) for distributing incoming requests - Monitoring and logging tools for tracking application performance ## Containerized Deployment Flow @@ -84,12 +110,12 @@ The Docker-based deployment path aligns with the solution strategy documented in - The multi-stage `Dockerfile` installs dependencies in a builder layer (including system compilers and Python packages) and copies only the required runtime artifacts to the final image. - Build arguments are minimal; database configuration is supplied at runtime via granular variables (`DATABASE_DRIVER`, `DATABASE_HOST`, `DATABASE_PORT`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_NAME`, optional `DATABASE_SCHEMA`). Secrets and configuration should be passed via environment variables or an orchestrator. -- The resulting image exposes port `8000` and starts `uvicorn main:app` (s. [README.md](../../README.md)). +- The resulting image exposes port `8000` and starts `uvicorn main:app` (see main [README.md](../../README.md)). ### Runtime Environment - For single-node deployments, run the container alongside PostgreSQL/Redis using Docker Compose or an equivalent orchestrator. -- A reverse proxy (e.g., Nginx) terminates TLS and forwards traffic to the container on port `8000`. +- A reverse proxy (Traefik) terminates TLS and forwards traffic to the container on port `8000`. - Migrations must be applied prior to rolling out a new image; automation can hook into the deploy step to run `scripts/run_migrations.py`. 
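To make the runtime contract concrete, the granular variables above can be assembled into a SQLAlchemy URL roughly as follows. This is a minimal sketch that mirrors what `config/database.py` does; the function name and defaults here are illustrative, not the canonical implementation:

```python
import os
from urllib.parse import quote_plus


def build_database_url() -> str:
    """Assemble a SQLAlchemy URL from the granular DATABASE_* variables."""
    driver = os.getenv("DATABASE_DRIVER", "postgresql")
    user = os.getenv("DATABASE_USER", "calminer")
    password = quote_plus(os.getenv("DATABASE_PASSWORD", ""))  # URL-safe
    host = os.getenv("DATABASE_HOST", "localhost")
    port = os.getenv("DATABASE_PORT", "5432")
    name = os.getenv("DATABASE_NAME", "calminer")
    url = f"{driver}://{user}:{password}@{host}:{port}/{name}"
    schema = os.getenv("DATABASE_SCHEMA")
    if schema and schema != "public":
        # Forward a non-default schema to PostgreSQL via search_path.
        url += f"?options=-csearch_path%3D{schema}"
    return url
```

Because every value is read from the environment, the same image runs unchanged across Compose, staging, and production; only the injected variables differ.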
### CI/CD Integration diff --git a/docs/quickstart.md b/docs/quickstart.md index 4682375..e98d227 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -168,8 +168,6 @@ docker compose -f docker-compose.postgres.yml down docker volume rm calminer_postgres_local_postgres_data # optional cleanup ``` -Document successful runs (or issues encountered) in `.github/instructions/DONE.TODO.md` for future reference. - ### Seeding reference data `scripts/seed_data.py` provides targeted control over the baseline datasets when the full setup script is not required: @@ -201,18 +199,18 @@ After a failure and rollback, rerun the full setup once the environment issues a The `.gitea/workflows/test.yml` job spins up a temporary PostgreSQL 16 container and runs the setup script twice: once with `--dry-run` to validate the plan and again without it to apply migrations and seeds. No external secrets are required; the workflow sets the following environment variables for both invocations and for pytest: -| Variable | Value | Purpose | -| --- | --- | --- | -| `DATABASE_DRIVER` | `postgresql` | Signals the driver to the setup script | -| `DATABASE_HOST` | `postgres` | Hostname of the Postgres job service container | -| `DATABASE_PORT` | `5432` | Default service port | -| `DATABASE_NAME` | `calminer_ci` | Target database created by the workflow | -| `DATABASE_USER` | `calminer` | Application role used during tests | -| `DATABASE_PASSWORD` | `secret` | Password for both admin and app role | -| `DATABASE_SCHEMA` | `public` | Default schema for the tests | -| `DATABASE_SUPERUSER` | `calminer` | Setup script uses the same role for admin actions | -| `DATABASE_SUPERUSER_PASSWORD` | `secret` | Matches the Postgres service password | -| `DATABASE_SUPERUSER_DB` | `calminer_ci` | Database to connect to for admin operations | +| Variable | Value | Purpose | +| ----------------------------- | ------------- | ------------------------------------------------- | +| `DATABASE_DRIVER` | `postgresql` | Signals the driver to the setup script | +| `DATABASE_HOST` | `postgres` | Hostname of the Postgres job service container | +| `DATABASE_PORT` | `5432` | Default service port | +| `DATABASE_NAME` | `calminer_ci` | Target database created by the workflow | +| `DATABASE_USER` | `calminer` | Application role used during tests | +| `DATABASE_PASSWORD` | `secret` | Password for both admin and app role | +| `DATABASE_SCHEMA` | `public` | Default schema for the tests | +| `DATABASE_SUPERUSER` | `calminer` | Setup script uses the same role for admin actions | +| `DATABASE_SUPERUSER_PASSWORD` | `secret` | Matches the Postgres service password | +| `DATABASE_SUPERUSER_DB` | `calminer_ci` | Database to connect to for admin operations | The workflow also updates `DATABASE_URL` for pytest to point at the CI Postgres instance. Existing tests continue to work unchanged, since SQLAlchemy reads the URL exactly as it does locally. @@ -228,8 +226,6 @@ Recommended execution order: 2. Execute the live run with the same flags minus `--dry-run` to provision the database, role grants, migrations, and seed data. Save the log as `reports/setup_staging_apply.log`. 3. Repeat the dry run to verify idempotency and record the result (for example `reports/setup_staging_post_apply.log`). -Record any issues in `.github/instructions/TODO.md` or `.github/instructions/DONE.TODO.md` as appropriate so the team can track follow-up actions. 
- ## Database Objects The database contains tables such as `capex`, `opex`, `chemical_consumption`, `fuel_consumption`, `water_consumption`, `scrap_consumption`, `production_output`, `equipment_operation`, `ore_batch`, `exchange_rate`, and `simulation_result`. diff --git a/docs/staging_environment_setup.md b/docs/staging_environment_setup.md index ba6d513..fc6d7f8 100644 --- a/docs/staging_environment_setup.md +++ b/docs/staging_environment_setup.md @@ -16,18 +16,18 @@ This guide outlines how to provision and validate the CalMiner staging database Populate the following environment variables before invoking the setup script. Store them in a secure location such as `config/setup_staging.env` (excluded from source control) and load them with `dotenv` or your shell profile. -| Variable | Description | -| --- | --- | -| `DATABASE_HOST` | Staging PostgreSQL hostname or IP (for example `staging-db.internal`). | -| `DATABASE_PORT` | Port exposed by the staging PostgreSQL service (default `5432`). | -| `DATABASE_NAME` | CalMiner staging database name (for example `calminer_staging`). | -| `DATABASE_USER` | Application role used by the FastAPI app (for example `calminer_app`). | -| `DATABASE_PASSWORD` | Password for the application role. | -| `DATABASE_SCHEMA` | Optional non-public schema; omit or set to `public` otherwise. | -| `DATABASE_SUPERUSER` | Administrative role with rights to create roles/databases (for example `calminer_admin`). | -| `DATABASE_SUPERUSER_PASSWORD` | Password for the administrative role. | -| `DATABASE_SUPERUSER_DB` | Database to connect to for admin tasks (default `postgres`). | -| `DATABASE_ADMIN_URL` | Optional DSN that overrides the granular admin settings above. | +| Variable | Description | +| ----------------------------- | ----------------------------------------------------------------------------------------- | +| `DATABASE_HOST` | Staging PostgreSQL hostname or IP (for example `staging-db.internal`). | +| `DATABASE_PORT` | Port exposed by the staging PostgreSQL service (default `5432`). | +| `DATABASE_NAME` | CalMiner staging database name (for example `calminer_staging`). | +| `DATABASE_USER` | Application role used by the FastAPI app (for example `calminer_app`). | +| `DATABASE_PASSWORD` | Password for the application role. | +| `DATABASE_SCHEMA` | Optional non-public schema; omit or set to `public` otherwise. | +| `DATABASE_SUPERUSER` | Administrative role with rights to create roles/databases (for example `calminer_admin`). | +| `DATABASE_SUPERUSER_PASSWORD` | Password for the administrative role. | +| `DATABASE_SUPERUSER_DB` | Database to connect to for admin tasks (default `postgres`). | +| `DATABASE_ADMIN_URL` | Optional DSN that overrides the granular admin settings above. | You may also set `DATABASE_URL` for application runtime convenience, but the setup script only requires the values listed in the table. @@ -98,4 +98,3 @@ Run the setup script in three phases to validate idempotency and capture diagnos ## Next Steps - Keep this document updated as staging infrastructure evolves (for example, when migrating to managed services or rotating credentials). -- Once staging validation is complete, summarize the outcome in `.github/instructions/DONE.TODO.md` and cross-link the relevant log files. 
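For example, loading and validating the staging settings from `config/setup_staging.env` can be sketched with `python-dotenv` (assumed to be available; the required-variable list follows the table in the prerequisites section above):

```python
import os

from dotenv import load_dotenv  # python-dotenv

REQUIRED = [
    "DATABASE_HOST",
    "DATABASE_PORT",
    "DATABASE_NAME",
    "DATABASE_USER",
    "DATABASE_PASSWORD",
    "DATABASE_SUPERUSER",
    "DATABASE_SUPERUSER_PASSWORD",
]

# Load the staging settings kept outside source control.
load_dotenv("config/setup_staging.env")

missing = [name for name in REQUIRED if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing staging settings: {', '.join(missing)}")
```

Running a guard like this before the setup script surfaces configuration gaps early instead of mid-run.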
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..35be63b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,16 @@
+[tool.black]
+line-length = 80
+target-version = ['py310']
+include = '\.pyi?$'
+exclude = '''
+/(
+  \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | build
+  | dist
+)/
+'''
+
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..b3ca909
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1 @@
+black
\ No newline at end of file
diff --git a/requirements-test.txt b/requirements-test.txt
index 2c69518..ec0a118 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -2,4 +2,5 @@ pytest
 pytest-cov
 pytest-httpx
 playwright
-pytest-playwright
\ No newline at end of file
+pytest-playwright
+ruff
\ No newline at end of file

From 97b1c0360bb5883b6a1aeb1a9f66ed024a265012 Mon Sep 17 00:00:00 2001
From: zwitschi
Date: Mon, 27 Oct 2025 10:32:55 +0100
Subject: [PATCH 02/31] Refactor test cases for improved readability and consistency

- Updated test functions in various test files to enhance code clarity by formatting long lines and improving indentation.
- Adjusted assertions to use multi-line formatting for better readability.
- Added new test cases for theme settings API to ensure proper functionality.
- Ensured consistent use of line breaks and spacing across test files for uniformity.
---
 config/database.py                            |   8 +
 docs/architecture/05_building_block_view.md   | 143 +++++++++-
 .../architecture/05_frontend/05_03_theming.md |  88 +++++++
 .../08_concepts/08_01_security.md             |  36 +++
 docs/architecture/13_ui_and_style.md          |  41 +++
 main.py                                       |   3 +
 middleware/validation.py                      |   5 +-
 models/__init__.py                            |   4 +
 models/application_setting.py                 |  17 +-
 models/capex.py                               |  20 +-
 models/currency.py                            |   7 +-
 models/opex.py                                |  20 +-
 models/parameters.py                          |   9 +-
 models/production_output.py                   |   3 +-
 models/role.py                                |  13 +
 models/scenario.py                            |  21 +-
 models/theme_setting.py                       |  15 ++
 models/user.py                                |  23 ++
 routes/consumption.py                         |   4 +-
 routes/costs.py                               |   6 +-
 routes/currencies.py                          |  28 +-
 routes/distributions.py                       |   4 +-
 routes/equipment.py                           |   4 +-
 routes/maintenance.py                         |  17 +-
 routes/parameters.py                          |  18 +-
 routes/production.py                          |  10 +-
 routes/scenarios.py                           |   1 +
 routes/settings.py                            |  37 ++-
 routes/simulations.py                         |  16 +-
 routes/ui.py                                  | 204 ++++++++++-----
 routes/users.py                               | 126 +++++++++
 schemas/user.py                               |  41 +++
 scripts/backfill_currency.py                  |  86 ++++--
 scripts/check_docs_links.py                   |  25 +-
 scripts/format_docs_md.py                     |  69 +++--
 .../20251027_create_theme_settings_table.sql  |  11 +
 .../20251027_create_user_and_role_tables.sql  |  15 ++
 scripts/seed_data.py                          | 115 +++++++-
 scripts/setup_database.py                     | 110 ++++----
 services/reporting.py                         |   6 +-
 services/security.py                          |  32 +++
 services/settings.py                          |  28 +-
 services/simulation.py                        |  14 +-
 static/js/theme.js                            | 108 ++++++++
 templates/base.html                           |   1 +
 templates/forgot_password.html                |  17 ++
 templates/login.html                          |  22 ++
 templates/partials/sidebar_nav.html           | 125 +++------
 templates/profile.html                        |  31 +++
 templates/register.html                       |  25 ++
 templates/settings.html                       | 137 ++--------
 templates/theme_settings.html                 | 125 +++++++++
 tests/e2e/conftest.py                         |  20 +-
 tests/e2e/test_consumption.py                 |   4 +-
 tests/e2e/test_costs.py                       |  10 +-
 tests/e2e/test_currencies.py                  |  17 +-
 tests/e2e/test_equipment.py                   |   7 +-
 tests/e2e/test_maintenance.py                 |   5 +-
 tests/e2e/test_production.py                  |   5 +-
 tests/e2e/test_scenarios.py                   |   3 +-
 tests/e2e/test_smoke.py                       |  29 ++-
 tests/unit/conftest.py                        |  38 ++-
 tests/unit/test_auth.py                       | 231 ++++++++++++++++
 tests/unit/test_consumption.py                |   7 +-
tests/unit/test_costs.py | 21 +- tests/unit/test_currencies.py | 46 +++- tests/unit/test_currency_workflow.py | 7 +- tests/unit/test_maintenance.py | 13 +- tests/unit/test_parameters.py | 5 +- tests/unit/test_production.py | 13 +- tests/unit/test_reporting.py | 14 +- tests/unit/test_router_validation.py | 5 +- tests/unit/test_settings_routes.py | 2 +- tests/unit/test_settings_service.py | 22 +- tests/unit/test_setup_database.py | 246 ++++++++++++------ tests/unit/test_simulation.py | 26 +- tests/unit/test_theme_settings.py | 63 +++++ tests/unit/test_ui_routes.py | 24 +- 78 files changed, 2327 insertions(+), 650 deletions(-) create mode 100644 docs/architecture/05_frontend/05_03_theming.md create mode 100644 docs/architecture/08_concepts/08_01_security.md create mode 100644 models/role.py create mode 100644 models/theme_setting.py create mode 100644 models/user.py create mode 100644 routes/users.py create mode 100644 schemas/user.py create mode 100644 scripts/migrations/20251027_create_theme_settings_table.sql create mode 100644 scripts/migrations/20251027_create_user_and_role_tables.sql create mode 100644 services/security.py create mode 100644 static/js/theme.js create mode 100644 templates/forgot_password.html create mode 100644 templates/login.html create mode 100644 templates/profile.html create mode 100644 templates/register.html create mode 100644 templates/theme_settings.html create mode 100644 tests/unit/test_auth.py create mode 100644 tests/unit/test_theme_settings.py diff --git a/config/database.py b/config/database.py index a850c05..ff6d3c0 100644 --- a/config/database.py +++ b/config/database.py @@ -56,3 +56,11 @@ DATABASE_URL = _build_database_url() engine = create_engine(DATABASE_URL, echo=True, future=True) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base() + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() diff --git a/docs/architecture/05_building_block_view.md b/docs/architecture/05_building_block_view.md index 1af3464..7b829bc 100644 --- a/docs/architecture/05_building_block_view.md +++ b/docs/architecture/05_building_block_view.md @@ -1,10 +1,11 @@ --- -title: "05 — Building Block View" -description: "Explain the static structure: modules, components, services and their relationships." +title: '05 — Building Block View' +description: 'Explain the static structure: modules, components, services and their relationships.' status: draft --- + # 05 — Building Block View ## Architecture overview @@ -42,6 +43,144 @@ Refer to the detailed architecture chapters in `docs/architecture/`: - **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers. - **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will attach in CI once scaffolding is completed. 
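The shared session dependency added to `config/database.py` in this patch is consumed by routers through FastAPI's `Depends`. A minimal sketch of the pattern follows; the `/api/example/ping` route is illustrative, not an existing endpoint:

```python
from fastapi import APIRouter, Depends
from sqlalchemy import text
from sqlalchemy.orm import Session

from config.database import get_db

router = APIRouter(prefix="/api/example", tags=["example"])


@router.get("/ping")
def ping(db: Session = Depends(get_db)) -> dict:
    # get_db yields a SessionLocal instance and closes it in its
    # finally block once the response has been sent.
    db.execute(text("SELECT 1"))
    return {"status": "ok"}
```

Centralizing the session lifecycle in one generator keeps routers free of teardown logic and makes the session trivially overridable in tests.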
+### Component Diagram + +# System Architecture — Mermaid Diagram + +```mermaid +graph LR + %% Direction + %% LR = left-to-right for a wide architecture view + + %% === Clients === + U["User (Browser)"] + + %% === Frontend === + subgraph FE[Frontend] + TPL["Jinja2 Templates\n(templates/)\n• base layout + sidebar"] + PARTS["Reusable Partials\n(templates/partials/components.html)\n• inputs • empty states • table wrappers"] + STATIC["Static Assets\n(static/)\n• CSS: static/css/main.css (palette via CSS vars)\n• JS: static/js/*.js (page modules)"] + SETPAGE["Settings View\n(templates/settings.html)"] + SETJS["Settings Logic\n(static/js/settings.js)\n• validation • submit • live CSS updates"] + end + + %% === Backend === + subgraph BE[Backend FastAPI] + MAIN["FastAPI App\n(main.py)\n• routers • middleware • startup/shutdown"] + + subgraph ROUTES[Routers] + R_SCN["scenarios"] + R_PAR["parameters"] + R_CST["costs"] + R_CONS["consumption"] + R_PROD["production"] + R_EQP["equipment"] + R_MNT["maintenance"] + R_SIM["simulations"] + R_REP["reporting"] + R_UI["ui.py (metadata for UI)"] + DEP["dependencies.get_db\n(shared SQLAlchemy session)"] + end + + subgraph SRV[Services] + S_BLL["Business Logic Layer\n• orchestrates models + calc"] + S_REP["Reporting Calculations"] + S_SIM["Monte Carlo\n(simulation scaffolding)"] + S_SET["Settings Manager\n(services/settings.py)\n• defaults via CSS vars\n• persistence in DB\n• env overrides\n• surfaces to API & UI"] + end + + subgraph MOD[Models] + M_SCN["Scenario"] + M_CAP["CapEx"] + M_OPEX["OpEx"] + M_CONS["Consumption"] + M_PROD["ProductionOutput"] + M_EQP["Equipment"] + M_MNT["Maintenance"] + M_SIMR["SimulationResult"] + end + + subgraph DB[Database Layer] + CFG["config/database.py\n(SQLAlchemy engine & sessions)"] + PG[("PostgreSQL")] + APPSET["application_setting table"] + end + end + + %% === Middleware & Utilities === + subgraph MW[Middleware & Utilities] + VAL["JSON Validation Middleware\n(middleware/validation.py)"] + end + + subgraph TEST[Testing] + UNIT["pytest unit tests\n(tests/unit/)\n• routes • services • UI rendering\n• negative-path validation"] + E2E["Playwright E2E (planned)\n• dashboard • scenario inputs • reporting\n• attach in CI"] + end + + %% ===================== Edges / Flows ===================== + %% User to Frontend/Backend + U -->|HTTP GET| MAIN + U --> TPL + TPL -->|server-rendered HTML| U + STATIC --> U + PARTS --> TPL + SETPAGE --> U + SETJS --> U + + %% Frontend to Routers (AJAX/form submits) + SETJS -->|fetch/POST| R_UI + TPL -->|form submit / fetch| ROUTES + + %% FastAPI app wiring and middleware + VAL --> MAIN + MAIN --> ROUTES + + %% Routers to Services + ROUTES -->|calls| SRV + R_REP -->|calc| S_REP + R_SIM -->|run| S_SIM + R_UI -->|read/write settings meta| S_SET + + %% Services to Models & DB + SRV --> MOD + MOD --> CFG + CFG --> PG + + %% Settings manager persistence path + S_SET -->|persist/read| APPSET + APPSET --- PG + + %% Shared DB session dependency + DEP -. provides .-> ROUTES + DEP -. session .-> SRV + + %% Model entities mapping + S_BLL --> M_SCN & M_CAP & M_OPEX & M_CONS & M_PROD & M_EQP & M_MNT & M_SIMR + + %% Testing coverage + UNIT --> ROUTES + UNIT --> SRV + UNIT --> TPL + UNIT --> VAL + E2E --> U + E2E --> MAIN + + %% Legend + classDef store fill:#fff,stroke:#555,stroke-width:1px; + class PG store; +``` + +--- + +**Notes** + +- Arrows represent primary data/command flow. Dashed arrows denote shared dependencies (injected SQLAlchemy session). 
+- The settings pipeline shows how environment overrides and DB-backed defaults propagate to both API and UI. + +``` + +``` + ## Module Map (code) - `scenario.py`: central scenario entity with relationships to cost, consumption, production, equipment, maintenance, and simulation results. diff --git a/docs/architecture/05_frontend/05_03_theming.md b/docs/architecture/05_frontend/05_03_theming.md new file mode 100644 index 0000000..284b193 --- /dev/null +++ b/docs/architecture/05_frontend/05_03_theming.md @@ -0,0 +1,88 @@ +# Theming + +## Overview + +CalMiner uses a centralized theming system based on CSS custom properties (variables) to ensure consistent styling across the application. The theme is stored in the database and can be customized through environment variables or the UI settings page. + +## Default Theme Settings + +The default theme provides a light, professional color palette suitable for business applications. The colors are defined as CSS custom properties and stored in the `application_setting` table with category "theme". + +### Color Palette + +| CSS Variable | Default Value | Description | +| --------------------------- | ------------------------ | ------------------------ | +| `--color-background` | `#f4f5f7` | Main background color | +| `--color-surface` | `#ffffff` | Surface/card background | +| `--color-text-primary` | `#2a1f33` | Primary text color | +| `--color-text-secondary` | `#624769` | Secondary text color | +| `--color-text-muted` | `#64748b` | Muted text color | +| `--color-text-subtle` | `#94a3b8` | Subtle text color | +| `--color-text-invert` | `#ffffff` | Text on dark backgrounds | +| `--color-text-dark` | `#0f172a` | Dark text for contrast | +| `--color-text-strong` | `#111827` | Strong/bold text | +| `--color-primary` | `#5f320d` | Primary brand color | +| `--color-primary-strong` | `#7e4c13` | Stronger primary | +| `--color-primary-stronger` | `#837c15` | Strongest primary | +| `--color-accent` | `#bff838` | Accent/highlight color | +| `--color-border` | `#e2e8f0` | Default border color | +| `--color-border-strong` | `#cbd5e1` | Strong border color | +| `--color-highlight` | `#eef2ff` | Highlight background | +| `--color-panel-shadow` | `rgba(15, 23, 42, 0.08)` | Subtle shadow | +| `--color-panel-shadow-deep` | `rgba(15, 23, 42, 0.12)` | Deeper shadow | +| `--color-surface-alt` | `#f8fafc` | Alternative surface | +| `--color-success` | `#047857` | Success state color | +| `--color-error` | `#b91c1c` | Error state color | + +## Customization + +### Environment Variables + +Theme colors can be overridden using environment variables with the prefix `CALMINER_THEME_`. For example: + +```bash +export CALMINER_THEME_COLOR_BACKGROUND="#000000" +export CALMINER_THEME_COLOR_ACCENT="#ff0000" +``` + +The variable names are derived by: + +1. Removing the `--` prefix +2. Converting to uppercase +3. Replacing `-` with `_` +4. Adding `CALMINER_THEME_` prefix + +### Database Storage + +Settings are stored in the `application_setting` table with: + +- `category`: "theme" +- `value_type`: "color" +- `is_editable`: true + +### UI Settings + +Users can modify theme colors through the settings page at `/ui/settings`. 
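The derivation rules above translate directly into code. A minimal sketch; the helper name is illustrative rather than part of `services/settings.py`:

```python
def theme_env_var(css_variable: str) -> str:
    """Derive the override variable name for a CSS custom property."""
    # Strip the leading "--", uppercase, swap "-" for "_", add the prefix.
    name = css_variable.removeprefix("--").upper().replace("-", "_")
    return f"CALMINER_THEME_{name}"


assert theme_env_var("--color-background") == "CALMINER_THEME_COLOR_BACKGROUND"
assert theme_env_var("--color-text-primary") == "CALMINER_THEME_COLOR_TEXT_PRIMARY"
```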
+ +## Implementation + +The theming system is implemented in: + +- `services/settings.py`: Color management and defaults +- `routes/settings.py`: API endpoints for theme settings +- `static/css/main.css`: CSS variable definitions +- `templates/settings.html`: UI for theme customization + +## Seeding + +Default theme settings are seeded during database setup using the seed script: + +```bash +python scripts/seed_data.py --theme +``` + +Or as part of defaults: + +```bash +python scripts/seed_data.py --defaults +``` diff --git a/docs/architecture/08_concepts/08_01_security.md b/docs/architecture/08_concepts/08_01_security.md new file mode 100644 index 0000000..1488537 --- /dev/null +++ b/docs/architecture/08_concepts/08_01_security.md @@ -0,0 +1,36 @@ +# User Roles and Permissions Model + +This document outlines the proposed user roles and permissions model for the CalMiner application. + +## User Roles + +- **Admin:** Full access to all features, including user management, application settings, and all data. +- **Analyst:** Can create, view, edit, and delete scenarios, run simulations, and view reports. Cannot modify application settings or manage users. +- **Viewer:** Can view scenarios, simulations, and reports. Cannot create, edit, or delete anything. + +## Permissions (examples) + +- `users:manage`: Admin only. +- `settings:manage`: Admin only. +- `scenarios:create`: Admin, Analyst. +- `scenarios:view`: Admin, Analyst, Viewer. +- `scenarios:edit`: Admin, Analyst. +- `scenarios:delete`: Admin, Analyst. +- `simulations:run`: Admin, Analyst. +- `simulations:view`: Admin, Analyst, Viewer. +- `reports:view`: Admin, Analyst, Viewer. + +## Authentication System + +The authentication system uses JWT (JSON Web Tokens) for securing API endpoints. Users can register with a username, email, and password. Passwords are hashed using bcrypt. Upon successful login, an access token is issued, which must be included in subsequent requests for protected resources. + +## Key Components + +- **Password Hashing:** `passlib.context.CryptContext` with `bcrypt` scheme. +- **Token Creation & Verification:** `jose.jwt` for encoding and decoding JWTs. +- **Authentication Flow:** + 1. User registers via `/users/register`. + 2. User logs in via `/users/login` to obtain an access token. + 3. The access token is sent in the `Authorization` header (Bearer token) for protected routes. + 4. The `get_current_user` dependency verifies the token and retrieves the authenticated user. +- **Password Reset:** A placeholder `forgot_password` endpoint is available, and a `reset_password` endpoint allows users to set a new password with a valid token (token generation and email sending are not yet implemented). diff --git a/docs/architecture/13_ui_and_style.md b/docs/architecture/13_ui_and_style.md index 4a1e6f5..d3502ca 100644 --- a/docs/architecture/13_ui_and_style.md +++ b/docs/architecture/13_ui_and_style.md @@ -28,6 +28,32 @@ Import macros via: - **Tables**: `.table-container` wrappers need overflow handling for narrow viewports; consider `overflow-x: auto` with padding adjustments. - **Feedback/Empty states**: Messages use default font weight and spacing; a utility class for margin/padding would ensure consistent separation from forms or tables. +## CSS Variable Naming Conventions + +The project adheres to a clear and descriptive naming convention for CSS variables, primarily defined in `static/css/main.css`. 
+ +## Naming Structure + +Variables are prefixed based on their category: + +- `--color-`: For all color-related variables (e.g., `--color-primary`, `--color-background`, `--color-text-primary`). +- `--space-`: For spacing and layout-related variables (e.g., `--space-sm`, `--space-md`, `--space-lg`). +- `--font-size-`: For font size variables (e.g., `--font-size-base`, `--font-size-lg`). +- Other specific prefixes for components or properties (e.g., `--panel-radius`, `--table-radius`). + +## Descriptive Names + +Color names are chosen to be semantically meaningful rather than literal color values, allowing for easier theme changes. For example: + +- `--color-primary`: Represents the main brand color. +- `--color-accent`: Represents an accent color used for highlights. +- `--color-text-primary`: The main text color. +- `--color-text-muted`: A lighter text color for less emphasis. +- `--color-surface`: The background color for UI elements like cards or panels. +- `--color-background`: The overall page background color. + +This approach ensures that the CSS variables are intuitive, maintainable, and easily adaptable for future theme customizations. + ## Per-page data & actions Short reference of per-page APIs and primary actions used by templates and scripts. @@ -76,6 +102,21 @@ Short reference of per-page APIs and primary actions used by templates and scrip - Data: `POST /api/reporting/summary` (accepts arrays of `{ "result": float }` objects) - Actions: Trigger summary refreshes and export/download actions. +## Navigation Structure + +The application uses a sidebar navigation menu organized into the following top-level categories: + +- **Dashboard**: Main overview page. +- **Overview**: Sub-menu for core scenario inputs. + - Parameters: Process parameters configuration. + - Costs: Capital and operating costs. + - Consumption: Resource consumption tracking. + - Production: Production output settings. + - Equipment: Equipment inventory (with Maintenance sub-item). +- **Simulations**: Monte Carlo simulation runs. +- **Analytics**: Reporting and analytics. +- **Settings**: Administrative settings (with Themes and Currency Management sub-items). + ## UI Template Audit (2025-10-20) - Existing HTML templates: `ScenarioForm.html`, `ParameterInput.html`, and `Dashboard.html` (reporting summary view). 
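As a concrete instance of the per-page contracts listed above, a client call against the reporting summary endpoint can be sketched with `httpx` (already in the test stack via `pytest-httpx`); the base URL is illustrative:

```python
import httpx

# POST /api/reporting/summary accepts an array of {"result": float} objects.
payload = [{"result": 1.25}, {"result": 0.75}, {"result": 1.10}]

response = httpx.post(
    "http://localhost:8000/api/reporting/summary",  # illustrative host
    json=payload,
    timeout=10.0,
)
response.raise_for_status()
print(response.json())  # summary payload computed by the reporting service
```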
diff --git a/main.py b/main.py index 0baa79d..171cd88 100644 --- a/main.py +++ b/main.py @@ -17,6 +17,7 @@ from routes.currencies import router as currencies_router from routes.simulations import router as simulations_router from routes.maintenance import router as maintenance_router from routes.settings import router as settings_router +from routes.users import router as users_router # Initialize database schema Base.metadata.create_all(bind=engine) @@ -30,6 +31,7 @@ async def json_validation( ) -> Response: return await validate_json(request, call_next) + app.mount("/static", StaticFiles(directory="static"), name="static") # Include API routers @@ -46,3 +48,4 @@ app.include_router(reporting_router) app.include_router(currencies_router) app.include_router(settings_router) app.include_router(ui_router) +app.include_router(users_router) diff --git a/middleware/validation.py b/middleware/validation.py index b779366..9f2249e 100644 --- a/middleware/validation.py +++ b/middleware/validation.py @@ -4,7 +4,10 @@ from fastapi import HTTPException, Request, Response MiddlewareCallNext = Callable[[Request], Awaitable[Response]] -async def validate_json(request: Request, call_next: MiddlewareCallNext) -> Response: + +async def validate_json( + request: Request, call_next: MiddlewareCallNext +) -> Response: # Only validate JSON for requests with a body if request.method in ("POST", "PUT", "PATCH"): try: diff --git a/models/__init__.py b/models/__init__.py index 81d530a..a46e508 100644 --- a/models/__init__.py +++ b/models/__init__.py @@ -2,5 +2,9 @@ models package initializer. Import key models so they're registered with the shared Base.metadata when the package is imported by tests. """ + from . import application_setting # noqa: F401 from . import currency # noqa: F401 +from . import role # noqa: F401 +from . import user # noqa: F401 +from . 
import theme_setting # noqa: F401 diff --git a/models/application_setting.py b/models/application_setting.py index 36b0ad5..ed98160 100644 --- a/models/application_setting.py +++ b/models/application_setting.py @@ -14,15 +14,24 @@ class ApplicationSetting(Base): id: Mapped[int] = mapped_column(primary_key=True, index=True) key: Mapped[str] = mapped_column(String(128), unique=True, nullable=False) value: Mapped[str] = mapped_column(Text, nullable=False) - value_type: Mapped[str] = mapped_column(String(32), nullable=False, default="string") - category: Mapped[str] = mapped_column(String(32), nullable=False, default="general") + value_type: Mapped[str] = mapped_column( + String(32), nullable=False, default="string" + ) + category: Mapped[str] = mapped_column( + String(32), nullable=False, default="general" + ) description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) - is_editable: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + is_editable: Mapped[bool] = mapped_column( + Boolean, nullable=False, default=True + ) created_at: Mapped[datetime] = mapped_column( DateTime(timezone=True), server_default=func.now(), nullable=False ) updated_at: Mapped[datetime] = mapped_column( - DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False + DateTime(timezone=True), + server_default=func.now(), + onupdate=func.now(), + nullable=False, ) def __repr__(self) -> str: diff --git a/models/capex.py b/models/capex.py index 6b68f4c..68b6749 100644 --- a/models/capex.py +++ b/models/capex.py @@ -29,8 +29,9 @@ class Capex(Base): @currency_code.setter def currency_code(self, value: str) -> None: # store pending code so application code or migrations can pick it up - setattr(self, "_currency_code_pending", - (value or "USD").strip().upper()) + setattr( + self, "_currency_code_pending", (value or "USD").strip().upper() + ) # SQLAlchemy event handlers to ensure currency_id is set before insert/update @@ -42,22 +43,27 @@ def _resolve_currency(mapper, connection, target): return code = getattr(target, "_currency_code_pending", None) or "USD" # Try to find existing currency id - row = connection.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).fetchone() + row = connection.execute( + text("SELECT id FROM currency WHERE code = :code"), {"code": code} + ).fetchone() if row: cid = row[0] else: # Insert new currency and attempt to get lastrowid res = connection.execute( - text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"), + text( + "INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)" + ), {"code": code, "name": code, "symbol": None, "active": True}, ) try: cid = res.lastrowid except Exception: # fallback: select after insert - cid = connection.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).scalar() + cid = connection.execute( + text("SELECT id FROM currency WHERE code = :code"), + {"code": code}, + ).scalar() target.currency_id = cid diff --git a/models/currency.py b/models/currency.py index b280c2d..de95abd 100644 --- a/models/currency.py +++ b/models/currency.py @@ -14,8 +14,11 @@ class Currency(Base): # reverse relationships (optional) capex_items = relationship( - "Capex", back_populates="currency", lazy="select") + "Capex", back_populates="currency", lazy="select" + ) opex_items = relationship("Opex", back_populates="currency", lazy="select") def __repr__(self): - return f"" + return ( + f"" + ) diff 
--git a/models/opex.py b/models/opex.py index a819864..5c0e703 100644 --- a/models/opex.py +++ b/models/opex.py @@ -28,28 +28,34 @@ class Opex(Base): @currency_code.setter def currency_code(self, value: str) -> None: - setattr(self, "_currency_code_pending", - (value or "USD").strip().upper()) + setattr( + self, "_currency_code_pending", (value or "USD").strip().upper() + ) def _resolve_currency_opex(mapper, connection, target): if getattr(target, "currency_id", None): return code = getattr(target, "_currency_code_pending", None) or "USD" - row = connection.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).fetchone() + row = connection.execute( + text("SELECT id FROM currency WHERE code = :code"), {"code": code} + ).fetchone() if row: cid = row[0] else: res = connection.execute( - text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"), + text( + "INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)" + ), {"code": code, "name": code, "symbol": None, "active": True}, ) try: cid = res.lastrowid except Exception: - cid = connection.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).scalar() + cid = connection.execute( + text("SELECT id FROM currency WHERE code = :code"), + {"code": code}, + ).scalar() target.currency_id = cid diff --git a/models/parameters.py b/models/parameters.py index 5182a74..822a011 100644 --- a/models/parameters.py +++ b/models/parameters.py @@ -10,14 +10,17 @@ class Parameter(Base): id: Mapped[int] = mapped_column(primary_key=True, index=True) scenario_id: Mapped[int] = mapped_column( - ForeignKey("scenario.id"), nullable=False) + ForeignKey("scenario.id"), nullable=False + ) name: Mapped[str] = mapped_column(nullable=False) value: Mapped[float] = mapped_column(nullable=False) distribution_id: Mapped[Optional[int]] = mapped_column( - ForeignKey("distribution.id"), nullable=True) + ForeignKey("distribution.id"), nullable=True + ) distribution_type: Mapped[Optional[str]] = mapped_column(nullable=True) distribution_parameters: Mapped[Optional[Dict[str, Any]]] = mapped_column( - JSON, nullable=True) + JSON, nullable=True + ) scenario = relationship("Scenario", back_populates="parameters") distribution = relationship("Distribution") diff --git a/models/production_output.py b/models/production_output.py index a700d57..fde7cb8 100644 --- a/models/production_output.py +++ b/models/production_output.py @@ -14,7 +14,8 @@ class ProductionOutput(Base): unit_symbol = Column(String(16), nullable=True) scenario = relationship( - "Scenario", back_populates="production_output_items") + "Scenario", back_populates="production_output_items" + ) def __repr__(self): return ( diff --git a/models/role.py b/models/role.py new file mode 100644 index 0000000..3351908 --- /dev/null +++ b/models/role.py @@ -0,0 +1,13 @@ +from sqlalchemy import Column, Integer, String +from sqlalchemy.orm import relationship + +from config.database import Base + + +class Role(Base): + __tablename__ = "roles" + + id = Column(Integer, primary_key=True, index=True) + name = Column(String, unique=True, index=True) + + users = relationship("User", back_populates="role") diff --git a/models/scenario.py b/models/scenario.py index 3c9f19e..66d4fd2 100644 --- a/models/scenario.py +++ b/models/scenario.py @@ -20,19 +20,16 @@ class Scenario(Base): updated_at = Column(DateTime(timezone=True), onupdate=func.now()) parameters = relationship("Parameter", back_populates="scenario") 
simulation_results = relationship( - SimulationResult, back_populates="scenario") - capex_items = relationship( - Capex, back_populates="scenario") - opex_items = relationship( - Opex, back_populates="scenario") - consumption_items = relationship( - Consumption, back_populates="scenario") + SimulationResult, back_populates="scenario" + ) + capex_items = relationship(Capex, back_populates="scenario") + opex_items = relationship(Opex, back_populates="scenario") + consumption_items = relationship(Consumption, back_populates="scenario") production_output_items = relationship( - ProductionOutput, back_populates="scenario") - equipment_items = relationship( - Equipment, back_populates="scenario") - maintenance_items = relationship( - Maintenance, back_populates="scenario") + ProductionOutput, back_populates="scenario" + ) + equipment_items = relationship(Equipment, back_populates="scenario") + maintenance_items = relationship(Maintenance, back_populates="scenario") # relationships can be defined later def __repr__(self): diff --git a/models/theme_setting.py b/models/theme_setting.py new file mode 100644 index 0000000..1e20c64 --- /dev/null +++ b/models/theme_setting.py @@ -0,0 +1,15 @@ +from sqlalchemy import Column, Integer, String + +from config.database import Base + + +class ThemeSetting(Base): + __tablename__ = "theme_settings" + + id = Column(Integer, primary_key=True, index=True) + theme_name = Column(String, unique=True, index=True) + primary_color = Column(String) + secondary_color = Column(String) + accent_color = Column(String) + background_color = Column(String) + text_color = Column(String) diff --git a/models/user.py b/models/user.py new file mode 100644 index 0000000..5ee8654 --- /dev/null +++ b/models/user.py @@ -0,0 +1,23 @@ +from sqlalchemy import Column, Integer, String, ForeignKey +from sqlalchemy.orm import relationship + +from config.database import Base +from services.security import get_password_hash, verify_password + + +class User(Base): + __tablename__ = "users" + + id = Column(Integer, primary_key=True, index=True) + username = Column(String, unique=True, index=True) + email = Column(String, unique=True, index=True) + hashed_password = Column(String) + role_id = Column(Integer, ForeignKey("roles.id")) + + role = relationship("Role", back_populates="users") + + def set_password(self, password: str): + self.hashed_password = get_password_hash(password) + + def check_password(self, password: str) -> bool: + return verify_password(password, str(self.hashed_password)) diff --git a/routes/consumption.py b/routes/consumption.py index 4fee0d2..e03785d 100644 --- a/routes/consumption.py +++ b/routes/consumption.py @@ -36,7 +36,9 @@ class ConsumptionRead(ConsumptionBase): model_config = ConfigDict(from_attributes=True) -@router.post("/", response_model=ConsumptionRead, status_code=status.HTTP_201_CREATED) +@router.post( + "/", response_model=ConsumptionRead, status_code=status.HTTP_201_CREATED +) def create_consumption(item: ConsumptionCreate, db: Session = Depends(get_db)): db_item = Consumption(**item.model_dump()) db.add(db_item) diff --git a/routes/costs.py b/routes/costs.py index 4dafb96..e22f18a 100644 --- a/routes/costs.py +++ b/routes/costs.py @@ -73,7 +73,8 @@ def create_capex(item: CapexCreate, db: Session = Depends(get_db)): if not cid: code = (payload.pop("currency_code", "USD") or "USD").strip().upper() currency_cls = __import__( - "models.currency", fromlist=["Currency"]).Currency + "models.currency", fromlist=["Currency"] + ).Currency currency = 
db.query(currency_cls).filter_by(code=code).one_or_none() if currency is None: currency = currency_cls(code=code, name=code, symbol=None) @@ -100,7 +101,8 @@ def create_opex(item: OpexCreate, db: Session = Depends(get_db)): if not cid: code = (payload.pop("currency_code", "USD") or "USD").strip().upper() currency_cls = __import__( - "models.currency", fromlist=["Currency"]).Currency + "models.currency", fromlist=["Currency"] + ).Currency currency = db.query(currency_cls).filter_by(code=code).one_or_none() if currency is None: currency = currency_cls(code=code, name=code, symbol=None) diff --git a/routes/currencies.py b/routes/currencies.py index d9a210f..642bb11 100644 --- a/routes/currencies.py +++ b/routes/currencies.py @@ -97,20 +97,20 @@ def _ensure_default_currency(db: Session) -> Currency: def _get_currency_or_404(db: Session, code: str) -> Currency: normalized = code.strip().upper() currency = ( - db.query(Currency) - .filter(Currency.code == normalized) - .one_or_none() + db.query(Currency).filter(Currency.code == normalized).one_or_none() ) if currency is None: raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, detail="Currency not found") + status_code=status.HTTP_404_NOT_FOUND, detail="Currency not found" + ) return currency @router.get("/", response_model=List[CurrencyRead]) def list_currencies( include_inactive: bool = Query( - False, description="Include inactive currencies"), + False, description="Include inactive currencies" + ), db: Session = Depends(get_db), ): _ensure_default_currency(db) @@ -121,14 +121,12 @@ def list_currencies( return currencies -@router.post("/", response_model=CurrencyRead, status_code=status.HTTP_201_CREATED) +@router.post( + "/", response_model=CurrencyRead, status_code=status.HTTP_201_CREATED +) def create_currency(payload: CurrencyCreate, db: Session = Depends(get_db)): code = payload.code - existing = ( - db.query(Currency) - .filter(Currency.code == code) - .one_or_none() - ) + existing = db.query(Currency).filter(Currency.code == code).one_or_none() if existing is not None: raise HTTPException( status_code=status.HTTP_409_CONFLICT, @@ -148,7 +146,9 @@ def create_currency(payload: CurrencyCreate, db: Session = Depends(get_db)): @router.put("/{code}", response_model=CurrencyRead) -def update_currency(code: str, payload: CurrencyUpdate, db: Session = Depends(get_db)): +def update_currency( + code: str, payload: CurrencyUpdate, db: Session = Depends(get_db) +): currency = _get_currency_or_404(db, code) if payload.name is not None: @@ -175,7 +175,9 @@ def update_currency(code: str, payload: CurrencyUpdate, db: Session = Depends(ge @router.patch("/{code}/activation", response_model=CurrencyRead) -def toggle_currency_activation(code: str, body: CurrencyActivation, db: Session = Depends(get_db)): +def toggle_currency_activation( + code: str, body: CurrencyActivation, db: Session = Depends(get_db) +): currency = _get_currency_or_404(db, code) code_value = getattr(currency, "code") if code_value == DEFAULT_CURRENCY_CODE and body.is_active is False: diff --git a/routes/distributions.py b/routes/distributions.py index 8c409c3..34a0cc8 100644 --- a/routes/distributions.py +++ b/routes/distributions.py @@ -22,7 +22,9 @@ class DistributionRead(DistributionCreate): @router.post("/", response_model=DistributionRead) -async def create_distribution(dist: DistributionCreate, db: Session = Depends(get_db)): +async def create_distribution( + dist: DistributionCreate, db: Session = Depends(get_db) +): db_dist = Distribution(**dist.model_dump()) 
db.add(db_dist) db.commit() diff --git a/routes/equipment.py b/routes/equipment.py index c8aecbd..a5800a9 100644 --- a/routes/equipment.py +++ b/routes/equipment.py @@ -23,7 +23,9 @@ class EquipmentRead(EquipmentCreate): @router.post("/", response_model=EquipmentRead) -async def create_equipment(item: EquipmentCreate, db: Session = Depends(get_db)): +async def create_equipment( + item: EquipmentCreate, db: Session = Depends(get_db) +): db_item = Equipment(**item.model_dump()) db.add(db_item) db.commit() diff --git a/routes/maintenance.py b/routes/maintenance.py index d7f0f49..93683fd 100644 --- a/routes/maintenance.py +++ b/routes/maintenance.py @@ -34,8 +34,9 @@ class MaintenanceRead(MaintenanceBase): def _get_maintenance_or_404(db: Session, maintenance_id: int) -> Maintenance: - maintenance = db.query(Maintenance).filter( - Maintenance.id == maintenance_id).first() + maintenance = ( + db.query(Maintenance).filter(Maintenance.id == maintenance_id).first() + ) if maintenance is None: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, @@ -44,8 +45,12 @@ def _get_maintenance_or_404(db: Session, maintenance_id: int) -> Maintenance: return maintenance -@router.post("/", response_model=MaintenanceRead, status_code=status.HTTP_201_CREATED) -def create_maintenance(maintenance: MaintenanceCreate, db: Session = Depends(get_db)): +@router.post( + "/", response_model=MaintenanceRead, status_code=status.HTTP_201_CREATED +) +def create_maintenance( + maintenance: MaintenanceCreate, db: Session = Depends(get_db) +): db_maintenance = Maintenance(**maintenance.model_dump()) db.add(db_maintenance) db.commit() @@ -54,7 +59,9 @@ def create_maintenance(maintenance: MaintenanceCreate, db: Session = Depends(get @router.get("/", response_model=List[MaintenanceRead]) -def list_maintenance(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): +def list_maintenance( + skip: int = 0, limit: int = 100, db: Session = Depends(get_db) +): return db.query(Maintenance).offset(skip).limit(limit).all() diff --git a/routes/parameters.py b/routes/parameters.py index 39e67e4..59f09c8 100644 --- a/routes/parameters.py +++ b/routes/parameters.py @@ -30,12 +30,15 @@ class ParameterCreate(BaseModel): return None if normalized not in {"normal", "uniform", "triangular"}: raise ValueError( - "distribution_type must be normal, uniform, or triangular") + "distribution_type must be normal, uniform, or triangular" + ) return normalized @field_validator("distribution_parameters") @classmethod - def empty_dict_to_none(cls, value: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: + def empty_dict_to_none( + cls, value: Optional[Dict[str, Any]] + ) -> Optional[Dict[str, Any]]: if value is None: return None return value or None @@ -45,6 +48,7 @@ class ParameterRead(ParameterCreate): id: int model_config = ConfigDict(from_attributes=True) + @router.post("/", response_model=ParameterRead) def create_parameter(param: ParameterCreate, db: Session = Depends(get_db)): scen = db.query(Scenario).filter(Scenario.id == param.scenario_id).first() @@ -55,11 +59,15 @@ def create_parameter(param: ParameterCreate, db: Session = Depends(get_db)): distribution_parameters = param.distribution_parameters if distribution_id is not None: - distribution = db.query(Distribution).filter( - Distribution.id == distribution_id).first() + distribution = ( + db.query(Distribution) + .filter(Distribution.id == distribution_id) + .first() + ) if not distribution: raise HTTPException( - status_code=404, detail="Distribution not found") + 
status_code=404, detail="Distribution not found" + ) distribution_type = distribution.distribution_type distribution_parameters = distribution.parameters or None diff --git a/routes/production.py b/routes/production.py index 264b541..ad4a059 100644 --- a/routes/production.py +++ b/routes/production.py @@ -36,8 +36,14 @@ class ProductionOutputRead(ProductionOutputBase): model_config = ConfigDict(from_attributes=True) -@router.post("/", response_model=ProductionOutputRead, status_code=status.HTTP_201_CREATED) -def create_production(item: ProductionOutputCreate, db: Session = Depends(get_db)): +@router.post( + "/", + response_model=ProductionOutputRead, + status_code=status.HTTP_201_CREATED, +) +def create_production( + item: ProductionOutputCreate, db: Session = Depends(get_db) +): db_item = ProductionOutput(**item.model_dump()) db.add(db_item) db.commit() diff --git a/routes/scenarios.py b/routes/scenarios.py index 11dab40..4454f74 100644 --- a/routes/scenarios.py +++ b/routes/scenarios.py @@ -24,6 +24,7 @@ class ScenarioRead(ScenarioCreate): updated_at: Optional[datetime] = None model_config = ConfigDict(from_attributes=True) + @router.post("/", response_model=ScenarioRead) def create_scenario(scenario: ScenarioCreate, db: Session = Depends(get_db)): db_s = db.query(Scenario).filter(Scenario.name == scenario.name).first() diff --git a/routes/settings.py b/routes/settings.py index 0cc2397..2308d7b 100644 --- a/routes/settings.py +++ b/routes/settings.py @@ -11,6 +11,8 @@ from services.settings import ( list_css_env_override_rows, read_css_color_env_overrides, update_css_color_settings, + get_theme_settings, + save_theme_settings, ) router = APIRouter(prefix="/api/settings", tags=["Settings"]) @@ -49,8 +51,7 @@ def read_css_settings(db: Session = Depends(get_db)) -> CSSSettingsResponse: values = get_css_color_settings(db) env_overrides = read_css_color_env_overrides() env_sources = [ - EnvOverride(**row) - for row in list_css_env_override_rows() + EnvOverride(**row) for row in list_css_env_override_rows() ] except ValueError as exc: raise HTTPException( @@ -64,14 +65,17 @@ def read_css_settings(db: Session = Depends(get_db)) -> CSSSettingsResponse: ) -@router.put("/css", response_model=CSSSettingsResponse, status_code=status.HTTP_200_OK) -def update_css_settings(payload: CSSSettingsPayload, db: Session = Depends(get_db)) -> CSSSettingsResponse: +@router.put( + "/css", response_model=CSSSettingsResponse, status_code=status.HTTP_200_OK +) +def update_css_settings( + payload: CSSSettingsPayload, db: Session = Depends(get_db) +) -> CSSSettingsResponse: try: values = update_css_color_settings(db, payload.variables) env_overrides = read_css_color_env_overrides() env_sources = [ - EnvOverride(**row) - for row in list_css_env_override_rows() + EnvOverride(**row) for row in list_css_env_override_rows() ] except ValueError as exc: raise HTTPException( @@ -83,3 +87,24 @@ def update_css_settings(payload: CSSSettingsPayload, db: Session = Depends(get_d env_overrides=env_overrides, env_sources=env_sources, ) + + +class ThemeSettings(BaseModel): + theme_name: str + primary_color: str + secondary_color: str + accent_color: str + background_color: str + text_color: str + + +@router.post("/theme") +async def update_theme(theme_data: ThemeSettings, db: Session = Depends(get_db)): + data_dict = theme_data.model_dump() + saved = save_theme_settings(db, data_dict) + return {"message": "Theme updated", "theme": data_dict} + + +@router.get("/theme") +async def get_theme(db: Session = Depends(get_db)): + return 
get_theme_settings(db) diff --git a/routes/simulations.py b/routes/simulations.py index b00c8c1..5500805 100644 --- a/routes/simulations.py +++ b/routes/simulations.py @@ -43,7 +43,9 @@ class SimulationRunResponse(BaseModel): summary: Dict[str, float | int] -def _load_parameters(db: Session, scenario_id: int) -> List[SimulationParameterInput]: +def _load_parameters( + db: Session, scenario_id: int +) -> List[SimulationParameterInput]: db_params = ( db.query(Parameter) .filter(Parameter.scenario_id == scenario_id) @@ -60,17 +62,19 @@ def _load_parameters(db: Session, scenario_id: int) -> List[SimulationParameterI @router.post("/run", response_model=SimulationRunResponse) -async def simulate(payload: SimulationRunRequest, db: Session = Depends(get_db)): - scenario = db.query(Scenario).filter( - Scenario.id == payload.scenario_id).first() +async def simulate( + payload: SimulationRunRequest, db: Session = Depends(get_db) +): + scenario = ( + db.query(Scenario).filter(Scenario.id == payload.scenario_id).first() + ) if scenario is None: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Scenario not found", ) - parameters = payload.parameters or _load_parameters( - db, payload.scenario_id) + parameters = payload.parameters or _load_parameters(db, payload.scenario_id) if not parameters: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, diff --git a/routes/ui.py b/routes/ui.py index 935f7e9..e690dba 100644 --- a/routes/ui.py +++ b/routes/ui.py @@ -53,7 +53,9 @@ router = APIRouter() templates = Jinja2Templates(directory="templates") -def _context(request: Request, extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: +def _context( + request: Request, extra: Optional[Dict[str, Any]] = None +) -> Dict[str, Any]: payload: Dict[str, Any] = { "request": request, "current_year": datetime.now(timezone.utc).year, @@ -98,7 +100,9 @@ def _load_scenarios(db: Session) -> Dict[str, Any]: def _load_parameters(db: Session) -> Dict[str, Any]: grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list) - for param in db.query(Parameter).order_by(Parameter.scenario_id, Parameter.id): + for param in db.query(Parameter).order_by( + Parameter.scenario_id, Parameter.id + ): grouped[param.scenario_id].append( { "id": param.id, @@ -113,27 +117,20 @@ def _load_parameters(db: Session) -> Dict[str, Any]: def _load_costs(db: Session) -> Dict[str, Any]: capex_grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list) - for capex in ( - db.query(Capex) - .order_by(Capex.scenario_id, Capex.id) - .all() - ): + for capex in db.query(Capex).order_by(Capex.scenario_id, Capex.id).all(): capex_grouped[int(getattr(capex, "scenario_id"))].append( { "id": int(getattr(capex, "id")), "scenario_id": int(getattr(capex, "scenario_id")), "amount": float(getattr(capex, "amount", 0.0)), "description": getattr(capex, "description", "") or "", - "currency_code": getattr(capex, "currency_code", "USD") or "USD", + "currency_code": getattr(capex, "currency_code", "USD") + or "USD", } ) opex_grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list) - for opex in ( - db.query(Opex) - .order_by(Opex.scenario_id, Opex.id) - .all() - ): + for opex in db.query(Opex).order_by(Opex.scenario_id, Opex.id).all(): opex_grouped[int(getattr(opex, "scenario_id"))].append( { "id": int(getattr(opex, "id")), @@ -152,9 +149,15 @@ def _load_costs(db: Session) -> Dict[str, Any]: def _load_currencies(db: Session) -> Dict[str, Any]: items: list[Dict[str, Any]] = [] - for c in 
db.query(Currency).filter_by(is_active=True).order_by(Currency.code).all(): + for c in ( + db.query(Currency) + .filter_by(is_active=True) + .order_by(Currency.code) + .all() + ): items.append( - {"id": c.code, "name": f"{c.name} ({c.code})", "symbol": c.symbol}) + {"id": c.code, "name": f"{c.name} ({c.code})", "symbol": c.symbol} + ) if not items: items.append({"id": "USD", "name": "US Dollar (USD)", "symbol": "$"}) return {"currency_options": items} @@ -261,9 +264,7 @@ def _load_production(db: Session) -> Dict[str, Any]: def _load_equipment(db: Session) -> Dict[str, Any]: grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list) for record in ( - db.query(Equipment) - .order_by(Equipment.scenario_id, Equipment.id) - .all() + db.query(Equipment).order_by(Equipment.scenario_id, Equipment.id).all() ): record_id = int(getattr(record, "id")) scenario_id = int(getattr(record, "scenario_id")) @@ -291,8 +292,9 @@ def _load_maintenance(db: Session) -> Dict[str, Any]: scenario_id = int(getattr(record, "scenario_id")) equipment_id = int(getattr(record, "equipment_id")) equipment_obj = getattr(record, "equipment", None) - equipment_name = getattr( - equipment_obj, "name", "") if equipment_obj else "" + equipment_name = ( + getattr(equipment_obj, "name", "") if equipment_obj else "" + ) maintenance_date = getattr(record, "maintenance_date", None) cost_value = float(getattr(record, "cost", 0.0)) description = getattr(record, "description", "") or "" @@ -303,7 +305,9 @@ def _load_maintenance(db: Session) -> Dict[str, Any]: "scenario_id": scenario_id, "equipment_id": equipment_id, "equipment_name": equipment_name, - "maintenance_date": maintenance_date.isoformat() if maintenance_date else "", + "maintenance_date": ( + maintenance_date.isoformat() if maintenance_date else "" + ), "cost": cost_value, "description": description, } @@ -339,8 +343,11 @@ def _load_simulations(db: Session) -> Dict[str, Any]: for item in scenarios: scenario_id = int(item["id"]) scenario_results = results_grouped.get(scenario_id, []) - summary = generate_report( - scenario_results) if scenario_results else generate_report([]) + summary = ( + generate_report(scenario_results) + if scenario_results + else generate_report([]) + ) runs.append( { "scenario_id": scenario_id, @@ -395,11 +402,11 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: simulation_context = _load_simulations(db) simulation_runs = simulation_context["simulation_runs"] - runs_by_scenario = { - run["scenario_id"]: run for run in simulation_runs - } + runs_by_scenario = {run["scenario_id"]: run for run in simulation_runs} - def sum_amounts(grouped: Dict[int, list[Dict[str, Any]]], field: str = "amount") -> float: + def sum_amounts( + grouped: Dict[int, list[Dict[str, Any]]], field: str = "amount" + ) -> float: total = 0.0 for items in grouped.values(): for item in items: @@ -414,14 +421,18 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: total_production = sum_amounts(production_by_scenario) total_maintenance_cost = sum_amounts(maintenance_by_scenario, field="cost") - total_parameters = sum(len(items) - for items in parameters_by_scenario.values()) - total_equipment = sum(len(items) - for items in equipment_by_scenario.values()) - total_maintenance_events = sum(len(items) - for items in maintenance_by_scenario.values()) + total_parameters = sum( + len(items) for items in parameters_by_scenario.values() + ) + total_equipment = sum( + len(items) for items in equipment_by_scenario.values() + ) + total_maintenance_events = sum( + len(items) for 
items in maintenance_by_scenario.values() + ) total_simulation_iterations = sum( - run["iterations"] for run in simulation_runs) + run["iterations"] for run in simulation_runs + ) scenario_rows: list[Dict[str, Any]] = [] scenario_labels: list[str] = [] @@ -501,20 +512,40 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: overall_report = generate_report(all_simulation_results) overall_report_metrics = [ - {"label": "Runs", "value": _format_int( - int(overall_report.get("count", 0)))}, - {"label": "Mean", "value": _format_decimal( - float(overall_report.get("mean", 0.0)))}, - {"label": "Median", "value": _format_decimal( - float(overall_report.get("median", 0.0)))}, - {"label": "Std Dev", "value": _format_decimal( - float(overall_report.get("std_dev", 0.0)))}, - {"label": "95th Percentile", "value": _format_decimal( - float(overall_report.get("percentile_95", 0.0)))}, - {"label": "VaR (95%)", "value": _format_decimal( - float(overall_report.get("value_at_risk_95", 0.0)))}, - {"label": "Expected Shortfall (95%)", "value": _format_decimal( - float(overall_report.get("expected_shortfall_95", 0.0)))}, + { + "label": "Runs", + "value": _format_int(int(overall_report.get("count", 0))), + }, + { + "label": "Mean", + "value": _format_decimal(float(overall_report.get("mean", 0.0))), + }, + { + "label": "Median", + "value": _format_decimal(float(overall_report.get("median", 0.0))), + }, + { + "label": "Std Dev", + "value": _format_decimal(float(overall_report.get("std_dev", 0.0))), + }, + { + "label": "95th Percentile", + "value": _format_decimal( + float(overall_report.get("percentile_95", 0.0)) + ), + }, + { + "label": "VaR (95%)", + "value": _format_decimal( + float(overall_report.get("value_at_risk_95", 0.0)) + ), + }, + { + "label": "Expected Shortfall (95%)", + "value": _format_decimal( + float(overall_report.get("expected_shortfall_95", 0.0)) + ), + }, ] recent_simulations: list[Dict[str, Any]] = [ @@ -522,8 +553,12 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: "scenario_name": run["scenario_name"], "iterations": run["iterations"], "iterations_display": _format_int(run["iterations"]), - "mean_display": _format_decimal(float(run["summary"].get("mean", 0.0))), - "p95_display": _format_decimal(float(run["summary"].get("percentile_95", 0.0))), + "mean_display": _format_decimal( + float(run["summary"].get("mean", 0.0)) + ), + "p95_display": _format_decimal( + float(run["summary"].get("percentile_95", 0.0)) + ), } for run in simulation_runs if run["iterations"] > 0 @@ -541,10 +576,20 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: maintenance_date = getattr(record, "maintenance_date", None) upcoming_maintenance.append( { - "scenario_name": getattr(getattr(record, "scenario", None), "name", "Unknown"), - "equipment_name": getattr(getattr(record, "equipment", None), "name", "Unknown"), - "date_display": maintenance_date.strftime("%Y-%m-%d") if maintenance_date else "—", - "cost_display": _format_currency(float(getattr(record, "cost", 0.0))), + "scenario_name": getattr( + getattr(record, "scenario", None), "name", "Unknown" + ), + "equipment_name": getattr( + getattr(record, "equipment", None), "name", "Unknown" + ), + "date_display": ( + maintenance_date.strftime("%Y-%m-%d") + if maintenance_date + else "—" + ), + "cost_display": _format_currency( + float(getattr(record, "cost", 0.0)) + ), "description": getattr(record, "description", "") or "—", } ) @@ -552,9 +597,9 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: cost_chart_has_data = any(value > 0 for value in 
scenario_capex) or any( value > 0 for value in scenario_opex ) - activity_chart_has_data = any(value > 0 for value in activity_production) or any( - value > 0 for value in activity_consumption - ) + activity_chart_has_data = any( + value > 0 for value in activity_production + ) or any(value > 0 for value in activity_consumption) scenario_cost_chart: Dict[str, list[Any]] = { "labels": scenario_labels, @@ -573,14 +618,20 @@ def _load_dashboard(db: Session) -> Dict[str, Any]: {"label": "CAPEX Total", "value": _format_currency(total_capex)}, {"label": "OPEX Total", "value": _format_currency(total_opex)}, {"label": "Equipment Assets", "value": _format_int(total_equipment)}, - {"label": "Maintenance Events", - "value": _format_int(total_maintenance_events)}, + { + "label": "Maintenance Events", + "value": _format_int(total_maintenance_events), + }, {"label": "Consumption", "value": _format_decimal(total_consumption)}, {"label": "Production", "value": _format_decimal(total_production)}, - {"label": "Simulation Iterations", - "value": _format_int(total_simulation_iterations)}, - {"label": "Maintenance Cost", - "value": _format_currency(total_maintenance_cost)}, + { + "label": "Simulation Iterations", + "value": _format_int(total_simulation_iterations), + }, + { + "label": "Maintenance Cost", + "value": _format_currency(total_maintenance_cost), + }, ] return { @@ -704,3 +755,30 @@ async def currencies_view(request: Request, db: Session = Depends(get_db)): """Render the currency administration page with full currency context.""" context = _load_currency_settings(db) return _render(request, "currencies.html", context) + + +@router.get("/login", response_class=HTMLResponse) +async def login_page(request: Request): + return _render(request, "login.html") + + +@router.get("/register", response_class=HTMLResponse) +async def register_page(request: Request): + return _render(request, "register.html") + + +@router.get("/profile", response_class=HTMLResponse) +async def profile_page(request: Request): + return _render(request, "profile.html") + + +@router.get("/forgot-password", response_class=HTMLResponse) +async def forgot_password_page(request: Request): + return _render(request, "forgot_password.html") + + +@router.get("/theme-settings", response_class=HTMLResponse) +async def theme_settings_page(request: Request, db: Session = Depends(get_db)): + """Render the theme settings page.""" + context = _load_css_settings(db) + return _render(request, "theme_settings.html", context) diff --git a/routes/users.py b/routes/users.py new file mode 100644 index 0000000..dd9ddc6 --- /dev/null +++ b/routes/users.py @@ -0,0 +1,126 @@ +from fastapi import APIRouter, Depends, HTTPException, status +from fastapi.security import OAuth2PasswordBearer +from sqlalchemy.orm import Session + +from config.database import get_db +from models.user import User +from services.security import get_password_hash, verify_password, create_access_token, SECRET_KEY, ALGORITHM +from jose import jwt, JWTError +from schemas.user import UserCreate, UserInDB, UserLogin, UserUpdate, PasswordResetRequest, PasswordReset, Token + +router = APIRouter(prefix="/users", tags=["users"]) + + +oauth2_scheme = OAuth2PasswordBearer(tokenUrl="users/login") + + +async def get_current_user(token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)): + credentials_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Could not validate credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + try: + payload = 
jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
+        username: str = payload.get("sub")
+        if username is None:
+            raise credentials_exception
+    except JWTError:
+        raise credentials_exception
+    user = db.query(User).filter(User.username == username).first()
+    if user is None:
+        raise credentials_exception
+    return user
+
+
+@router.post("/register", response_model=UserInDB, status_code=status.HTTP_201_CREATED)
+async def register_user(user: UserCreate, db: Session = Depends(get_db)):
+    db_user = db.query(User).filter(User.username == user.username).first()
+    if db_user:
+        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
+                            detail="Username already registered")
+    db_user = db.query(User).filter(User.email == user.email).first()
+    if db_user:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST, detail="Email already registered")
+
+    # Get or create default role
+    from models.role import Role
+    default_role = db.query(Role).filter(Role.name == "user").first()
+    if not default_role:
+        default_role = Role(name="user")
+        db.add(default_role)
+        db.commit()
+        db.refresh(default_role)
+
+    new_user = User(username=user.username, email=user.email,
+                    role_id=default_role.id)
+    new_user.set_password(user.password)
+    db.add(new_user)
+    db.commit()
+    db.refresh(new_user)
+    return new_user
+
+
+@router.post("/login")
+async def login_user(user: UserLogin, db: Session = Depends(get_db)):
+    db_user = db.query(User).filter(User.username == user.username).first()
+    if not db_user or not db_user.check_password(user.password):
+        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
+                            detail="Incorrect username or password")
+    access_token = create_access_token(subject=db_user.username)
+    return {"access_token": access_token, "token_type": "bearer"}
+
+
+@router.get("/me")
+async def read_users_me(current_user: User = Depends(get_current_user)):
+    return current_user
+
+
+@router.put("/me", response_model=UserInDB)
+async def update_user_me(user_update: UserUpdate, current_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
+    if user_update.username and user_update.username != current_user.username:
+        existing_user = db.query(User).filter(
+            User.username == user_update.username).first()
+        if existing_user:
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST, detail="Username already taken")
+        current_user.username = user_update.username
+
+    if user_update.email and user_update.email != current_user.email:
+        existing_user = db.query(User).filter(
+            User.email == user_update.email).first()
+        if existing_user:
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST, detail="Email already registered")
+        current_user.email = user_update.email
+
+    if user_update.password:
+        current_user.set_password(user_update.password)
+
+    db.add(current_user)
+    db.commit()
+    db.refresh(current_user)
+    return current_user
+
+
+@router.post("/forgot-password")
+async def forgot_password(request: PasswordResetRequest):
+    # In a real application, this would send an email with a reset token
+    return {"message": "Password reset email sent (not really)"}
+
+
+@router.post("/reset-password")
+async def reset_password(request: PasswordReset, db: Session = Depends(get_db)):
+    # In a real application, the token would be verified
+    user = db.query(User).filter(User.username ==
+                                 request.token).first()  # Use token as username for test
+    if not user:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid token or user") + user.set_password(request.new_password) + db.add(user) + db.commit() + return {"message": "Password has been reset successfully"} diff --git a/schemas/user.py b/schemas/user.py new file mode 100644 index 0000000..fafce5b --- /dev/null +++ b/schemas/user.py @@ -0,0 +1,41 @@ +from pydantic import BaseModel, ConfigDict + + +class UserCreate(BaseModel): + username: str + email: str + password: str + + +class UserInDB(BaseModel): + id: int + username: str + email: str + role_id: int + + model_config = ConfigDict(from_attributes=True) + + +class UserLogin(BaseModel): + username: str + password: str + + +class UserUpdate(BaseModel): + username: str | None = None + email: str | None = None + password: str | None = None + + +class PasswordResetRequest(BaseModel): + email: str + + +class PasswordReset(BaseModel): + token: str + new_password: str + + +class Token(BaseModel): + access_token: str + token_type: str diff --git a/scripts/backfill_currency.py b/scripts/backfill_currency.py index 15c330b..c56d6af 100644 --- a/scripts/backfill_currency.py +++ b/scripts/backfill_currency.py @@ -9,6 +9,7 @@ This script is intentionally cautious: it defaults to dry-run mode and will refu if database connection settings are missing. It supports creating missing currency rows when `--create-missing` is provided. Always run against a development/staging database first. """ + from __future__ import annotations import argparse import importlib @@ -36,26 +37,43 @@ def load_database_url() -> str: return getattr(db_module, "DATABASE_URL") -def backfill(db_url: str, dry_run: bool = True, create_missing: bool = False) -> None: +def backfill( + db_url: str, dry_run: bool = True, create_missing: bool = False +) -> None: engine = create_engine(db_url) with engine.begin() as conn: # Ensure currency table exists - res = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name='currency';")) if db_url.startswith( - 'sqlite:') else conn.execute(text("SELECT to_regclass('public.currency');")) + res = ( + conn.execute( + text( + "SELECT name FROM sqlite_master WHERE type='table' AND name='currency';" + ) + ) + if db_url.startswith("sqlite:") + else conn.execute(text("SELECT to_regclass('public.currency');")) + ) # Note: we don't strictly depend on the above - we assume migration was already applied # Helper: find or create currency by code def find_currency_id(code: str): - r = conn.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).fetchone() + r = conn.execute( + text("SELECT id FROM currency WHERE code = :code"), + {"code": code}, + ).fetchone() if r: return r[0] if create_missing: # insert and return id - conn.execute(text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:c, :n, NULL, TRUE)"), { - "c": code, "n": code}) - r2 = conn.execute(text("SELECT id FROM currency WHERE code = :code"), { - "code": code}).fetchone() + conn.execute( + text( + "INSERT INTO currency (code, name, symbol, is_active) VALUES (:c, :n, NULL, TRUE)" + ), + {"c": code, "n": code}, + ) + r2 = conn.execute( + text("SELECT id FROM currency WHERE code = :code"), + {"code": code}, + ).fetchone() if not r2: raise RuntimeError( f"Unable to determine currency ID for '{code}' after insert" @@ -67,8 +85,15 @@ def backfill(db_url: str, dry_run: bool = True, create_missing: bool = False) -> for table in ("capex", "opex"): # Check if currency_id column exists try: - cols = conn.execute(text(f"SELECT 1 FROM information_schema.columns WHERE table_name = '{table}' AND 
column_name = 'currency_id'")) if not db_url.startswith(
-                'sqlite:') else [(1,)]
+            cols = (
+                conn.execute(
+                    text(
+                        f"SELECT 1 FROM information_schema.columns WHERE table_name = '{table}' AND column_name = 'currency_id'"
+                    )
+                )
+                if not db_url.startswith("sqlite:")
+                else [(1,)]
+            )
         except Exception:
             cols = [(1,)]

@@ -77,8 +102,11 @@ def backfill(db_url: str, dry_run: bool = True, create_missing: bool = False) ->
             continue

         # Find rows where currency_id IS NULL but currency_code exists
-        rows = conn.execute(text(
-            f"SELECT id, currency_code FROM {table} WHERE currency_id IS NULL OR currency_id = ''"))
+        rows = conn.execute(
+            text(
+                f"SELECT id, currency_code FROM {table} WHERE currency_id IS NULL OR currency_id = ''"
+            )
+        )
         changed = 0
         for r in rows:
             rid = r[0]
@@ -86,14 +114,20 @@ def backfill(db_url: str, dry_run: bool = True, create_missing: bool = False) ->
                 cid = find_currency_id(code)
             if cid is None:
                 print(
-                    f"Row {table}:{rid} has unknown currency code '{code}' and create_missing=False; skipping")
+                    f"Row {table}:{rid} has unknown currency code '{code}' and create_missing=False; skipping"
+                )
                 continue
             if dry_run:
                 print(
-                    f"[DRY RUN] Would set {table}.currency_id = {cid} for row id={rid} (code={code})")
+                    f"[DRY RUN] Would set {table}.currency_id = {cid} for row id={rid} (code={code})"
+                )
             else:
-                conn.execute(text(f"UPDATE {table} SET currency_id = :cid WHERE id = :rid"), {
-                    "cid": cid, "rid": rid})
+                conn.execute(
+                    text(
+                        f"UPDATE {table} SET currency_id = :cid WHERE id = :rid"
+                    ),
+                    {"cid": cid, "rid": rid},
+                )
                 changed += 1

         print(f"{table}: processed, changed={changed} (dry_run={dry_run})")
@@ -101,11 +135,19 @@

 def main() -> None:
     parser = argparse.ArgumentParser(
-        description="Backfill currency_id from currency_code for capex/opex tables")
-    parser.add_argument("--dry-run", action="store_true",
-                        default=True, help="Show actions without writing")
-    parser.add_argument("--create-missing", action="store_true",
-                        help="Create missing currency rows in the currency table")
+        description="Backfill currency_id from currency_code for capex/opex tables"
+    )
+    parser.add_argument(
+        "--dry-run",
+        action=argparse.BooleanOptionalAction,
+        default=True,
+        help="Show actions without writing (pass --no-dry-run to apply)",
+    )
+    parser.add_argument(
+        "--create-missing",
+        action="store_true",
+        help="Create missing currency rows in the currency table",
+    )

     args = parser.parse_args()
     db = load_database_url()
diff --git a/scripts/check_docs_links.py b/scripts/check_docs_links.py
index 556575a..aebc1fe 100644
--- a/scripts/check_docs_links.py
+++ b/scripts/check_docs_links.py
@@ -4,25 +4,30 @@ Checks only local file links (relative paths) and reports missing targets.
 Run from the repository root using the project's Python environment.
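+
+Example: python scripts/check_docs_links.py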
""" + import re from pathlib import Path ROOT = Path(__file__).resolve().parent.parent -DOCS = ROOT / 'docs' +DOCS = ROOT / "docs" MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)") errors = [] -for md in DOCS.rglob('*.md'): - text = md.read_text(encoding='utf-8') +for md in DOCS.rglob("*.md"): + text = md.read_text(encoding="utf-8") for m in MD_LINK_RE.finditer(text): label, target = m.groups() # skip URLs - if target.startswith('http://') or target.startswith('https://') or target.startswith('#'): + if ( + target.startswith("http://") + or target.startswith("https://") + or target.startswith("#") + ): continue # strip anchors - target_path = target.split('#')[0] + target_path = target.split("#")[0] # if link is to a directory index, allow candidate = (md.parent / target_path).resolve() if candidate.exists(): @@ -30,14 +35,16 @@ for md in DOCS.rglob('*.md'): # check common implicit index: target/ -> target/README.md or target/index.md candidate_dir = md.parent / target_path if candidate_dir.is_dir(): - if (candidate_dir / 'README.md').exists() or (candidate_dir / 'index.md').exists(): + if (candidate_dir / "README.md").exists() or ( + candidate_dir / "index.md" + ).exists(): continue errors.append((str(md.relative_to(ROOT)), target, label)) if errors: - print('Broken local links found:') + print("Broken local links found:") for src, tgt, label in errors: - print(f'- {src} -> {tgt} ({label})') + print(f"- {src} -> {tgt} ({label})") exit(2) -print('No broken local links detected.') +print("No broken local links detected.") diff --git a/scripts/format_docs_md.py b/scripts/format_docs_md.py index 3505505..5e1e856 100644 --- a/scripts/format_docs_md.py +++ b/scripts/format_docs_md.py @@ -2,16 +2,17 @@ This is intentionally small and non-destructive; it touches only files under docs/ and makes safe changes. 
""" + import re from pathlib import Path DOCS = Path(__file__).resolve().parents[1] / "docs" CODE_LANG_HINTS = { - 'powershell': ('powershell',), - 'bash': ('bash', 'sh'), - 'sql': ('sql',), - 'python': ('python',), + "powershell": ("powershell",), + "bash": ("bash", "sh"), + "sql": ("sql",), + "python": ("python",), } @@ -19,48 +20,60 @@ def add_code_fence_language(match): fence = match.group(0) inner = match.group(1) # If language already present, return unchanged - if fence.startswith('```') and len(fence.splitlines()[0].strip()) > 3: + if fence.startswith("```") and len(fence.splitlines()[0].strip()) > 3: return fence # Try to infer language from the code content - code = inner.strip().splitlines()[0] if inner.strip() else '' - lang = '' - if code.startswith('$') or code.startswith('PS') or code.lower().startswith('powershell'): - lang = 'powershell' - elif code.startswith('#') or code.startswith('import') or code.startswith('from'): - lang = 'python' - elif re.match(r'^(select|insert|update|create)\b', code.strip(), re.I): - lang = 'sql' - elif code.startswith('git') or code.startswith('./') or code.startswith('sudo'): - lang = 'bash' + code = inner.strip().splitlines()[0] if inner.strip() else "" + lang = "" + if ( + code.startswith("$") + or code.startswith("PS") + or code.lower().startswith("powershell") + ): + lang = "powershell" + elif ( + code.startswith("#") + or code.startswith("import") + or code.startswith("from") + ): + lang = "python" + elif re.match(r"^(select|insert|update|create)\b", code.strip(), re.I): + lang = "sql" + elif ( + code.startswith("git") + or code.startswith("./") + or code.startswith("sudo") + ): + lang = "bash" if lang: - return f'```{lang}\n{inner}\n```' + return f"```{lang}\n{inner}\n```" return fence def normalize_file(path: Path): - text = path.read_text(encoding='utf-8') + text = path.read_text(encoding="utf-8") orig = text # Trim trailing whitespace and ensure single trailing newline - text = '\n'.join(line.rstrip() for line in text.splitlines()) + '\n' + text = "\n".join(line.rstrip() for line in text.splitlines()) + "\n" # Ensure first non-empty line is H1 lines = text.splitlines() for i, ln in enumerate(lines): if ln.strip(): - if not ln.startswith('#'): - lines[i] = '# ' + ln + if not ln.startswith("#"): + lines[i] = "# " + ln break - text = '\n'.join(lines) + '\n' + text = "\n".join(lines) + "\n" # Add basic code fence languages where missing (simple heuristic) - text = re.sub(r'```\n([\s\S]*?)\n```', add_code_fence_language, text) + text = re.sub(r"```\n([\s\S]*?)\n```", add_code_fence_language, text) if text != orig: - path.write_text(text, encoding='utf-8') + path.write_text(text, encoding="utf-8") return True return False def main(): changed = [] - for p in DOCS.rglob('*.md'): + for p in DOCS.rglob("*.md"): if p.is_file(): try: if normalize_file(p): @@ -68,12 +81,12 @@ def main(): except Exception as e: print(f"Failed to format {p}: {e}") if changed: - print('Formatted files:') + print("Formatted files:") for c in changed: - print(' -', c) + print(" -", c) else: - print('No formatting changes required.') + print("No formatting changes required.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/scripts/migrations/20251027_create_theme_settings_table.sql b/scripts/migrations/20251027_create_theme_settings_table.sql new file mode 100644 index 0000000..8e2b448 --- /dev/null +++ b/scripts/migrations/20251027_create_theme_settings_table.sql @@ -0,0 +1,11 @@ +-- Migration: 20251027_create_theme_settings_table.sql + 
+CREATE TABLE theme_settings ( + id SERIAL PRIMARY KEY, + theme_name VARCHAR(255) UNIQUE NOT NULL, + primary_color VARCHAR(7) NOT NULL, + secondary_color VARCHAR(7) NOT NULL, + accent_color VARCHAR(7) NOT NULL, + background_color VARCHAR(7) NOT NULL, + text_color VARCHAR(7) NOT NULL +); diff --git a/scripts/migrations/20251027_create_user_and_role_tables.sql b/scripts/migrations/20251027_create_user_and_role_tables.sql new file mode 100644 index 0000000..5ae47b2 --- /dev/null +++ b/scripts/migrations/20251027_create_user_and_role_tables.sql @@ -0,0 +1,15 @@ +-- Migration: 20251027_create_user_and_role_tables.sql + +CREATE TABLE roles ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) UNIQUE NOT NULL +); + +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + username VARCHAR(255) UNIQUE NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + hashed_password VARCHAR(255) NOT NULL, + role_id INTEGER NOT NULL, + FOREIGN KEY (role_id) REFERENCES roles(id) +); diff --git a/scripts/seed_data.py b/scripts/seed_data.py index 5c96278..f7c035f 100644 --- a/scripts/seed_data.py +++ b/scripts/seed_data.py @@ -47,22 +47,82 @@ MEASUREMENT_UNIT_SEEDS = ( ("kilowatt_hours", "Kilowatt Hours", "kWh", "energy", True), ) +THEME_SETTING_SEEDS = ( + ("--color-background", "#f4f5f7", "color", + "theme", "CSS variable --color-background", True), + ("--color-surface", "#ffffff", "color", + "theme", "CSS variable --color-surface", True), + ("--color-text-primary", "#2a1f33", "color", + "theme", "CSS variable --color-text-primary", True), + ("--color-text-secondary", "#624769", "color", + "theme", "CSS variable --color-text-secondary", True), + ("--color-text-muted", "#64748b", "color", + "theme", "CSS variable --color-text-muted", True), + ("--color-text-subtle", "#94a3b8", "color", + "theme", "CSS variable --color-text-subtle", True), + ("--color-text-invert", "#ffffff", "color", + "theme", "CSS variable --color-text-invert", True), + ("--color-text-dark", "#0f172a", "color", + "theme", "CSS variable --color-text-dark", True), + ("--color-text-strong", "#111827", "color", + "theme", "CSS variable --color-text-strong", True), + ("--color-primary", "#5f320d", "color", + "theme", "CSS variable --color-primary", True), + ("--color-primary-strong", "#7e4c13", "color", + "theme", "CSS variable --color-primary-strong", True), + ("--color-primary-stronger", "#837c15", "color", + "theme", "CSS variable --color-primary-stronger", True), + ("--color-accent", "#bff838", "color", + "theme", "CSS variable --color-accent", True), + ("--color-border", "#e2e8f0", "color", + "theme", "CSS variable --color-border", True), + ("--color-border-strong", "#cbd5e1", "color", + "theme", "CSS variable --color-border-strong", True), + ("--color-highlight", "#eef2ff", "color", + "theme", "CSS variable --color-highlight", True), + ("--color-panel-shadow", "rgba(15, 23, 42, 0.08)", "color", + "theme", "CSS variable --color-panel-shadow", True), + ("--color-panel-shadow-deep", "rgba(15, 23, 42, 0.12)", "color", + "theme", "CSS variable --color-panel-shadow-deep", True), + ("--color-surface-alt", "#f8fafc", "color", + "theme", "CSS variable --color-surface-alt", True), + ("--color-success", "#047857", "color", + "theme", "CSS variable --color-success", True), + ("--color-error", "#b91c1c", "color", + "theme", "CSS variable --color-error", True), +) + def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Seed baseline CalMiner data") - parser.add_argument("--currencies", action="store_true", help="Seed currency table") - 
parser.add_argument("--units", action="store_true", help="Seed unit table") - parser.add_argument("--defaults", action="store_true", help="Seed default records") - parser.add_argument("--dry-run", action="store_true", help="Print actions without executing") parser.add_argument( - "--verbose", "-v", action="count", default=0, help="Increase logging verbosity" + "--currencies", action="store_true", help="Seed currency table" + ) + parser.add_argument("--units", action="store_true", help="Seed unit table") + parser.add_argument( + "--theme", action="store_true", help="Seed theme settings" + ) + parser.add_argument( + "--defaults", action="store_true", help="Seed default records" + ) + parser.add_argument( + "--dry-run", action="store_true", help="Print actions without executing" + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + help="Increase logging verbosity", ) return parser.parse_args() def _configure_logging(args: argparse.Namespace) -> None: level = logging.WARNING - (10 * min(args.verbose, 2)) - logging.basicConfig(level=max(level, logging.INFO), format="%(levelname)s %(message)s") + logging.basicConfig( + level=max(level, logging.INFO), format="%(levelname)s %(message)s" + ) def main() -> None: @@ -77,7 +137,7 @@ def run_with_namespace( ) -> None: _configure_logging(args) - if not any((args.currencies, args.units, args.defaults)): + if not any((args.currencies, args.units, args.theme, args.defaults)): logger.info("No seeding options provided; exiting") return @@ -89,6 +149,8 @@ def run_with_namespace( _seed_currencies(cursor, dry_run=args.dry_run) if args.units: _seed_units(cursor, dry_run=args.dry_run) + if args.theme: + _seed_theme(cursor, dry_run=args.dry_run) if args.defaults: _seed_defaults(cursor, dry_run=args.dry_run) @@ -152,11 +214,44 @@ def _seed_units(cursor, *, dry_run: bool) -> None: logger.info("Measurement unit seed complete") -def _seed_defaults(cursor, *, dry_run: bool) -> None: - logger.info("Seeding default records - not yet implemented") +def _seed_theme(cursor, *, dry_run: bool) -> None: + logger.info("Seeding theme settings (%d rows)", len(THEME_SETTING_SEEDS)) if dry_run: + for key, value, _, _, _, _ in THEME_SETTING_SEEDS: + logger.info( + "Dry run: would upsert theme setting %s = %s", key, value) return + try: + execute_values( + cursor, + """ + INSERT INTO application_setting (key, value, value_type, category, description, is_editable) + VALUES %s + ON CONFLICT (key) DO UPDATE + SET value = EXCLUDED.value, + value_type = EXCLUDED.value_type, + category = EXCLUDED.category, + description = EXCLUDED.description, + is_editable = EXCLUDED.is_editable + """, + THEME_SETTING_SEEDS, + ) + except errors.UndefinedTable: + logger.warning( + "application_setting table does not exist; skipping theme seeding." 
+ ) + cursor.connection.rollback() + return + + logger.info("Theme settings seed complete") + + +def _seed_defaults(cursor, *, dry_run: bool) -> None: + logger.info("Seeding default records") + _seed_theme(cursor, dry_run=dry_run) + logger.info("Default records seed complete") + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/scripts/setup_database.py b/scripts/setup_database.py index 1da38ef..3c51eb3 100644 --- a/scripts/setup_database.py +++ b/scripts/setup_database.py @@ -39,6 +39,7 @@ from psycopg2 import extensions from psycopg2.extensions import connection as PGConnection, parse_dsn from dotenv import load_dotenv from sqlalchemy import create_engine, inspect + ROOT_DIR = Path(__file__).resolve().parents[1] if str(ROOT_DIR) not in sys.path: sys.path.insert(0, str(ROOT_DIR)) @@ -125,8 +126,7 @@ class DatabaseConfig: ] if missing: raise RuntimeError( - "Missing required database configuration: " + - ", ".join(missing) + "Missing required database configuration: " + ", ".join(missing) ) host = cast(str, host) @@ -208,12 +208,17 @@ class DatabaseConfig: class DatabaseSetup: """Encapsulates the full setup workflow.""" - def __init__(self, config: DatabaseConfig, *, dry_run: bool = False) -> None: + def __init__( + self, config: DatabaseConfig, *, dry_run: bool = False + ) -> None: self.config = config self.dry_run = dry_run self._models_loaded = False self._rollback_actions: list[tuple[str, Callable[[], None]]] = [] - def _register_rollback(self, label: str, action: Callable[[], None]) -> None: + + def _register_rollback( + self, label: str, action: Callable[[], None] + ) -> None: if self.dry_run: return self._rollback_actions.append((label, action)) @@ -237,7 +242,6 @@ class DatabaseSetup: def clear_rollbacks(self) -> None: self._rollback_actions.clear() - def _describe_connection(self, user: str, database: str) -> str: return f"{user}@{self.config.host}:{self.config.port}/{database}" @@ -384,9 +388,9 @@ class DatabaseSetup: try: if self.config.password: cursor.execute( - sql.SQL("CREATE ROLE {} WITH LOGIN PASSWORD %s").format( - sql.Identifier(self.config.user) - ), + sql.SQL( + "CREATE ROLE {} WITH LOGIN PASSWORD %s" + ).format(sql.Identifier(self.config.user)), (self.config.password,), ) else: @@ -589,8 +593,7 @@ class DatabaseSetup: return psycopg2.connect(dsn) except psycopg2.Error as exc: raise RuntimeError( - "Unable to establish admin connection. " - f"Target: {descriptor}" + "Unable to establish admin connection. 
" f"Target: {descriptor}" ) from exc def _application_connection(self) -> PGConnection: @@ -645,7 +648,9 @@ class DatabaseSetup: importlib.import_module(f"{package.__name__}.{module_info.name}") self._models_loaded = True - def run_migrations(self, migrations_dir: Optional[Path | str] = None) -> None: + def run_migrations( + self, migrations_dir: Optional[Path | str] = None + ) -> None: """Execute pending SQL migrations in chronological order.""" directory = ( @@ -673,7 +678,8 @@ class DatabaseSetup: conn.autocommit = True with conn.cursor() as cursor: table_exists = self._migrations_table_exists( - cursor, schema_name) + cursor, schema_name + ) if not table_exists: if self.dry_run: logger.info( @@ -692,12 +698,10 @@ class DatabaseSetup: applied = set() else: applied = self._fetch_applied_migrations( - cursor, schema_name) + cursor, schema_name + ) - if ( - baseline_path.exists() - and baseline_name not in applied - ): + if baseline_path.exists() and baseline_name not in applied: if self.dry_run: logger.info( "Dry run: baseline migration '%s' pending; would apply and mark legacy files", @@ -756,9 +760,7 @@ class DatabaseSetup: ) pending = [ - path - for path in migration_files - if path.name not in applied + path for path in migration_files if path.name not in applied ] if not pending: @@ -792,9 +794,7 @@ class DatabaseSetup: cursor.execute( sql.SQL( "INSERT INTO {} (filename, applied_at) VALUES (%s, NOW())" - ).format( - sql.Identifier(schema_name, MIGRATIONS_TABLE) - ), + ).format(sql.Identifier(schema_name, MIGRATIONS_TABLE)), (path.name,), ) return path.name @@ -820,9 +820,7 @@ class DatabaseSetup: "filename TEXT PRIMARY KEY," "applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW()" ")" - ).format( - sql.Identifier(schema_name, MIGRATIONS_TABLE) - ) + ).format(sql.Identifier(schema_name, MIGRATIONS_TABLE)) ) def _fetch_applied_migrations(self, cursor, schema_name: str) -> set[str]: @@ -974,7 +972,7 @@ class DatabaseSetup: (database,), ) cursor.execute( - sql.SQL("DROP DATABASE IF EXISTS {}" ).format( + sql.SQL("DROP DATABASE IF EXISTS {}").format( sql.Identifier(database) ) ) @@ -985,7 +983,7 @@ class DatabaseSetup: conn.autocommit = True with conn.cursor() as cursor: cursor.execute( - sql.SQL("DROP ROLE IF EXISTS {}" ).format( + sql.SQL("DROP ROLE IF EXISTS {}").format( sql.Identifier(role) ) ) @@ -1000,27 +998,35 @@ class DatabaseSetup: conn.autocommit = True with conn.cursor() as cursor: cursor.execute( - sql.SQL("REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA {} FROM {}" ).format( + sql.SQL( + "REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA {} FROM {}" + ).format( sql.Identifier(schema_name), - sql.Identifier(self.config.user) + sql.Identifier(self.config.user), ) ) cursor.execute( - sql.SQL("REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {} FROM {}" ).format( + sql.SQL( + "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {} FROM {}" + ).format( sql.Identifier(schema_name), - sql.Identifier(self.config.user) + sql.Identifier(self.config.user), ) ) cursor.execute( - sql.SQL("ALTER DEFAULT PRIVILEGES IN SCHEMA {} REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM {}" ).format( + sql.SQL( + "ALTER DEFAULT PRIVILEGES IN SCHEMA {} REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM {}" + ).format( sql.Identifier(schema_name), - sql.Identifier(self.config.user) + sql.Identifier(self.config.user), ) ) cursor.execute( - sql.SQL("ALTER DEFAULT PRIVILEGES IN SCHEMA {} REVOKE USAGE, SELECT ON SEQUENCES FROM {}" ).format( + sql.SQL( + "ALTER DEFAULT PRIVILEGES IN SCHEMA {} REVOKE USAGE, SELECT ON 
SEQUENCES FROM {}" + ).format( sql.Identifier(schema_name), - sql.Identifier(self.config.user) + sql.Identifier(self.config.user), ) ) @@ -1064,19 +1070,18 @@ def parse_args() -> argparse.Namespace: ) parser.add_argument("--db-driver", help="Override DATABASE_DRIVER") parser.add_argument("--db-host", help="Override DATABASE_HOST") - parser.add_argument("--db-port", type=int, - help="Override DATABASE_PORT") + parser.add_argument("--db-port", type=int, help="Override DATABASE_PORT") parser.add_argument("--db-name", help="Override DATABASE_NAME") parser.add_argument("--db-user", help="Override DATABASE_USER") - parser.add_argument( - "--db-password", help="Override DATABASE_PASSWORD") + parser.add_argument("--db-password", help="Override DATABASE_PASSWORD") parser.add_argument("--db-schema", help="Override DATABASE_SCHEMA") parser.add_argument( "--admin-url", help="Override DATABASE_ADMIN_URL for administrative operations", ) parser.add_argument( - "--admin-user", help="Override DATABASE_SUPERUSER for admin ops") + "--admin-user", help="Override DATABASE_SUPERUSER for admin ops" + ) parser.add_argument( "--admin-password", help="Override DATABASE_SUPERUSER_PASSWORD for admin ops", @@ -1091,7 +1096,11 @@ def parse_args() -> argparse.Namespace: help="Log actions without applying changes.", ) parser.add_argument( - "--verbose", "-v", action="count", default=0, help="Increase logging verbosity" + "--verbose", + "-v", + action="count", + default=0, + help="Increase logging verbosity", ) return parser.parse_args() @@ -1099,8 +1108,9 @@ def parse_args() -> argparse.Namespace: def main() -> None: args = parse_args() level = logging.WARNING - (10 * min(args.verbose, 2)) - logging.basicConfig(level=max(level, logging.INFO), - format="%(levelname)s %(message)s") + logging.basicConfig( + level=max(level, logging.INFO), format="%(levelname)s %(message)s" + ) override_args: dict[str, Optional[str]] = { "DATABASE_DRIVER": args.db_driver, @@ -1120,7 +1130,9 @@ def main() -> None: config = DatabaseConfig.from_env(overrides=override_args) setup = DatabaseSetup(config, dry_run=args.dry_run) - admin_tasks_requested = args.ensure_database or args.ensure_role or args.ensure_schema + admin_tasks_requested = ( + args.ensure_database or args.ensure_role or args.ensure_schema + ) if admin_tasks_requested: setup.validate_admin_connection() @@ -1145,9 +1157,7 @@ def main() -> None: auto_run_migrations_reason: Optional[str] = None if args.seed_data and not should_run_migrations: should_run_migrations = True - auto_run_migrations_reason = ( - "Seed data requested without explicit --run-migrations; applying migrations first." - ) + auto_run_migrations_reason = "Seed data requested without explicit --run-migrations; applying migrations first." 
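+    # Seeding writes into migrated tables, so --seed-data implies running
+    # migrations first (the reason is logged just above).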
try: if args.ensure_database: @@ -1167,9 +1177,7 @@ def main() -> None: if auto_run_migrations_reason: logger.info(auto_run_migrations_reason) migrations_path = ( - Path(args.migrations_dir) - if args.migrations_dir - else None + Path(args.migrations_dir) if args.migrations_dir else None ) setup.run_migrations(migrations_path) if args.seed_data: diff --git a/services/reporting.py b/services/reporting.py index 2950414..98387d6 100644 --- a/services/reporting.py +++ b/services/reporting.py @@ -27,7 +27,9 @@ def _percentile(values: List[float], percentile: float) -> float: return sorted_values[lower] * (1 - weight) + sorted_values[upper] * weight -def generate_report(simulation_results: List[Dict[str, float]]) -> Dict[str, Union[float, int]]: +def generate_report( + simulation_results: List[Dict[str, float]], +) -> Dict[str, Union[float, int]]: """Aggregate basic statistics for simulation outputs.""" values = _extract_results(simulation_results) @@ -63,7 +65,7 @@ def generate_report(simulation_results: List[Dict[str, float]]) -> Dict[str, Uni std_dev = pstdev(values) if len(values) > 1 else 0.0 summary["std_dev"] = std_dev - summary["variance"] = std_dev ** 2 + summary["variance"] = std_dev**2 var_95 = summary["percentile_5"] summary["value_at_risk_95"] = var_95 diff --git a/services/security.py b/services/security.py new file mode 100644 index 0000000..ce376e3 --- /dev/null +++ b/services/security.py @@ -0,0 +1,32 @@ +from datetime import datetime, timedelta +from typing import Any, Union + +from jose import jwt +from passlib.context import CryptContext + + +ACCESS_TOKEN_EXPIRE_MINUTES = 30 +SECRET_KEY = "your-secret-key" # Change this in production +ALGORITHM = "HS256" + +pwd_context = CryptContext(schemes=["pbkdf2_sha256"], deprecated="auto") + + +def create_access_token( + subject: Union[str, Any], expires_delta: Union[timedelta, None] = None +) -> str: + if expires_delta: + expire = datetime.utcnow() + expires_delta + else: + expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) + to_encode = {"exp": expire, "sub": str(subject)} + encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) + return encoded_jwt + + +def verify_password(plain_password: str, hashed_password: str) -> bool: + return pwd_context.verify(plain_password, hashed_password) + + +def get_password_hash(password: str) -> str: + return pwd_context.hash(password) diff --git a/services/settings.py b/services/settings.py index a3ff564..51b49ac 100644 --- a/services/settings.py +++ b/services/settings.py @@ -7,6 +7,7 @@ from typing import Dict, Mapping from sqlalchemy.orm import Session from models.application_setting import ApplicationSetting +from models.theme_setting import ThemeSetting # Import ThemeSetting model CSS_COLOR_CATEGORY = "theme" CSS_COLOR_VALUE_TYPE = "color" @@ -92,7 +93,9 @@ def get_css_color_settings(db: Session) -> Dict[str, str]: return values -def update_css_color_settings(db: Session, updates: Mapping[str, str]) -> Dict[str, str]: +def update_css_color_settings( + db: Session, updates: Mapping[str, str] +) -> Dict[str, str]: """Persist provided CSS color overrides and return the final values.""" if not updates: @@ -176,8 +179,10 @@ def _validate_functional_color(value: str) -> None: def _ensure_component_count(value: str, expected: int) -> None: if not value.endswith(")"): - raise ValueError("Color function expressions must end with a closing parenthesis") - inner = value[value.index("(") + 1 : -1] + raise ValueError( + "Color function expressions must end with a 
closing parenthesis" + ) + inner = value[value.index("(") + 1: -1] parts = [segment.strip() for segment in inner.split(",")] if len(parts) != expected: raise ValueError( @@ -206,3 +211,20 @@ def list_css_env_override_rows( } ) return rows + + +def save_theme_settings(db: Session, theme_data: dict): + theme = db.query(ThemeSetting).first() or ThemeSetting() + for key, value in theme_data.items(): + setattr(theme, key, value) + db.add(theme) + db.commit() + db.refresh(theme) + return theme + + +def get_theme_settings(db: Session): + theme = db.query(ThemeSetting).first() + if theme: + return {c.name: getattr(theme, c.name) for c in theme.__table__.columns} + return {} diff --git a/services/simulation.py b/services/simulation.py index 4a433f2..6c8ffe1 100644 --- a/services/simulation.py +++ b/services/simulation.py @@ -25,12 +25,13 @@ def _ensure_positive_span(span: float, fallback: float) -> float: return span if span and span > 0 else fallback -def _compile_parameters(parameters: Sequence[Dict[str, float]]) -> List[SimulationParameter]: +def _compile_parameters( + parameters: Sequence[Dict[str, float]], +) -> List[SimulationParameter]: compiled: List[SimulationParameter] = [] for index, item in enumerate(parameters): if "value" not in item: - raise ValueError( - f"Parameter at index {index} must include 'value'") + raise ValueError(f"Parameter at index {index} must include 'value'") name = str(item.get("name", f"param_{index}")) base_value = float(item["value"]) distribution = str(item.get("distribution", "normal")).lower() @@ -43,8 +44,11 @@ def _compile_parameters(parameters: Sequence[Dict[str, float]]) -> List[Simulati if distribution == "normal": std_dev = item.get("std_dev") - std_dev_value = float(std_dev) if std_dev is not None else abs( - base_value) * DEFAULT_STD_DEV_RATIO or 1.0 + std_dev_value = ( + float(std_dev) + if std_dev is not None + else abs(base_value) * DEFAULT_STD_DEV_RATIO or 1.0 + ) compiled.append( SimulationParameter( name=name, diff --git a/static/js/theme.js b/static/js/theme.js new file mode 100644 index 0000000..0ff624f --- /dev/null +++ b/static/js/theme.js @@ -0,0 +1,108 @@ +// static/js/theme.js + +document.addEventListener('DOMContentLoaded', () => { + const themeSettingsForm = document.getElementById('theme-settings-form'); + const colorInputs = themeSettingsForm + ? themeSettingsForm.querySelectorAll('input[type="color"]') + : []; + + // Function to apply theme settings to CSS variables + function applyTheme(theme) { + const root = document.documentElement; + if (theme.primary_color) + root.style.setProperty('--color-primary', theme.primary_color); + if (theme.secondary_color) + root.style.setProperty('--color-secondary', theme.secondary_color); + if (theme.accent_color) + root.style.setProperty('--color-accent', theme.accent_color); + if (theme.background_color) + root.style.setProperty('--color-background', theme.background_color); + if (theme.text_color) + root.style.setProperty('--color-text-primary', theme.text_color); + // Add other theme properties as needed + } + + // Save theme to local storage + function saveTheme(theme) { + localStorage.setItem('user-theme', JSON.stringify(theme)); + } + + // Load theme from local storage + function loadTheme() { + const savedTheme = localStorage.getItem('user-theme'); + return savedTheme ? 
JSON.parse(savedTheme) : null; + } + + // Real-time preview for color inputs + colorInputs.forEach((input) => { + input.addEventListener('input', (event) => { + const cssVar = `--color-${event.target.id.replace('-', '_')}`; + document.documentElement.style.setProperty(cssVar, event.target.value); + }); + }); + + if (themeSettingsForm) { + themeSettingsForm.addEventListener('submit', async (event) => { + event.preventDefault(); + + const formData = new FormData(themeSettingsForm); + const themeData = Object.fromEntries(formData.entries()); + + try { + const response = await fetch('/api/theme-settings', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(themeData), + }); + + if (response.ok) { + alert('Theme settings saved successfully!'); + applyTheme(themeData); + saveTheme(themeData); + } else { + const errorData = await response.json(); + alert(`Error saving theme settings: ${errorData.detail}`); + } + } catch (error) { + console.error('Error:', error); + alert('An error occurred while saving theme settings.'); + } + }); + } + + // Load and apply theme on page load + const initialTheme = loadTheme(); + if (initialTheme) { + applyTheme(initialTheme); + // Populate form fields if on the theme settings page + if (themeSettingsForm) { + for (const key in initialTheme) { + const input = themeSettingsForm.querySelector( + `#${key.replace('_', '-')}` + ); + if (input) { + input.value = initialTheme[key]; + } + } + } + } else { + // If no saved theme, load from backend (if available) + async function loadAndApplyThemeFromServer() { + try { + const response = await fetch('/api/theme-settings'); // Assuming a GET endpoint for theme settings + if (response.ok) { + const theme = await response.json(); + applyTheme(theme); + saveTheme(theme); // Save to local storage for future use + } else { + console.error('Failed to load theme settings from server'); + } + } catch (error) { + console.error('Error loading theme settings from server:', error); + } + } + loadAndApplyThemeFromServer(); + } +}); diff --git a/templates/base.html b/templates/base.html index d0f5ac8..53722db 100644 --- a/templates/base.html +++ b/templates/base.html @@ -20,5 +20,6 @@ {% block scripts %}{% endblock %} + diff --git a/templates/forgot_password.html b/templates/forgot_password.html new file mode 100644 index 0000000..4d21fd3 --- /dev/null +++ b/templates/forgot_password.html @@ -0,0 +1,17 @@ +{% extends "base.html" %} + +{% block title %}Forgot Password{% endblock %} + +{% block content %} +
+{# markup lost in extraction; the page shows a "Forgot Password" heading, a reset-request form with a submit button, and the link "Remember your password? Login here" #}
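+{# The backing endpoints are stubs at this point in the series: /users/forgot-password does not send mail ("Password reset email sent (not really)") and /users/reset-password accepts the username as the reset token (see tests/unit/test_auth.py). #}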
+{% endblock %}
diff --git a/templates/login.html b/templates/login.html
new file mode 100644
index 0000000..6c2eb00
--- /dev/null
+++ b/templates/login.html
@@ -0,0 +1,22 @@
+{% extends "base.html" %}
+
+{% block title %}Login{% endblock %}
+
+{% block content %}
+{# markup lost in extraction; the page shows a "Login" heading, username and password fields with a submit button, and the links "Don't have an account? Register here" and "Forgot password?" #}
+{% endblock %} diff --git a/templates/partials/sidebar_nav.html b/templates/partials/sidebar_nav.html index 91e006c..ba313c5 100644 --- a/templates/partials/sidebar_nav.html +++ b/templates/partials/sidebar_nav.html @@ -1,88 +1,49 @@ -{% set nav_groups = [ - { - "label": "Dashboard", - "links": [ - {"href": "/", "label": "Dashboard"}, - ], - }, - { - "label": "Scenarios", - "links": [ - {"href": "/ui/scenarios", "label": "Overview"}, - {"href": "/ui/parameters", "label": "Parameters"}, - {"href": "/ui/costs", "label": "Costs"}, - {"href": "/ui/consumption", "label": "Consumption"}, - {"href": "/ui/production", "label": "Production"}, - { - "href": "/ui/equipment", - "label": "Equipment", - "children": [ - {"href": "/ui/maintenance", "label": "Maintenance"}, - ], - }, - ], - }, - { - "label": "Analysis", - "links": [ - {"href": "/ui/simulations", "label": "Simulations"}, - {"href": "/ui/reporting", "label": "Reporting"}, - ], - }, - { - "label": "Settings", - "links": [ - { - "href": "/ui/settings", - "label": "Settings", - "children": [ - {"href": "/ui/currencies", "label": "Currency Management"}, - ], - }, - ], - }, -] %} +{% set nav_groups = [ { "label": "Dashboard", "links": [ {"href": "/", "label": +"Dashboard"}, ], }, { "label": "Overview", "links": [ {"href": "/ui/parameters", +"label": "Parameters"}, {"href": "/ui/costs", "label": "Costs"}, {"href": +"/ui/consumption", "label": "Consumption"}, {"href": "/ui/production", "label": +"Production"}, { "href": "/ui/equipment", "label": "Equipment", "children": [ +{"href": "/ui/maintenance", "label": "Maintenance"}, ], }, ], }, { "label": +"Simulations", "links": [ {"href": "/ui/simulations", "label": "Simulations"}, +], }, { "label": "Analytics", "links": [ {"href": "/ui/reporting", "label": +"Reporting"}, ], }, { "label": "Settings", "links": [ { "href": "/ui/settings", +"label": "Settings", "children": [ {"href": "/theme-settings", "label": +"Themes"}, {"href": "/ui/currencies", "label": "Currency Management"}, ], }, ], +}, ] %} diff --git a/templates/profile.html b/templates/profile.html new file mode 100644 index 0000000..4e9a861 --- /dev/null +++ b/templates/profile.html @@ -0,0 +1,31 @@ +{% extends "base.html" %} + +{% block title %}Profile{% endblock %} + +{% block content %} +
+{# markup lost in extraction; the page shows a "User Profile" heading with "Username:" and "Email:" rows; the remaining controls did not survive #}
+{% endblock %}
diff --git a/templates/register.html b/templates/register.html
new file mode 100644
index 0000000..04a7b4e
--- /dev/null
+++ b/templates/register.html
@@ -0,0 +1,25 @@
+{% extends "base.html" %}
+
+{% block title %}Register{% endblock %}
+
+{% block content %}
+{# markup lost in extraction; the page shows a "Register" heading, three labelled inputs (username, email, and password, per tests/unit/test_auth.py) with a submit button, and the link "Already have an account? Login here" #}
+{% endblock %}
diff --git a/templates/settings.html b/templates/settings.html
index 0942acb..1fcbc21 100644
--- a/templates/settings.html
+++ b/templates/settings.html
@@ -1,113 +1,26 @@
-{% extends "base.html" %}
-
-{% block title %}Settings · CalMiner{% endblock %}
-
-{% block content %}
-{# markup lost in extraction; the removed body held four cards: #}
-{# "Currency Management": "Manage available currencies, symbols, and default selections from the Currency Management page." with a "Go to Currency Management" link #}
-{# "Visual Theme": "Adjust CalMiner theme colors and preview changes instantly." / "Changes save to the settings table and apply across the UI after submission. Environment overrides (if configured) remain read-only." #}
-{# "Theme Colors": "Update global CSS variables to customize CalMiner's appearance.", a form looping css_variables with css_env_override_meta, plus the feedback("theme-settings-feedback") partial #}
-{# "Environment Overrides": "The following CSS variables are controlled via environment variables and take precedence over database values.", a CSS Variable / Environment Variable / Value table over css_env_override_rows, with fallback "No environment overrides configured." #}
-{% endblock %}
-
-{% block scripts %}
- {{ super() }}
-{# inline theme-settings script; markup lost in extraction #}
+{% extends "base.html" %} {% block title %}Settings · CalMiner{% endblock %} {%
+block content %}
+
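+{# This patch splits the old settings page: /ui/settings keeps only the Currency Management card below, while the theme controls move to /theme-settings (templates/theme_settings.html, added later in this diff). #}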
+{# markup lost in extraction; the remaining body is a single "Currency Management" card: heading, the copy "Manage available currencies, symbols, and default selections from the Currency Management page.", and a "Go to Currency Management" link #}
+{% endblock %}
diff --git a/templates/theme_settings.html b/templates/theme_settings.html
new file mode 100644
index 0000000..72cecf4
--- /dev/null
+++ b/templates/theme_settings.html
@@ -0,0 +1,125 @@
+{% extends "base.html" %} {% block title %}Theme Settings · CalMiner{% endblock
+%} {% block content %}
+
+{# markup lost in extraction; the recoverable structure of the page body follows #}
+{# card: "Theme Colors" with the copy "Update global CSS variables to customize CalMiner's appearance." #}
+{% for key, value in css_variables.items() %} {% set env_meta =
+css_env_override_meta.get(key) %}
+{# one color input per CSS variable; entries with env_meta render read-only #}
+{% endfor %}
+{% from "partials/components.html" import feedback with context %} {{
+feedback("theme-settings-feedback") }}
+{# card: "Environment Overrides" with the copy "The following CSS variables are controlled via environment variables and take precedence over database values." #}
+{% if css_env_override_rows %}
+{# table with columns CSS Variable / Environment Variable / Value #}
+{% for row in css_env_override_rows %} {{ row.css_key }} {{ row.env_var }} {{ row.value }} {% endfor %}
+{% else %}
+No environment overrides configured.
+{% endif %}
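+{# Override resolution, as exercised by the unit and smoke tests: a CALMINER_THEME_* variable (e.g. CALMINER_THEME_COLOR_PRIMARY for --color-primary) takes precedence over the stored row, disables the matching form input, and appears in the table above. #}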
+{% endblock %} {% block scripts %} {{ super() }} + + +{% endblock %} diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index bfb6f1d..6ced399 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -4,6 +4,7 @@ import time from typing import Dict, Generator import pytest + # type: ignore[import] from playwright.sync_api import Browser, Page, Playwright, sync_playwright @@ -70,10 +71,17 @@ def seed_default_currencies(live_server: str) -> None: seeds = [ {"code": "EUR", "name": "Euro", "symbol": "EUR", "is_active": True}, - {"code": "CLP", "name": "Chilean Peso", "symbol": "CLP$", "is_active": True}, + { + "code": "CLP", + "name": "Chilean Peso", + "symbol": "CLP$", + "is_active": True, + }, ] - with httpx.Client(base_url=live_server, timeout=5.0, trust_env=False) as client: + with httpx.Client( + base_url=live_server, timeout=5.0, trust_env=False + ) as client: try: response = client.get("/api/currencies/?include_inactive=true") response.raise_for_status() @@ -128,8 +136,12 @@ def page(browser: Browser, live_server: str) -> Generator[Page, None, None]: def _prepare_database_environment(env: Dict[str, str]) -> Dict[str, str]: """Ensure granular database env vars are available for the app under test.""" - required = ("DATABASE_HOST", "DATABASE_USER", - "DATABASE_NAME", "DATABASE_PASSWORD") + required = ( + "DATABASE_HOST", + "DATABASE_USER", + "DATABASE_NAME", + "DATABASE_PASSWORD", + ) if all(env.get(key) for key in required): return env diff --git a/tests/e2e/test_consumption.py b/tests/e2e/test_consumption.py index 1303e71..685db93 100644 --- a/tests/e2e/test_consumption.py +++ b/tests/e2e/test_consumption.py @@ -7,7 +7,9 @@ def test_consumption_form_loads(page: Page): """Verify the consumption form page loads correctly.""" page.goto("/ui/consumption") expect(page).to_have_title("Consumption · CalMiner") - expect(page.locator("h2:has-text('Add Consumption Record')")).to_be_visible() + expect( + page.locator("h2:has-text('Add Consumption Record')") + ).to_be_visible() def test_create_consumption_item(page: Page): diff --git a/tests/e2e/test_costs.py b/tests/e2e/test_costs.py index 6e52b3b..c49439a 100644 --- a/tests/e2e/test_costs.py +++ b/tests/e2e/test_costs.py @@ -55,7 +55,9 @@ def test_create_capex_and_opex_items(page: Page): ).to_be_visible() # Verify the feedback messages. - expect(page.locator("#capex-feedback") - ).to_have_text("Entry saved successfully.") - expect(page.locator("#opex-feedback") - ).to_have_text("Entry saved successfully.") + expect(page.locator("#capex-feedback")).to_have_text( + "Entry saved successfully." + ) + expect(page.locator("#opex-feedback")).to_have_text( + "Entry saved successfully." + ) diff --git a/tests/e2e/test_currencies.py b/tests/e2e/test_currencies.py index b467ad1..4b7f8d0 100644 --- a/tests/e2e/test_currencies.py +++ b/tests/e2e/test_currencies.py @@ -12,7 +12,8 @@ def _unique_currency_code(existing: set[str]) -> str: if candidate not in existing and candidate != "USD": return candidate raise AssertionError( - "Unable to generate a unique currency code for the test run.") + "Unable to generate a unique currency code for the test run." 
+ ) def _metric_value(page: Page, element_id: str) -> int: @@ -42,8 +43,9 @@ def test_currency_workflow_create_update_toggle(page: Page) -> None: expect(page.locator("h2:has-text('Currency Overview')")).to_be_visible() code_cells = page.locator("#currencies-table-body tr td:nth-child(1)") - existing_codes = {text.strip().upper() - for text in code_cells.all_inner_texts()} + existing_codes = { + text.strip().upper() for text in code_cells.all_inner_texts() + } total_before = _metric_value(page, "currency-metric-total") active_before = _metric_value(page, "currency-metric-active") @@ -109,7 +111,9 @@ def test_currency_workflow_create_update_toggle(page: Page) -> None: toggle_button = row.locator("button[data-action='toggle']") expect(toggle_button).to_have_text("Activate") - with page.expect_response(f"**/api/currencies/{new_code}/activation") as toggle_info: + with page.expect_response( + f"**/api/currencies/{new_code}/activation" + ) as toggle_info: toggle_button.click() toggle_response = toggle_info.value assert toggle_response.status == 200 @@ -126,5 +130,6 @@ def test_currency_workflow_create_update_toggle(page: Page) -> None: _expect_feedback(page, f"Currency {new_code} activated.") expect(row.locator("td").nth(3)).to_contain_text("Active") - expect(row.locator("button[data-action='toggle']") - ).to_have_text("Deactivate") + expect(row.locator("button[data-action='toggle']")).to_have_text( + "Deactivate" + ) diff --git a/tests/e2e/test_equipment.py b/tests/e2e/test_equipment.py index 5e0c4f3..f507a6e 100644 --- a/tests/e2e/test_equipment.py +++ b/tests/e2e/test_equipment.py @@ -38,11 +38,8 @@ def test_create_equipment_item(page: Page): # Verify the new item appears in the table. page.select_option("#equipment-scenario-filter", label=scenario_name) expect( - page.locator("#equipment-table-body tr").filter( - has_text=equipment_name - ) + page.locator("#equipment-table-body tr").filter(has_text=equipment_name) ).to_be_visible() # Verify the feedback message. - expect(page.locator("#equipment-feedback") - ).to_have_text("Equipment saved.") + expect(page.locator("#equipment-feedback")).to_have_text("Equipment saved.") diff --git a/tests/e2e/test_maintenance.py b/tests/e2e/test_maintenance.py index 08dc77c..fb9a403 100644 --- a/tests/e2e/test_maintenance.py +++ b/tests/e2e/test_maintenance.py @@ -53,5 +53,6 @@ def test_create_maintenance_item(page: Page): ).to_be_visible() # Verify the feedback message. - expect(page.locator("#maintenance-feedback") - ).to_have_text("Maintenance entry saved.") + expect(page.locator("#maintenance-feedback")).to_have_text( + "Maintenance entry saved." + ) diff --git a/tests/e2e/test_production.py b/tests/e2e/test_production.py index 09c98bb..72a63ba 100644 --- a/tests/e2e/test_production.py +++ b/tests/e2e/test_production.py @@ -43,5 +43,6 @@ def test_create_production_item(page: Page): ).to_be_visible() # Verify the feedback message. - expect(page.locator("#production-feedback") - ).to_have_text("Production output saved.") + expect(page.locator("#production-feedback")).to_have_text( + "Production output saved." + ) diff --git a/tests/e2e/test_scenarios.py b/tests/e2e/test_scenarios.py index 0f3a419..04f37ea 100644 --- a/tests/e2e/test_scenarios.py +++ b/tests/e2e/test_scenarios.py @@ -39,4 +39,5 @@ def test_create_new_scenario(page: Page): feedback = page.locator("#feedback") expect(feedback).to_be_visible() expect(feedback).to_have_text( - f'Scenario "{scenario_name}" created successfully.') + f'Scenario "{scenario_name}" created successfully.' 
+ ) diff --git a/tests/e2e/test_smoke.py b/tests/e2e/test_smoke.py index 291d007..a9f0b23 100644 --- a/tests/e2e/test_smoke.py +++ b/tests/e2e/test_smoke.py @@ -5,7 +5,11 @@ from playwright.sync_api import Page, expect UI_ROUTES = [ ("/", "Dashboard · CalMiner", "Operations Overview"), ("/ui/dashboard", "Dashboard · CalMiner", "Operations Overview"), - ("/ui/scenarios", "Scenario Management · CalMiner", "Create a New Scenario"), + ( + "/ui/scenarios", + "Scenario Management · CalMiner", + "Create a New Scenario", + ), ("/ui/parameters", "Process Parameters · CalMiner", "Scenario Parameters"), ("/ui/settings", "Settings · CalMiner", "Settings"), ("/ui/costs", "Costs · CalMiner", "Cost Overview"), @@ -20,35 +24,44 @@ UI_ROUTES = [ @pytest.mark.parametrize("url, title, heading", UI_ROUTES) -def test_ui_pages_load_correctly(page: Page, url: str, title: str, heading: str): +def test_ui_pages_load_correctly( + page: Page, url: str, title: str, heading: str +): """Verify that all UI pages load with the correct title and a visible heading.""" page.goto(url) expect(page).to_have_title(title) # The app uses a mix of h1 and h2 for main page headings. heading_locator = page.locator( - f"h1:has-text('{heading}'), h2:has-text('{heading}')") + f"h1:has-text('{heading}'), h2:has-text('{heading}')" + ) expect(heading_locator.first).to_be_visible() def test_settings_theme_form_interaction(page: Page): - page.goto("/ui/settings") - expect(page).to_have_title("Settings · CalMiner") + page.goto("/theme-settings") + expect(page).to_have_title("Theme Settings · CalMiner") env_rows = page.locator("#theme-env-overrides tbody tr") disabled_inputs = page.locator( - "#theme-settings-form input.color-value-input[disabled]") + "#theme-settings-form input.color-value-input[disabled]" + ) env_row_count = env_rows.count() disabled_count = disabled_inputs.count() assert disabled_count == env_row_count color_input = page.locator( - "#theme-settings-form input[name='--color-primary']") + "#theme-settings-form input[name='--color-primary']" + ) expect(color_input).to_be_visible() expect(color_input).to_be_enabled() original_value = color_input.input_value() candidate_values = ("#114455", "#225566") - new_value = candidate_values[0] if original_value != candidate_values[0] else candidate_values[1] + new_value = ( + candidate_values[0] + if original_value != candidate_values[0] + else candidate_values[1] + ) color_input.fill(new_value) page.click("#theme-settings-form button[type='submit']") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 0ecb00e..00d8401 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -27,7 +27,8 @@ engine = create_engine( poolclass=StaticPool, ) TestingSessionLocal = sessionmaker( - autocommit=False, autoflush=False, bind=engine) + autocommit=False, autoflush=False, bind=engine +) @pytest.fixture(scope="session", autouse=True) @@ -37,19 +38,24 @@ def setup_database() -> Generator[None, None, None]: application_setting, capex, consumption, + currency, distribution, equipment, maintenance, opex, parameters, production_output, + role, scenario, simulation_result, + theme_setting, + user, ) # noqa: F401 - imported for side effects _ = ( capex, consumption, + currency, distribution, equipment, maintenance, @@ -57,8 +63,11 @@ def setup_database() -> Generator[None, None, None]: opex, parameters, production_output, + role, scenario, simulation_result, + theme_setting, + user, ) Base.metadata.create_all(bind=engine) @@ -86,22 +95,23 @@ def api_client(db_session: Session) -> 
Generator[TestClient, None, None]: finally: pass - from routes import dependencies as route_dependencies + from routes.dependencies import get_db - app.dependency_overrides[route_dependencies.get_db] = override_get_db + app.dependency_overrides[get_db] = override_get_db with TestClient(app) as client: yield client - app.dependency_overrides.pop(route_dependencies.get_db, None) + app.dependency_overrides.pop(get_db, None) @pytest.fixture() -def seeded_ui_data(db_session: Session) -> Generator[Dict[str, Any], None, None]: +def seeded_ui_data( + db_session: Session, +) -> Generator[Dict[str, Any], None, None]: """Populate a scenario with representative related records for UI tests.""" scenario_name = f"Scenario Alpha {uuid4()}" - scenario = Scenario(name=scenario_name, - description="Seeded UI scenario") + scenario = Scenario(name=scenario_name, description="Seeded UI scenario") db_session.add(scenario) db_session.flush() @@ -161,7 +171,9 @@ def seeded_ui_data(db_session: Session) -> Generator[Dict[str, Any], None, None] iteration=index, result=value, ) - for index, value in enumerate((950_000.0, 975_000.0, 990_000.0), start=1) + for index, value in enumerate( + (950_000.0, 975_000.0, 990_000.0), start=1 + ) ] db_session.add(maintenance) @@ -196,11 +208,15 @@ def seeded_ui_data(db_session: Session) -> Generator[Dict[str, Any], None, None] @pytest.fixture() -def invalid_request_payloads(db_session: Session) -> Generator[Dict[str, Any], None, None]: +def invalid_request_payloads( + db_session: Session, +) -> Generator[Dict[str, Any], None, None]: """Provide reusable invalid request bodies for exercising validation branches.""" duplicate_name = f"Scenario Duplicate {uuid4()}" - existing = Scenario(name=duplicate_name, - description="Existing scenario for duplicate checks") + existing = Scenario( + name=duplicate_name, + description="Existing scenario for duplicate checks", + ) db_session.add(existing) db_session.commit() diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py new file mode 100644 index 0000000..f4c0251 --- /dev/null +++ b/tests/unit/test_auth.py @@ -0,0 +1,231 @@ +from services.security import get_password_hash, verify_password + + +def test_password_hashing(): + password = "testpassword" + hashed_password = get_password_hash(password) + assert verify_password(password, hashed_password) + assert not verify_password("wrongpassword", hashed_password) + + +def test_register_user(api_client): + response = api_client.post( + "/users/register", + json={ + "username": "testuser", + "email": "test@example.com", + "password": "testpassword", + }, + ) + assert response.status_code == 201 + data = response.json() + assert data["username"] == "testuser" + assert data["email"] == "test@example.com" + assert "id" in data + assert "role_id" in data + + response = api_client.post( + "/users/register", + json={ + "username": "testuser", + "email": "another@example.com", + "password": "testpassword", + }, + ) + assert response.status_code == 400 + assert response.json() == {"detail": "Username already registered"} + + response = api_client.post( + "/users/register", + json={ + "username": "anotheruser", + "email": "test@example.com", + "password": "testpassword", + }, + ) + assert response.status_code == 400 + assert response.json() == {"detail": "Email already registered"} + + +def test_login_user(api_client): + # Register a user first + api_client.post( + "/users/register", + json={ + "username": "loginuser", + "email": "login@example.com", + "password": "loginpassword", + }, + ) + + 
response = api_client.post( + "/users/login", + json={"username": "loginuser", "password": "loginpassword"}, + ) + assert response.status_code == 200 + data = response.json() + assert "access_token" in data + assert data["token_type"] == "bearer" + + response = api_client.post( + "/users/login", + json={"username": "loginuser", "password": "wrongpassword"}, + ) + assert response.status_code == 401 + assert response.json() == {"detail": "Incorrect username or password"} + + response = api_client.post( + "/users/login", + json={"username": "nonexistent", "password": "password"}, + ) + assert response.status_code == 401 + assert response.json() == {"detail": "Incorrect username or password"} + + +def test_read_users_me(api_client): + # Register a user first + api_client.post( + "/users/register", + json={ + "username": "profileuser", + "email": "profile@example.com", + "password": "profilepassword", + }, + ) + # Login to get a token + login_response = api_client.post( + "/users/login", + json={"username": "profileuser", "password": "profilepassword"}, + ) + token = login_response.json()["access_token"] + + response = api_client.get( + "/users/me", headers={"Authorization": f"Bearer {token}"} + ) + assert response.status_code == 200 + data = response.json() + assert data["username"] == "profileuser" + assert data["email"] == "profile@example.com" + + +def test_update_users_me(api_client): + # Register a user first + api_client.post( + "/users/register", + json={ + "username": "updateuser", + "email": "update@example.com", + "password": "updatepassword", + }, + ) + # Login to get a token + login_response = api_client.post( + "/users/login", + json={"username": "updateuser", "password": "updatepassword"}, + ) + token = login_response.json()["access_token"] + + response = api_client.put( + "/users/me", + headers={"Authorization": f"Bearer {token}"}, + json={ + "username": "updateduser", + "email": "updated@example.com", + "password": "newpassword", + }, + ) + assert response.status_code == 200 + data = response.json() + assert data["username"] == "updateduser" + assert data["email"] == "updated@example.com" + + # Verify password change + response = api_client.post( + "/users/login", + json={"username": "updateduser", "password": "newpassword"}, + ) + assert response.status_code == 200 + token = response.json()["access_token"] + + # Test username already taken + api_client.post( + "/users/register", + json={ + "username": "anotherupdateuser", + "email": "anotherupdate@example.com", + "password": "password", + }, + ) + response = api_client.put( + "/users/me", + headers={"Authorization": f"Bearer {token}"}, + json={ + "username": "anotherupdateuser", + }, + ) + assert response.status_code == 400 + assert response.json() == {"detail": "Username already taken"} + + # Test email already registered + api_client.post( + "/users/register", + json={ + "username": "yetanotheruser", + "email": "yetanother@example.com", + "password": "password", + }, + ) + response = api_client.put( + "/users/me", + headers={"Authorization": f"Bearer {token}"}, + json={ + "email": "yetanother@example.com", + }, + ) + assert response.status_code == 400 + assert response.json() == {"detail": "Email already registered"} + + +def test_forgot_password(api_client): + response = api_client.post( + "/users/forgot-password", json={"email": "nonexistent@example.com"} + ) + assert response.status_code == 200 + assert response.json() == { + "message": "Password reset email sent (not really)"} + + +def test_reset_password(api_client): + 
# Register a user first + api_client.post( + "/users/register", + json={ + "username": "resetuser", + "email": "reset@example.com", + "password": "oldpassword", + }, + ) + + response = api_client.post( + "/users/reset-password", + json={ + "token": "resetuser", # Use username as token for test + "new_password": "newpassword", + }, + ) + assert response.status_code == 200 + assert response.json() == { + "message": "Password has been reset successfully"} + + # Verify password change + response = api_client.post( + "/users/login", + json={"username": "resetuser", "password": "newpassword"}, + ) + assert response.status_code == 200 + + response = api_client.post( + "/users/login", + json={"username": "resetuser", "password": "oldpassword"}, + ) + assert response.status_code == 401 diff --git a/tests/unit/test_consumption.py b/tests/unit/test_consumption.py index 0c9f57a..9ea7bb3 100644 --- a/tests/unit/test_consumption.py +++ b/tests/unit/test_consumption.py @@ -57,8 +57,11 @@ def test_list_consumption_returns_created_items(client: TestClient) -> None: list_response = client.get("/api/consumption/") assert list_response.status_code == 200 - items = [item for item in list_response.json( - ) if item["scenario_id"] == scenario_id] + items = [ + item + for item in list_response.json() + if item["scenario_id"] == scenario_id + ] assert {item["amount"] for item in items} == set(values) diff --git a/tests/unit/test_costs.py b/tests/unit/test_costs.py index 45bec19..ae4059c 100644 --- a/tests/unit/test_costs.py +++ b/tests/unit/test_costs.py @@ -47,8 +47,9 @@ def test_create_and_list_capex_and_opex(): resp3 = client.get("/api/costs/capex") assert resp3.status_code == 200 data = resp3.json() - assert any(item["amount"] == 1000.0 and item["scenario_id"] - == sid for item in data) + assert any( + item["amount"] == 1000.0 and item["scenario_id"] == sid for item in data + ) opex_payload = { "scenario_id": sid, @@ -66,8 +67,10 @@ def test_create_and_list_capex_and_opex(): resp5 = client.get("/api/costs/opex") assert resp5.status_code == 200 data_o = resp5.json() - assert any(item["amount"] == 500.0 and item["scenario_id"] - == sid for item in data_o) + assert any( + item["amount"] == 500.0 and item["scenario_id"] == sid + for item in data_o + ) def test_multiple_capex_entries(): @@ -88,8 +91,9 @@ def test_multiple_capex_entries(): resp = client.get("/api/costs/capex") assert resp.status_code == 200 data = resp.json() - retrieved_amounts = [item["amount"] - for item in data if item["scenario_id"] == sid] + retrieved_amounts = [ + item["amount"] for item in data if item["scenario_id"] == sid + ] for amount in amounts: assert amount in retrieved_amounts @@ -112,7 +116,8 @@ def test_multiple_opex_entries(): resp = client.get("/api/costs/opex") assert resp.status_code == 200 data = resp.json() - retrieved_amounts = [item["amount"] - for item in data if item["scenario_id"] == sid] + retrieved_amounts = [ + item["amount"] for item in data if item["scenario_id"] == sid + ] for amount in amounts: assert amount in retrieved_amounts diff --git a/tests/unit/test_currencies.py b/tests/unit/test_currencies.py index 5aa674c..044571e 100644 --- a/tests/unit/test_currencies.py +++ b/tests/unit/test_currencies.py @@ -14,7 +14,13 @@ def _cleanup_currencies(db_session): db_session.commit() -def _assert_currency(payload: Dict[str, object], code: str, name: str, symbol: str | None, is_active: bool) -> None: +def _assert_currency( + payload: Dict[str, object], + code: str, + name: str, + symbol: str | None, + is_active: bool, +) 
-> None: assert payload["code"] == code assert payload["name"] == name assert payload["is_active"] is is_active @@ -47,13 +53,21 @@ def test_create_currency_success(api_client, db_session): def test_create_currency_conflict(api_client, db_session): api_client.post( "/api/currencies/", - json={"code": "CAD", "name": "Canadian Dollar", - "symbol": "$", "is_active": True}, + json={ + "code": "CAD", + "name": "Canadian Dollar", + "symbol": "$", + "is_active": True, + }, ) duplicate = api_client.post( "/api/currencies/", - json={"code": "CAD", "name": "Canadian Dollar", - "symbol": "$", "is_active": True}, + json={ + "code": "CAD", + "name": "Canadian Dollar", + "symbol": "$", + "is_active": True, + }, ) assert duplicate.status_code == 409 @@ -61,8 +75,12 @@ def test_create_currency_conflict(api_client, db_session): def test_update_currency_fields(api_client, db_session): api_client.post( "/api/currencies/", - json={"code": "GBP", "name": "British Pound", - "symbol": "£", "is_active": True}, + json={ + "code": "GBP", + "name": "British Pound", + "symbol": "£", + "is_active": True, + }, ) response = api_client.put( @@ -77,8 +95,12 @@ def test_update_currency_fields(api_client, db_session): def test_toggle_currency_activation(api_client, db_session): api_client.post( "/api/currencies/", - json={"code": "AUD", "name": "Australian Dollar", - "symbol": "A$", "is_active": True}, + json={ + "code": "AUD", + "name": "Australian Dollar", + "symbol": "A$", + "is_active": True, + }, ) response = api_client.patch( @@ -97,5 +119,7 @@ def test_default_currency_cannot_be_deactivated(api_client, db_session): json={"is_active": False}, ) assert response.status_code == 400 - assert response.json()[ - "detail"] == "The default currency cannot be deactivated." + assert ( + response.json()["detail"] + == "The default currency cannot be deactivated." 
+ ) diff --git a/tests/unit/test_currency_workflow.py b/tests/unit/test_currency_workflow.py index 79dba58..f43809a 100644 --- a/tests/unit/test_currency_workflow.py +++ b/tests/unit/test_currency_workflow.py @@ -41,9 +41,10 @@ def test_create_capex_with_currency_code_and_list(api_client, seeded_currency): resp = api_client.post("/api/costs/capex", json=payload) assert resp.status_code == 200 data = resp.json() - assert data.get("currency_code") == seeded_currency.code or data.get( - "currency", {} - ).get("code") == seeded_currency.code + assert ( + data.get("currency_code") == seeded_currency.code + or data.get("currency", {}).get("code") == seeded_currency.code + ) def test_create_opex_with_currency_id(api_client, seeded_currency): diff --git a/tests/unit/test_maintenance.py b/tests/unit/test_maintenance.py index afe85ad..64e646c 100644 --- a/tests/unit/test_maintenance.py +++ b/tests/unit/test_maintenance.py @@ -30,7 +30,9 @@ def _create_scenario_and_equipment(client: TestClient): return scenario_id, equipment_id -def _create_maintenance_payload(equipment_id: int, scenario_id: int, description: str): +def _create_maintenance_payload( + equipment_id: int, scenario_id: int, description: str +): return { "equipment_id": equipment_id, "scenario_id": scenario_id, @@ -43,7 +45,8 @@ def _create_maintenance_payload(equipment_id: int, scenario_id: int, description def test_create_and_list_maintenance(client: TestClient): scenario_id, equipment_id = _create_scenario_and_equipment(client) payload = _create_maintenance_payload( - equipment_id, scenario_id, "Create maintenance") + equipment_id, scenario_id, "Create maintenance" + ) response = client.post("/api/maintenance/", json=payload) assert response.status_code == 201 @@ -95,7 +98,8 @@ def test_update_maintenance(client: TestClient): } response = client.put( - f"/api/maintenance/{maintenance_id}", json=update_payload) + f"/api/maintenance/{maintenance_id}", json=update_payload + ) assert response.status_code == 200 updated = response.json() assert updated["maintenance_date"] == "2025-11-01" @@ -108,7 +112,8 @@ def test_delete_maintenance(client: TestClient): create_response = client.post( "/api/maintenance/", json=_create_maintenance_payload( - equipment_id, scenario_id, "Delete maintenance"), + equipment_id, scenario_id, "Delete maintenance" + ), ) assert create_response.status_code == 201 maintenance_id = create_response.json()["id"] diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 86081a7..e1895e7 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -67,7 +67,10 @@ def test_create_and_list_parameter(): def test_create_parameter_for_missing_scenario(): payload: Dict[str, Any] = { - "scenario_id": 0, "name": "invalid", "value": 1.0} + "scenario_id": 0, + "name": "invalid", + "value": 1.0, + } response = client.post("/api/parameters/", json=payload) assert response.status_code == 404 assert response.json()["detail"] == "Scenario not found" diff --git a/tests/unit/test_production.py b/tests/unit/test_production.py index cd7c851..106721d 100644 --- a/tests/unit/test_production.py +++ b/tests/unit/test_production.py @@ -42,7 +42,11 @@ def test_list_production_filters_by_scenario(client: TestClient) -> None: target_scenario = _create_scenario(client) other_scenario = _create_scenario(client) - for scenario_id, amount in [(target_scenario, 100.0), (target_scenario, 150.0), (other_scenario, 200.0)]: + for scenario_id, amount in [ + (target_scenario, 100.0), + (target_scenario, 150.0), + 
(other_scenario, 200.0), + ]: response = client.post( "/api/production/", json={ @@ -57,8 +61,11 @@ def test_list_production_filters_by_scenario(client: TestClient) -> None: list_response = client.get("/api/production/") assert list_response.status_code == 200 - items = [item for item in list_response.json() - if item["scenario_id"] == target_scenario] + items = [ + item + for item in list_response.json() + if item["scenario_id"] == target_scenario + ] assert {item["amount"] for item in items} == {100.0, 150.0} diff --git a/tests/unit/test_reporting.py b/tests/unit/test_reporting.py index ace8c37..45adf38 100644 --- a/tests/unit/test_reporting.py +++ b/tests/unit/test_reporting.py @@ -50,9 +50,11 @@ def test_generate_report_with_values(): def test_generate_report_single_value(): - report = generate_report([ - {"iteration": 1, "result": 42.0}, - ]) + report = generate_report( + [ + {"iteration": 1, "result": 42.0}, + ] + ) assert report["count"] == 1 assert report["std_dev"] == 0.0 assert report["variance"] == 0.0 @@ -105,8 +107,10 @@ def test_reporting_endpoint_success(client: TestClient): validation_error_cases: List[tuple[List[Any], str]] = [ (["not-a-dict"], "Entry at index 0 must be an object"), ([{"iteration": 1}], "Entry at index 0 must include numeric 'result'"), - ([{"iteration": 1, "result": "bad"}], - "Entry at index 0 must include numeric 'result'"), + ( + [{"iteration": 1, "result": "bad"}], + "Entry at index 0 must include numeric 'result'", + ), ] diff --git a/tests/unit/test_router_validation.py b/tests/unit/test_router_validation.py index bd98f84..4c81b73 100644 --- a/tests/unit/test_router_validation.py +++ b/tests/unit/test_router_validation.py @@ -27,7 +27,7 @@ def test_parameter_create_missing_scenario_returns_404( @pytest.mark.usefixtures("invalid_request_payloads") def test_parameter_create_invalid_distribution_is_422( - api_client: TestClient + api_client: TestClient, ) -> None: response = api_client.post( "/api/parameters/", @@ -90,6 +90,5 @@ def test_maintenance_negative_cost_rejected_by_schema( payload = invalid_request_payloads["maintenance_negative_cost"] response = api_client.post("/api/maintenance/", json=payload) assert response.status_code == 422 - error_locations = [tuple(item["loc"]) - for item in response.json()["detail"]] + error_locations = [tuple(item["loc"]) for item in response.json()["detail"]] assert ("body", "cost") in error_locations diff --git a/tests/unit/test_settings_routes.py b/tests/unit/test_settings_routes.py index 1aa691c..81a1aa9 100644 --- a/tests/unit/test_settings_routes.py +++ b/tests/unit/test_settings_routes.py @@ -42,7 +42,7 @@ def test_update_css_settings_persists_changes( @pytest.mark.usefixtures("db_session") def test_update_css_settings_invalid_value_returns_422( - api_client: TestClient + api_client: TestClient, ) -> None: response = api_client.put( "/api/settings/css", diff --git a/tests/unit/test_settings_service.py b/tests/unit/test_settings_service.py index a244c7c..8066c06 100644 --- a/tests/unit/test_settings_service.py +++ b/tests/unit/test_settings_service.py @@ -20,8 +20,14 @@ def fixture_clean_env(monkeypatch: pytest.MonkeyPatch) -> Dict[str, str]: def test_css_key_to_env_var_formatting(): - assert settings_service.css_key_to_env_var("--color-background") == "CALMINER_THEME_COLOR_BACKGROUND" - assert settings_service.css_key_to_env_var("--color-primary-stronger") == "CALMINER_THEME_COLOR_PRIMARY_STRONGER" + assert ( + settings_service.css_key_to_env_var("--color-background") + == "CALMINER_THEME_COLOR_BACKGROUND" + 
) + assert ( + settings_service.css_key_to_env_var("--color-primary-stronger") + == "CALMINER_THEME_COLOR_PRIMARY_STRONGER" + ) @pytest.mark.parametrize( @@ -33,7 +39,9 @@ def test_css_key_to_env_var_formatting(): ("--color-text-secondary", "hsla(210, 40%, 40%, 1)"), ], ) -def test_read_css_color_env_overrides_valid_values(clean_env, env_key, env_value): +def test_read_css_color_env_overrides_valid_values( + clean_env, env_key, env_value +): env_var = settings_service.css_key_to_env_var(env_key) clean_env[env_var] = env_value @@ -50,7 +58,9 @@ def test_read_css_color_env_overrides_valid_values(clean_env, env_key, env_value "rgb(1,2)", # malformed rgb ], ) -def test_read_css_color_env_overrides_invalid_values_raise(clean_env, invalid_value): +def test_read_css_color_env_overrides_invalid_values_raise( + clean_env, invalid_value +): env_var = settings_service.css_key_to_env_var("--color-background") clean_env[env_var] = invalid_value @@ -64,7 +74,9 @@ def test_read_css_color_env_overrides_ignores_missing(clean_env): def test_list_css_env_override_rows_returns_structured_data(clean_env): - clean_env[settings_service.css_key_to_env_var("--color-primary")] = "#123456" + clean_env[settings_service.css_key_to_env_var("--color-primary")] = ( + "#123456" + ) rows = settings_service.list_css_env_override_rows(clean_env) assert rows == [ { diff --git a/tests/unit/test_setup_database.py b/tests/unit/test_setup_database.py index c67e1ab..4432d16 100644 --- a/tests/unit/test_setup_database.py +++ b/tests/unit/test_setup_database.py @@ -31,10 +31,13 @@ def setup_instance(mock_config: DatabaseConfig) -> DatabaseSetup: return DatabaseSetup(mock_config, dry_run=True) -def test_seed_baseline_data_dry_run_skips_verification(setup_instance: DatabaseSetup) -> None: - with mock.patch("scripts.seed_data.run_with_namespace") as seed_run, mock.patch.object( - setup_instance, "_verify_seeded_data" - ) as verify_mock: +def test_seed_baseline_data_dry_run_skips_verification( + setup_instance: DatabaseSetup, +) -> None: + with ( + mock.patch("scripts.seed_data.run_with_namespace") as seed_run, + mock.patch.object(setup_instance, "_verify_seeded_data") as verify_mock, + ): setup_instance.seed_baseline_data(dry_run=True) seed_run.assert_called_once() @@ -47,13 +50,16 @@ def test_seed_baseline_data_dry_run_skips_verification(setup_instance: DatabaseS verify_mock.assert_not_called() -def test_seed_baseline_data_invokes_verification(setup_instance: DatabaseSetup) -> None: +def test_seed_baseline_data_invokes_verification( + setup_instance: DatabaseSetup, +) -> None: expected_currencies = {code for code, *_ in seed_data.CURRENCY_SEEDS} expected_units = {code for code, *_ in seed_data.MEASUREMENT_UNIT_SEEDS} - with mock.patch("scripts.seed_data.run_with_namespace") as seed_run, mock.patch.object( - setup_instance, "_verify_seeded_data" - ) as verify_mock: + with ( + mock.patch("scripts.seed_data.run_with_namespace") as seed_run, + mock.patch.object(setup_instance, "_verify_seeded_data") as verify_mock, + ): setup_instance.seed_baseline_data(dry_run=False) seed_run.assert_called_once() @@ -67,7 +73,9 @@ def test_seed_baseline_data_invokes_verification(setup_instance: DatabaseSetup) ) -def test_run_migrations_applies_baseline_when_missing(mock_config: DatabaseConfig, tmp_path) -> None: +def test_run_migrations_applies_baseline_when_missing( + mock_config: DatabaseConfig, tmp_path +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) baseline = tmp_path / "000_base.sql" @@ -88,15 +96,24 @@ def 
test_run_migrations_applies_baseline_when_missing(mock_config: DatabaseConfi cursor_context.__enter__.return_value = cursor_mock connection_mock.cursor.return_value = cursor_context - with mock.patch.object( - setup_instance, "_application_connection", return_value=connection_mock - ), mock.patch.object( - setup_instance, "_migrations_table_exists", return_value=True - ), mock.patch.object( - setup_instance, "_fetch_applied_migrations", return_value=set() - ), mock.patch.object( - setup_instance, "_apply_migration_file", side_effect=capture_migration - ) as apply_mock: + with ( + mock.patch.object( + setup_instance, + "_application_connection", + return_value=connection_mock, + ), + mock.patch.object( + setup_instance, "_migrations_table_exists", return_value=True + ), + mock.patch.object( + setup_instance, "_fetch_applied_migrations", return_value=set() + ), + mock.patch.object( + setup_instance, + "_apply_migration_file", + side_effect=capture_migration, + ) as apply_mock, + ): setup_instance.run_migrations(tmp_path) assert apply_mock.call_count == 1 @@ -121,17 +138,24 @@ def test_run_migrations_noop_when_all_files_already_applied( connection_mock, cursor_mock = _connection_with_cursor() - with mock.patch.object( - setup_instance, "_application_connection", return_value=connection_mock - ), mock.patch.object( - setup_instance, "_migrations_table_exists", return_value=True - ), mock.patch.object( - setup_instance, - "_fetch_applied_migrations", - return_value={"000_base.sql", "20251022_add_other.sql"}, - ), mock.patch.object( - setup_instance, "_apply_migration_file" - ) as apply_mock: + with ( + mock.patch.object( + setup_instance, + "_application_connection", + return_value=connection_mock, + ), + mock.patch.object( + setup_instance, "_migrations_table_exists", return_value=True + ), + mock.patch.object( + setup_instance, + "_fetch_applied_migrations", + return_value={"000_base.sql", "20251022_add_other.sql"}, + ), + mock.patch.object( + setup_instance, "_apply_migration_file" + ) as apply_mock, + ): setup_instance.run_migrations(tmp_path) apply_mock.assert_not_called() @@ -148,12 +172,16 @@ def _connection_with_cursor() -> tuple[mock.MagicMock, mock.MagicMock]: return connection_mock, cursor_mock -def test_verify_seeded_data_raises_when_currency_missing(mock_config: DatabaseConfig) -> None: +def test_verify_seeded_data_raises_when_currency_missing( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock, cursor_mock = _connection_with_cursor() cursor_mock.fetchall.return_value = [("USD", True)] - with mock.patch.object(setup_instance, "_application_connection", return_value=connection_mock): + with mock.patch.object( + setup_instance, "_application_connection", return_value=connection_mock + ): with pytest.raises(RuntimeError) as exc: setup_instance._verify_seeded_data( expected_currency_codes={"USD", "EUR"}, @@ -163,12 +191,16 @@ def test_verify_seeded_data_raises_when_currency_missing(mock_config: DatabaseCo assert "EUR" in str(exc.value) -def test_verify_seeded_data_raises_when_default_currency_inactive(mock_config: DatabaseConfig) -> None: +def test_verify_seeded_data_raises_when_default_currency_inactive( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock, cursor_mock = _connection_with_cursor() cursor_mock.fetchall.return_value = [("USD", False)] - with mock.patch.object(setup_instance, "_application_connection", return_value=connection_mock): + with 
mock.patch.object( + setup_instance, "_application_connection", return_value=connection_mock + ): with pytest.raises(RuntimeError) as exc: setup_instance._verify_seeded_data( expected_currency_codes={"USD"}, @@ -178,12 +210,16 @@ def test_verify_seeded_data_raises_when_default_currency_inactive(mock_config: D assert "inactive" in str(exc.value) -def test_verify_seeded_data_raises_when_units_missing(mock_config: DatabaseConfig) -> None: +def test_verify_seeded_data_raises_when_units_missing( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock, cursor_mock = _connection_with_cursor() cursor_mock.fetchall.return_value = [("tonnes", True)] - with mock.patch.object(setup_instance, "_application_connection", return_value=connection_mock): + with mock.patch.object( + setup_instance, "_application_connection", return_value=connection_mock + ): with pytest.raises(RuntimeError) as exc: setup_instance._verify_seeded_data( expected_currency_codes=set(), @@ -193,12 +229,18 @@ def test_verify_seeded_data_raises_when_units_missing(mock_config: DatabaseConfi assert "liters" in str(exc.value) -def test_verify_seeded_data_raises_when_measurement_table_missing(mock_config: DatabaseConfig) -> None: +def test_verify_seeded_data_raises_when_measurement_table_missing( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock, cursor_mock = _connection_with_cursor() - cursor_mock.execute.side_effect = psycopg_errors.UndefinedTable("relation does not exist") + cursor_mock.execute.side_effect = psycopg_errors.UndefinedTable( + "relation does not exist" + ) - with mock.patch.object(setup_instance, "_application_connection", return_value=connection_mock): + with mock.patch.object( + setup_instance, "_application_connection", return_value=connection_mock + ): with pytest.raises(RuntimeError) as exc: setup_instance._verify_seeded_data( expected_currency_codes=set(), @@ -226,9 +268,14 @@ def test_seed_baseline_data_rerun_uses_existing_records( unit_rows, ] - with mock.patch.object( - setup_instance, "_application_connection", return_value=connection_mock - ), mock.patch("scripts.seed_data.run_with_namespace") as seed_run: + with ( + mock.patch.object( + setup_instance, + "_application_connection", + return_value=connection_mock, + ), + mock.patch("scripts.seed_data.run_with_namespace") as seed_run, + ): setup_instance.seed_baseline_data(dry_run=False) setup_instance.seed_baseline_data(dry_run=False) @@ -240,7 +287,9 @@ def test_seed_baseline_data_rerun_uses_existing_records( assert cursor_mock.execute.call_count == 4 -def test_ensure_database_raises_with_context(mock_config: DatabaseConfig) -> None: +def test_ensure_database_raises_with_context( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock = mock.MagicMock() cursor_mock = mock.MagicMock() @@ -248,14 +297,18 @@ def test_ensure_database_raises_with_context(mock_config: DatabaseConfig) -> Non cursor_mock.execute.side_effect = [None, psycopg2.Error("create_fail")] connection_mock.cursor.return_value = cursor_mock - with mock.patch.object(setup_instance, "_admin_connection", return_value=connection_mock): + with mock.patch.object( + setup_instance, "_admin_connection", return_value=connection_mock + ): with pytest.raises(RuntimeError) as exc: setup_instance.ensure_database() assert "Failed to create database" in str(exc.value) -def 
test_ensure_role_raises_with_context_during_creation(mock_config: DatabaseConfig) -> None: +def test_ensure_role_raises_with_context_during_creation( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) admin_conn, admin_cursor = _connection_with_cursor() @@ -295,7 +348,9 @@ def test_ensure_role_raises_with_context_during_privilege_grants( assert "Failed to grant privileges" in str(exc.value) -def test_ensure_database_dry_run_skips_creation(mock_config: DatabaseConfig) -> None: +def test_ensure_database_dry_run_skips_creation( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=True) connection_mock = mock.MagicMock() @@ -303,45 +358,59 @@ def test_ensure_database_dry_run_skips_creation(mock_config: DatabaseConfig) -> cursor_mock.fetchone.return_value = None connection_mock.cursor.return_value = cursor_mock - with mock.patch.object(setup_instance, "_admin_connection", return_value=connection_mock), mock.patch( - "scripts.setup_database.logger" - ) as logger_mock: + with ( + mock.patch.object( + setup_instance, "_admin_connection", return_value=connection_mock + ), + mock.patch("scripts.setup_database.logger") as logger_mock, + ): setup_instance.ensure_database() # expect only existence check, no create attempt cursor_mock.execute.assert_called_once() logger_mock.info.assert_any_call( - "Dry run: would create database '%s'. Run without --dry-run to proceed.", mock_config.database + "Dry run: would create database '%s'. Run without --dry-run to proceed.", + mock_config.database, ) -def test_ensure_role_dry_run_skips_creation_and_grants(mock_config: DatabaseConfig) -> None: +def test_ensure_role_dry_run_skips_creation_and_grants( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=True) admin_conn, admin_cursor = _connection_with_cursor() admin_cursor.fetchone.return_value = None - with mock.patch.object( - setup_instance, - "_admin_connection", - side_effect=[admin_conn], - ) as conn_mock, mock.patch("scripts.setup_database.logger") as logger_mock: + with ( + mock.patch.object( + setup_instance, + "_admin_connection", + side_effect=[admin_conn], + ) as conn_mock, + mock.patch("scripts.setup_database.logger") as logger_mock, + ): setup_instance.ensure_role() assert conn_mock.call_count == 1 admin_cursor.execute.assert_called_once() logger_mock.info.assert_any_call( - "Dry run: would create role '%s'. Run without --dry-run to apply.", mock_config.user + "Dry run: would create role '%s'. 
Run without --dry-run to apply.", + mock_config.user, ) -def test_register_rollback_skipped_when_dry_run(mock_config: DatabaseConfig) -> None: +def test_register_rollback_skipped_when_dry_run( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=True) setup_instance._register_rollback("noop", lambda: None) assert setup_instance._rollback_actions == [] -def test_execute_rollbacks_runs_in_reverse_order(mock_config: DatabaseConfig) -> None: +def test_execute_rollbacks_runs_in_reverse_order( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) calls: list[str] = [] @@ -362,16 +431,24 @@ def test_execute_rollbacks_runs_in_reverse_order(mock_config: DatabaseConfig) -> assert setup_instance._rollback_actions == [] -def test_ensure_database_registers_rollback_action(mock_config: DatabaseConfig) -> None: +def test_ensure_database_registers_rollback_action( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) connection_mock = mock.MagicMock() cursor_mock = mock.MagicMock() cursor_mock.fetchone.return_value = None connection_mock.cursor.return_value = cursor_mock - with mock.patch.object(setup_instance, "_admin_connection", return_value=connection_mock), mock.patch.object( - setup_instance, "_register_rollback" - ) as register_mock, mock.patch.object(setup_instance, "_drop_database") as drop_mock: + with ( + mock.patch.object( + setup_instance, "_admin_connection", return_value=connection_mock + ), + mock.patch.object( + setup_instance, "_register_rollback" + ) as register_mock, + mock.patch.object(setup_instance, "_drop_database") as drop_mock, + ): setup_instance.ensure_database() register_mock.assert_called_once() label, action = register_mock.call_args[0] @@ -380,24 +457,29 @@ def test_ensure_database_registers_rollback_action(mock_config: DatabaseConfig) drop_mock.assert_called_once_with(mock_config.database) -def test_ensure_role_registers_rollback_actions(mock_config: DatabaseConfig) -> None: +def test_ensure_role_registers_rollback_actions( + mock_config: DatabaseConfig, +) -> None: setup_instance = DatabaseSetup(mock_config, dry_run=False) admin_conn, admin_cursor = _connection_with_cursor() admin_cursor.fetchone.return_value = None privilege_conn, privilege_cursor = _connection_with_cursor() - with mock.patch.object( - setup_instance, - "_admin_connection", - side_effect=[admin_conn, privilege_conn], - ), mock.patch.object( - setup_instance, "_register_rollback" - ) as register_mock, mock.patch.object( - setup_instance, "_drop_role" - ) as drop_mock, mock.patch.object( - setup_instance, "_revoke_role_privileges" - ) as revoke_mock: + with ( + mock.patch.object( + setup_instance, + "_admin_connection", + side_effect=[admin_conn, privilege_conn], + ), + mock.patch.object( + setup_instance, "_register_rollback" + ) as register_mock, + mock.patch.object(setup_instance, "_drop_role") as drop_mock, + mock.patch.object( + setup_instance, "_revoke_role_privileges" + ) as revoke_mock, + ): setup_instance.ensure_role() assert register_mock.call_count == 2 drop_label, drop_action = register_mock.call_args_list[0][0] @@ -413,7 +495,9 @@ def test_ensure_role_registers_rollback_actions(mock_config: DatabaseConfig) -> revoke_mock.assert_called_once() -def test_main_triggers_rollbacks_on_failure(mock_config: DatabaseConfig) -> None: +def test_main_triggers_rollbacks_on_failure( + mock_config: DatabaseConfig, +) -> None: args = argparse.Namespace( ensure_database=True, 
ensure_role=True, @@ -437,11 +521,13 @@ def test_main_triggers_rollbacks_on_failure(mock_config: DatabaseConfig) -> None verbose=0, ) - with mock.patch.object(setup_db_module, "parse_args", return_value=args), mock.patch.object( - setup_db_module.DatabaseConfig, "from_env", return_value=mock_config - ), mock.patch.object( - setup_db_module, "DatabaseSetup" - ) as setup_cls: + with ( + mock.patch.object(setup_db_module, "parse_args", return_value=args), + mock.patch.object( + setup_db_module.DatabaseConfig, "from_env", return_value=mock_config + ), + mock.patch.object(setup_db_module, "DatabaseSetup") as setup_cls, + ): setup_instance = mock.MagicMock() setup_instance.dry_run = False setup_instance._rollback_actions = [ diff --git a/tests/unit/test_simulation.py b/tests/unit/test_simulation.py index 05444dd..b89febe 100644 --- a/tests/unit/test_simulation.py +++ b/tests/unit/test_simulation.py @@ -19,7 +19,12 @@ def client(api_client: TestClient) -> TestClient: def test_run_simulation_function_generates_samples(): params: List[Dict[str, Any]] = [ - {"name": "grade", "value": 1.8, "distribution": "normal", "std_dev": 0.2}, + { + "name": "grade", + "value": 1.8, + "distribution": "normal", + "std_dev": 0.2, + }, { "name": "recovery", "value": 0.9, @@ -45,7 +50,10 @@ def test_run_simulation_with_zero_iterations_returns_empty(): @pytest.mark.parametrize( "parameter_payload,error_message", [ - ({"name": "missing-value"}, "Parameter at index 0 must include 'value'"), + ( + {"name": "missing-value"}, + "Parameter at index 0 must include 'value'", + ), ( { "name": "bad-dist", @@ -110,7 +118,8 @@ def test_run_simulation_triangular_sampling_path(): span = 10.0 * DEFAULT_UNIFORM_SPAN_RATIO rng = Random(seed) expected_samples = [ - rng.triangular(10.0 - span, 10.0 + span, 10.0) for _ in range(iterations) + rng.triangular(10.0 - span, 10.0 + span, 10.0) + for _ in range(iterations) ] actual_samples = [entry["result"] for entry in results] for actual, expected in zip(actual_samples, expected_samples): @@ -156,9 +165,7 @@ def test_simulation_endpoint_no_params(client: TestClient): assert resp.json()["detail"] == "No parameters provided" -def test_simulation_endpoint_success( - client: TestClient, db_session: Session -): +def test_simulation_endpoint_success(client: TestClient, db_session: Session): scenario_payload: Dict[str, Any] = { "name": f"SimScenario-{uuid4()}", "description": "Simulation test", @@ -168,7 +175,12 @@ def test_simulation_endpoint_success( scenario_id = scenario_resp.json()["id"] params: List[Dict[str, Any]] = [ - {"name": "param1", "value": 2.5, "distribution": "normal", "std_dev": 0.5} + { + "name": "param1", + "value": 2.5, + "distribution": "normal", + "std_dev": 0.5, + } ] payload: Dict[str, Any] = { "scenario_id": scenario_id, diff --git a/tests/unit/test_theme_settings.py b/tests/unit/test_theme_settings.py new file mode 100644 index 0000000..c1e79ba --- /dev/null +++ b/tests/unit/test_theme_settings.py @@ -0,0 +1,63 @@ +import pytest +from sqlalchemy.orm import Session +from fastapi.testclient import TestClient + +from main import app +from models.theme_setting import ThemeSetting +from services.settings import save_theme_settings, get_theme_settings + + +client = TestClient(app) + + +def test_save_theme_settings(db_session: Session): + theme_data = { + "theme_name": "dark", + "primary_color": "#000000", + "secondary_color": "#333333", + "accent_color": "#ff0000", + "background_color": "#1a1a1a", + "text_color": "#ffffff" + } + + saved_setting = save_theme_settings(db_session, 
theme_data) + assert str(saved_setting.theme_name) == "dark" + assert str(saved_setting.primary_color) == "#000000" + + +def test_get_theme_settings(db_session: Session): + # Create a theme setting first + theme_data = { + "theme_name": "light", + "primary_color": "#ffffff", + "secondary_color": "#cccccc", + "accent_color": "#0000ff", + "background_color": "#f0f0f0", + "text_color": "#000000" + } + save_theme_settings(db_session, theme_data) + + settings = get_theme_settings(db_session) + assert settings["theme_name"] == "light" + assert settings["primary_color"] == "#ffffff" + + +def test_theme_settings_api(api_client): + # Test API endpoint for saving theme settings + theme_data = { + "theme_name": "test_theme", + "primary_color": "#123456", + "secondary_color": "#789abc", + "accent_color": "#def012", + "background_color": "#345678", + "text_color": "#9abcde" + } + + response = api_client.post("/api/settings/theme", json=theme_data) + assert response.status_code == 200 + assert response.json()["theme"]["theme_name"] == "test_theme" + + # Test API endpoint for getting theme settings + response = api_client.get("/api/settings/theme") + assert response.status_code == 200 + assert response.json()["theme_name"] == "test_theme" diff --git a/tests/unit/test_ui_routes.py b/tests/unit/test_ui_routes.py index 7a56043..b0757d7 100644 --- a/tests/unit/test_ui_routes.py +++ b/tests/unit/test_ui_routes.py @@ -21,11 +21,18 @@ def test_dashboard_route_provides_summary( assert context.get("report_available") is True metric_labels = {item["label"] for item in context["summary_metrics"]} - assert {"CAPEX Total", "OPEX Total", "Production", "Simulation Iterations"}.issubset(metric_labels) + assert { + "CAPEX Total", + "OPEX Total", + "Production", + "Simulation Iterations", + }.issubset(metric_labels) scenario = cast(Scenario, seeded_ui_data["scenario"]) scenario_row = next( - row for row in context["scenario_rows"] if row["scenario_name"] == scenario.name + row + for row in context["scenario_rows"] + if row["scenario_name"] == scenario.name ) assert scenario_row["iterations"] == 3 assert scenario_row["simulation_mean_display"] == "971,666.67" @@ -81,7 +88,9 @@ def test_dashboard_data_endpoint_returns_aggregates( payload = response.json() assert payload["report_available"] is True - metric_map = {item["label"]: item["value"] for item in payload["summary_metrics"]} + metric_map = { + item["label"]: item["value"] for item in payload["summary_metrics"] + } assert metric_map["CAPEX Total"].startswith("$") assert metric_map["Maintenance Cost"].startswith("$") @@ -99,7 +108,9 @@ def test_dashboard_data_endpoint_returns_aggregates( activity_labels = payload["scenario_activity_chart"]["labels"] activity_idx = activity_labels.index(scenario.name) - assert payload["scenario_activity_chart"]["production"][activity_idx] == 800.0 + assert ( + payload["scenario_activity_chart"]["production"][activity_idx] == 800.0 + ) @pytest.mark.parametrize( @@ -154,7 +165,10 @@ def test_settings_route_provides_css_context( assert "css_env_override_meta" in context assert context["css_variables"]["--color-accent"] == "#abcdef" - assert context["css_defaults"]["--color-accent"] == settings_service.CSS_COLOR_DEFAULTS["--color-accent"] + assert ( + context["css_defaults"]["--color-accent"] + == settings_service.CSS_COLOR_DEFAULTS["--color-accent"] + ) assert context["css_env_overrides"]["--color-accent"] == "#abcdef" override_rows = context["css_env_override_rows"] From f3da80885f47ee64cbb94bef71c6f9b252f5b111 Mon Sep 17 00:00:00 2001 
From: zwitschi Date: Mon, 27 Oct 2025 10:37:45 +0100 Subject: [PATCH 03/31] fix: Remove duplicate playwright entry and reorder dependencies in requirements-test.txt --- requirements-test.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index ec0a118..b2ac481 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,6 +1,7 @@ +playwright pytest pytest-cov pytest-httpx -playwright pytest-playwright -ruff \ No newline at end of file +python-jose +ruff From 3fc6a2a9d3287546cf34eec2ef2ea8597a65afb5 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 10:43:58 +0100 Subject: [PATCH 04/31] feat: Add detailed component diagrams and architecture overviews to Building Block View documentation --- docs/architecture/05_building_block_view.md | 119 +++++++++++++++----- 1 file changed, 91 insertions(+), 28 deletions(-) diff --git a/docs/architecture/05_building_block_view.md b/docs/architecture/05_building_block_view.md index 7b829bc..98837bc 100644 --- a/docs/architecture/05_building_block_view.md +++ b/docs/architecture/05_building_block_view.md @@ -43,9 +43,56 @@ Refer to the detailed architecture chapters in `docs/architecture/`: - **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers. - **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will attach in CI once scaffolding is completed. -### Component Diagram +### Level 1 Overview -# System Architecture — Mermaid Diagram +```mermaid +graph LR + U["User (Browser)"] + + subgraph FE[Frontend] + FE_TPL["Templates (Jinja2)"] + FE_STATIC["Static Assets (CSS/JS)"] + FE_PARTS["Reusable Partials"] + FE_SETTINGS["Settings View & JS"] + end + + subgraph BE[Backend — FastAPI] + BE_APP["FastAPI App (main.py)"] + BE_ROUTES["Routers"] + BE_SERVICES["Services"] + BE_MODELS["Models (SQLAlchemy)"] + BE_DB["Database Layer"] + end + + subgraph MW[Middleware & Utilities] + MW_VAL["JSON Validation Middleware"] + end + + subgraph QA[Testing] + QA_UNIT["Unit Tests (pytest)"] + QA_E2E["E2E (Playwright, planned)"] + end + + %% High-level flows + U -->|HTTP| BE_APP + U --> FE + FE --> BE_ROUTES + BE_APP --> BE_ROUTES + BE_ROUTES --> BE_SERVICES + BE_SERVICES --> BE_MODELS + BE_MODELS --> BE_DB + + MW_VAL --> BE_APP + + QA_UNIT --> BE_ROUTES + QA_UNIT --> BE_SERVICES + QA_UNIT --> FE + QA_UNIT --> MW_VAL + QA_E2E --> U + QA_E2E --> BE_APP +``` + +### Level 2 Overview ```mermaid graph LR @@ -57,16 +104,28 @@ graph LR %% === Frontend === subgraph FE[Frontend] - TPL["Jinja2 Templates\n(templates/)\n• base layout + sidebar"] - PARTS["Reusable Partials\n(templates/partials/components.html)\n• inputs • empty states • table wrappers"] - STATIC["Static Assets\n(static/)\n• CSS: static/css/main.css (palette via CSS vars)\n• JS: static/js/*.js (page modules)"] - SETPAGE["Settings View\n(templates/settings.html)"] - SETJS["Settings Logic\n(static/js/settings.js)\n• validation • submit • live CSS updates"] + TPL["Jinja2 Templates +(templates/) +• base layout + sidebar"] + PARTS["Reusable Partials +(templates/partials/components.html) +• inputs • empty states • table wrappers"] + STATIC["Static Assets +(static/) +• CSS: static/css/main.css (palette via CSS vars) +• JS: static/js/*.js (page modules)"] + 
SETPAGE["Settings View +(templates/settings.html)"] + SETJS["Settings Logic +(static/js/settings.js) +• validation • submit • live CSS updates"] end %% === Backend === - subgraph BE[Backend FastAPI] - MAIN["FastAPI App\n(main.py)\n• routers • middleware • startup/shutdown"] + subgraph BE[Backend — FastAPI] + MAIN["FastAPI App +(main.py) +• routers • middleware • startup/shutdown"] subgraph ROUTES[Routers] R_SCN["scenarios"] @@ -79,14 +138,22 @@ graph LR R_SIM["simulations"] R_REP["reporting"] R_UI["ui.py (metadata for UI)"] - DEP["dependencies.get_db\n(shared SQLAlchemy session)"] + DEP["dependencies.get_db +(shared SQLAlchemy session)"] end subgraph SRV[Services] - S_BLL["Business Logic Layer\n• orchestrates models + calc"] + S_BLL["Business Logic Layer +• orchestrates models + calc"] S_REP["Reporting Calculations"] - S_SIM["Monte Carlo\n(simulation scaffolding)"] - S_SET["Settings Manager\n(services/settings.py)\n• defaults via CSS vars\n• persistence in DB\n• env overrides\n• surfaces to API & UI"] + S_SIM["Monte Carlo +(simulation scaffolding)"] + S_SET["Settings Manager +(services/settings.py) +• defaults via CSS vars +• persistence in DB +• env overrides +• surfaces to API & UI"] end subgraph MOD[Models] @@ -101,7 +168,8 @@ graph LR end subgraph DB[Database Layer] - CFG["config/database.py\n(SQLAlchemy engine & sessions)"] + CFG["config/database.py +(SQLAlchemy engine & sessions)"] PG[("PostgreSQL")] APPSET["application_setting table"] end @@ -109,12 +177,18 @@ graph LR %% === Middleware & Utilities === subgraph MW[Middleware & Utilities] - VAL["JSON Validation Middleware\n(middleware/validation.py)"] + VAL["JSON Validation Middleware +(middleware/validation.py)"] end subgraph TEST[Testing] - UNIT["pytest unit tests\n(tests/unit/)\n• routes • services • UI rendering\n• negative-path validation"] - E2E["Playwright E2E (planned)\n• dashboard • scenario inputs • reporting\n• attach in CI"] + UNIT["pytest unit tests +(tests/unit/) +• routes • services • UI rendering +• negative-path validation"] + E2E["Playwright E2E (planned) +• dashboard • scenario inputs • reporting +• attach in CI"] end %% ===================== Edges / Flows ===================== @@ -170,17 +244,6 @@ graph LR class PG store; ``` ---- - -**Notes** - -- Arrows represent primary data/command flow. Dashed arrows denote shared dependencies (injected SQLAlchemy session). -- The settings pipeline shows how environment overrides and DB-backed defaults propagate to both API and UI. - -``` - -``` - ## Module Map (code) - `scenario.py`: central scenario entity with relationships to cost, consumption, production, equipment, maintenance, and simulation results. 
From 41156a87d1c3bf2678ac9013e3ab874973354cb4 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 10:46:34 +0100 Subject: [PATCH 05/31] fix: Ensure bcrypt and passlib are included in requirements.txt --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index d85057c..a24ee10 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +bcrypt fastapi uvicorn sqlalchemy @@ -7,3 +8,4 @@ httpx jinja2 pandas numpy +passlib \ No newline at end of file From 7f4cd33b65b0afea069f0a2b78dfa061d7dccfb5 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 10:57:27 +0100 Subject: [PATCH 06/31] fix: Update authentication system to use passlib for password hashing --- docs/architecture/08_concepts/08_01_security.md | 2 +- requirements.txt | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/architecture/08_concepts/08_01_security.md b/docs/architecture/08_concepts/08_01_security.md index 1488537..2b1b026 100644 --- a/docs/architecture/08_concepts/08_01_security.md +++ b/docs/architecture/08_concepts/08_01_security.md @@ -22,7 +22,7 @@ This document outlines the proposed user roles and permissions model for the Cal ## Authentication System -The authentication system uses JWT (JSON Web Tokens) for securing API endpoints. Users can register with a username, email, and password. Passwords are hashed using bcrypt. Upon successful login, an access token is issued, which must be included in subsequent requests for protected resources. +The authentication system uses JWT (JSON Web Tokens) for securing API endpoints. Users can register with a username, email, and password. Passwords are hashed using a `passlib` CryptContext for secure, configurable hashing. Upon successful login, an access token is issued, which must be included in subsequent requests for protected resources. ## Key Components diff --git a/requirements.txt b/requirements.txt index a24ee10..5a3cc02 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -bcrypt fastapi uvicorn sqlalchemy From ef4fb7dcf0b7dc9f1e54e67625e15d7fc2a3cabf Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 12:46:51 +0100 Subject: [PATCH 07/31] Refactor architecture documentation and enhance security features - Updated architecture constraints documentation to include detailed sections on technical, organizational, regulatory, environmental, and performance constraints. - Created separate markdown files for each type of constraint for better organization and clarity. - Revised the architecture scope section to provide a clearer overview of the system's key areas. - Enhanced the solution strategy documentation with detailed explanations of the client-server architecture, technology choices, trade-offs, and future considerations. - Added comprehensive descriptions of backend and frontend components, middleware, and utilities in the architecture documentation. - Migrated UI, templates, and styling notes to a dedicated section for better structure. - Updated requirements.txt to include missing dependencies. - Refactored user authentication logic in the users.py and security.py files to improve code organization and maintainability, including the integration of OAuth2 password bearer token handling. 
---
 .gitea/workflows/test.yml                     |  12 +-
 .../02_architecture_constraints.md            |  66 +----
 .../02_01_technical_constraints.md            |  16 ++
 .../02_02_organizational_constraints.md       |  18 ++
 .../02_03_regulatory_constraints.md           |  17 ++
 .../02_04_environmental_constraints.md        |  16 ++
 .../02_05_performance_constraints.md          |  14 +
 docs/architecture/03_context_and_scope.md     |  19 +-
 .../03_scope/03_01_architecture_scope.md      |  26 ++
 docs/architecture/04_solution_strategy.md     |  43 +--
 .../04_01_client_server_architecture.md       |  10 +
 .../04_strategy/04_02_technology_choices.md   |  15 +
 .../04_strategy/04_03_trade_offs.md           |  14 +
 .../04_04_future_considerations.md            |  17 ++
 .../05_blocks/05_01_architecture_overview.md  |  13 +
 .../05_blocks/05_02_backend_components.md     |  13 +
 .../05_blocks/05_03_frontend_components.md    |  11 +
 .../05_blocks/05_04_middleware_utilities.md   |   8 +
 docs/architecture/05_building_block_view.md   | 262 +-----------------
 docs/architecture/13_ui_and_style.md          |   2 -
 requirements.txt                              |   3 +-
 routes/users.py                               |  27 +-
 services/security.py                          |  29 +-
 23 files changed, 271 insertions(+), 400 deletions(-)
 create mode 100644 docs/architecture/02_constraints/02_01_technical_constraints.md
 create mode 100644 docs/architecture/02_constraints/02_02_organizational_constraints.md
 create mode 100644 docs/architecture/02_constraints/02_03_regulatory_constraints.md
 create mode 100644 docs/architecture/02_constraints/02_04_environmental_constraints.md
 create mode 100644 docs/architecture/02_constraints/02_05_performance_constraints.md
 create mode 100644 docs/architecture/03_scope/03_01_architecture_scope.md
 create mode 100644 docs/architecture/04_strategy/04_01_client_server_architecture.md
 create mode 100644 docs/architecture/04_strategy/04_02_technology_choices.md
 create mode 100644 docs/architecture/04_strategy/04_03_trade_offs.md
 create mode 100644 docs/architecture/04_strategy/04_04_future_considerations.md
 create mode 100644 docs/architecture/05_blocks/05_01_architecture_overview.md
 create mode 100644 docs/architecture/05_blocks/05_02_backend_components.md
 create mode 100644 docs/architecture/05_blocks/05_03_frontend_components.md
 create mode 100644 docs/architecture/05_blocks/05_04_middleware_utilities.md

diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml
index b10f005..a095935 100644
--- a/.gitea/workflows/test.yml
+++ b/.gitea/workflows/test.yml
@@ -5,10 +5,11 @@ jobs:
   tests:
     name: ${{ matrix.target }} tests
     runs-on: ubuntu-latest
+    container: ${{ matrix.target == 'e2e' && 'mcr.microsoft.com/playwright/python:v1.40.0-jammy' || '' }}
    env:
      DATABASE_DRIVER: postgresql
      DATABASE_HOST: postgres
-      DATABASE_PORT: "5432"
+      DATABASE_PORT: '5432'
      DATABASE_NAME: calminer_ci
      DATABASE_USER: calminer
      DATABASE_PASSWORD: secret
@@ -36,10 +37,17 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
+      - name: Cache pip dependencies
+        uses: actions/cache@v4
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
      - name: Prepare Python environment
        uses: ./.gitea/actions/setup-python-env
        with:
-          install-playwright: ${{ matrix.target == 'e2e' }}
+          install-playwright: 'false' # e2e runs in the Playwright container (browsers preinstalled); unit/lint never need them
      - name: Run tests
        run: |
          if [ "${{ matrix.target }}" = "unit" ]; then
diff --git a/docs/architecture/02_architecture_constraints.md b/docs/architecture/02_architecture_constraints.md
index d9e41d4..bbb63f3 100644
--- a/docs/architecture/02_architecture_constraints.md
+++ b/docs/architecture/02_architecture_constraints.md
@@ 
-1,66 +1,18 @@ --- -title: "02 — Architecture Constraints" -description: "Document imposed constraints: technical, organizational, regulatory, and environmental constraints that affect architecture decisions." -status: skeleton +title: '02 — Architecture Constraints' +description: 'Document imposed constraints: technical, organizational, regulatory, and environmental constraints that affect architecture decisions.' +status: draft --- # 02 — Architecture Constraints -## Technical Constraints +## Constraints Overview -> e.g., choice of FastAPI, PostgreSQL, SQLAlchemy, Chart.js, Jinja2 templates. - -The architecture of CalMiner is influenced by several technical constraints that shape its design and implementation: - -1. **Framework Selection**: The choice of FastAPI as the web framework imposes constraints on how the application handles requests, routing, and middleware. FastAPI's asynchronous capabilities must be leveraged appropriately to ensure optimal performance. -2. **Database Technology**: The use of PostgreSQL as the primary database system dictates the data modeling, querying capabilities, and transaction management strategies. SQLAlchemy ORM is used for database interactions, which requires adherence to its conventions and limitations. -3. **Frontend Technologies**: The decision to use Jinja2 for server-side templating and Chart.js for data visualization influences the structure of the frontend code and the way dynamic content is rendered. -4. **Simulation Logic**: The Monte Carlo simulation logic must be designed to efficiently handle large datasets and perform computations within the constraints of the chosen programming language (Python) and its libraries. - -## Organizational Constraints - -> e.g., team skillsets, development workflows, CI/CD pipelines. - -Restrictions arising from organizational factors include: - -1. **Team Expertise**: The development team’s familiarity with FastAPI, SQLAlchemy, and frontend technologies like Jinja2 and Chart.js influences the architecture choices to ensure maintainability and ease of development. -2. **Development Processes**: The adoption of Agile methodologies and CI/CD pipelines (using Gitea Actions) shapes the architecture to support continuous integration, automated testing, and deployment practices. -3. **Collaboration Tools**: The use of specific collaboration and version control tools (e.g., Gitea) affects how code is managed, reviewed, and integrated, impacting the overall architecture and development workflow. -4. **Documentation Standards**: The requirement for comprehensive documentation (as seen in the `docs/` folder) necessitates an architecture that is well-structured and easy to understand for both current and future team members. -5. **Knowledge Sharing**: The need for effective knowledge sharing and onboarding processes influences the architecture to ensure that it is accessible and understandable for new team members. -6. **Resource Availability**: The availability of hardware, software, and human resources within the organization can impose constraints on the architecture, affecting decisions related to scalability, performance, and feature implementation. - -## Regulatory Constraints - -> e.g., data privacy laws, industry standards. - -Regulatory constraints that impact the architecture of CalMiner include: - -1. **Data Privacy Compliance**: The architecture must ensure compliance with data privacy regulations such as GDPR or CCPA, which may dictate how user data is collected, stored, and processed. -2. 
**Industry Standards**: Adherence to industry-specific standards and best practices may influence the design of data models, security measures, and reporting functionalities. -3. **Auditability**: The system may need to incorporate logging and auditing features to meet regulatory requirements, affecting the architecture of data storage and access controls. -4. **Data Retention Policies**: Regulatory requirements regarding data retention and deletion may impose constraints on how long certain types of data can be stored, influencing database design and data lifecycle management. -5. **Security Standards**: Compliance with security standards (e.g., ISO/IEC 27001) may necessitate the implementation of specific security measures, such as encryption, access controls, and vulnerability management, which impact the overall architecture. - -## Environmental Constraints - -> e.g., deployment environments, cloud provider limitations. - -Environmental constraints affecting the architecture include: - -1. **Deployment Environments**: The architecture must accommodate various deployment environments (development, testing, production) with differing configurations and resource allocations. -2. **Cloud Provider Limitations**: If deployed on a specific cloud provider, the architecture may need to align with the provider's services, limitations, and best practices, such as using managed databases or specific container orchestration tools. -3. **Containerization**: The use of Docker for containerization imposes constraints on how the application is packaged, deployed, and scaled, influencing the architecture to ensure compatibility with container orchestration platforms. -4. **Scalability Requirements**: The architecture must be designed to scale efficiently based on anticipated load and usage patterns, considering the limitations of the chosen infrastructure. - -## Performance Constraints - -> e.g., response time requirements, scalability needs. - -Current performance constraints include: - -1. **Response Time Requirements**: The architecture must ensure that the system can respond to user requests within a specified time frame, which may impact design decisions related to caching, database queries, and API performance. -2. **Scalability Needs**: The system should be able to handle increased load and user traffic without significant degradation in performance, necessitating a scalable architecture that can grow with demand. +- [Technical Constraints](02_constraints/02_01_technical_constraints.md) +- [Organizational Constraints](02_constraints/02_02_organizational_constraints.md) +- [Regulatory Constraints](02_constraints/02_03_regulatory_constraints.md) +- [Environmental Constraints](02_constraints/02_04_environmental_constraints.md) +- [Performance Constraints](02_constraints/02_05_performance_constraints.md) ## Security Constraints diff --git a/docs/architecture/02_constraints/02_01_technical_constraints.md b/docs/architecture/02_constraints/02_01_technical_constraints.md new file mode 100644 index 0000000..25a559e --- /dev/null +++ b/docs/architecture/02_constraints/02_01_technical_constraints.md @@ -0,0 +1,16 @@ +--- +title: '02 — Technical Constraints' +description: 'Technical constraints that affect architecture decisions.' +status: draft +--- + +# Technical Constraints + +> e.g., choice of FastAPI, PostgreSQL, SQLAlchemy, Chart.js, Jinja2 templates. + +The architecture of CalMiner is influenced by several technical constraints that shape its design and implementation: + +1. 
**Framework Selection**: The choice of FastAPI as the web framework imposes constraints on how the application handles requests, routing, and middleware. FastAPI's asynchronous capabilities must be leveraged appropriately to ensure optimal performance. +2. **Database Technology**: The use of PostgreSQL as the primary database system dictates the data modeling, querying capabilities, and transaction management strategies. SQLAlchemy ORM is used for database interactions, which requires adherence to its conventions and limitations. +3. **Frontend Technologies**: The decision to use Jinja2 for server-side templating and Chart.js for data visualization influences the structure of the frontend code and the way dynamic content is rendered. +4. **Simulation Logic**: The Monte Carlo simulation logic must be designed to efficiently handle large datasets and perform computations within the constraints of the chosen programming language (Python) and its libraries. diff --git a/docs/architecture/02_constraints/02_02_organizational_constraints.md b/docs/architecture/02_constraints/02_02_organizational_constraints.md new file mode 100644 index 0000000..e117d45 --- /dev/null +++ b/docs/architecture/02_constraints/02_02_organizational_constraints.md @@ -0,0 +1,18 @@ +--- +title: '02 — Organizational Constraints' +description: 'Organizational constraints that affect architecture decisions.' +status: draft +--- + +# Organizational Constraints + +> e.g., team skillsets, development workflows, CI/CD pipelines. + +Restrictions arising from organizational factors include: + +1. **Team Expertise**: The development team’s familiarity with FastAPI, SQLAlchemy, and frontend technologies like Jinja2 and Chart.js influences the architecture choices to ensure maintainability and ease of development. +2. **Development Processes**: The adoption of Agile methodologies and CI/CD pipelines (using Gitea Actions) shapes the architecture to support continuous integration, automated testing, and deployment practices. +3. **Collaboration Tools**: The use of specific collaboration and version control tools (e.g., Gitea) affects how code is managed, reviewed, and integrated, impacting the overall architecture and development workflow. +4. **Documentation Standards**: The requirement for comprehensive documentation (as seen in the `docs/` folder) necessitates an architecture that is well-structured and easy to understand for both current and future team members. +5. **Knowledge Sharing**: The need for effective knowledge sharing and onboarding processes influences the architecture to ensure that it is accessible and understandable for new team members. +6. **Resource Availability**: The availability of hardware, software, and human resources within the organization can impose constraints on the architecture, affecting decisions related to scalability, performance, and feature implementation. diff --git a/docs/architecture/02_constraints/02_03_regulatory_constraints.md b/docs/architecture/02_constraints/02_03_regulatory_constraints.md new file mode 100644 index 0000000..b8ab6b6 --- /dev/null +++ b/docs/architecture/02_constraints/02_03_regulatory_constraints.md @@ -0,0 +1,17 @@ +--- +title: '02 — Regulatory Constraints' +description: 'Regulatory constraints that affect architecture decisions.' +status: draft +--- + +# Regulatory Constraints + +> e.g., data privacy laws, industry standards. + +Regulatory constraints that impact the architecture of CalMiner include: + +1. 
**Data Privacy Compliance**: The architecture must ensure compliance with data privacy regulations such as GDPR or CCPA, which may dictate how user data is collected, stored, and processed. +2. **Industry Standards**: Adherence to industry-specific standards and best practices may influence the design of data models, security measures, and reporting functionalities. +3. **Auditability**: The system may need to incorporate logging and auditing features to meet regulatory requirements, affecting the architecture of data storage and access controls. +4. **Data Retention Policies**: Regulatory requirements regarding data retention and deletion may impose constraints on how long certain types of data can be stored, influencing database design and data lifecycle management. +5. **Security Standards**: Compliance with security standards (e.g., ISO/IEC 27001) may necessitate the implementation of specific security measures, such as encryption, access controls, and vulnerability management, which impact the overall architecture. diff --git a/docs/architecture/02_constraints/02_04_environmental_constraints.md b/docs/architecture/02_constraints/02_04_environmental_constraints.md new file mode 100644 index 0000000..2c872a6 --- /dev/null +++ b/docs/architecture/02_constraints/02_04_environmental_constraints.md @@ -0,0 +1,16 @@ +--- +title: '02 — Environmental Constraints' +description: 'Environmental constraints that affect architecture decisions.' +status: draft +--- + +# Environmental Constraints + +> e.g., deployment environments, cloud provider limitations. + +Environmental constraints affecting the architecture include: + +1. **Deployment Environments**: The architecture must accommodate various deployment environments (development, testing, production) with differing configurations and resource allocations. +2. **Cloud Provider Limitations**: If deployed on a specific cloud provider, the architecture may need to align with the provider's services, limitations, and best practices, such as using managed databases or specific container orchestration tools. +3. **Containerization**: The use of Docker for containerization imposes constraints on how the application is packaged, deployed, and scaled, influencing the architecture to ensure compatibility with container orchestration platforms. +4. **Scalability Requirements**: The architecture must be designed to scale efficiently based on anticipated load and usage patterns, considering the limitations of the chosen infrastructure. diff --git a/docs/architecture/02_constraints/02_05_performance_constraints.md b/docs/architecture/02_constraints/02_05_performance_constraints.md new file mode 100644 index 0000000..b3c5c2c --- /dev/null +++ b/docs/architecture/02_constraints/02_05_performance_constraints.md @@ -0,0 +1,14 @@ +--- +title: '02 — Performance Constraints' +description: 'Performance constraints that affect architecture decisions.' +status: draft +--- + +# Performance Constraints + +> e.g., response time requirements, scalability needs. + +Current performance constraints include: + +1. **Response Time Requirements**: The architecture must ensure that the system can respond to user requests within a specified time frame, which may impact design decisions related to caching, database queries, and API performance. +2. **Scalability Needs**: The system should be able to handle increased load and user traffic without significant degradation in performance, necessitating a scalable architecture that can grow with demand. 
diff --git a/docs/architecture/03_context_and_scope.md b/docs/architecture/03_context_and_scope.md index 54b2fe0..47de28d 100644 --- a/docs/architecture/03_context_and_scope.md +++ b/docs/architecture/03_context_and_scope.md @@ -18,24 +18,7 @@ The CalMiner system operates within the context of mining project management, pr ## Scope of the Architecture -The architecture encompasses the following key areas: - -1. **Data Ingestion**: Mechanisms for collecting and processing data from various sources. -2. **Data Storage**: Solutions for storing and managing historical and real-time data. -3. **Simulation Engine**: Core algorithms and models for scenario analysis. - 3.1. **Modeling Framework**: Tools for defining and managing simulation models. - 3.2. **Parameter Management**: Systems for handling input parameters and configurations. - 3.3. **Execution Engine**: Infrastructure for running simulations and processing results. - 3.4. **Result Storage**: Systems for storing simulation outputs for analysis and reporting. -4. **Financial Reporting**: Tools for generating reports and visualizations based on simulation outcomes. -5. **Risk Assessment**: Frameworks for identifying and evaluating potential project risks. -6. **Profitability Analysis**: Modules for calculating and analyzing project profitability metrics. -7. **User Interface**: Design and implementation of the user-facing components of the system. -8. **Security and Compliance**: Measures to ensure data security and regulatory compliance. -9. **Scalability and Performance**: Strategies for ensuring the system can handle increasing data volumes and user loads. -10. **Integration Points**: Interfaces for integrating with external systems and services. -11. **Monitoring and Logging**: Systems for tracking system performance and user activity. -12. **Maintenance and Support**: Processes for ongoing system maintenance and user support. +See [Architecture Scope](03_scope/03_01_architecture_scope.md) for details. ## Diagram diff --git a/docs/architecture/03_scope/03_01_architecture_scope.md b/docs/architecture/03_scope/03_01_architecture_scope.md new file mode 100644 index 0000000..f845c8c --- /dev/null +++ b/docs/architecture/03_scope/03_01_architecture_scope.md @@ -0,0 +1,26 @@ +--- +title: '03 — Architecture Scope' +description: 'Key areas encompassed by the architecture.' +status: draft +--- + +# Architecture Scope + +The architecture encompasses the following key areas: + +1. **Data Ingestion**: Mechanisms for collecting and processing data from various sources. +2. **Data Storage**: Solutions for storing and managing historical and real-time data. +3. **Simulation Engine**: Core algorithms and models for scenario analysis. + 3.1. **Modeling Framework**: Tools for defining and managing simulation models. + 3.2. **Parameter Management**: Systems for handling input parameters and configurations. + 3.3. **Execution Engine**: Infrastructure for running simulations and processing results. + 3.4. **Result Storage**: Systems for storing simulation outputs for analysis and reporting. +4. **Financial Reporting**: Tools for generating reports and visualizations based on simulation outcomes. +5. **Risk Assessment**: Frameworks for identifying and evaluating potential project risks. +6. **Profitability Analysis**: Modules for calculating and analyzing project profitability metrics. +7. **User Interface**: Design and implementation of the user-facing components of the system. +8. 
**Security and Compliance**: Measures to ensure data security and regulatory compliance. +9. **Scalability and Performance**: Strategies for ensuring the system can handle increasing data volumes and user loads. +10. **Integration Points**: Interfaces for integrating with external systems and services. +11. **Monitoring and Logging**: Systems for tracking system performance and user activity. +12. **Maintenance and Support**: Processes for ongoing system maintenance and user support. diff --git a/docs/architecture/04_solution_strategy.md b/docs/architecture/04_solution_strategy.md index 1829302..13e1605 100644 --- a/docs/architecture/04_solution_strategy.md +++ b/docs/architecture/04_solution_strategy.md @@ -8,42 +8,9 @@ status: draft This section outlines the high-level solution strategy for implementing the CalMiner system, focusing on major approaches, technology choices, and trade-offs. -## Client-Server Architecture +## Solution Strategy Overview -- **Backend**: FastAPI serves as the backend framework, providing RESTful APIs for data management, simulation execution, and reporting. It leverages SQLAlchemy for ORM-based database interactions with PostgreSQL. -- **Frontend**: Server-rendered Jinja2 templates deliver dynamic HTML views, enhanced with Chart.js for interactive data visualizations. This approach balances performance and simplicity, avoiding the complexity of a full SPA. -- **Middleware**: Custom middleware handles JSON validation to ensure data integrity before processing requests. - -## Technology Choices - -- **FastAPI**: Chosen for its high performance, ease of use, and modern features like async support and automatic OpenAPI documentation. -- **PostgreSQL**: Selected for its robustness, scalability, and support for complex queries, making it suitable for handling the diverse data needs of mining project management. -- **SQLAlchemy**: Provides a flexible and powerful ORM layer, facilitating database interactions while maintaining code readability and maintainability. -- **Chart.js**: Utilized for its simplicity and effectiveness in rendering interactive charts, enhancing the user experience on the dashboard. -- **Jinja2**: Enables server-side rendering of HTML templates, allowing for dynamic content generation while keeping the frontend lightweight. -- **Pydantic**: Used for data validation and serialization, ensuring that incoming request payloads conform to expected schemas. -- **Docker**: Employed for containerization, ensuring consistent deployment across different environments and simplifying dependency management. -- **Redis**: Used as an in-memory data store to cache frequently accessed data, improving application performance and reducing database load. - -## Trade-offs - -- **Server-Rendered vs. SPA**: Opted for server-rendered templates over a single-page application (SPA) to reduce complexity and improve initial load times, at the cost of some interactivity. -- **Synchronous vs. Asynchronous**: While FastAPI supports async operations, the initial implementation focuses on synchronous request handling for simplicity, with plans to introduce async features as needed. -- **Monolithic vs. Microservices**: The initial architecture follows a monolithic approach for ease of development and deployment, with the possibility of refactoring into microservices as the system scales. -- **In-Memory Caching**: Implementing Redis for caching introduces additional infrastructure complexity but significantly enhances performance for read-heavy operations. 
-- **Database Choice**: PostgreSQL was chosen over NoSQL alternatives due to the structured nature of the data and the need for complex querying capabilities, despite potential scalability challenges. -- **Technology Familiarity**: Selected technologies align with the team's existing skill set to minimize the learning curve and accelerate development, even if some alternatives may offer marginally better performance or features. -- **Extensibility vs. Simplicity**: The architecture is designed to be extensible for future features (e.g., Monte Carlo simulation engine) while maintaining simplicity in the initial implementation to ensure timely delivery of core functionalities. - -## Future Considerations - -- **Scalability**: As the user base grows, consider transitioning to a microservices architecture and implementing load balancing strategies. -- **Asynchronous Processing**: Introduce asynchronous task queues (e.g., Celery) for long-running simulations to improve responsiveness. -- **Enhanced Frontend**: Explore the possibility of integrating a frontend framework (e.g., React or Vue.js) for more dynamic user interactions in future iterations. -- **Advanced Analytics**: Plan for integrating advanced analytics and machine learning capabilities to enhance simulation accuracy and reporting insights. -- **Security Enhancements**: Implement robust authentication and authorization mechanisms to protect sensitive data and ensure compliance with industry standards. -- **Continuous Integration/Continuous Deployment (CI/CD)**: Establish CI/CD pipelines to automate testing, building, and deployment processes for faster and more reliable releases. -- **Monitoring and Logging**: Integrate monitoring tools (e.g., Prometheus, Grafana) and centralized logging solutions (e.g., ELK stack) to track application performance and troubleshoot issues effectively. -- **User Feedback Loop**: Implement mechanisms for collecting user feedback to inform future development priorities and improve user experience. -- **Documentation**: Maintain comprehensive documentation for both developers and end-users to facilitate onboarding and effective use of the system. -- **Testing Strategy**: Develop a robust testing strategy, including unit, integration, and end-to-end tests, to ensure code quality and reliability as the system evolves. +- [Client-Server Architecture](04_strategy/04_01_client_server_architecture.md) +- [Technology Choices](04_strategy/04_02_technology_choices.md) +- [Trade-offs](04_strategy/04_03_trade_offs.md) +- [Future Considerations](04_strategy/04_04_future_considerations.md) diff --git a/docs/architecture/04_strategy/04_01_client_server_architecture.md b/docs/architecture/04_strategy/04_01_client_server_architecture.md new file mode 100644 index 0000000..de8f909 --- /dev/null +++ b/docs/architecture/04_strategy/04_01_client_server_architecture.md @@ -0,0 +1,10 @@ +--- +title: '04.01 — Client-Server Architecture' +description: 'Details on the client-server architecture of CalMiner.' +--- + +# 04.01 — Client-Server Architecture + +- **Backend**: FastAPI serves as the backend framework, providing RESTful APIs for data management, simulation execution, and reporting. It leverages SQLAlchemy for ORM-based database interactions with PostgreSQL. +- **Frontend**: Server-rendered Jinja2 templates deliver dynamic HTML views, enhanced with Chart.js for interactive data visualizations. This approach balances performance and simplicity, avoiding the complexity of a full SPA. 
+- **Middleware**: Custom middleware handles JSON validation to ensure data integrity before processing requests. diff --git a/docs/architecture/04_strategy/04_02_technology_choices.md b/docs/architecture/04_strategy/04_02_technology_choices.md new file mode 100644 index 0000000..401d18a --- /dev/null +++ b/docs/architecture/04_strategy/04_02_technology_choices.md @@ -0,0 +1,15 @@ +--- +title: '04.02 — Technology Choices' +description: 'Detailed explanation of technology choices in CalMiner.' +--- + +# 04.02 — Technology Choices + +- **FastAPI**: Chosen for its high performance, ease of use, and modern features like async support and automatic OpenAPI documentation. +- **PostgreSQL**: Selected for its robustness, scalability, and support for complex queries, making it suitable for handling the diverse data needs of mining project management. +- **SQLAlchemy**: Provides a flexible and powerful ORM layer, facilitating database interactions while maintaining code readability and maintainability. +- **Chart.js**: Utilized for its simplicity and effectiveness in rendering interactive charts, enhancing the user experience on the dashboard. +- **Jinja2**: Enables server-side rendering of HTML templates, allowing for dynamic content generation while keeping the frontend lightweight. +- **Pydantic**: Used for data validation and serialization, ensuring that incoming request payloads conform to expected schemas. +- **Docker**: Employed for containerization, ensuring consistent deployment across different environments and simplifying dependency management. +- **Redis**: Used as an in-memory data store to cache frequently accessed data, improving application performance and reducing database load. diff --git a/docs/architecture/04_strategy/04_03_trade_offs.md b/docs/architecture/04_strategy/04_03_trade_offs.md new file mode 100644 index 0000000..e584fab --- /dev/null +++ b/docs/architecture/04_strategy/04_03_trade_offs.md @@ -0,0 +1,14 @@ +--- +title: '04.03 — Trade-offs' +description: 'Discussion of trade-offs made in the CalMiner architecture.' +--- + +# 04.03 — Trade-offs + +- **Server-Rendered vs. SPA**: Opted for server-rendered templates over a single-page application (SPA) to reduce complexity and improve initial load times, at the cost of some interactivity. +- **Synchronous vs. Asynchronous**: While FastAPI supports async operations, the initial implementation focuses on synchronous request handling for simplicity, with plans to introduce async features as needed. +- **Monolithic vs. Microservices**: The initial architecture follows a monolithic approach for ease of development and deployment, with the possibility of refactoring into microservices as the system scales. +- **In-Memory Caching**: Implementing Redis for caching introduces additional infrastructure complexity but significantly enhances performance for read-heavy operations. +- **Database Choice**: PostgreSQL was chosen over NoSQL alternatives due to the structured nature of the data and the need for complex querying capabilities, despite potential scalability challenges. +- **Technology Familiarity**: Selected technologies align with the team's existing skill set to minimize the learning curve and accelerate development, even if some alternatives may offer marginally better performance or features. +- **Extensibility vs. 
Simplicity**: The architecture is designed to be extensible for future features (e.g., Monte Carlo simulation engine) while maintaining simplicity in the initial implementation to ensure timely delivery of core functionalities.
diff --git a/docs/architecture/04_strategy/04_04_future_considerations.md b/docs/architecture/04_strategy/04_04_future_considerations.md
new file mode 100644
index 0000000..e5351fa
--- /dev/null
+++ b/docs/architecture/04_strategy/04_04_future_considerations.md
@@ -0,0 +1,17 @@
+---
+title: '04.04 — Future Considerations'
+description: 'Future considerations for the CalMiner architecture.'
+---
+
+# 04.04 — Future Considerations
+
+- **Scalability**: As the user base grows, consider transitioning to a microservices architecture and implementing load balancing strategies.
+- **Asynchronous Processing**: Introduce asynchronous task queues (e.g., Celery) for long-running simulations to improve responsiveness.
+- **Enhanced Frontend**: Explore the possibility of integrating a frontend framework (e.g., React or Vue.js) for more dynamic user interactions in future iterations.
+- **Advanced Analytics**: Plan for integrating advanced analytics and machine learning capabilities to enhance simulation accuracy and reporting insights.
+- **Security Enhancements**: Implement robust authentication and authorization mechanisms to protect sensitive data and ensure compliance with industry standards.
+- **Continuous Integration/Continuous Deployment (CI/CD)**: Establish CI/CD pipelines to automate testing, building, and deployment processes for faster and more reliable releases.
+- **Monitoring and Logging**: Integrate monitoring tools (e.g., Prometheus, Grafana) and centralized logging solutions (e.g., ELK stack) to track application performance and troubleshoot issues effectively.
+- **User Feedback Loop**: Implement mechanisms for collecting user feedback to inform future development priorities and improve user experience.
+- **Documentation**: Maintain comprehensive documentation for both developers and end-users to facilitate onboarding and effective use of the system.
+- **Testing Strategy**: Develop a robust testing strategy, including unit, integration, and end-to-end tests, to ensure code quality and reliability as the system evolves.
diff --git a/docs/architecture/05_blocks/05_01_architecture_overview.md b/docs/architecture/05_blocks/05_01_architecture_overview.md
new file mode 100644
index 0000000..b2da578
--- /dev/null
+++ b/docs/architecture/05_blocks/05_01_architecture_overview.md
@@ -0,0 +1,13 @@
+---
+title: '05 — Architecture Overview'
+description: "This overview complements architecture with a high-level map of CalMiner's module layout and request flow."
+status: draft
+---
+
+This overview complements [architecture](../README.md) with a high-level map of CalMiner's module layout and request flow.
+ +Refer to the detailed architecture chapters in `docs/architecture/`: + +- Module map & components: [Building Block View](../05_building_block_view.md) +- Request flow & runtime interactions: [Runtime View](../06_runtime_view.md) +- Simulation roadmap & strategy: [Solution Strategy](../04_solution_strategy.md) diff --git a/docs/architecture/05_blocks/05_02_backend_components.md b/docs/architecture/05_blocks/05_02_backend_components.md new file mode 100644 index 0000000..97583d1 --- /dev/null +++ b/docs/architecture/05_blocks/05_02_backend_components.md @@ -0,0 +1,13 @@ +--- +title: '05 — Backend Components' +description: 'Description of the backend components of the CalMiner application.' +status: draft +--- + +- **FastAPI application** (`main.py`): entry point that configures routers, middleware, and startup/shutdown events. +- **Routers** (`routes/`): modular route handlers for scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting. Each router defines RESTful endpoints, request/response schemas, and orchestrates service calls. + - leveraging a shared dependency module (`routes/dependencies.get_db`) for SQLAlchemy session management. +- **Models** (`models/`): SQLAlchemy ORM models representing database tables and relationships, encapsulating domain entities like Scenario, CapEx, OpEx, Consumption, ProductionOutput, Equipment, Maintenance, and SimulationResult. +- **Services** (`services/`): business logic layer that processes data, performs calculations, and interacts with models. Key services include reporting calculations and Monte Carlo simulation scaffolding. + - `services/settings.py`: manages application settings backed by the `application_setting` table, including CSS variable defaults, persistence, and environment-driven overrides that surface in both the API and UI. +- **Database** (`config/database.py`): sets up the SQLAlchemy engine and session management for PostgreSQL interactions. diff --git a/docs/architecture/05_blocks/05_03_frontend_components.md b/docs/architecture/05_blocks/05_03_frontend_components.md new file mode 100644 index 0000000..4bd0eac --- /dev/null +++ b/docs/architecture/05_blocks/05_03_frontend_components.md @@ -0,0 +1,11 @@ +--- +title: '05 — Frontend Components' +description: 'Description of the frontend components of the CalMiner application.' +status: draft +--- + +- **Templates** (`templates/`): Jinja2 templates for server-rendered HTML views, extending a shared base layout with a persistent sidebar for navigation. +- **Static Assets** (`static/`): CSS and JavaScript files for styling and interactivity. Shared CSS variables in `static/css/main.css` define the color palette, while page-specific JS modules in `static/js/` handle dynamic behaviors. +- **Reusable partials** (`templates/partials/components.html`): macro library that standardises select inputs, feedback/empty states, and table wrappers so pages remain consistent while keeping DOM hooks stable for existing JavaScript modules. + - `templates/settings.html`: Settings hub that renders theme controls and environment override tables using metadata provided by `routes/ui.py`. + - `static/js/settings.js`: applies client-side validation, form submission, and live CSS updates for theme changes, respecting environment-managed variables returned by the API. 
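The settings pipeline described in `05_02_backend_components.md` and `05_03_frontend_components.md` reduces to a three-level precedence: environment override, then the value persisted in `application_setting`, then the CSS variable default. A minimal sketch of that resolution order; the function name and environment-variable scheme are illustrative assumptions (only `CSS_COLOR_DEFAULTS` and the `application_setting` table are attested elsewhere in this series):

```python
# Illustrative resolver for the documented precedence: env override > DB value > default.
import os

# Stand-in for the palette defaults exposed by services/settings.py.
CSS_COLOR_DEFAULTS = {"--color-accent": "#1f6feb"}


def resolve_theme_variable(name: str, persisted: dict[str, str]) -> str:
    """Return the effective value for one CSS variable."""
    # Hypothetical naming scheme: --color-accent -> CALMINER_COLOR_ACCENT.
    env_key = "CALMINER_" + name.removeprefix("--").replace("-", "_").upper()
    env_value = os.environ.get(env_key)
    if env_value:
        return env_value  # env-managed: the UI marks these as read-only
    if name in persisted:
        return persisted[name]  # value saved via the settings API
    return CSS_COLOR_DEFAULTS[name]
```

This matches what the unit tests assert: `css_env_overrides` win over stored values, and `css_defaults` fall back to `settings_service.CSS_COLOR_DEFAULTS`.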
diff --git a/docs/architecture/05_blocks/05_04_middleware_utilities.md b/docs/architecture/05_blocks/05_04_middleware_utilities.md new file mode 100644 index 0000000..32cee3e --- /dev/null +++ b/docs/architecture/05_blocks/05_04_middleware_utilities.md @@ -0,0 +1,8 @@ +--- +title: '05 — Middleware & Utilities' +description: 'Description of the middleware and utility components of the CalMiner application.' +status: draft +--- + +- **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers. +- **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will attach in CI once scaffolding is completed. diff --git a/docs/architecture/05_building_block_view.md b/docs/architecture/05_building_block_view.md index 98837bc..afeea2f 100644 --- a/docs/architecture/05_building_block_view.md +++ b/docs/architecture/05_building_block_view.md @@ -1,6 +1,6 @@ --- -title: '05 — Building Block View' -description: 'Explain the static structure: modules, components, services and their relationships.' +title: "05 — Building Block View" +description: "Explain the static structure: modules, components, services and their relationships." status: draft --- @@ -8,257 +8,9 @@ status: draft # 05 — Building Block View -## Architecture overview +## Building Block Overview -This overview complements [architecture](README.md) with a high-level map of CalMiner's module layout and request flow. - -Refer to the detailed architecture chapters in `docs/architecture/`: - -- Module map & components: [Building Block View](05_building_block_view.md) -- Request flow & runtime interactions: [Runtime View](06_runtime_view.md) -- Simulation roadmap & strategy: [Solution Strategy](04_solution_strategy.md) - -## System Components - -### Backend - -- **FastAPI application** (`main.py`): entry point that configures routers, middleware, and startup/shutdown events. -- **Routers** (`routes/`): modular route handlers for scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting. Each router defines RESTful endpoints, request/response schemas, and orchestrates service calls. - - leveraging a shared dependency module (`routes/dependencies.get_db`) for SQLAlchemy session management. -- **Models** (`models/`): SQLAlchemy ORM models representing database tables and relationships, encapsulating domain entities like Scenario, CapEx, OpEx, Consumption, ProductionOutput, Equipment, Maintenance, and SimulationResult. -- **Services** (`services/`): business logic layer that processes data, performs calculations, and interacts with models. Key services include reporting calculations and Monte Carlo simulation scaffolding. - - `services/settings.py`: manages application settings backed by the `application_setting` table, including CSS variable defaults, persistence, and environment-driven overrides that surface in both the API and UI. -- **Database** (`config/database.py`): sets up the SQLAlchemy engine and session management for PostgreSQL interactions. - -### Frontend - -- **Templates** (`templates/`): Jinja2 templates for server-rendered HTML views, extending a shared base layout with a persistent sidebar for navigation. -- **Static Assets** (`static/`): CSS and JavaScript files for styling and interactivity. 
Shared CSS variables in `static/css/main.css` define the color palette, while page-specific JS modules in `static/js/` handle dynamic behaviors. -- **Reusable partials** (`templates/partials/components.html`): macro library that standardises select inputs, feedback/empty states, and table wrappers so pages remain consistent while keeping DOM hooks stable for existing JavaScript modules. - - `templates/settings.html`: Settings hub that renders theme controls and environment override tables using metadata provided by `routes/ui.py`. - - `static/js/settings.js`: applies client-side validation, form submission, and live CSS updates for theme changes, respecting environment-managed variables returned by the API. - -### Middleware & Utilities - -- **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers. -- **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will attach in CI once scaffolding is completed. - -### Level 1 Overview - -```mermaid -graph LR - U["User (Browser)"] - - subgraph FE[Frontend] - FE_TPL["Templates (Jinja2)"] - FE_STATIC["Static Assets (CSS/JS)"] - FE_PARTS["Reusable Partials"] - FE_SETTINGS["Settings View & JS"] - end - - subgraph BE[Backend — FastAPI] - BE_APP["FastAPI App (main.py)"] - BE_ROUTES["Routers"] - BE_SERVICES["Services"] - BE_MODELS["Models (SQLAlchemy)"] - BE_DB["Database Layer"] - end - - subgraph MW[Middleware & Utilities] - MW_VAL["JSON Validation Middleware"] - end - - subgraph QA[Testing] - QA_UNIT["Unit Tests (pytest)"] - QA_E2E["E2E (Playwright, planned)"] - end - - %% High-level flows - U -->|HTTP| BE_APP - U --> FE - FE --> BE_ROUTES - BE_APP --> BE_ROUTES - BE_ROUTES --> BE_SERVICES - BE_SERVICES --> BE_MODELS - BE_MODELS --> BE_DB - - MW_VAL --> BE_APP - - QA_UNIT --> BE_ROUTES - QA_UNIT --> BE_SERVICES - QA_UNIT --> FE - QA_UNIT --> MW_VAL - QA_E2E --> U - QA_E2E --> BE_APP -``` - -### Level 2 Overview - -```mermaid -graph LR - %% Direction - %% LR = left-to-right for a wide architecture view - - %% === Clients === - U["User (Browser)"] - - %% === Frontend === - subgraph FE[Frontend] - TPL["Jinja2 Templates -(templates/) -• base layout + sidebar"] - PARTS["Reusable Partials -(templates/partials/components.html) -• inputs • empty states • table wrappers"] - STATIC["Static Assets -(static/) -• CSS: static/css/main.css (palette via CSS vars) -• JS: static/js/*.js (page modules)"] - SETPAGE["Settings View -(templates/settings.html)"] - SETJS["Settings Logic -(static/js/settings.js) -• validation • submit • live CSS updates"] - end - - %% === Backend === - subgraph BE[Backend — FastAPI] - MAIN["FastAPI App -(main.py) -• routers • middleware • startup/shutdown"] - - subgraph ROUTES[Routers] - R_SCN["scenarios"] - R_PAR["parameters"] - R_CST["costs"] - R_CONS["consumption"] - R_PROD["production"] - R_EQP["equipment"] - R_MNT["maintenance"] - R_SIM["simulations"] - R_REP["reporting"] - R_UI["ui.py (metadata for UI)"] - DEP["dependencies.get_db -(shared SQLAlchemy session)"] - end - - subgraph SRV[Services] - S_BLL["Business Logic Layer -• orchestrates models + calc"] - S_REP["Reporting Calculations"] - S_SIM["Monte Carlo -(simulation scaffolding)"] - S_SET["Settings Manager -(services/settings.py) -• defaults via CSS vars -• persistence in DB -• env overrides -• 
surfaces to API & UI"] - end - - subgraph MOD[Models] - M_SCN["Scenario"] - M_CAP["CapEx"] - M_OPEX["OpEx"] - M_CONS["Consumption"] - M_PROD["ProductionOutput"] - M_EQP["Equipment"] - M_MNT["Maintenance"] - M_SIMR["SimulationResult"] - end - - subgraph DB[Database Layer] - CFG["config/database.py -(SQLAlchemy engine & sessions)"] - PG[("PostgreSQL")] - APPSET["application_setting table"] - end - end - - %% === Middleware & Utilities === - subgraph MW[Middleware & Utilities] - VAL["JSON Validation Middleware -(middleware/validation.py)"] - end - - subgraph TEST[Testing] - UNIT["pytest unit tests -(tests/unit/) -• routes • services • UI rendering -• negative-path validation"] - E2E["Playwright E2E (planned) -• dashboard • scenario inputs • reporting -• attach in CI"] - end - - %% ===================== Edges / Flows ===================== - %% User to Frontend/Backend - U -->|HTTP GET| MAIN - U --> TPL - TPL -->|server-rendered HTML| U - STATIC --> U - PARTS --> TPL - SETPAGE --> U - SETJS --> U - - %% Frontend to Routers (AJAX/form submits) - SETJS -->|fetch/POST| R_UI - TPL -->|form submit / fetch| ROUTES - - %% FastAPI app wiring and middleware - VAL --> MAIN - MAIN --> ROUTES - - %% Routers to Services - ROUTES -->|calls| SRV - R_REP -->|calc| S_REP - R_SIM -->|run| S_SIM - R_UI -->|read/write settings meta| S_SET - - %% Services to Models & DB - SRV --> MOD - MOD --> CFG - CFG --> PG - - %% Settings manager persistence path - S_SET -->|persist/read| APPSET - APPSET --- PG - - %% Shared DB session dependency - DEP -. provides .-> ROUTES - DEP -. session .-> SRV - - %% Model entities mapping - S_BLL --> M_SCN & M_CAP & M_OPEX & M_CONS & M_PROD & M_EQP & M_MNT & M_SIMR - - %% Testing coverage - UNIT --> ROUTES - UNIT --> SRV - UNIT --> TPL - UNIT --> VAL - E2E --> U - E2E --> MAIN - - %% Legend - classDef store fill:#fff,stroke:#555,stroke-width:1px; - class PG store; -``` - -## Module Map (code) - -- `scenario.py`: central scenario entity with relationships to cost, consumption, production, equipment, maintenance, and simulation results. -- `capex.py`, `opex.py`: financial expenditures tied to scenarios. -- `consumption.py`, `production_output.py`: operational data tables. -- `equipment.py`, `maintenance.py`: asset management models. -- `simulation_result.py`: stores Monte Carlo iteration outputs. -- `application_setting.py`: persists editable application configuration, currently focused on theme variables but designed to store future settings categories. - -## Service Layer - -- `reporting.py`: computes aggregates (count, min/max, mean, median, percentiles, standard deviation, variance, tail-risk metrics) from simulation results. -- `simulation.py`: scaffolds Monte Carlo simulation logic (currently in-memory; persistence planned). -- `currency.py`: handles currency normalization for cost tables. -- `utils.py`: shared helper functions (e.g., statistical calculations). -- `validation.py`: JSON schema validation middleware. -- `database.py`: SQLAlchemy engine and session setup. -- `dependencies.py`: FastAPI dependency injection for DB sessions. 
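The aggregate set computed by `reporting.py` (count, min/max, mean, median, percentiles, standard deviation, variance, tail-risk metrics) can be sketched with the standard library alone. The function name and percentile cut-offs below are illustrative, not the service's actual API:

```python
# Minimal sketch of the aggregate computation described for reporting.py.
import statistics
from typing import Sequence


def summarize(values: Sequence[float]) -> dict[str, float]:
    """Aggregate a series of simulation results (assumes >= 2 samples)."""
    ordered = sorted(values)
    n = len(ordered)
    if n < 2:
        raise ValueError("need at least two samples for spread metrics")
    percentiles = statistics.quantiles(ordered, n=100, method="inclusive")
    return {
        "count": float(n),
        "min": ordered[0],
        "max": ordered[-1],
        "mean": statistics.fmean(ordered),
        "median": statistics.median(ordered),
        "p05": percentiles[4],   # 5th percentile, tail-risk side
        "p95": percentiles[94],  # 95th percentile
        "stdev": statistics.stdev(ordered),
        "variance": statistics.variance(ordered),
    }
```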
+- [Architecture Overview](05_blocks/05_01_architecture_overview.md) +- [Backend Components](05_blocks/05_02_backend_components.md) +- [Frontend Components](05_blocks/05_03_frontend_components.md) +- [Middleware & Utilities](05_blocks/05_04_middleware_utilities.md) diff --git a/docs/architecture/13_ui_and_style.md b/docs/architecture/13_ui_and_style.md index d3502ca..f4e4bad 100644 --- a/docs/architecture/13_ui_and_style.md +++ b/docs/architecture/13_ui_and_style.md @@ -1,7 +1,5 @@ # 13 — UI, templates and styling -Status: migrated - This chapter collects UI integration notes, reusable template components, styling audit points and per-page UI data/actions. ## Reusable Template Components diff --git a/requirements.txt b/requirements.txt index 5a3cc02..1c46aea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,5 @@ httpx jinja2 pandas numpy -passlib \ No newline at end of file +passlib +python-jose \ No newline at end of file diff --git a/routes/users.py b/routes/users.py index dd9ddc6..d538292 100644 --- a/routes/users.py +++ b/routes/users.py @@ -1,40 +1,15 @@ from fastapi import APIRouter, Depends, HTTPException, status -from fastapi.security import OAuth2PasswordBearer from sqlalchemy.orm import Session from config.database import get_db from models.user import User -from services.security import get_password_hash, verify_password, create_access_token, SECRET_KEY, ALGORITHM +from services.security import get_password_hash, verify_password, create_access_token, SECRET_KEY, ALGORITHM, get_current_user, oauth2_scheme from jose import jwt, JWTError from schemas.user import UserCreate, UserInDB, UserLogin, UserUpdate, PasswordResetRequest, PasswordReset, Token router = APIRouter(prefix="/users", tags=["users"]) -oauth2_scheme = OAuth2PasswordBearer(tokenUrl="users/login") - - -async def get_current_user(token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)): - credentials_exception = HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, - detail="Could not validate credentials", - headers={"WWW-Authenticate": "Bearer"}, - ) - try: - payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) - username: str = payload.get("sub") - if username is None: - raise credentials_exception - if username is None: - raise credentials_exception - except JWTError: - raise credentials_exception - user = db.query(User).filter(User.username == username).first() - if user is None: - raise credentials_exception - return user - - @router.post("/register", response_model=UserInDB, status_code=status.HTTP_201_CREATED) async def register_user(user: UserCreate, db: Session = Depends(get_db)): db_user = db.query(User).filter(User.username == user.username).first() diff --git a/services/security.py b/services/security.py index ce376e3..24782c5 100644 --- a/services/security.py +++ b/services/security.py @@ -1,8 +1,13 @@ from datetime import datetime, timedelta from typing import Any, Union -from jose import jwt +from fastapi import HTTPException, status, Depends +from fastapi.security import OAuth2PasswordBearer +from jose import jwt, JWTError from passlib.context import CryptContext +from sqlalchemy.orm import Session + +from config.database import get_db ACCESS_TOKEN_EXPIRE_MINUTES = 30 @@ -11,6 +16,8 @@ ALGORITHM = "HS256" pwd_context = CryptContext(schemes=["pbkdf2_sha256"], deprecated="auto") +oauth2_scheme = OAuth2PasswordBearer(tokenUrl="users/login") + def create_access_token( subject: Union[str, Any], expires_delta: Union[timedelta, None] = None @@ -30,3 +37,23 @@ def 
verify_password(plain_password: str, hashed_password: str) -> bool: def get_password_hash(password: str) -> str: return pwd_context.hash(password) + + +async def get_current_user(token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)): + from models.user import User + credentials_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Could not validate credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + try: + payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) + username = payload.get("sub") + if username is None: + raise credentials_exception + except JWTError: + raise credentials_exception + user = db.query(User).filter(User.username == username).first() + if user is None: + raise credentials_exception + return user From ee0a7a5bf5601e3c40ee9f760bcb8f5309028cc4 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 12:50:20 +0100 Subject: [PATCH 08/31] fix: Add missing newlines for improved readability in test workflow --- .gitea/workflows/test.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index a095935..fab7ef7 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -37,6 +37,7 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + - name: Cache pip dependencies uses: actions/cache@v4 with: @@ -44,10 +45,12 @@ jobs: key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }} restore-keys: | ${{ runner.os }}-pip- + - name: Prepare Python environment uses: ./.gitea/actions/setup-python-env with: install-playwright: ${{ matrix.target != 'e2e' }} + - name: Run tests run: | if [ "${{ matrix.target }}" = "unit" ]; then From e37488bcf64da942c933e423be60fed4d0a0ebdb Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 12:51:58 +0100 Subject: [PATCH 09/31] fix: Comment out pip dependency caching in test workflow --- .gitea/workflows/test.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index fab7ef7..5c9b235 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -38,13 +38,13 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Cache pip dependencies - uses: actions/cache@v4 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }} - restore-keys: | - ${{ runner.os }}-pip- + # - name: Cache pip dependencies + # uses: actions/cache@v4 + # with: + # path: ~/.cache/pip + # key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }} + # restore-keys: | + # ${{ runner.os }}-pip- - name: Prepare Python environment uses: ./.gitea/actions/setup-python-env From b1d50a56e03dc7c1eabd589f2c9760e1901c6496 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 14:56:37 +0100 Subject: [PATCH 10/31] feat: Consolidate user, role, and theme settings tables into a single migration file --- scripts/migrations/000_base.sql | 28 +++++++++++++++++++ ...51025_create_application_setting_table.sql | 25 ----------------- .../20251027_create_theme_settings_table.sql | 11 -------- .../20251027_create_user_and_role_tables.sql | 15 ---------- scripts/setup_database.py | 12 +++++--- tests/unit/test_setup_database.py | 2 ++ 6 files changed, 38 insertions(+), 55 deletions(-) delete mode 100644 scripts/migrations/20251025_create_application_setting_table.sql delete mode 100644 
scripts/migrations/20251027_create_theme_settings_table.sql delete mode 100644 scripts/migrations/20251027_create_user_and_role_tables.sql diff --git a/scripts/migrations/000_base.sql b/scripts/migrations/000_base.sql index b2af060..11f9358 100644 --- a/scripts/migrations/000_base.sql +++ b/scripts/migrations/000_base.sql @@ -158,4 +158,32 @@ ALTER TABLE capex ALTER TABLE opex DROP COLUMN IF EXISTS currency_code; +-- Role-based access control tables +CREATE TABLE IF NOT EXISTS roles ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) UNIQUE NOT NULL +); + +CREATE TABLE IF NOT EXISTS users ( + id SERIAL PRIMARY KEY, + username VARCHAR(255) UNIQUE NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + hashed_password VARCHAR(255) NOT NULL, + role_id INTEGER NOT NULL REFERENCES roles (id) ON DELETE RESTRICT +); + +CREATE INDEX IF NOT EXISTS ix_users_username ON users (username); +CREATE INDEX IF NOT EXISTS ix_users_email ON users (email); + +-- Theme settings configuration table +CREATE TABLE IF NOT EXISTS theme_settings ( + id SERIAL PRIMARY KEY, + theme_name VARCHAR(255) UNIQUE NOT NULL, + primary_color VARCHAR(7) NOT NULL, + secondary_color VARCHAR(7) NOT NULL, + accent_color VARCHAR(7) NOT NULL, + background_color VARCHAR(7) NOT NULL, + text_color VARCHAR(7) NOT NULL +); + COMMIT; diff --git a/scripts/migrations/20251025_create_application_setting_table.sql b/scripts/migrations/20251025_create_application_setting_table.sql deleted file mode 100644 index 380a14a..0000000 --- a/scripts/migrations/20251025_create_application_setting_table.sql +++ /dev/null @@ -1,25 +0,0 @@ --- Migration: Create application_setting table for configurable application options --- Date: 2025-10-25 --- Description: Introduces persistent storage for application-level settings such as theme colors. 
- -BEGIN; - -CREATE TABLE IF NOT EXISTS application_setting ( - id SERIAL PRIMARY KEY, - key VARCHAR(128) NOT NULL UNIQUE, - value TEXT NOT NULL, - value_type VARCHAR(32) NOT NULL DEFAULT 'string', - category VARCHAR(32) NOT NULL DEFAULT 'general', - description TEXT, - is_editable BOOLEAN NOT NULL DEFAULT TRUE, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -CREATE UNIQUE INDEX IF NOT EXISTS ux_application_setting_key - ON application_setting (key); - -CREATE INDEX IF NOT EXISTS ix_application_setting_category - ON application_setting (category); - -COMMIT; diff --git a/scripts/migrations/20251027_create_theme_settings_table.sql b/scripts/migrations/20251027_create_theme_settings_table.sql deleted file mode 100644 index 8e2b448..0000000 --- a/scripts/migrations/20251027_create_theme_settings_table.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Migration: 20251027_create_theme_settings_table.sql - -CREATE TABLE theme_settings ( - id SERIAL PRIMARY KEY, - theme_name VARCHAR(255) UNIQUE NOT NULL, - primary_color VARCHAR(7) NOT NULL, - secondary_color VARCHAR(7) NOT NULL, - accent_color VARCHAR(7) NOT NULL, - background_color VARCHAR(7) NOT NULL, - text_color VARCHAR(7) NOT NULL -); diff --git a/scripts/migrations/20251027_create_user_and_role_tables.sql b/scripts/migrations/20251027_create_user_and_role_tables.sql deleted file mode 100644 index 5ae47b2..0000000 --- a/scripts/migrations/20251027_create_user_and_role_tables.sql +++ /dev/null @@ -1,15 +0,0 @@ --- Migration: 20251027_create_user_and_role_tables.sql - -CREATE TABLE roles ( - id SERIAL PRIMARY KEY, - name VARCHAR(255) UNIQUE NOT NULL -); - -CREATE TABLE users ( - id SERIAL PRIMARY KEY, - username VARCHAR(255) UNIQUE NOT NULL, - email VARCHAR(255) UNIQUE NOT NULL, - hashed_password VARCHAR(255) NOT NULL, - role_id INTEGER NOT NULL, - FOREIGN KEY (role_id) REFERENCES roles(id) -); diff --git a/scripts/setup_database.py b/scripts/setup_database.py index 3c51eb3..5d09fb8 100644 --- a/scripts/setup_database.py +++ b/scripts/setup_database.py @@ -22,6 +22,7 @@ connection string; this script will still honor the granular inputs above. 
""" from __future__ import annotations +from config.database import Base import argparse import importlib import logging @@ -43,7 +44,6 @@ from sqlalchemy import create_engine, inspect ROOT_DIR = Path(__file__).resolve().parents[1] if str(ROOT_DIR) not in sys.path: sys.path.insert(0, str(ROOT_DIR)) -from config.database import Base logger = logging.getLogger(__name__) @@ -126,7 +126,8 @@ class DatabaseConfig: ] if missing: raise RuntimeError( - "Missing required database configuration: " + ", ".join(missing) + "Missing required database configuration: " + + ", ".join(missing) ) host = cast(str, host) @@ -340,7 +341,8 @@ class DatabaseSetup: rollback_label = f"drop database {self.config.database}" self._register_rollback( rollback_label, - lambda db=self.config.database: self._drop_database(db), + lambda db=self.config.database: self._drop_database( + db), ) logger.info("Created database '%s'", self.config.database) finally: @@ -409,7 +411,8 @@ class DatabaseSetup: rollback_label = f"drop role {self.config.user}" self._register_rollback( rollback_label, - lambda role=self.config.user: self._drop_role(role), + lambda role=self.config.user: self._drop_role( + role), ) else: logger.info("Role '%s' already present", self.config.user) @@ -839,6 +842,7 @@ class DatabaseSetup: seed_args = argparse.Namespace( currencies=True, units=True, + theme=True, defaults=False, dry_run=dry_run, verbose=0, diff --git a/tests/unit/test_setup_database.py b/tests/unit/test_setup_database.py index 4432d16..efea513 100644 --- a/tests/unit/test_setup_database.py +++ b/tests/unit/test_setup_database.py @@ -46,6 +46,7 @@ def test_seed_baseline_data_dry_run_skips_verification( assert namespace_arg.dry_run is True assert namespace_arg.currencies is True assert namespace_arg.units is True + assert namespace_arg.theme is True assert seed_run.call_args.kwargs["config"] is setup_instance.config verify_mock.assert_not_called() @@ -67,6 +68,7 @@ def test_seed_baseline_data_invokes_verification( assert isinstance(namespace_arg, argparse.Namespace) assert namespace_arg.dry_run is False assert seed_run.call_args.kwargs["config"] is setup_instance.config + assert namespace_arg.theme is True verify_mock.assert_called_once_with( expected_currency_codes=expected_currencies, expected_unit_codes=expected_units, From 8bb5456864f8b6726e85cae12db25b1250562671 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 14:59:44 +0100 Subject: [PATCH 11/31] fix: Update container condition for e2e tests in workflow --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 5c9b235..e70cf0f 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -5,7 +5,7 @@ jobs: tests: name: ${{ matrix.target }} tests runs-on: ubuntu-latest - container: ${{ matrix.target == 'e2e' && 'mcr.microsoft.com/playwright/python:v1.40.0-jammy' || '' }} + container: ${{ matrix.target == 'e2e'}} env: DATABASE_DRIVER: postgresql DATABASE_HOST: postgres From 573e255769e73b86037fe624807bcdceee837ac8 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 15:12:50 +0100 Subject: [PATCH 12/31] fix: Enhance argument handling in seed data script and add unit tests --- scripts/seed_data.py | 30 ++++++++++++++++------- tests/unit/test_seed_data.py | 46 ++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 9 deletions(-) create mode 100644 tests/unit/test_seed_data.py diff --git a/scripts/seed_data.py b/scripts/seed_data.py index 
f7c035f..4db4c1c 100644 --- a/scripts/seed_data.py +++ b/scripts/seed_data.py @@ -135,24 +135,36 @@ def run_with_namespace( *, config: Optional[DatabaseConfig] = None, ) -> None: + if not hasattr(args, "verbose"): + args.verbose = 0 + if not hasattr(args, "dry_run"): + args.dry_run = False + _configure_logging(args) - if not any((args.currencies, args.units, args.theme, args.defaults)): + currencies = bool(getattr(args, "currencies", False)) + units = bool(getattr(args, "units", False)) + theme = bool(getattr(args, "theme", False)) + defaults = bool(getattr(args, "defaults", False)) + dry_run = bool(getattr(args, "dry_run", False)) + + if not any((currencies, units, theme, defaults)): logger.info("No seeding options provided; exiting") return config = config or DatabaseConfig.from_env() + with psycopg2.connect(config.application_dsn()) as conn: conn.autocommit = True with conn.cursor() as cursor: - if args.currencies: - _seed_currencies(cursor, dry_run=args.dry_run) - if args.units: - _seed_units(cursor, dry_run=args.dry_run) - if args.theme: - _seed_theme(cursor, dry_run=args.dry_run) - if args.defaults: - _seed_defaults(cursor, dry_run=args.dry_run) + if currencies: + _seed_currencies(cursor, dry_run=dry_run) + if units: + _seed_units(cursor, dry_run=dry_run) + if theme: + _seed_theme(cursor, dry_run=dry_run) + if defaults: + _seed_defaults(cursor, dry_run=dry_run) def _seed_currencies(cursor, *, dry_run: bool) -> None: diff --git a/tests/unit/test_seed_data.py b/tests/unit/test_seed_data.py new file mode 100644 index 0000000..87094b9 --- /dev/null +++ b/tests/unit/test_seed_data.py @@ -0,0 +1,46 @@ +import argparse +from unittest import mock + +import scripts.seed_data as seed_data +from scripts.seed_data import DatabaseConfig + + +def test_run_with_namespace_handles_missing_theme_flag_without_actions() -> None: + args = argparse.Namespace(currencies=False, units=False, defaults=False) + config = mock.create_autospec(DatabaseConfig) + config.application_dsn.return_value = "postgresql://example" + + with ( + mock.patch("scripts.seed_data._configure_logging") as configure_logging, + mock.patch("scripts.seed_data.psycopg2.connect") as connect_mock, + mock.patch.object(seed_data.logger, "info") as info_mock, + ): + seed_data.run_with_namespace(args, config=config) + + configure_logging.assert_called_once() + connect_mock.assert_not_called() + info_mock.assert_called_with("No seeding options provided; exiting") + + +def test_run_with_namespace_seeds_defaults_without_theme_flag() -> None: + args = argparse.Namespace( + currencies=False, units=False, defaults=True, dry_run=False) + config = mock.create_autospec(DatabaseConfig) + config.application_dsn.return_value = "postgresql://example" + + connection_mock = mock.MagicMock() + cursor_context = mock.MagicMock() + cursor_mock = mock.MagicMock() + connection_mock.__enter__.return_value = connection_mock + connection_mock.cursor.return_value = cursor_context + cursor_context.__enter__.return_value = cursor_mock + + with ( + mock.patch("scripts.seed_data._configure_logging"), + mock.patch("scripts.seed_data.psycopg2.connect", return_value=connection_mock) as connect_mock, + mock.patch("scripts.seed_data._seed_defaults") as seed_defaults, + ): + seed_data.run_with_namespace(args, config=config) + + connect_mock.assert_called_once_with(config.application_dsn()) + seed_defaults.assert_called_once_with(cursor_mock, dry_run=False) From 2f5306b7932ac776a44191e864dd46848333a291 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 15:29:05 
+0100 Subject: [PATCH 13/31] fix: Update container configuration for test jobs to use specific Playwright image --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index e70cf0f..8df0a5a 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -5,7 +5,7 @@ jobs: tests: name: ${{ matrix.target }} tests runs-on: ubuntu-latest - container: ${{ matrix.target == 'e2e'}} + container: mcr.microsoft.com/playwright/python:v1.48.0-jammy env: DATABASE_DRIVER: postgresql DATABASE_HOST: postgres From a861efeabf9367b0cb38ed0ab4eb408553c28ce3 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 15:39:53 +0100 Subject: [PATCH 14/31] fix: Add Node.js runtime installation step to test workflow --- .gitea/workflows/test.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 8df0a5a..974ead1 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -35,6 +35,14 @@ jobs: --health-timeout 5s --health-retries 10 steps: + - name: Install Node.js runtime + shell: bash + run: | + set -euo pipefail + export DEBIAN_FRONTEND=noninteractive + curl -fsSL https://deb.nodesource.com/setup_20.x | bash - + apt-get install -y nodejs + - name: Checkout code uses: actions/checkout@v4 From 7d0c8bfc537ddb430de364012f82fb2df8f8a120 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 16:47:59 +0100 Subject: [PATCH 15/31] fix: Improve proxy configuration handling in setup action --- .gitea/actions/setup-python-env/action.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.gitea/actions/setup-python-env/action.yml b/.gitea/actions/setup-python-env/action.yml index b85b21e..0658ec0 100644 --- a/.gitea/actions/setup-python-env/action.yml +++ b/.gitea/actions/setup-python-env/action.yml @@ -44,10 +44,13 @@ runs: echo "HTTP_PROXY=${PROXY_HOST}" echo "HTTPS_PROXY=${PROXY_HOST}" } >> "$GITHUB_ENV" - sudo tee /etc/apt/apt.conf.d/01proxy >/dev/null </dev/null 2>&1; then + printf 'Acquire::http::Proxy "%s";\nAcquire::https::Proxy "%s";\n' "${PROXY_HOST}" "${PROXY_HOST}" | sudo tee /etc/apt/apt.conf.d/01proxy >/dev/null + elif [ "$(id -u)" -eq 0 ]; then + printf 'Acquire::http::Proxy "%s";\nAcquire::https::Proxy "%s";\n' "${PROXY_HOST}" "${PROXY_HOST}" > /etc/apt/apt.conf.d/01proxy + else + echo "Skipping /etc/apt/apt.conf.d/01proxy update; sudo/root not available" >&2 + fi - name: Install dependencies shell: bash run: | From 7385bdad3ece17a21ef3978955a59a22ead13a01 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 18:04:15 +0100 Subject: [PATCH 16/31] feat: Add theme normalization and API integration for theme settings --- docker-compose.dev.yml | 50 ++++++++++++++++++++++++++++++++++++++++++ static/js/theme.js | 36 +++++++++++++++++++++++++----- 2 files changed, 81 insertions(+), 5 deletions(-) create mode 100644 docker-compose.dev.yml diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 0000000..870c37c --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,50 @@ +services: + api: + build: + context: . 
+ dockerfile: Dockerfile + command: uvicorn main:app --host 0.0.0.0 --port 8000 --reload + ports: + - "8000:8000" + environment: + - DATABASE_HOST=db + - DATABASE_PORT=5432 + - DATABASE_USER=calminer + - DATABASE_PASSWORD=calminer + - DATABASE_NAME=calminer_dev + volumes: + - .:/app + depends_on: + db: + condition: service_healthy + networks: + - calminer_backend + + db: + image: postgres:16 + restart: unless-stopped + environment: + - POSTGRES_DB=calminer_dev + - POSTGRES_USER=calminer + - POSTGRES_PASSWORD=calminer + - LANG=en_US.UTF-8 + - LC_ALL=en_US.UTF-8 + - POSTGRES_INITDB_ARGS=--encoding=UTF8 --locale=en_US.UTF-8 + ports: + - "5432:5432" + volumes: + - pg_data_dev:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U calminer -d calminer_dev"] + interval: 10s + timeout: 5s + retries: 5 + networks: + - calminer_backend + +networks: + calminer_backend: + driver: bridge + +volumes: + pg_data_dev: diff --git a/static/js/theme.js b/static/js/theme.js index 0ff624f..a26f2bb 100644 --- a/static/js/theme.js +++ b/static/js/theme.js @@ -41,6 +41,30 @@ document.addEventListener('DOMContentLoaded', () => { }); }); + const THEME_API_URL = '/api/settings/theme'; + + const normalizeTheme = (theme) => { + if (!theme || typeof theme !== 'object') { + return {}; + } + const { + theme_name, + primary_color, + secondary_color, + accent_color, + background_color, + text_color, + } = theme; + return { + theme_name, + primary_color, + secondary_color, + accent_color, + background_color, + text_color, + }; + }; + if (themeSettingsForm) { themeSettingsForm.addEventListener('submit', async (event) => { event.preventDefault(); @@ -49,7 +73,7 @@ document.addEventListener('DOMContentLoaded', () => { const themeData = Object.fromEntries(formData.entries()); try { - const response = await fetch('/api/theme-settings', { + const response = await fetch(THEME_API_URL, { method: 'POST', headers: { 'Content-Type': 'application/json', @@ -58,9 +82,11 @@ document.addEventListener('DOMContentLoaded', () => { }); if (response.ok) { + const payload = await response.json(); + const savedTheme = normalizeTheme(payload?.theme ?? 
themeData); alert('Theme settings saved successfully!'); - applyTheme(themeData); - saveTheme(themeData); + applyTheme(savedTheme); + saveTheme(savedTheme); } else { const errorData = await response.json(); alert(`Error saving theme settings: ${errorData.detail}`); @@ -91,9 +117,9 @@ document.addEventListener('DOMContentLoaded', () => { // If no saved theme, load from backend (if available) async function loadAndApplyThemeFromServer() { try { - const response = await fetch('/api/theme-settings'); // Assuming a GET endpoint for theme settings + const response = await fetch(THEME_API_URL); if (response.ok) { - const theme = await response.json(); + const theme = normalizeTheme(await response.json()); applyTheme(theme); saveTheme(theme); // Save to local storage for future use } else { From 54137b88d73db3b1a9f6649e520aaaaeb917c11b Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 18:39:20 +0100 Subject: [PATCH 17/31] feat: Enhance Python environment setup with system Python option and improve dependency installation refactor: Clean up imports in currencies and users routes fix: Update theme settings saving logic and clean up test imports --- .gitea/actions/setup-python-env/action.yml | 30 +++++++++++++++++----- .gitea/workflows/test.yml | 1 + routes/currencies.py | 2 +- routes/settings.py | 2 +- routes/users.py | 16 ++++++++---- scripts/backfill_currency.py | 7 +++-- scripts/seed_data.py | 3 +-- tests/unit/test_theme_settings.py | 7 ----- 8 files changed, 42 insertions(+), 26 deletions(-) diff --git a/.gitea/actions/setup-python-env/action.yml b/.gitea/actions/setup-python-env/action.yml index 0658ec0..d824b84 100644 --- a/.gitea/actions/setup-python-env/action.yml +++ b/.gitea/actions/setup-python-env/action.yml @@ -5,30 +5,48 @@ inputs: python-version: description: Python version to install. required: false - default: "3.10" + default: '3.10' + use-system-python: + description: Skip setup-python and rely on the system Python already available in the environment. + required: false + default: 'false' install-playwright: description: Install Playwright browsers when true. required: false - default: "false" + default: 'false' install-requirements: description: Space-delimited list of requirement files to install. required: false - default: "requirements.txt requirements-test.txt" + default: 'requirements.txt requirements-test.txt' run-db-setup: description: Run database wait and setup scripts when true. required: false - default: "true" + default: 'true' db-dry-run: description: Execute setup script dry run before live run when true. required: false - default: "true" + default: 'true' runs: using: composite steps: - name: Set up Python + if: ${{ inputs.use-system-python != 'true' }} uses: actions/setup-python@v5 with: python-version: ${{ inputs.python-version }} + + - name: Verify system Python + if: ${{ inputs.use-system-python == 'true' }} + shell: bash + run: | + set -euo pipefail + if ! 
command -v python >/dev/null 2>&1; then + echo "Python executable not found on PATH" >&2 + exit 1 + fi + python --version + python -m pip --version >/dev/null 2>&1 || python -m ensurepip --upgrade + python -m pip --version - name: Configure apt proxy shell: bash run: | @@ -59,7 +77,7 @@ runs: if [ -n "${requirements}" ]; then for requirement in ${requirements}; do if [ -f "${requirement}" ]; then - pip install -r "${requirement}" + python -m pip install -r "${requirement}" else echo "Requirement file ${requirement} not found" >&2 exit 1 diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 974ead1..78f56f2 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -58,6 +58,7 @@ jobs: uses: ./.gitea/actions/setup-python-env with: install-playwright: ${{ matrix.target != 'e2e' }} + use-system-python: 'true' - name: Run tests run: | diff --git a/routes/currencies.py b/routes/currencies.py index 642bb11..8899366 100644 --- a/routes/currencies.py +++ b/routes/currencies.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Optional +from typing import List, Optional from fastapi import APIRouter, Depends, HTTPException, Query, status from pydantic import BaseModel, ConfigDict, Field, field_validator diff --git a/routes/settings.py b/routes/settings.py index 2308d7b..ed06fb5 100644 --- a/routes/settings.py +++ b/routes/settings.py @@ -101,7 +101,7 @@ class ThemeSettings(BaseModel): @router.post("/theme") async def update_theme(theme_data: ThemeSettings, db: Session = Depends(get_db)): data_dict = theme_data.model_dump() - saved = save_theme_settings(db, data_dict) + save_theme_settings(db, data_dict) return {"message": "Theme updated", "theme": data_dict} diff --git a/routes/users.py b/routes/users.py index d538292..5de7092 100644 --- a/routes/users.py +++ b/routes/users.py @@ -3,9 +3,15 @@ from sqlalchemy.orm import Session from config.database import get_db from models.user import User -from services.security import get_password_hash, verify_password, create_access_token, SECRET_KEY, ALGORITHM, get_current_user, oauth2_scheme -from jose import jwt, JWTError -from schemas.user import UserCreate, UserInDB, UserLogin, UserUpdate, PasswordResetRequest, PasswordReset, Token +from services.security import create_access_token, get_current_user +from schemas.user import ( + PasswordReset, + PasswordResetRequest, + UserCreate, + UserInDB, + UserLogin, + UserUpdate, +) router = APIRouter(prefix="/users", tags=["users"]) @@ -62,7 +68,7 @@ async def update_user_me(user_update: UserUpdate, current_user: User = Depends(g if existing_user: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Username already taken") - current_user.username = user_update.username + setattr(current_user, "username", user_update.username) if user_update.email and user_update.email != current_user.email: existing_user = db.query(User).filter( @@ -70,7 +76,7 @@ async def update_user_me(user_update: UserUpdate, current_user: User = Depends(g if existing_user: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Email already registered") - current_user.email = user_update.email + setattr(current_user, "email", user_update.email) if user_update.password: current_user.set_password(user_update.password) diff --git a/scripts/backfill_currency.py b/scripts/backfill_currency.py index c56d6af..4651021 100644 --- a/scripts/backfill_currency.py +++ b/scripts/backfill_currency.py @@ -43,15 +43,14 @@ def backfill( engine = create_engine(db_url) with engine.begin() as conn: # 
Ensure currency table exists - res = ( + if db_url.startswith("sqlite:"): conn.execute( text( "SELECT name FROM sqlite_master WHERE type='table' AND name='currency';" ) ) - if db_url.startswith("sqlite:") - else conn.execute(text("SELECT to_regclass('public.currency');")) - ) + else: + conn.execute(text("SELECT to_regclass('public.currency');")) # Note: we don't strictly depend on the above - we assume migration was already applied # Helper: find or create currency by code diff --git a/scripts/seed_data.py b/scripts/seed_data.py index 4db4c1c..b762d04 100644 --- a/scripts/seed_data.py +++ b/scripts/seed_data.py @@ -16,8 +16,7 @@ from __future__ import annotations import argparse import logging -import os -from typing import Iterable, Optional +from typing import Optional import psycopg2 from psycopg2 import errors diff --git a/tests/unit/test_theme_settings.py b/tests/unit/test_theme_settings.py index c1e79ba..e24f7ec 100644 --- a/tests/unit/test_theme_settings.py +++ b/tests/unit/test_theme_settings.py @@ -1,15 +1,8 @@ -import pytest from sqlalchemy.orm import Session -from fastapi.testclient import TestClient -from main import app -from models.theme_setting import ThemeSetting from services.settings import save_theme_settings, get_theme_settings -client = TestClient(app) - - def test_save_theme_settings(db_session: Session): theme_data = { "theme_name": "dark", From 1f8a5952433cf6dd7582a937e58f1fa3ad1da83b Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 18:58:18 +0100 Subject: [PATCH 18/31] fix: Export PYTHONPATH to GitHub environment for test workflows --- .gitea/workflows/test.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 78f56f2..f7e72b1 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -46,6 +46,12 @@ jobs: - name: Checkout code uses: actions/checkout@v4 + - name: Export PYTHONPATH + shell: bash + run: | + set -euo pipefail + echo "PYTHONPATH=/workspace/allucanget/calminer" >> "$GITHUB_ENV" + # - name: Cache pip dependencies # uses: actions/cache@v4 # with: From 2f07e6fb75764141e4d7dafd346816b234216866 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 19:07:10 +0100 Subject: [PATCH 19/31] fix: Update Playwright Python container version to v1.55.0 --- .gitea/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index f7e72b1..4c98712 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -5,7 +5,7 @@ jobs: tests: name: ${{ matrix.target }} tests runs-on: ubuntu-latest - container: mcr.microsoft.com/playwright/python:v1.48.0-jammy + container: mcr.microsoft.com/playwright/python:v1.55.0-jammy env: DATABASE_DRIVER: postgresql DATABASE_HOST: postgres From b56045ca6a0ff81b98808ddcfd6fe8b12eb0a6fd Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 19:44:43 +0100 Subject: [PATCH 20/31] feat: Add Docker Compose configuration for testing and API services --- docker-compose.test.yml | 82 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 docker-compose.test.yml diff --git a/docker-compose.test.yml b/docker-compose.test.yml new file mode 100644 index 0000000..997413d --- /dev/null +++ b/docker-compose.test.yml @@ -0,0 +1,82 @@ +services: + tests: + build: + context: . 
+ dockerfile: Dockerfile + command: > + sh -c "set -eu; pip install -r requirements-test.txt; python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v; python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v; pytest $${PYTEST_TARGET:-tests/unit}" + environment: + DATABASE_DRIVER: postgresql + DATABASE_HOST: postgres + DATABASE_PORT: 5432 + DATABASE_NAME: calminer_test + DATABASE_USER: calminer_test + DATABASE_PASSWORD: calminer_test_password + DATABASE_SCHEMA: public + DATABASE_SUPERUSER: postgres + DATABASE_SUPERUSER_PASSWORD: postgres + DATABASE_SUPERUSER_DB: postgres + DATABASE_URL: postgresql+psycopg2://calminer_test:calminer_test_password@postgres:5432/calminer_test + PYTEST_TARGET: tests/unit + PYTHONPATH: /app + depends_on: + postgres: + condition: service_healthy + volumes: + - .:/app + - pip_cache_test:/root/.cache/pip + networks: + - calminer_test + + api: + build: + context: . + dockerfile: Dockerfile + command: uvicorn main:app --host 0.0.0.0 --port 8000 --reload + environment: + DATABASE_DRIVER: postgresql + DATABASE_HOST: postgres + DATABASE_PORT: 5432 + DATABASE_NAME: calminer_test + DATABASE_USER: calminer_test + DATABASE_PASSWORD: calminer_test_password + DATABASE_SCHEMA: public + DATABASE_URL: postgresql+psycopg2://calminer_test:calminer_test_password@postgres:5432/calminer_test + PYTHONPATH: /app + depends_on: + postgres: + condition: service_healthy + ports: + - "8001:8000" + networks: + - calminer_test + + postgres: + image: postgres:16 + restart: unless-stopped + environment: + POSTGRES_DB: calminer_test + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + LANG: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=en_US.UTF-8 + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d calminer_test"] + interval: 10s + timeout: 5s + retries: 5 + ports: + - "5433:5432" + volumes: + - pg_data_test:/var/lib/postgresql/data + networks: + - calminer_test + +networks: + calminer_test: + driver: bridge + +volumes: + pg_data_test: + pip_cache_test: From a6a5f630cc9e02e2ec7f92c290a73bc5cb1c1164 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 19:46:35 +0100 Subject: [PATCH 21/31] feat: Add initial Docker Compose configuration for API service --- docker-compose.yml | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 docker-compose.yml diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e2ff96c --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,39 @@ +services: + api: + image: ${CALMINER_IMAGE:-calminer-api:latest} + build: + context: . 
+ dockerfile: Dockerfile + restart: unless-stopped + env_file: + - config/setup_production.env + environment: + UVICORN_WORKERS: ${UVICORN_WORKERS:-2} + UVICORN_LOG_LEVEL: ${UVICORN_LOG_LEVEL:-info} + command: + [ + "sh", + "-c", + "uvicorn main:app --host 0.0.0.0 --port 8000 --workers ${UVICORN_WORKERS:-2} --log-level ${UVICORN_LOG_LEVEL:-info}", + ] + ports: + - "${CALMINER_API_PORT:-8000}:8000" + healthcheck: + test: + - "CMD-SHELL" + - 'python -c "import urllib.request; urllib.request.urlopen(''http://127.0.0.1:8000/docs'').read()"' + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + networks: + - calminer_backend + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + +networks: + calminer_backend: + driver: bridge From dcb08ab1b8e03eb675a1f876461dd5a6897b5e25 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 20:57:36 +0100 Subject: [PATCH 22/31] feat: Add production and development Docker Compose configurations, health check endpoint, and update documentation --- README.md | 14 ++- backups/.gitkeep | 0 config/setup_production.env.example | 35 +++++++ docker-compose.prod.yml | 130 ++++++++++++++++++++++++ docs/architecture/07_deployment_view.md | 36 ++++++- docs/quickstart.md | 102 ++++++++++++++++++- main.py | 5 + 7 files changed, 318 insertions(+), 4 deletions(-) create mode 100644 backups/.gitkeep create mode 100644 config/setup_production.env.example create mode 100644 docker-compose.prod.yml diff --git a/README.md b/README.md index 37f0e90..875c722 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,19 @@ docker run --rm -p 8000:8000 ^ ### Orchestrated Deployment -Use `docker compose` or an orchestrator of your choice to co-locate PostgreSQL/Redis alongside the app when needed. The image expects migrations to be applied before startup. +Use `docker compose` or an orchestrator of your choice to co-locate PostgreSQL/Redis/Traefik alongside the app when needed. The image expects migrations to be applied before startup. + +### Production docker-compose workflow + +`docker-compose.prod.yml` covers the API plus optional Traefik (`reverse-proxy` profile) and on-host Postgres (`local-db` profile). Commands, health checks, and environment variables are documented in [docs/quickstart.md](docs/quickstart.md#compose-driven-production-stack) and expanded in [docs/architecture/07_deployment_view.md](docs/architecture/07_deployment_view.md). + +### Development docker-compose workflow + +`docker-compose.dev.yml` runs FastAPI (with reload) and Postgres in a single stack. See [docs/quickstart.md](docs/quickstart.md#compose-driven-development-stack) for lifecycle commands and troubleshooting, plus the architecture chapter ([docs/architecture/15_development_setup.md](docs/architecture/15_development_setup.md)) for deeper context. + +### Test docker-compose workflow + +`docker-compose.test.yml` mirrors the CI pipeline: it provisions Postgres, runs the database bootstrap script, and executes pytest. Usage examples live in [docs/quickstart.md](docs/quickstart.md#compose-driven-test-stack). 
## CI/CD expectations diff --git a/backups/.gitkeep b/backups/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/config/setup_production.env.example b/config/setup_production.env.example new file mode 100644 index 0000000..fefd6f2 --- /dev/null +++ b/config/setup_production.env.example @@ -0,0 +1,35 @@ +# Copy this file to config/setup_production.env and replace values with production secrets + +# Container image and runtime configuration +CALMINER_IMAGE=registry.example.com/calminer/api:latest +CALMINER_DOMAIN=calminer.example.com +TRAEFIK_ACME_EMAIL=ops@example.com +CALMINER_API_PORT=8000 +UVICORN_WORKERS=4 +UVICORN_LOG_LEVEL=info +CALMINER_NETWORK=calminer_backend +API_LIMIT_CPUS=1.0 +API_LIMIT_MEMORY=1g +API_RESERVATION_MEMORY=512m +TRAEFIK_LIMIT_CPUS=0.5 +TRAEFIK_LIMIT_MEMORY=512m +POSTGRES_LIMIT_CPUS=1.0 +POSTGRES_LIMIT_MEMORY=2g +POSTGRES_RESERVATION_MEMORY=1g + +# Application database connection +DATABASE_DRIVER=postgresql+psycopg2 +DATABASE_HOST=production-db.internal +DATABASE_PORT=5432 +DATABASE_NAME=calminer +DATABASE_USER=calminer_app +DATABASE_PASSWORD=ChangeMe123! +DATABASE_SCHEMA=public + +# Optional consolidated SQLAlchemy URL (overrides granular settings when set) +# DATABASE_URL=postgresql+psycopg2://calminer_app:ChangeMe123!@production-db.internal:5432/calminer + +# Superuser credentials used by scripts/setup_database.py for migrations/seed data +DATABASE_SUPERUSER=postgres +DATABASE_SUPERUSER_PASSWORD=ChangeMeSuper123! +DATABASE_SUPERUSER_DB=postgres diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 0000000..75c0cdd --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,130 @@ +services: + api: + image: ${CALMINER_IMAGE:-calminer-api:latest} + build: + context: . + dockerfile: Dockerfile + restart: unless-stopped + env_file: + - config/setup_production.env + environment: + UVICORN_WORKERS: ${UVICORN_WORKERS:-2} + UVICORN_LOG_LEVEL: ${UVICORN_LOG_LEVEL:-info} + command: + [ + "sh", + "-c", + "uvicorn main:app --host 0.0.0.0 --port 8000 --workers ${UVICORN_WORKERS:-2} --log-level ${UVICORN_LOG_LEVEL:-info}", + ] + ports: + - "${CALMINER_API_PORT:-8000}:8000" + deploy: + resources: + limits: + cpus: ${API_LIMIT_CPUS:-1.0} + memory: ${API_LIMIT_MEMORY:-1g} + reservations: + memory: ${API_RESERVATION_MEMORY:-512m} + healthcheck: + test: + - "CMD-SHELL" + - 'python -c "import urllib.request; urllib.request.urlopen(''http://127.0.0.1:8000/health'').read()"' + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + networks: + - calminer_backend + logging: + driver: json-file + options: + max-size: "10m" + max-file: "3" + labels: + - "traefik.enable=true" + - "traefik.http.routers.calminer.rule=Host(`${CALMINER_DOMAIN}`)" + - "traefik.http.routers.calminer.entrypoints=websecure" + - "traefik.http.routers.calminer.tls.certresolver=letsencrypt" + - "traefik.http.services.calminer.loadbalancer.server.port=8000" + + traefik: + image: traefik:v3.1 + restart: unless-stopped + command: + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:80" + - "--entrypoints.websecure.address=:443" + - "--certificatesresolvers.letsencrypt.acme.tlschallenge=true" + - "--certificatesresolvers.letsencrypt.acme.email=${TRAEFIK_ACME_EMAIL:?TRAEFIK_ACME_EMAIL not set}" + - "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json" + ports: + - "80:80" + - "443:443" + deploy: + resources: + limits: + cpus: ${TRAEFIK_LIMIT_CPUS:-0.5} + memory: ${TRAEFIK_LIMIT_MEMORY:-512m} 
+ volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - traefik_letsencrypt:/letsencrypt + networks: + - calminer_backend + profiles: + - reverse-proxy + healthcheck: + test: + - "CMD" + - "traefik" + - "healthcheck" + - "--entrypoints=web" + - "--entrypoints=websecure" + interval: 30s + timeout: 10s + retries: 5 + + postgres: + image: postgres:16 + profiles: + - local-db + restart: unless-stopped + environment: + POSTGRES_DB: ${POSTGRES_DB:-calminer} + POSTGRES_USER: ${POSTGRES_USER:-calminer} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme} + LANG: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=en_US.UTF-8 + ports: + - "${CALMINER_DB_PORT:-5432}:5432" + deploy: + resources: + limits: + cpus: ${POSTGRES_LIMIT_CPUS:-1.0} + memory: ${POSTGRES_LIMIT_MEMORY:-2g} + reservations: + memory: ${POSTGRES_RESERVATION_MEMORY:-1g} + volumes: + - pg_data_prod:/var/lib/postgresql/data + - ./backups:/backups + healthcheck: + test: + [ + "CMD-SHELL", + "pg_isready -U ${POSTGRES_USER:-calminer} -d ${POSTGRES_DB:-calminer}", + ] + interval: 30s + timeout: 10s + retries: 5 + networks: + - calminer_backend + +networks: + calminer_backend: + name: ${CALMINER_NETWORK:-calminer_backend} + driver: bridge + +volumes: + pg_data_prod: + traefik_letsencrypt: diff --git a/docs/architecture/07_deployment_view.md b/docs/architecture/07_deployment_view.md index e3455a3..4a3ae6e 100644 --- a/docs/architecture/07_deployment_view.md +++ b/docs/architecture/07_deployment_view.md @@ -1,6 +1,6 @@ --- -title: "07 — Deployment View" -description: "Describe deployment topology, infrastructure components, and environments (dev/stage/prod)." +title: '07 — Deployment View' +description: 'Describe deployment topology, infrastructure components, and environments (dev/stage/prod).' status: draft --- @@ -85,6 +85,14 @@ The development environment is set up for local development and testing. It incl - Local PostgreSQL instance (docker compose recommended, script available at `docker-compose.postgres.yml`) - FastAPI server running in debug mode +`docker-compose.dev.yml` encapsulates this topology: + +- `api` service mounts the repository for live reloads (`uvicorn --reload`) and depends on the database health check. +- `db` service uses the Debian-based `postgres:16` image with UTF-8 locale configuration and persists data in `pg_data_dev`. +- A shared `calminer_backend` bridge network keeps traffic contained; ports 8000/5432 are published for local tooling. + +See [docs/quickstart.md](../quickstart.md#compose-driven-development-stack) for command examples and volume maintenance tips. + ### Testing Environment The testing environment is set up for automated testing and quality assurance. It includes: @@ -93,6 +101,14 @@ The testing environment is set up for automated testing and quality assurance. I - FastAPI server running in testing mode - Automated test suite (e.g., pytest) for running unit and integration tests +`docker-compose.test.yml` provisions an ephemeral CI-like stack: + +- `tests` service builds the application image, installs `requirements-test.txt`, runs the database setup script (dry-run + apply), then executes pytest. +- `api` service is available on port 8001 for manual verification against the test database. +- `postgres` service seeds a disposable Postgres 16 instance with health checks and named volumes (`pg_data_test`, `pip_cache_test`). 
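To replicate that flow outside Compose, a hedged sketch of the same bootstrap order (install test requirements, dry-run the setup script, apply it, then run pytest) could look like this; the helper name is hypothetical, and the flags mirror the compose command shown earlier:

```python
# Sketch of the `tests` service sequence, runnable from the repo root.
import subprocess
import sys

SETUP = [
    sys.executable, "scripts/setup_database.py",
    "--ensure-database", "--ensure-role", "--ensure-schema",
    "--initialize-schema", "--run-migrations", "--seed-data",
]


def run_ci_like_tests(target: str = "tests/unit") -> None:
    """Mirror the tests service: install, dry-run, apply, then test."""
    subprocess.run([sys.executable, "-m", "pip", "install", "-r",
                    "requirements-test.txt"], check=True)
    subprocess.run(SETUP + ["--dry-run", "-v"], check=True)  # report only
    subprocess.run(SETUP + ["-v"], check=True)               # apply for real
    subprocess.run([sys.executable, "-m", "pytest", target], check=True)
```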
+ +Typical commands mirror the CI workflow (`docker compose -f docker-compose.test.yml run --rm tests`); the [quickstart](../quickstart.md#compose-driven-test-stack) lists variations and teardown steps. + ### Production Environment The production environment is set up for serving live traffic and includes: @@ -102,6 +118,22 @@ The production environment is set up for serving live traffic and includes: - Load balancer (Traefik) for distributing incoming requests - Monitoring and logging tools for tracking application performance +#### Production docker compose topology + +- `docker-compose.prod.yml` defines the runtime topology for operator-managed deployments. +- `api` service runs the FastAPI image with resource limits (`API_LIMIT_CPUS`, `API_LIMIT_MEMORY`) and a `/health` probe consumed by Traefik and the Compose health check. +- `traefik` service (enabled via the `reverse-proxy` profile) terminates TLS using the ACME resolver configured by `TRAEFIK_ACME_EMAIL` and routes `CALMINER_DOMAIN` traffic to the API. +- `postgres` service (enabled via the `local-db` profile) exists for edge deployments without managed PostgreSQL and persists data in the `pg_data_prod` volume while mounting `./backups` for operator snapshots. +- All services join the configurable `CALMINER_NETWORK` (defaults to `calminer_backend`) to keep traffic isolated from host networks. + +Deployment workflow: + +1. Copy `config/setup_production.env.example` to `config/setup_production.env` and populate domain, registry image tag, database credentials, and resource budgets. +2. Launch the stack with `docker compose --env-file config/setup_production.env -f docker-compose.prod.yml --profile reverse-proxy up -d` (append `--profile local-db` when hosting Postgres locally). +3. Run database migrations and seeding using `docker compose --env-file config/setup_production.env -f docker-compose.prod.yml run --rm api python scripts/setup_database.py --run-migrations --seed-data`. +4. Monitor container health via `docker compose -f docker-compose.prod.yml ps` or Traefik dashboards; the API health endpoint returns `{ "status": "ok" }` when ready. +5. Shut down with `docker compose -f docker-compose.prod.yml down` (volumes persist unless `-v` is supplied). + ## Containerized Deployment Flow The Docker-based deployment path aligns with the solution strategy documented in [Solution Strategy](04_solution_strategy.md) and the CI practices captured in [Testing & CI](07_deployment/07_01_testing_ci.md.md). diff --git a/docs/quickstart.md b/docs/quickstart.md index e98d227..ea43291 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -4,6 +4,13 @@ This document contains the expanded development, usage, testing, and migration g ## Development +### Prerequisites + +- Python 3.10+ +- Node.js 20+ (for Playwright-driven E2E tests) +- Docker (optional, required for containerized workflows) +- Git + To get started locally: ```powershell @@ -47,6 +54,99 @@ docker run --rm -p 8000:8000 ^ If you maintain a Postgres or Redis dependency locally, consider authoring a `docker compose` stack that pairs them with the app container. The Docker image expects the database to be reachable and migrations executed before serving traffic. +### Compose-driven development stack + +The repository ships with `docker-compose.dev.yml`, wiring the API and database into a single development stack. It defaults to the Debian-based `postgres:16` image so UTF-8 locales are available without additional tooling and mounts persistent data in the `pg_data_dev` volume. 
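Once the stack is up, a minimal connectivity check from the host can confirm the database is reachable. This sketch assumes the published port 5432 and the credentials baked into `docker-compose.dev.yml` (`calminer`/`calminer`/`calminer_dev`); adjust the URL if you override them:

```python
# Host-side smoke test against the dev stack's published Postgres port.
from sqlalchemy import create_engine, text

engine = create_engine(
    "postgresql+psycopg2://calminer:calminer@localhost:5432/calminer_dev"
)
with engine.connect() as conn:
    print(conn.execute(text("SELECT version()")).scalar_one())
```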
+ +Typical workflow (run from the repository root): + +```powershell +# Build images and ensure dependencies are cached +docker compose -f docker-compose.dev.yml build + +# Start FastAPI and Postgres in the background +docker compose -f docker-compose.dev.yml up -d + +# Tail logs for both services +docker compose -f docker-compose.dev.yml logs -f + +# Stop services but keep the database volume for reuse +docker compose -f docker-compose.dev.yml down + +# Remove the persistent Postgres volume when you need a clean slate +docker volume rm calminer_pg_data_dev # optional; confirm exact name with `docker volume ls` +``` + +Environment variables used by the containers live directly in the compose file (`DATABASE_HOST=db`, `DATABASE_NAME=calminer_dev`, etc.), so no extra `.env` file is required. Adjust or override them via `docker compose ... -e VAR=value` if necessary. + +For a deeper walkthrough (including volume naming conventions, port mappings, and how the stack fits into the broader architecture), cross-check `docs/architecture/15_development_setup.md`. That chapter mirrors the compose defaults captured here so both documents stay in sync. + +### Compose-driven test stack + +Use `docker-compose.test.yml` to spin up a Postgres 16 container and execute the Python test suite in a disposable worker container: + +```powershell +# Build images used by the test workflow +docker compose -f docker-compose.test.yml build + +# Run the default target (unit tests) +docker compose -f docker-compose.test.yml run --rm tests + +# Run a specific target (e.g., full suite) +docker compose -f docker-compose.test.yml run --rm -e PYTEST_TARGET=tests tests + +# Tear everything down and drop the test database volume +docker compose -f docker-compose.test.yml down -v +``` + +The `tests` service prepares the database via `scripts/setup_database.py` before invoking pytest, ensuring migrations and seed data mirror CI behaviour. Named volumes (`pip_cache_test`, `pg_data_test`) cache dependencies and data between runs; remove them with `down -v` whenever you want a pristine environment. An `api` service is available on `http://localhost:8001` for spot-checking API responses against the same test database. + +### Compose-driven production stack + +Use `docker-compose.prod.yml` for operator-managed deployments. The file defines: + +- `api`: FastAPI container with configurable CPU/memory limits and a `/health` probe. +- `traefik`: Optional (enable with the `reverse-proxy` profile) to terminate TLS and route traffic based on `CALMINER_DOMAIN`. +- `postgres`: Optional (enable with the `local-db` profile) when a managed database is unavailable; persists data in `pg_data_prod` and mounts `./backups`. 
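The `/health` probe wired into the `api` service can also be polled from deployment tooling. The sketch below assumes the documented `{"status": "ok"}` payload; the retry cadence is illustrative:

```python
# Hedged sketch of a deployment-side health poll against the api service.
import json
import time
import urllib.request


def wait_for_health(url: str = "http://127.0.0.1:8000/health",
                    attempts: int = 10, delay: float = 3.0) -> bool:
    """Poll the health endpoint until it reports ok or attempts run out."""
    for _ in range(attempts):
        try:
            with urllib.request.urlopen(url, timeout=5) as response:
                if json.load(response).get("status") == "ok":
                    return True
        except OSError:  # connection refused, timeouts, HTTP errors
            pass
        time.sleep(delay)
    return False
```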
+ +Commands (run from the repository root): + +```powershell +# Prepare environment variables once per environment +copy config\setup_production.env.example config\setup_production.env + +# Start API behind Traefik +docker compose ^ + --env-file config/setup_production.env ^ + -f docker-compose.prod.yml ^ + --profile reverse-proxy ^ + up -d + +# Add the local Postgres profile when running without managed DB +docker compose ^ + --env-file config/setup_production.env ^ + -f docker-compose.prod.yml ^ + --profile reverse-proxy --profile local-db ^ + up -d + +# Apply migrations/seed data +docker compose ^ + --env-file config/setup_production.env ^ + -f docker-compose.prod.yml ^ + run --rm api ^ + python scripts/setup_database.py --run-migrations --seed-data + +# Check health (FastAPI exposes /health) +docker compose -f docker-compose.prod.yml ps + +# Stop services (volumes persist unless -v is supplied) +docker compose -f docker-compose.prod.yml down +``` + +Key environment variables (documented in `config/setup_production.env.example`): container image tag, domain/ACME email, published ports, network name, and resource limits (`API_LIMIT_CPUS`, `API_LIMIT_MEMORY`, etc.). + +For deployment topology diagrams and operational sequencing, see [docs/architecture/07_deployment_view.md](architecture/07_deployment_view.md#production-docker-compose-topology). + ## Usage Overview - **API base URL**: `http://localhost:8000/api` @@ -98,7 +198,7 @@ python scripts/setup_database.py --run-migrations --seed-data The dry-run invocation reports which steps would execute without making changes. The live run applies the baseline (if not already recorded in `schema_migrations`) and seeds the reference data relied upon by the UI and API. > ℹ️ When `--seed-data` is supplied without `--run-migrations`, the bootstrap script automatically applies any pending SQL migrations first so the `application_setting` table (and future settings-backed features) are present before seeding. - +> > ℹ️ The application still accepts `DATABASE_URL` as a fallback if the granular variables are not set. 
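+
+To make the fallback concrete, the two configurations below are equivalent from the application's perspective (illustrative values; the variable names and URL format mirror the ones used in the CI workflows):
+
+```yaml
+# Option 1: granular variables (preferred)
+environment:
+  DATABASE_DRIVER: postgresql
+  DATABASE_HOST: db
+  DATABASE_PORT: '5432'
+  DATABASE_NAME: calminer_dev
+  DATABASE_USER: calminer
+  DATABASE_PASSWORD: secret
+---
+# Option 2: single-URL fallback, used only when the granular variables are absent
+environment:
+  DATABASE_URL: postgresql+psycopg2://calminer:secret@db:5432/calminer_dev
+```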
## Database bootstrap workflow diff --git a/main.py b/main.py index 171cd88..858296d 100644 --- a/main.py +++ b/main.py @@ -32,6 +32,11 @@ async def json_validation( return await validate_json(request, call_next) +@app.get("/health", summary="Container health probe") +async def health() -> dict[str, str]: + return {"status": "ok"} + + app.mount("/static", StaticFiles(directory="static"), name="static") # Include API routers From 723f6a62b888abd3e3b08ec9331f54f778ff5180 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 21:12:46 +0100 Subject: [PATCH 23/31] feat: Enhance CI workflows with health checks and update PostgreSQL image version --- .gitea/workflows/build-and-push.yml | 4 ++++ .gitea/workflows/deploy.yml | 13 +++++++++++++ .gitea/workflows/test.yml | 6 ++++-- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/.gitea/workflows/build-and-push.yml b/.gitea/workflows/build-and-push.yml index cab1385..bd6ce12 100644 --- a/.gitea/workflows/build-and-push.yml +++ b/.gitea/workflows/build-and-push.yml @@ -53,6 +53,8 @@ jobs: - name: Set up QEMU and Buildx uses: docker/setup-buildx-action@v3 + with: + install: false - name: Log in to Gitea registry if: ${{ steps.meta.outputs.on_default == 'true' }} @@ -72,3 +74,5 @@ jobs: tags: | ${{ env.REGISTRY_URL }}/${{ env.REGISTRY_ORG }}/${{ env.REGISTRY_IMAGE_NAME }}:latest ${{ env.REGISTRY_URL }}/${{ env.REGISTRY_ORG }}/${{ env.REGISTRY_IMAGE_NAME }}:${{ steps.meta.outputs.sha }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml index 6566347..ae619eb 100644 --- a/.gitea/workflows/deploy.yml +++ b/.gitea/workflows/deploy.yml @@ -49,3 +49,16 @@ jobs: -e DATABASE_NAME=${{ secrets.DATABASE_NAME }} \ -e DATABASE_SCHEMA=${{ secrets.DATABASE_SCHEMA }} \ "$IMAGE_PATH:$IMAGE_SHA" + + for attempt in {1..10}; do + if curl -fsS http://localhost:8000/health >/dev/null; then + echo "Deployment health check passed" + exit 0 + fi + echo "Health check attempt ${attempt} failed; retrying in 3s" + sleep 3 + done + + echo "Deployment health check failed after retries" >&2 + docker logs calminer >&2 || true + exit 1 diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index 4c98712..c859748 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -24,7 +24,7 @@ jobs: target: [unit, e2e, lint] services: postgres: - image: postgres:16-alpine + image: postgres:16 env: POSTGRES_DB: calminer_ci POSTGRES_USER: calminer @@ -36,6 +36,7 @@ jobs: --health-retries 10 steps: - name: Install Node.js runtime + if: ${{ matrix.target == 'e2e' }} shell: bash run: | set -euo pipefail @@ -63,8 +64,9 @@ jobs: - name: Prepare Python environment uses: ./.gitea/actions/setup-python-env with: - install-playwright: ${{ matrix.target != 'e2e' }} + install-playwright: ${{ matrix.target == 'e2e' }} use-system-python: 'true' + run-db-setup: ${{ matrix.target != 'lint' }} - name: Run tests run: | From c5a9a7c96f8efe9e4389d1deb6e678520f71a7ac Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 27 Oct 2025 22:07:31 +0100 Subject: [PATCH 24/31] fix: Remove conditional execution for Node.js runtime installation in test workflow --- .gitea/workflows/test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index c859748..a0cb25d 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -36,7 +36,6 @@ jobs: --health-retries 10 steps: - name: Install Node.js runtime - if: ${{ matrix.target == 'e2e' }} shell: 
bash run: | set -euo pipefail From 50446c4248b47a76de1ffed5bdd2316a669bbab4 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 06:49:22 +0100 Subject: [PATCH 25/31] feat: Refactor test workflow to separate lint, unit, and e2e jobs with health checks for PostgreSQL service --- .gitea/workflows/test.yml | 99 +++++++++++++++++++++++++++++---------- 1 file changed, 73 insertions(+), 26 deletions(-) diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index a0cb25d..b7e1c9c 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -2,8 +2,75 @@ name: Run Tests on: [push] jobs: - tests: - name: ${{ matrix.target }} tests + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Export PYTHONPATH + shell: bash + run: | + set -euo pipefail + echo "PYTHONPATH=${{ github.workspace }}" >> "$GITHUB_ENV" + + - name: Prepare Python environment + uses: ./.gitea/actions/setup-python-env + with: + use-system-python: 'true' + run-db-setup: 'false' + + - name: Run lint checks + run: ruff check . + + unit: + name: Unit Tests + runs-on: ubuntu-latest + env: + DATABASE_DRIVER: postgresql + DATABASE_HOST: postgres + DATABASE_PORT: '5432' + DATABASE_NAME: calminer_ci + DATABASE_USER: calminer + DATABASE_PASSWORD: secret + DATABASE_SCHEMA: public + DATABASE_SUPERUSER: calminer + DATABASE_SUPERUSER_PASSWORD: secret + DATABASE_SUPERUSER_DB: calminer_ci + DATABASE_URL: postgresql+psycopg2://calminer:secret@postgres:5432/calminer_ci + services: + postgres: + image: postgres:16 + env: + POSTGRES_DB: calminer_ci + POSTGRES_USER: calminer + POSTGRES_PASSWORD: secret + options: >- + --health-cmd "pg_isready -U calminer -d calminer_ci" + --health-interval 10s + --health-timeout 5s + --health-retries 10 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Export PYTHONPATH + shell: bash + run: | + set -euo pipefail + echo "PYTHONPATH=${{ github.workspace }}" >> "$GITHUB_ENV" + + - name: Prepare Python environment + uses: ./.gitea/actions/setup-python-env + with: + use-system-python: 'true' + + - name: Run unit tests + run: pytest tests/unit + + e2e: + name: E2E Tests runs-on: ubuntu-latest container: mcr.microsoft.com/playwright/python:v1.55.0-jammy env: @@ -18,10 +85,6 @@ jobs: DATABASE_SUPERUSER_PASSWORD: secret DATABASE_SUPERUSER_DB: calminer_ci DATABASE_URL: postgresql+psycopg2://calminer:secret@postgres:5432/calminer_ci - strategy: - fail-fast: false - matrix: - target: [unit, e2e, lint] services: postgres: image: postgres:16 @@ -50,29 +113,13 @@ jobs: shell: bash run: | set -euo pipefail - echo "PYTHONPATH=/workspace/allucanget/calminer" >> "$GITHUB_ENV" - - # - name: Cache pip dependencies - # uses: actions/cache@v4 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }} - # restore-keys: | - # ${{ runner.os }}-pip- + echo "PYTHONPATH=${{ github.workspace }}" >> "$GITHUB_ENV" - name: Prepare Python environment uses: ./.gitea/actions/setup-python-env with: - install-playwright: ${{ matrix.target == 'e2e' }} use-system-python: 'true' - run-db-setup: ${{ matrix.target != 'lint' }} + install-playwright: 'true' - - name: Run tests - run: | - if [ "${{ matrix.target }}" = "unit" ]; then - pytest tests/unit - elif [ "${{ matrix.target }}" = "lint" ]; then - ruff check . 
- else - pytest tests/e2e - fi + - name: Run e2e tests + run: pytest tests/e2e From 89a4f663b52b5999ab1a8f68e5d87e52f88e9ec5 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 07:42:25 +0100 Subject: [PATCH 26/31] feat: Add virtual environment creation step for Python setup --- .gitea/actions/setup-python-env/action.yml | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.gitea/actions/setup-python-env/action.yml b/.gitea/actions/setup-python-env/action.yml index d824b84..c51e30d 100644 --- a/.gitea/actions/setup-python-env/action.yml +++ b/.gitea/actions/setup-python-env/action.yml @@ -47,6 +47,32 @@ runs: python --version python -m pip --version >/dev/null 2>&1 || python -m ensurepip --upgrade python -m pip --version + - name: Create virtual environment + if: ${{ inputs.use-system-python == 'true' }} + shell: bash + run: | + set -euo pipefail + if [ -z "${RUNNER_TEMP:-}" ]; then + echo "RUNNER_TEMP is not set; cannot create virtual environment" >&2 + exit 1 + fi + VENV_PATH="$(mktemp -d "${RUNNER_TEMP%/}/ci-venv-XXXXXX")" + python -m venv "${VENV_PATH}" + PATH_ENTRY="" + if [ -f "${VENV_PATH}/bin/activate" ]; then + PATH_ENTRY="${VENV_PATH}/bin" + elif [ -f "${VENV_PATH}/Scripts/activate" ]; then + PATH_ENTRY="${VENV_PATH}/Scripts" + else + echo "Unable to locate virtual environment scripts" >&2 + exit 1 + fi + export PATH="${PATH_ENTRY}:${PATH}" + echo "${PATH_ENTRY}" >> "${GITHUB_PATH}" + echo "VIRTUAL_ENV=${VENV_PATH}" >> "${GITHUB_ENV}" + # Re-evaluate the python binary for subsequent steps + python --version + python -m pip --version - name: Configure apt proxy shell: bash run: | From a772960390ad2edd757bf9021e4406c19ce57f43 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 07:56:24 +0100 Subject: [PATCH 27/31] feat: Add option to create isolated virtual environment in Python setup action --- .gitea/actions/setup-python-env/action.yml | 6 +++++- .gitea/workflows/test.yml | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitea/actions/setup-python-env/action.yml b/.gitea/actions/setup-python-env/action.yml index c51e30d..3ad1a66 100644 --- a/.gitea/actions/setup-python-env/action.yml +++ b/.gitea/actions/setup-python-env/action.yml @@ -26,6 +26,10 @@ inputs: description: Execute setup script dry run before live run when true. required: false default: 'true' + create-venv: + description: Create an isolated virtual environment when using the system Python. + required: false + default: 'false' runs: using: composite steps: @@ -48,7 +52,7 @@ runs: python -m pip --version >/dev/null 2>&1 || python -m ensurepip --upgrade python -m pip --version - name: Create virtual environment - if: ${{ inputs.use-system-python == 'true' }} + if: ${{ inputs.use-system-python == 'true' && inputs.create-venv == 'true' }} shell: bash run: | set -euo pipefail diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index b7e1c9c..dac32fc 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -20,6 +20,7 @@ jobs: with: use-system-python: 'true' run-db-setup: 'false' + create-venv: 'true' - name: Run lint checks run: ruff check . 
@@ -65,6 +66,7 @@ jobs: uses: ./.gitea/actions/setup-python-env with: use-system-python: 'true' + create-venv: 'true' - name: Run unit tests run: pytest tests/unit From 3da8a50ac41df1d123fb3ea92c29cda59cdee1f5 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 08:19:07 +0100 Subject: [PATCH 28/31] feat: Add E2E testing workflow with Playwright and PostgreSQL service --- .gitea/workflows/test-e2e.yml | 84 +++++++++++++++++++++++++++++++++++ .gitea/workflows/test.yml | 55 ----------------------- 2 files changed, 84 insertions(+), 55 deletions(-) create mode 100644 .gitea/workflows/test-e2e.yml diff --git a/.gitea/workflows/test-e2e.yml b/.gitea/workflows/test-e2e.yml new file mode 100644 index 0000000..59b8693 --- /dev/null +++ b/.gitea/workflows/test-e2e.yml @@ -0,0 +1,84 @@ +name: Run E2E Tests + +on: + push: + branches: + - '**' + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + e2e: + name: E2E Tests + runs-on: ubuntu-latest + container: mcr.microsoft.com/playwright/python:v1.55.0-jammy + env: + DATABASE_DRIVER: postgresql + DATABASE_HOST: postgres + DATABASE_PORT: '5432' + DATABASE_NAME: calminer_ci + DATABASE_USER: calminer + DATABASE_PASSWORD: secret + DATABASE_SCHEMA: public + DATABASE_SUPERUSER: calminer + DATABASE_SUPERUSER_PASSWORD: secret + DATABASE_SUPERUSER_DB: calminer_ci + DATABASE_URL: postgresql+psycopg2://calminer:secret@postgres:5432/calminer_ci + services: + postgres: + image: postgres:16 + env: + POSTGRES_DB: calminer_ci + POSTGRES_USER: calminer + POSTGRES_PASSWORD: secret + options: >- + --health-cmd "pg_isready -U calminer -d calminer_ci" + --health-interval 10s + --health-timeout 5s + --health-retries 10 + steps: + - name: Install Node.js runtime + shell: bash + run: | + set -euo pipefail + export DEBIAN_FRONTEND=noninteractive + curl -fsSL https://deb.nodesource.com/setup_20.x | bash - + apt-get install -y nodejs + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Export PYTHONPATH + shell: bash + run: | + set -euo pipefail + echo "PYTHONPATH=${{ github.workspace }}" >> "$GITHUB_ENV" + + - name: Prepare Python environment + uses: ./.gitea/actions/setup-python-env + with: + use-system-python: 'true' + install-playwright: 'true' + run-db-setup: 'true' + + - name: Run e2e tests + run: | + set -euo pipefail + mkdir -p artifacts/pytest + pytest tests/e2e --junitxml=artifacts/pytest/e2e-results.xml + + - name: Upload pytest results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-pytest-results + path: artifacts/pytest/ + + - name: Upload Playwright artifacts + if: failure() + uses: actions/upload-artifact@v4 + with: + name: playwright-artifacts + path: playwright-report diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml index dac32fc..2ffcbe1 100644 --- a/.gitea/workflows/test.yml +++ b/.gitea/workflows/test.yml @@ -70,58 +70,3 @@ jobs: - name: Run unit tests run: pytest tests/unit - - e2e: - name: E2E Tests - runs-on: ubuntu-latest - container: mcr.microsoft.com/playwright/python:v1.55.0-jammy - env: - DATABASE_DRIVER: postgresql - DATABASE_HOST: postgres - DATABASE_PORT: '5432' - DATABASE_NAME: calminer_ci - DATABASE_USER: calminer - DATABASE_PASSWORD: secret - DATABASE_SCHEMA: public - DATABASE_SUPERUSER: calminer - DATABASE_SUPERUSER_PASSWORD: secret - DATABASE_SUPERUSER_DB: calminer_ci - DATABASE_URL: postgresql+psycopg2://calminer:secret@postgres:5432/calminer_ci - services: - postgres: - image: postgres:16 - env: - POSTGRES_DB: calminer_ci - POSTGRES_USER: calminer - 
POSTGRES_PASSWORD: secret - options: >- - --health-cmd "pg_isready -U calminer -d calminer_ci" - --health-interval 10s - --health-timeout 5s - --health-retries 10 - steps: - - name: Install Node.js runtime - shell: bash - run: | - set -euo pipefail - export DEBIAN_FRONTEND=noninteractive - curl -fsSL https://deb.nodesource.com/setup_20.x | bash - - apt-get install -y nodejs - - - name: Checkout code - uses: actions/checkout@v4 - - - name: Export PYTHONPATH - shell: bash - run: | - set -euo pipefail - echo "PYTHONPATH=${{ github.workspace }}" >> "$GITHUB_ENV" - - - name: Prepare Python environment - uses: ./.gitea/actions/setup-python-env - with: - use-system-python: 'true' - install-playwright: 'true' - - - name: Run e2e tests - run: pytest tests/e2e From 2136dbdd44fc5d2b5faaf54ef84cf6c5c204bf51 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 08:29:12 +0100 Subject: [PATCH 29/31] fix: Ensure bash shell is explicitly set for running E2E tests --- .gitea/workflows/test-e2e.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitea/workflows/test-e2e.yml b/.gitea/workflows/test-e2e.yml index 59b8693..317fb9a 100644 --- a/.gitea/workflows/test-e2e.yml +++ b/.gitea/workflows/test-e2e.yml @@ -64,6 +64,7 @@ jobs: run-db-setup: 'true' - name: Run e2e tests + shell: bash run: | set -euo pipefail mkdir -p artifacts/pytest From 99d9ea7770580bc8fc9f308ab9115d4ad433d51e Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 08:34:27 +0100 Subject: [PATCH 30/31] fix: Downgrade upload-artifact action to v3 for consistency --- .gitea/workflows/test-e2e.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitea/workflows/test-e2e.yml b/.gitea/workflows/test-e2e.yml index 317fb9a..bfa0955 100644 --- a/.gitea/workflows/test-e2e.yml +++ b/.gitea/workflows/test-e2e.yml @@ -72,14 +72,14 @@ jobs: - name: Upload pytest results if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v3 with: name: e2e-pytest-results path: artifacts/pytest/ - name: Upload Playwright artifacts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v3 with: name: playwright-artifacts path: playwright-report From e1689c3a3120b3aa054da5f13cb2d7df2ae63b50 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Tue, 28 Oct 2025 08:52:37 +0100 Subject: [PATCH 31/31] fix: Update pydantic version constraint in requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 1c46aea..0f27fee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ fastapi +pydantic>=2.0,<3.0 uvicorn sqlalchemy psycopg2-binary