Compare commits: b6511e5273...v2 (111 commits)
Commits in this range, oldest to newest (SHA1s only; the author and date columns were empty in this export):

958c165721, 6e835c83eb, 75924fca84, ac9ffddbde, 4e5a4c645d, e9678b6736, e5e346b26a, b0e623d68e, 30dbc13fae, 31b9a1058a,
bcd993d57c, 1262a4a63f, fb6816de00, 4d0e1a9989, ed8e05147c, 522b1e4105, 4f00bf0d3c, 3551b0356d, 521a8abc2d, 1feae7ff85,
1240b08740, d9fd82b2e3, 6c1570a254, b1a6df9f90, 6d496a599e, 1199813da0, acf6f50bbd, ad306bd0aa, ed4187970c, 0fbe9f543e,
80825c2c5d, 44a3bfc1bf, 1f892ebdbb, bcdc9e861e, 23523f70f1, 8ef6724960, 6e466a3fd2, 9d4c807475, 9cd555e134, e72e297c61,
101d9309fd, 9556f9e1f1, 4488cacdc9, e06a6ae068, 3bdae3c54c, d89b09fa80, 2214bbe64f, 5d6592d657, 3988171b46, 1520724cab,
014d96c105, 55fa1f56c1, 53eacc352e, 2bfa498624, 4cfc5d9ffa, ce7f4aa776, e0497f58f0, 60410fd71d, f55c77312d, 63ec4a6953,
b0ff79ae9c, 0670d05722, 0694d4ec4b, ce9c174b53, f68321cd04, 44ff4d0e62, 4364927965, 795a9f99f4, 032e6d2681, 51c0fcec95,
3051f91ab0, e2465188c2, 43b1e53837, 4b33a5dba3, 5f183faa63, 1a7581cda0, b1a0153a8d, 609b0d779f, eaef99f0ac, 3bc124c11f,
7058eb4172, e0fa3861a6, ab328b1a0b, 24cb3c2f57, 118657491c, 0f79864188, 27262bdfa3, 3601c2e422, 53879a411f, 2d848c2e09,
dad862e48e, 400f85c907, 7f5ed6a42d, 053da332ac, 02da881d3e, c39dde3198, faea6777a0, d36611606d, 191500aeb7, 61b42b3041,
8bf46b80c8, c69f933684, c6fdc2d923, dc3ebfbba5, 32a96a27c5, 203a5d08f2, c6a0eb2588, d807a50f77, 22ddfb671d, 971b4a19ea,
5b1278cbea
.env.development — new file (25 lines)

```ini
# Development Environment Configuration
ENVIRONMENT=development
DEBUG=true
LOG_LEVEL=DEBUG

# Database Configuration
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer
DATABASE_PASSWORD=calminer_password
DATABASE_NAME=calminer_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=1000
CALMINER_IMPORT_MAX_ROWS=10000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=300

# Admin Seeding (for development)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.local
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=ChangeMe123!
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
```
.env.example — modified (+12 lines)

```diff
@@ -10,5 +10,13 @@ DATABASE_NAME=calminer
 # Optional: set a schema (comma-separated for multiple entries)
 # DATABASE_SCHEMA=public
 
-# Legacy fallback (still supported, but granular settings are preferred)
-# DATABASE_URL=postgresql://<user>:<password>@localhost:5432/calminer
+# Default administrative credentials are provided at deployment time through environment variables
+# (`CALMINER_SEED_ADMIN_EMAIL`, `CALMINER_SEED_ADMIN_USERNAME`, `CALMINER_SEED_ADMIN_PASSWORD`, `CALMINER_SEED_ADMIN_ROLES`).
+# These values are consumed by a shared bootstrap helper on application startup, ensuring mandatory roles and the administrator account exist before any user interaction.
+CALMINER_SEED_ADMIN_EMAIL=<email>
+CALMINER_SEED_ADMIN_USERNAME=<username>
+CALMINER_SEED_ADMIN_PASSWORD=<password>
+CALMINER_SEED_ADMIN_ROLES=<roles>
+# Operators can request a managed credential reset by setting `CALMINER_SEED_FORCE=true`.
+# On the next startup the helper rotates the admin password and reapplies role assignments, so downstream environments must update stored secrets immediately after the reset.
+# CALMINER_SEED_FORCE=false
```
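The comments above describe a startup seeding contract rather than code in this diff. As a minimal sketch of that flow — all names here are hypothetical stand-ins; the real helper is wired into FastAPI startup and persists to the database — the create/rotate decision could look like this:

```python
"""Hypothetical sketch of the admin seeding flow described in .env.example."""
import os

_users: dict[str, dict] = {}  # stand-in for the user table


def bootstrap_admin() -> None:
    username = os.environ.get("CALMINER_SEED_ADMIN_USERNAME", "admin")
    email = os.environ.get("CALMINER_SEED_ADMIN_EMAIL", "admin@example.invalid")
    password = os.environ.get("CALMINER_SEED_ADMIN_PASSWORD", "")
    roles = os.environ.get("CALMINER_SEED_ADMIN_ROLES", "admin").split(",")
    force = os.environ.get("CALMINER_SEED_FORCE", "false").lower() == "true"

    user = _users.get(username)
    if user is None:
        # First startup: create the account so it exists before any login.
        _users[username] = {"email": email, "password": password, "roles": roles}
    elif force:
        # CALMINER_SEED_FORCE=true: rotate the password and reapply roles.
        user["password"] = password
        user["roles"] = roles


if __name__ == "__main__":
    bootstrap_admin()
    print(_users)
```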
.env.production — new file (25 lines)

```ini
# Production Environment Configuration
ENVIRONMENT=production
DEBUG=false
LOG_LEVEL=WARNING

# Database Configuration (MUST be set externally - no defaults)
DATABASE_HOST=
DATABASE_PORT=5432
DATABASE_USER=
DATABASE_PASSWORD=
DATABASE_NAME=
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=100000
CALMINER_IMPORT_MAX_ROWS=100000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=3600

# Admin Seeding (for production - set strong password)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_VERY_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
```
.env.staging — new file (25 lines)

```ini
# Staging Environment Configuration
ENVIRONMENT=staging
DEBUG=false
LOG_LEVEL=INFO

# Database Configuration (override with actual staging values)
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer_staging
DATABASE_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
DATABASE_NAME=calminer_staging_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=50000
CALMINER_IMPORT_MAX_ROWS=50000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=600

# Admin Seeding (for staging)
CALMINER_SEED_ADMIN_EMAIL=admin@staging.calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
```
.gitattributes — new file (vendored, 3 lines)

```text
* text=auto

Dockerfile text eol=lf
```
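To confirm these attributes resolve as intended, git's standard `check-attr` query can be run from the working tree:

```bash
# Query how the attributes apply to the Dockerfile
git check-attr text eol Dockerfile
# Expected:
# Dockerfile: text: set
# Dockerfile: eol: lf
```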
.gitea/workflows/ci-build.yml — new file (150 lines)

```yaml
name: CI - Build

on:
  workflow_call:
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    env:
      DEFAULT_BRANCH: main
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
      REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
      REGISTRY_CONTAINER_NAME: calminer
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Collect workflow metadata
        id: meta
        shell: bash
        env:
          DEFAULT_BRANCH: ${{ env.DEFAULT_BRANCH }}
        run: |
          ref_name="${GITHUB_REF_NAME:-${GITHUB_REF##*/}}"
          event_name="${GITHUB_EVENT_NAME:-}"
          sha="${GITHUB_SHA:-}"

          if [ "$ref_name" = "${DEFAULT_BRANCH:-main}" ]; then
            echo "on_default=true" >> "$GITHUB_OUTPUT"
          else
            echo "on_default=false" >> "$GITHUB_OUTPUT"
          fi

          echo "ref_name=$ref_name" >> "$GITHUB_OUTPUT"
          echo "event_name=$event_name" >> "$GITHUB_OUTPUT"
          echo "sha=$sha" >> "$GITHUB_OUTPUT"

      - name: Set up QEMU and Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to gitea registry
        if: ${{ steps.meta.outputs.on_default == 'true' }}
        uses: docker/login-action@v3
        continue-on-error: true
        with:
          registry: ${{ env.REGISTRY_URL }}
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}

      - name: Build image
        id: build-image
        env:
          REGISTRY_URL: ${{ env.REGISTRY_URL }}
          REGISTRY_CONTAINER_NAME: ${{ env.REGISTRY_CONTAINER_NAME }}
          SHA_TAG: ${{ steps.meta.outputs.sha }}
          PUSH_IMAGE: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}
        run: |
          set -eo pipefail
          LOG_FILE=build.log
          if [ "${PUSH_IMAGE}" = "true" ]; then
            docker buildx build \
              --push \
              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest" \
              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:${SHA_TAG}" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          else
            docker buildx build \
              --load \
              --tag "${REGISTRY_CONTAINER_NAME}:ci" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          fi

      - name: Upload docker build logs
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: docker-build-logs
          path: build.log

  deploy:
    needs: build
    if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request'
    runs-on: ubuntu-latest
    env:
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_CONTAINER_NAME: calminer
      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
      STAGING_KUBE_CONFIG: ${{ secrets.STAGING_KUBE_CONFIG }}
      PROD_KUBE_CONFIG: ${{ secrets.PROD_KUBE_CONFIG }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up kubectl for staging
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy staging]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.STAGING_KUBE_CONFIG }}

      - name: Set up kubectl for production
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy production]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.PROD_KUBE_CONFIG }}

      - name: Deploy to staging
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy staging]')
        run: |
          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          kubectl rollout status deployment/calminer-app

      - name: Collect staging deployment logs
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy staging]')
        run: |
          mkdir -p logs/deployment/staging
          kubectl get pods -o wide > logs/deployment/staging/pods.txt
          kubectl get deployment calminer-app -o yaml > logs/deployment/staging/deployment.yaml
          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/staging/calminer-app.log

      - name: Deploy to production
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy production]')
        run: |
          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          kubectl rollout status deployment/calminer-app

      - name: Collect production deployment logs
        if: github.event.head_commit && contains(github.event.head_commit.message, '[deploy production]')
        run: |
          mkdir -p logs/deployment/production
          kubectl get pods -o wide > logs/deployment/production/pods.txt
          kubectl get deployment calminer-app -o yaml > logs/deployment/production/deployment.yaml
          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/production/calminer-app.log

      - name: Upload deployment logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: deployment-logs
          path: logs/deployment
          if-no-files-found: ignore
```
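Note that the deploy steps only fire when the head commit message carries a literal marker. For example, a push to `main` like the following would take the staging path (the commit text itself is illustrative):

```bash
# "[deploy staging]" matches the contains() condition in the deploy job
git commit -m "chore: roll out latest image [deploy staging]"
git push origin main
```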
.gitea/workflows/ci-lint.yml — new file (44 lines)

```yaml
name: CI - Lint

on:
  workflow_call:
  workflow_dispatch:

jobs:
  lint:
    runs-on: ubuntu-latest
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"

      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Run Ruff
        run: ruff check .

      - name: Run Black
        run: black --check .

      - name: Run Bandit
        run: bandit -c pyproject.toml -r tests
```
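The three lint steps are plain CLI invocations, so the same gate can be reproduced locally before pushing (commands taken verbatim from the steps above):

```bash
ruff check .
black --check .
bandit -c pyproject.toml -r tests
```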
.gitea/workflows/ci-test.yml — new file (73 lines)

```yaml
name: CI - Test

on:
  workflow_call:
  workflow_dispatch:

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
      DB_DRIVER: postgresql+psycopg2
      DB_HOST: 192.168.88.35
      DB_NAME: calminer_test
      DB_USER: calminer
      DB_PASSWORD: calminer_password
    services:
      postgres:
        image: postgres:17
        env:
          POSTGRES_USER: ${{ env.DB_USER }}
          POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
          POSTGRES_DB: ${{ env.DB_NAME }}
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"

      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Run tests
        env:
          DATABASE_DRIVER: ${{ env.DB_DRIVER }}
          DATABASE_HOST: postgres
          DATABASE_PORT: 5432
          DATABASE_USER: ${{ env.DB_USER }}
          DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
          DATABASE_NAME: ${{ env.DB_NAME }}
        run: |
          pytest --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80 --junitxml=pytest-report.xml

      - name: Upload test artifacts
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-artifacts
          path: |
            coverage.xml
            pytest-report.xml
```
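An approximate local equivalent of the `Run tests` step, assuming a Postgres instance with matching credentials is reachable on `localhost` (inside CI the service container resolves as `postgres` instead):

```bash
export DATABASE_DRIVER=postgresql+psycopg2
export DATABASE_HOST=localhost
export DATABASE_PORT=5432
export DATABASE_USER=calminer
export DATABASE_PASSWORD=calminer_password
export DATABASE_NAME=calminer_test
pytest --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80 --junitxml=pytest-report.xml
```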
.gitea/workflows/ci.yml — new file (30 lines); the orchestrator that chains the reusable lint, test, and build workflows above

```yaml
name: CI

on:
  push:
    branches:
      - main
      - develop
      - v2
  pull_request:
    branches:
      - main
      - develop
  workflow_dispatch:

jobs:
  lint:
    uses: ./.gitea/workflows/ci-lint.yml
    secrets: inherit

  test:
    needs: lint
    uses: ./.gitea/workflows/ci-test.yml
    secrets: inherit

  build:
    needs:
      - lint
      - test
    uses: ./.gitea/workflows/ci-build.yml
    secrets: inherit
```
A further modified workflow follows; its file header was lost in this export, so the file name is unknown. The hunks extend an existing monolithic CI workflow with the same lint/test/build/deploy content as the split files above:

```diff
@@ -2,12 +2,66 @@ name: CI
 
 on:
   push:
-    branches: [main, develop]
+    branches: [main, develop, v2]
   pull_request:
     branches: [main, develop]
 
 jobs:
+  lint:
+    runs-on: ubuntu-latest
+    env:
+      APT_CACHER_NG: http://192.168.88.14:3142
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.12"
+
+      # - name: Get pip cache dir
+      #   id: pip-cache
+      #   run: |
+      #     echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
+      #     echo "Pip cache dir: $(pip cache dir)"
+
+      # - name: Cache pip dependencies
+      #   uses: actions/cache@v4
+      #   with:
+      #     path: ${{ steps.pip-cache.outputs.path }}
+      #     key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt', 'pyproject.toml') }}
+      #     restore-keys: |
+      #       ${{ runner.os }}-pip-
+
+      - name: Configure apt proxy
+        run: |
+          if [ -n "${APT_CACHER_NG}" ]; then
+            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
+          fi
+
+      - name: Install system packages
+        run: |
+          apt-get update
+          apt-get install -y build-essential libpq-dev
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install -r requirements-test.txt
+
+      - name: Run Ruff
+        run: ruff check .
+
+      - name: Run Black
+        run: black --check .
+
+      - name: Run Bandit
+        run: bandit -c pyproject.toml -r tests
+
   test:
+    runs-on: ubuntu-latest
+    needs: lint
     env:
       APT_CACHER_NG: http://192.168.88.14:3142
       DB_DRIVER: postgresql+psycopg2
@@ -15,8 +69,6 @@ jobs:
       DB_NAME: calminer_test
       DB_USER: calminer
       DB_PASSWORD: calminer_password
-    runs-on: ubuntu-latest
-
     services:
       postgres:
         image: postgres:17
@@ -29,35 +81,38 @@ jobs:
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
+
    steps:
      - uses: actions/checkout@v4
 
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
-          python-version: '3.11'
+          python-version: "3.12"
 
-      - name: Get pip cache dir
-        id: pip-cache
+      # - name: Get pip cache dir
+      #   id: pip-cache
+      #   run: |
+      #     echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
+      #     echo "Pip cache dir: $(pip cache dir)"
+
+      # - name: Cache pip dependencies
+      #   uses: actions/cache@v4
+      #   with:
+      #     path: ${{ steps.pip-cache.outputs.path }}
+      #     key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt', 'pyproject.toml') }}
+      #     restore-keys: |
+      #       ${{ runner.os }}-pip-
+
+      - name: Configure apt proxy
        run: |
-          echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
+          if [ -n "${APT_CACHER_NG}" ]; then
+            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
+          fi
 
-      - name: Cache pip dependencies
-        uses: actions/cache@v4
-        with:
-          path: ${{ steps.pip-cache.outputs.path }}
-          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-
-      - name: Update apt-cacher-ng config
-        run: |-
-          echo 'Acquire::http::Proxy "{{ env.APT_CACHER_NG }}";' | tee /etc/apt/apt.conf.d/01apt-cacher-ng
+      - name: Install system packages
+        run: |
          apt-get update
-
-      - name: Update system packages
-        run: apt-get upgrade -y
+          apt-get install -y build-essential libpq-dev
 
      - name: Install dependencies
        run: |
@@ -65,12 +120,6 @@ jobs:
          pip install -r requirements.txt
          pip install -r requirements-test.txt
 
-      - name: Install Playwright system dependencies
-        run: playwright install-deps
-
-      - name: Install Playwright browsers
-        run: playwright install
-
      - name: Run tests
        env:
          DATABASE_DRIVER: ${{ env.DB_DRIVER }}
@@ -80,15 +129,22 @@ jobs:
          DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
          DATABASE_NAME: ${{ env.DB_NAME }}
        run: |
-          pytest tests/ --cov=.
+          pytest --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80 --junitxml=pytest-report.xml
 
-      - name: Build Docker image
-        run: |
-          docker build -t calminer .
+      - name: Upload test artifacts
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: test-artifacts
+          path: |
+            coverage.xml
+            pytest-report.xml
 
   build:
     runs-on: ubuntu-latest
-    needs: test
+    needs:
+      - lint
+      - test
     env:
       DEFAULT_BRANCH: main
       REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
@@ -129,12 +185,108 @@ jobs:
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}
 
-      - name: Build and push image
-        uses: docker/build-push-action@v5
+      - name: Build image
+        id: build-image
+        env:
+          REGISTRY_URL: ${{ env.REGISTRY_URL }}
+          REGISTRY_CONTAINER_NAME: ${{ env.REGISTRY_CONTAINER_NAME }}
+          SHA_TAG: ${{ steps.meta.outputs.sha }}
+          PUSH_IMAGE: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}
+        run: |
+          set -eo pipefail
+          LOG_FILE=build.log
+          if [ "${PUSH_IMAGE}" = "true" ]; then
+            docker buildx build \
+              --push \
+              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest" \
+              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:${SHA_TAG}" \
+              --file Dockerfile \
+              . 2>&1 | tee "${LOG_FILE}"
+          else
+            docker buildx build \
+              --load \
+              --tag "${REGISTRY_CONTAINER_NAME}:ci" \
+              --file Dockerfile \
+              . 2>&1 | tee "${LOG_FILE}"
+          fi
+
+      - name: Upload docker build logs
+        if: failure()
+        uses: actions/upload-artifact@v4
        with:
-          context: .
-          file: Dockerfile
-          push: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && (env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '') }}
-          tags: |
-            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:latest
-            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:${{ steps.meta.outputs.sha }}
+          name: docker-build-logs
+          path: build.log
+
+  deploy:
+    runs-on: ubuntu-latest
+    needs: build
+    if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request'
+    env:
+      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
+      REGISTRY_CONTAINER_NAME: calminer
+      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
+      STAGING_KUBE_CONFIG: ${{ secrets.STAGING_KUBE_CONFIG }}
+      PROD_KUBE_CONFIG: ${{ secrets.PROD_KUBE_CONFIG }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up kubectl for staging
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy staging]')
+        uses: azure/k8s-set-context@v3
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ env.STAGING_KUBE_CONFIG }}
+
+      - name: Set up kubectl for production
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy production]')
+        uses: azure/k8s-set-context@v3
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ env.PROD_KUBE_CONFIG }}
+
+      - name: Deploy to staging
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy staging]')
+        run: |
+          # Update image in deployment
+          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
+          # Apply any config changes
+          kubectl apply -f k8s/configmap.yaml
+          kubectl apply -f k8s/secret.yaml
+          # Wait for rollout
+          kubectl rollout status deployment/calminer-app
+
+      - name: Collect staging deployment logs
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy staging]')
+        run: |
+          mkdir -p logs/deployment/staging
+          kubectl get pods -o wide > logs/deployment/staging/pods.txt
+          kubectl get deployment calminer-app -o yaml > logs/deployment/staging/deployment.yaml
+          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/staging/calminer-app.log
+
+      - name: Deploy to production
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy production]')
+        run: |
+          # Update image in deployment
+          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
+          # Apply any config changes
+          kubectl apply -f k8s/configmap.yaml
+          kubectl apply -f k8s/secret.yaml
+          # Wait for rollout
+          kubectl rollout status deployment/calminer-app
+
+      - name: Collect production deployment logs
+        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy production]')
+        run: |
+          mkdir -p logs/deployment/production
+          kubectl get pods -o wide > logs/deployment/production/pods.txt
+          kubectl get deployment calminer-app -o yaml > logs/deployment/production/deployment.yaml
+          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/production/calminer-app.log
+
+      - name: Upload deployment logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: deployment-logs
+          path: logs/deployment
+          if-no-files-found: ignore
```
.gitignore — modified (vendored, +3 lines)

```diff
@@ -17,6 +17,7 @@ env/
 # environment variables
 .env
 *.env
+.env.*
 # except example files
 !config/*.env.example
 
@@ -46,8 +47,10 @@ htmlcov/
 logs/
 
 # SQLite database
+data/
 *.sqlite3
 test*.db
+local*.db
 
 # Act runner files
 .runner
```
.pre-commit-config.yaml — new file (13 lines)

```yaml
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.1
    hooks:
      - id: ruff
  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 24.8.0
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.9
    hooks:
      - id: bandit
```
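These hooks run automatically once the standard pre-commit bootstrap has been done in a clone:

```bash
pip install pre-commit
pre-commit install          # registers the git hook
pre-commit run --all-files  # one-off run across the whole tree
```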
@@ -1,8 +0,0 @@
|
|||||||
{
|
|
||||||
"semi": true,
|
|
||||||
"singleQuote": true,
|
|
||||||
"trailingComma": "es5",
|
|
||||||
"printWidth": 80,
|
|
||||||
"tabWidth": 2,
|
|
||||||
"useTabs": false
|
|
||||||
}
|
|
||||||
Dockerfile — modified (46 lines changed)

```diff
@@ -41,8 +41,25 @@ if url:
 finally:
     sock.close()
 PY
-apt-get update
-apt-get install -y --no-install-recommends build-essential gcc libpq-dev
+APT_PROXY_CONFIG=/etc/apt/apt.conf.d/01proxy
+
+apt_update_with_fallback() {
+    if ! apt-get update; then
+        rm -f "$APT_PROXY_CONFIG"
+        apt-get update
+    fi
+}
+
+apt_install_with_fallback() {
+    if ! apt-get install -y --no-install-recommends "$@"; then
+        rm -f "$APT_PROXY_CONFIG"
+        apt-get update
+        apt-get install -y --no-install-recommends "$@"
+    fi
+}
+
+apt_update_with_fallback
+apt_install_with_fallback build-essential gcc libpq-dev
 pip install --upgrade pip
 pip wheel --no-deps --wheel-dir /wheels -r requirements.txt
 apt-get purge -y --auto-remove build-essential gcc
@@ -88,8 +105,25 @@ if url:
 finally:
     sock.close()
 PY
-apt-get update
-apt-get install -y --no-install-recommends libpq5
+APT_PROXY_CONFIG=/etc/apt/apt.conf.d/01proxy
+
+apt_update_with_fallback() {
+    if ! apt-get update; then
+        rm -f "$APT_PROXY_CONFIG"
+        apt-get update
+    fi
+}
+
+apt_install_with_fallback() {
+    if ! apt-get install -y --no-install-recommends "$@"; then
+        rm -f "$APT_PROXY_CONFIG"
+        apt-get update
+        apt-get install -y --no-install-recommends "$@"
+    fi
+}
+
+apt_update_with_fallback
+apt_install_with_fallback libpq5
 rm -rf /var/lib/apt/lists/*
 EOF
 
@@ -108,4 +142,6 @@ USER appuser
 
 EXPOSE 8003
 
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]
+ENTRYPOINT ["uvicorn"]
+
+CMD ["main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]
```
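Splitting `ENTRYPOINT ["uvicorn"]` from an argument-only `CMD` means the defaults can be overridden per container run without replacing the entrypoint; arguments passed to `docker run` replace only the `CMD` portion:

```bash
# Same image, single worker on a different port
docker run calminer main:app --host 0.0.0.0 --port 8080 --workers 1
```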
README.md — modified (97 lines changed)

````diff
@@ -6,99 +6,8 @@ Focuses on ore mining operations and covering parameters such as capital and ope
 
 The system is designed to help mining companies make informed decisions by simulating various scenarios and analyzing potential outcomes based on stochastic variables.
 
-## Current Features
-
-> [!TIP]
-> TODO: Update this section to reflect the current feature set.
-
-| Feature | Category | Description | Status |
-| ---------------------- | ----------- | ------------------------------------------------------------------------------------ | ----------- |
-| Scenario Management | Core | Manage multiple mining scenarios with independent parameter sets and outputs. | Done |
-| Parameter Definition | Core | Define and manage various parameters for each scenario. | Done |
-| Cost Tracking | Financial | Capture and analyze capital and operational expenditures. | Done |
-| Consumption Tracking | Operational | Record resource consumption tied to scenarios. | Done |
-| Production Output | Operational | Store and analyze production metrics such as tonnage, recovery, and revenue drivers. | Done |
-| Equipment Management | Operational | Manage equipment inventories and specifications for each scenario. | Done |
-| Maintenance Logging | Operational | Log maintenance events and costs associated with equipment. | Started |
-| Reporting Dashboard | Analytics | View aggregated statistics and visualizations for scenario outputs. | In Progress |
-| Monte Carlo Simulation | Analytics | Run stochastic simulations to assess risk and variability in outcomes. | Started |
-| Application Settings | Core | Manage global application settings such as themes and currency options. | Done |
-
-## Key UI/UX Features
-
-- **Unified UI Shell**: Server-rendered templates extend a shared base layout with a persistent left sidebar linking scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting views.
-- **Modular Frontend Scripts**: Page-specific interactions in `static/js/` modules, keeping templates lean while enabling browser caching and reuse.
-
-## Planned Features
-
-See [Roadmap](docs/roadmap.md) for details on planned features and enhancements.
-
 ## Documentation & quickstart
 
-This repository contains detailed developer and architecture documentation in the `docs/` folder.
-
-### Settings overview
-
-The Settings page (`/ui/settings`) lets administrators adjust global theme colors stored in the `application_setting` table. Changes are instantly applied across the UI. Environment variables prefixed with `CALMINER_THEME_` (for example, `CALMINER_THEME_COLOR_PRIMARY`) automatically override individual CSS variables and render as read-only in the form, ensuring deployment-time overrides take precedence while remaining visible to operators.
-
-[Quickstart](docs/quickstart.md) contains developer quickstart, migrations, testing and current status.
-
-Key architecture documents: see [architecture](docs/architecture/README.md) for the arc42-based architecture documentation.
-
-For contributors: the `routes/`, `models/` and `services/` folders contain the primary application code. Tests and E2E specs are in `tests/`.
-
-## Run with Docker
-
-The repository ships with a multi-stage `Dockerfile` that produces a slim runtime image.
-
-### Build container
-
-```bash
-docker build -t calminer .
-```
-
-### Push to registry
-
-To push the image to a registry, tag it appropriately and push:
-
-```bash
-docker tag calminer your-registry/calminer:latest
-docker push your-registry/calminer:latest
-```
-
-### Run container
-
-To run the container, ensure PostgreSQL is available and set environment variables:
-
-```bash
-docker run -p 8000:8000 \
-  -e DATABASE_HOST=your-postgres-host \
-  -e DATABASE_PORT=5432 \
-  -e DATABASE_USER=calminer \
-  -e DATABASE_PASSWORD=your-password \
-  -e DATABASE_NAME=calminer_db \
-  calminer
-```
-
-## Development with Docker Compose
-
-For local development, use `docker-compose.yml` which includes the app and PostgreSQL services.
-
-```bash
-# Start services
-docker-compose up
-
-# Or run in background
-docker-compose up -d
-
-# Stop services
-docker-compose down
-```
-
-The app will be available at `http://localhost:8000`, PostgreSQL at `localhost:5432`.
-
-## CI/CD
-
-CalMiner uses Gitea Actions workflows stored in `.gitea/workflows/`:
-
-- `ci.yml`: Runs on push and PR to main/develop branches. Sets up Python, installs dependencies, runs tests with coverage, and builds the Docker image.
+- Detailed developer, architecture, and operations guides live in the companion [calminer-docs](../calminer-docs/) repository. Please see the [README](../calminer-docs/README.md) there for instructions.
+- For a local run, create a `.env` (see `.env.example`), install requirements, then execute `python -m scripts.init_db` followed by `uvicorn main:app --reload`. The initializer is safe to rerun and seeds demo data automatically.
+- To wipe and recreate the schema in development, run `CALMINER_ENV=development python -m scripts.reset_db` before invoking the initializer again.
````
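Taken together, the new quickstart bullets amount to the following local sequence (the `cp` step is an assumption; the README only says to create a `.env` based on `.env.example`):

```bash
cp .env.example .env            # then fill in real values
pip install -r requirements.txt
python -m scripts.init_db       # idempotent; seeds demo data
uvicorn main:app --reload
```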
112
changelog.md
Normal file
112
changelog.md
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## 2025-11-13
|
||||||
|
|
||||||
|
- Completed the UI alignment initiative by consolidating shared form and button styles into `static/css/forms.css` and `static/css/main.css`, introducing the semantic palette in `static/css/theme-default.css`, and spot-checking key pages plus contrast reports.
|
||||||
|
- Refactored the architecture data model docs by turning `calminer-docs/architecture/08_concepts/02_data_model.md` into a concise overview that links to new detail pages covering SQLAlchemy models, navigation metadata, enumerations, Pydantic schemas, and monitoring tables.
|
||||||
|
- Nested the calculator navigation under Projects by updating `scripts/init_db.py` seeds, teaching `services/navigation.py` to resolve scenario-scoped hrefs for profitability/opex/capex, and extending sidebar coverage through `tests/integration/test_navigation_sidebar_calculations.py` plus `tests/services/test_navigation_service.py` to validate admin/viewer visibility and contextual URL generation.
|
||||||
|
- Added navigation sidebar integration coverage by extending `tests/conftest.py` with role-switching headers, seeding admin/viewer test users, and adding `tests/integration/test_navigation_sidebar.py` to assert ordered link rendering for admins, viewer filtering of admin-only entries, and anonymous rejection of the endpoint.
|
||||||
|
- Finalised the financial data import/export templates by inventorying required fields, defining CSV column specs with validation rules, drafting Excel workbook layouts, documenting end-user workflows in `calminer-docs/userguide/data_import_export.md`, and recording stakeholder review steps alongside updated TODO/DONE tracking.
|
||||||
|
- Scoped profitability calculator UI under the scenario hierarchy by adding `/calculations/projects/{project_id}/scenarios/{scenario_id}/profitability` GET/POST handlers, updating scenario templates and sidebar navigation to link to the new route, and extending `tests/test_project_scenario_routes.py` with coverage for the scenario path plus legacy redirect behaviour (module run: 14 passed).
|
||||||
|
- Extended scenario frontend regression coverage by updating `tests/test_project_scenario_routes.py` to assert project/scenario breadcrumbs and calculator navigation, normalising escaped URLs, and re-running the module tests (13 passing).
|
||||||
|
- Cleared FastAPI and Pydantic deprecation warnings by migrating `scripts/init_db.py` to `@field_validator`, replacing the `main.py` startup hook with a lifespan handler, auditing template response call signatures, confirming HTTP 422 constant usage, and re-running the full pytest suite to ensure a clean warning slate.
|
||||||
|
- Delivered the capex planner end-to-end: added scaffolded UI in `templates/scenarios/capex.html`, wired GET/POST handlers through `routes/calculations.py`, implemented calculation logic plus snapshot persistence in `services/calculations.py` and `models/capex_snapshot.py`, updated navigation links, and introduced unit tests in `tests/services/test_calculations_capex.py`.
|
||||||
|
- Updated UI navigation to surface the opex planner by adding the sidebar link in `templates/partials/sidebar_nav.html`, wiring a scenario detail action in `templates/scenarios/detail.html`.
|
||||||
|
- Completed manual validation of the Capex Planner UI flows (sidebar entry, scenario deep link, validation errors, successful calculation) with results captured in `manual_tests/capex.md`, documented snapshot verification steps, and noted the optional JSON client check for future follow-up.
|
||||||
|
- Added opex calculation unit tests in `tests/services/test_calculations_opex.py` covering success metrics, currency validation, frequency enforcement, and evaluation horizon extension.
|
||||||
|
- Documented the Opex Planner workflow in `calminer-docs/userguide/opex_planner.md`, linked it from the user guide index, extended `calminer-docs/architecture/08_concepts/02_data_model.md` with snapshot coverage, and captured the completion in `.github/instructions/DONE.md`.
|
||||||
|
- Implemented opex integration coverage in `tests/integration/test_opex_calculations.py`, exercising HTML and JSON flows, verifying snapshot persistence, and asserting currency mismatch handling for form and API submissions.
|
||||||
|
- Executed the full pytest suite with coverage (211 tests) to confirm no regressions or warnings after the opex documentation updates.
|
||||||
|
- Completed the navigation sidebar API migration by finalising the database-backed service, refactoring `templates/partials/sidebar_nav.html` to consume the endpoint, hydrating via `static/js/navigation_sidebar.js`, and updating HTML route dependencies (`routes/projects.py`, `routes/scenarios.py`, `routes/reports.py`, `routes/imports.py`, `routes/calculations.py`) to use redirect-aware guards so anonymous visitors receive login redirects instead of JSON errors (manual verification via curl across projects, scenarios, reports, and calculations pages).
|
||||||
|
|
||||||
|
## 2025-11-12
|
||||||
|
|
||||||
|
- Fixed critical 500 error in reporting dashboard by correcting route reference in reporting.html template - changed 'reports.project_list_page' to 'projects.project_list_page' to resolve NoMatchFound error when accessing /ui/reporting.
|
||||||
|
- Completed navigation validation by inventorying all sidebar navigation links, identifying missing routes for simulations, reporting, settings, themes, and currencies, created new UI routes in routes/ui.py with proper authentication guards, built corresponding templates (simulations.html, reporting.html, settings.html, theme_settings.html, currencies.html), registered the UI router in main.py, updated sidebar navigation to use route names instead of hardcoded URLs, and enhanced navigation.js to use dynamic URL resolution for proper route handling.
|
||||||
|
- Fixed critical template rendering error in sidebar_nav.html where URL objects from `request.url_for()` were being used with string methods, causing TypeError. Added `|string` filters to convert URL objects to strings for proper template rendering.
|
||||||
|
- Integrated Plotly charting for interactive visualizations in reporting templates, added chart generation methods to ReportingService (`generate_npv_comparison_chart`, `generate_distribution_histogram`), updated project summary and scenario distribution contexts to include chart JSON data, enhanced templates with chart containers and JavaScript rendering, added chart-container CSS styling, and validated all reporting tests pass.
|
||||||
|
|
||||||
|
- Completed local run verification: started application with `uvicorn main:app --reload` without errors, verified authenticated routes (/login, /, /projects/ui, /projects) load correctly with seeded data, and summarized findings for deployment pipeline readiness.
|
||||||
|
- Fixed docker-compose.override.yml command array to remove duplicate "uvicorn" entry, enabling successful container startup with uvicorn reload in development mode.
|
||||||
|
- Completed deployment pipeline verification: built Docker image without errors, validated docker-compose configuration, deployed locally with docker-compose (app and postgres containers started successfully), and confirmed application startup logs showing database bootstrap and seeded data initialization.
|
||||||
|
- Completed documentation of current data models: updated `calminer-docs/architecture/08_concepts/02_data_model.md` with comprehensive SQLAlchemy model schemas, enumerations, Pydantic API schemas, and analysis of discrepancies between models and schemas.
|
||||||
|
- Switched `models/performance_metric.py` to reuse the shared declarative base from `config.database`, clearing the SQLAlchemy 2.0 `declarative_base` deprecation warning and verifying repository tests still pass.
|
||||||
|
- Replaced the Alembic migration workflow with the idempotent Pydantic-backed initializer (`scripts/init_db.py`), added a guarded reset utility (`scripts/reset_db.py`), removed migration artifacts/tooling (Alembic directory, config, Docker entrypoint), refreshed the container entrypoint to invoke `uvicorn` directly, and updated installation/architecture docs plus the README to direct developers to the new seeding/reset flow.
|
||||||
|
- Eliminated Bandit hardcoded-secret findings by replacing literal JWT tokens and passwords across auth/security tests with randomized helpers drawn from `tests/utils/security.py`, ensuring fixtures still assert expected behaviours.
|
||||||
|
- Centralized Bandit configuration in `pyproject.toml`, reran `bandit -c pyproject.toml -r calminer tests`, and verified the scan now reports zero issues.
|
||||||
|
- Diagnosed admin bootstrap failure caused by legacy `roles` schema, added Alembic migration `20251112_00_add_roles_metadata_columns.py` to backfill `display_name`, `description`, `created_at`, and `updated_at`, and verified the migration via full pytest run in the activated `.venv`.
|
||||||
|
- Resolved Ruff E402 warnings by moving module docstrings ahead of `from __future__ import annotations` across currency and pricing service modules, dropped the unused `HTTPException` import in `monitoring/__init__.py`, and confirmed a clean `ruff check .` run.
|
||||||
|
- Enhanced the deploy job in `.gitea/workflows/cicache.yml` to capture Kubernetes pod, deployment, and container logs into `/logs/deployment/` for staging/production rollouts and publish them via a `deployment-logs` artifact, updating CI/CD documentation with retrieval instructions.
|
||||||
|
- Fixed CI dashboard template lookup failures by renaming `templates/Dashboard.html` to `templates/dashboard.html` and verifying `tests/test_dashboard_route.py` locally to ensure TemplateNotFound no longer occurs on case-sensitive filesystems.
|
||||||
|
- Implemented SQLite support as primary local database with environment-driven backend switching (`CALMINER_USE_SQLITE=true`), updated `scripts/init_db.py` for database-agnostic DDL generation (PostgreSQL enums vs SQLite CHECK constraints), tested compatibility with both backends, and verified application startup and seeded data initialization work seamlessly across SQLite and PostgreSQL.
|
||||||
|
|
||||||
|
## 2025-11-11
|
||||||
|
|
||||||
|
- Collapsed legacy Alembic revisions into `alembic/versions/00_initial.py`, removed superseded migration files, and verified the consolidated schema via SQLite upgrade and Postgres version stamping.
|
||||||
|
- Implemented base URL routing to redirect unauthenticated users to login and authenticated users to dashboard.
|
||||||
|
- Added comprehensive end-to-end tests for login flow, including redirects, session handling, and error messaging for invalid/inactive accounts.
|
||||||
|
- Updated header and footer templates to consistently use `logo_big.png` image instead of text logo, with appropriate CSS styling for sizing.
|
||||||
|
- Centralised ISO-4217 currency validation across scenarios, imports, and export filters (`models/scenario.py`, `routes/scenarios.py`, `schemas/scenario.py`, `schemas/imports.py`, `services/export_query.py`) so malformed codes are rejected consistently at every entry point.
|
||||||
|
- Updated scenario services and UI flows to surface friendly validation errors and added regression coverage for imports, exports, API creation, and lifecycle flows ensuring currencies are normalised end-to-end.
|
||||||
|
- Linked projects to their pricing settings by updating SQLAlchemy models, repositories, seeding utilities, and migrations, and added regression tests to cover the new association and default backfill.
|
||||||
|
- Bootstrapped database-stored pricing settings at application startup, aligned initial data seeding with the database-first metadata flow, and added tests covering pricing bootstrap creation, project assignment, and idempotency.
|
||||||
|
- Extended pricing configuration support to prefer persisted metadata via `dependencies.get_pricing_metadata`, added retrieval tests for project/default fallbacks, and refreshed docs (`calminer-docs/specifications/price_calculation.md`, `pricing_settings_data_model.md`) to describe the database-backed workflow and bootstrap behaviour.
|
||||||
|
- Added `services/financial.py` NPV, IRR, and payback helpers with robust cash-flow normalisation, convergence safeguards, and fractional period support, plus comprehensive pytest coverage exercising representative project scenarios and failure modes.
|
||||||
|
- Authored `calminer-docs/specifications/financial_metrics.md` capturing DCF assumptions, solver behaviours, and worked examples, and cross-linked the architecture concepts to the new reference for consistent navigation.
|
||||||
|
- Implemented `services/simulation.py` Monte Carlo engine with configurable distributions, summary aggregation, and reproducible RNG seeding, introduced regression tests in `tests/test_simulation.py`, and documented configuration/usage in `calminer-docs/specifications/monte_carlo_simulation.md` with architecture cross-links.
|
||||||
|
- Polished reporting HTML contexts by cleaning stray fragments in `routes/reports.py`, adding download action metadata for project and scenario pages, and generating scenario comparison download URLs with correctly serialised repeated `scenario_ids` parameters.
|
||||||
|
- Consolidated Alembic history into a single initial migration (`20251111_00_initial_schema.py`), removed superseded revision files, and ensured Alembic metadata still references the project metadata for clean bootstrap.
|
||||||
|
- Added `scripts/run_migrations.py` and a Docker entrypoint wrapper to run Alembic migrations before `uvicorn` starts, removed the fallback `Base.metadata.create_all` call, and updated `calminer-docs/admin/installation.md` so developers know how to apply migrations locally or via Docker.
|
||||||
|
- Configured pytest defaults to collect coverage (`--cov`) with an 80% fail-under gate, excluded entrypoint/reporting scaffolds from the calculation, updated contributor docs with the standard `pytest` command, and verified the suite now reports 83% coverage.
|
||||||
|
- Standardized color scheme and typography by moving alert styles to `main.css`, adding typography rules with CSS variables, updating auth templates for consistent button classes, and ensuring all templates use centralized color and spacing variables.
|
||||||
|
- Improved navigation flow by adding two big chevron buttons on top of the navigation sidebar to allow users to navigate to the previous and next page in the page navigation list, including JavaScript logic for determining current page and handling navigation.
|
||||||
|
- Established pytest-based unit and integration test suites with coverage thresholds, achieving 83% coverage across 181 tests, with configuration in pyproject.toml and documentation in CONTRIBUTING.md.
|
||||||
|
- Configured CI pipelines to run tests, linting, and security checks on each change, adding Bandit security scanning to the workflow and verifying execution on pushes and PRs to main/develop branches.
|
||||||
|
- Added deployment automation with Docker Compose for local development and Kubernetes manifests for production, ensuring environment parity and documenting processes in calminer-docs/admin/installation.md.
|
||||||
|
- Completed monitoring instrumentation by adding business metrics observation to project and scenario repository operations, and simulation performance tracking to Monte Carlo service with success/error status and duration metrics.
|
||||||
|
- Updated TODO list to reflect completed monitoring implementation tasks and validated changes with passing simulation tests.
|
||||||
|
- Implemented comprehensive performance monitoring for scalability (FR-006) with Prometheus metrics collection for HTTP requests, import/export operations, and general application metrics.
|
||||||
|
- Added database model for persistent metric storage with aggregation endpoints for KPIs like request latency, error rates, and throughput.
|
||||||
|
- Created FastAPI middleware for automatic request metric collection and background persistence to database.
|
||||||
|
- Extended the monitoring router with performance metrics API endpoints and detailed health checks.
- Added an Alembic migration for the performance_metrics table and updated model imports.
- Completed the concurrent interaction testing implementation, validating database transaction isolation under threading and establishing an async testing framework for future concurrency enhancements.
- Implemented comprehensive deployment automation with Docker Compose configurations for development, staging, and production environments, ensuring environment parity.
- Set up Kubernetes manifests with resource limits, health checks, and secrets management for production deployment.
- Configured CI/CD workflows for automated Docker image building, registry pushing, and Kubernetes deployment to staging/production environments.
- Documented deployment processes, environment configurations, and CI/CD workflows in the project documentation.
- Validated the deployment automation through Docker Compose configuration testing and a review of the CI/CD pipeline structure.

## 2025-11-10

- Added dedicated pytest coverage for guard dependencies, exercising success plus failure paths (missing session, inactive user, missing roles, project/scenario access errors) via `tests/test_dependencies_guards.py`.
- Added integration tests in `tests/test_authorization_integration.py` verifying anonymous 401 responses, role-based 403s, and authorized project manager flows across API and UI endpoints.
- Implemented environment-driven admin bootstrap settings, wired the `bootstrap_admin` helper into FastAPI startup, added pytest coverage for creation/idempotency/reset logic, and documented operational guidance in the RBAC plan and security concept.
- Retired the legacy authentication RBAC implementation plan document after migrating its guidance into live documentation and synchronized the contributor instructions to reflect the removal.
- Completed the Authentication & RBAC checklist by shipping the new models, migrations, repositories, guard dependencies, and integration tests.
- Documented the project/scenario import/export field mapping and file format guidelines in `calminer-docs/requirements/FR-008.md`, and introduced `schemas/imports.py` with Pydantic models that normalise incoming CSV/Excel rows for projects and scenarios.
- Added `services/importers.py` to load CSV/XLSX files into the new import schemas (see the sketch below), pulled in `openpyxl` for Excel support, and covered the parsing behaviour with `tests/test_import_parsing.py`.
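`services/importers.py` is summarised rather than shown here; the core row-loading idea with the standard `csv` module and `openpyxl` looks roughly like this (a sketch under those assumptions; the real loader feeds each row through the Pydantic import schemas):

```python
import csv
from pathlib import Path

from openpyxl import load_workbook


def load_rows(path: Path) -> list[dict]:
    """Return raw row dicts from a CSV or XLSX file for schema validation."""
    if path.suffix.lower() == ".csv":
        with path.open(newline="", encoding="utf-8") as handle:
            return list(csv.DictReader(handle))

    # read_only keeps memory flat for large workbooks.
    workbook = load_workbook(path, read_only=True)
    sheet = workbook.active
    rows = sheet.iter_rows(values_only=True)
    header = [str(cell) for cell in next(rows)]
    return [dict(zip(header, row)) for row in rows]
```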
- Expanded the import ingestion workflow with staging previews, transactional persistence commits, FastAPI preview/commit endpoints under `/imports`, and new API tests (`tests/test_import_ingestion.py`, `tests/test_import_api.py`) ensuring end-to-end coverage.
- Added persistent audit logging via `ImportExportLog`, structured log emission, Prometheus metrics instrumentation, `/metrics` endpoint exposure, and updated operator/deployment documentation to guide monitoring setup.

## 2025-11-09

- Captured current implementation status, requirements coverage, missing features, and a prioritized roadmap in `calminer-docs/implementation_status.md` to guide future development.
- Added core SQLAlchemy domain models, shared metadata descriptors, and Alembic migration setup (with initial schema snapshot) to establish the persistence layer foundation.
- Introduced repository and unit-of-work helpers for projects, scenarios, financial inputs, and simulation parameters to support service-layer operations (the general shape is sketched below).
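The actual `services/unit_of_work.py` is not part of this excerpt; the general shape of a unit-of-work over the `SessionLocal` factory configured in `config/__init__.py` is roughly the following, where the repository attributes are assumptions:

```python
from config import SessionLocal  # sessionmaker configured in config/__init__.py


class UnitOfWork:
    """Context manager that commits on success and rolls back on error."""

    def __enter__(self) -> "UnitOfWork":
        self.session = SessionLocal()
        # Repositories would be constructed here, e.g.:
        # self.projects = ProjectRepository(self.session)
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        try:
            if exc_type is None:
                self.session.commit()
            else:
                self.session.rollback()
        finally:
            self.session.close()
```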
- Added SQLite-backed pytest coverage for repository and unit-of-work behaviours to validate persistence interactions.
- Exposed project and scenario CRUD APIs with validated schemas and integrated them into the FastAPI application.
- Connected project and scenario routers to new Jinja2 list/detail/edit views with HTML forms and redirects.
- Implemented FR-009 client-side enhancements with responsive navigation toggle, mobile-first scenario tables, and shared asset loading across templates.
- Added scenario comparison validator, FastAPI comparison endpoint, and comprehensive unit tests to enforce FR-009 validation rules through API errors.
- Delivered a new dashboard experience with `templates/dashboard.html`, dedicated styling, and a FastAPI route supplying real project/scenario metrics via repository helpers.
- Extended repositories with count/recency utilities and added pytest coverage, including a dashboard rendering smoke test validating empty-state messaging.
- Brought project and scenario detail pages plus their forms in line with the dashboard visuals, adding metric cards, layout grids, and refreshed CTA styles.
- Reordered project route registration to prioritize static UI paths, eliminating 422 errors on `/projects/ui` and `/projects/create` (illustrated below), and added pytest smoke coverage for the navigation endpoints.
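The underlying rule: FastAPI matches routes in declaration order, so a literal segment such as `/projects/ui` must be registered before `/projects/{project_id}`, otherwise `ui` is coerced into the integer path parameter and rejected with a 422. A minimal illustration (handler bodies elided):

```python
from fastapi import APIRouter

router = APIRouter(prefix="/projects")


@router.get("/ui")  # static path registered first
def projects_ui():
    ...


@router.get("/create")  # static path registered first
def project_create_form():
    ...


@router.get("/{project_id}")  # parameterised path registered last
def project_detail(project_id: int):
    ...
```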
- Added end-to-end integration tests for project and scenario lifecycles, validating HTML redirects, template rendering, and API interactions, and updated `ProjectRepository.get` to deduplicate joined loads for detail views.
- Updated all Jinja2 template responses to the new Starlette signature to eliminate deprecation warnings while keeping request-aware context available to the templates.
- Introduced `services/security.py` to centralize Argon2 password hashing utilities and JWT creation/verification with typed payloads (a sketch of the building blocks follows), and added pytest coverage for hashing, expiry, tampering, and token type mismatch scenarios.
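`services/security.py` itself is not shown in this excerpt; a minimal sketch of the two building blocks with `argon2-cffi` and `PyJWT`, where the function names and payload shape are assumptions rather than the module's actual API:

```python
from datetime import datetime, timedelta, timezone

import jwt  # PyJWT
from argon2 import PasswordHasher
from argon2.exceptions import VerifyMismatchError

_hasher = PasswordHasher()


def hash_password(password: str) -> str:
    return _hasher.hash(password)


def verify_password(stored_hash: str, candidate: str) -> bool:
    try:
        return _hasher.verify(stored_hash, candidate)
    except VerifyMismatchError:
        return False


def create_token(subject: str, secret: str, ttl: timedelta, token_type: str) -> str:
    payload = {
        "sub": subject,
        "type": token_type,  # lets verification reject access/refresh mix-ups
        "exp": datetime.now(timezone.utc) + ttl,
    }
    return jwt.encode(payload, secret, algorithm="HS256")


def verify_token(token: str, secret: str, expected_type: str) -> dict:
    # Raises jwt.ExpiredSignatureError / jwt.InvalidSignatureError on bad tokens.
    payload = jwt.decode(token, secret, algorithms=["HS256"])
    if payload.get("type") != expected_type:
        raise ValueError("Unexpected token type")
    return payload
```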
- Added `routes/auth.py` with registration, login, and password reset flows, refreshed auth templates with error messaging, wired navigation links, and introduced end-to-end pytest coverage for the new forms and token flows.
- Implemented cookie-based authentication session middleware with automatic access token refresh, logout handling, navigation adjustments, and documentation/test updates capturing the new behaviour.
- Delivered idempotent seeding utilities with `scripts/initial_data.py`, entry-point runner `scripts/00_initial_data.py`, documentation updates, and pytest coverage to verify role/admin provisioning.
- Secured project and scenario routers with RBAC guard dependencies, enforced repository access checks via helper utilities, and aligned template routes with FastAPI dependency injection patterns.

1
config/__init__.py
Normal file
@@ -0,0 +1 @@
"""Configuration package."""
@@ -11,12 +11,21 @@ def _build_database_url() -> str:
     """Construct the SQLAlchemy database URL from granular environment vars.

     Falls back to `DATABASE_URL` for backward compatibility.
+    Supports SQLite when CALMINER_USE_SQLITE is set.
     """

     legacy_url = os.environ.get("DATABASE_URL", "")
     if legacy_url and legacy_url.strip() != "":
         return legacy_url

+    use_sqlite = os.environ.get("CALMINER_USE_SQLITE", "").lower() in ("true", "1", "yes")
+    if use_sqlite:
+        # Use SQLite database
+        db_path = os.environ.get("DATABASE_PATH", "./data/calminer.db")
+        # Ensure the directory exists
+        os.makedirs(os.path.dirname(db_path), exist_ok=True)
+        return f"sqlite:///{db_path}"
+
     driver = os.environ.get("DATABASE_DRIVER", "postgresql")
     host = os.environ.get("DATABASE_HOST")
     port = os.environ.get("DATABASE_PORT", "5432")
@@ -54,7 +63,15 @@ def _build_database_url() -> str:
 DATABASE_URL = _build_database_url()

 engine = create_engine(DATABASE_URL, echo=True, future=True)
-SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+# Avoid expiring ORM objects on commit so that objects returned from UnitOfWork
+# remain usable for the duration of the request cycle without causing
+# DetachedInstanceError when accessed after the session commits.
+SessionLocal = sessionmaker(
+    autocommit=False,
+    autoflush=False,
+    bind=engine,
+    expire_on_commit=False,
+)
 Base = declarative_base()
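The `expire_on_commit=False` change is easiest to see with a small snippet. With the SQLAlchemy default (`expire_on_commit=True`), attributes are expired at commit, so touching them once the session has closed raises `DetachedInstanceError`; with the setting above they stay loaded. A sketch using the project's `SessionLocal` and `Project` model, assuming the model exposes a `name` column:

```python
from config import SessionLocal
from models import Project

with SessionLocal() as session:
    project = session.get(Project, 1)
    session.commit()

# With the default expire_on_commit=True this access would raise
# DetachedInstanceError, because commit expired the attributes and the
# session is already closed; with expire_on_commit=False it just works.
print(project.name)
```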
233
config/settings.py
Normal file
@@ -0,0 +1,233 @@
from __future__ import annotations

import os
from dataclasses import dataclass
from datetime import timedelta
from functools import lru_cache

from typing import Optional

from services.pricing import PricingMetadata

from services.security import JWTSettings


@dataclass(frozen=True, slots=True)
class AdminBootstrapSettings:
    """Default administrator bootstrap configuration."""

    email: str
    username: str
    password: str
    roles: tuple[str, ...]
    force_reset: bool


@dataclass(frozen=True, slots=True)
class SessionSettings:
    """Cookie and header configuration for session token transport."""

    access_cookie_name: str
    refresh_cookie_name: str
    cookie_secure: bool
    cookie_domain: Optional[str]
    cookie_path: str
    header_name: str
    header_prefix: str
    allow_header_fallback: bool


@dataclass(frozen=True, slots=True)
class Settings:
    """Application configuration sourced from environment variables."""

    jwt_secret_key: str = "change-me"
    jwt_algorithm: str = "HS256"
    jwt_access_token_minutes: int = 15
    jwt_refresh_token_days: int = 7
    session_access_cookie_name: str = "calminer_access_token"
    session_refresh_cookie_name: str = "calminer_refresh_token"
    session_cookie_secure: bool = False
    session_cookie_domain: Optional[str] = None
    session_cookie_path: str = "/"
    session_header_name: str = "Authorization"
    session_header_prefix: str = "Bearer"
    session_allow_header_fallback: bool = True
    admin_email: str = "admin@calminer.local"
    admin_username: str = "admin"
    admin_password: str = "ChangeMe123!"
    admin_roles: tuple[str, ...] = ("admin",)
    admin_force_reset: bool = False
    pricing_default_payable_pct: float = 100.0
    pricing_default_currency: str | None = "USD"
    pricing_moisture_threshold_pct: float = 8.0
    pricing_moisture_penalty_per_pct: float = 0.0

    @classmethod
    def from_environment(cls) -> "Settings":
        """Construct settings from environment variables."""

        return cls(
            jwt_secret_key=os.getenv("CALMINER_JWT_SECRET", "change-me"),
            jwt_algorithm=os.getenv("CALMINER_JWT_ALGORITHM", "HS256"),
            jwt_access_token_minutes=cls._int_from_env(
                "CALMINER_JWT_ACCESS_MINUTES", 15
            ),
            jwt_refresh_token_days=cls._int_from_env(
                "CALMINER_JWT_REFRESH_DAYS", 7
            ),
            session_access_cookie_name=os.getenv(
                "CALMINER_SESSION_ACCESS_COOKIE", "calminer_access_token"
            ),
            session_refresh_cookie_name=os.getenv(
                "CALMINER_SESSION_REFRESH_COOKIE", "calminer_refresh_token"
            ),
            session_cookie_secure=cls._bool_from_env(
                "CALMINER_SESSION_COOKIE_SECURE", False
            ),
            session_cookie_domain=os.getenv("CALMINER_SESSION_COOKIE_DOMAIN"),
            session_cookie_path=os.getenv("CALMINER_SESSION_COOKIE_PATH", "/"),
            session_header_name=os.getenv(
                "CALMINER_SESSION_HEADER_NAME", "Authorization"
            ),
            session_header_prefix=os.getenv(
                "CALMINER_SESSION_HEADER_PREFIX", "Bearer"
            ),
            session_allow_header_fallback=cls._bool_from_env(
                "CALMINER_SESSION_ALLOW_HEADER_FALLBACK", True
            ),
            admin_email=os.getenv(
                "CALMINER_SEED_ADMIN_EMAIL", "admin@calminer.local"
            ),
            admin_username=os.getenv(
                "CALMINER_SEED_ADMIN_USERNAME", "admin"
            ),
            admin_password=os.getenv(
                "CALMINER_SEED_ADMIN_PASSWORD", "ChangeMe123!"
            ),
            admin_roles=cls._parse_admin_roles(
                os.getenv("CALMINER_SEED_ADMIN_ROLES")
            ),
            admin_force_reset=cls._bool_from_env(
                "CALMINER_SEED_FORCE", False
            ),
            pricing_default_payable_pct=cls._float_from_env(
                "CALMINER_PRICING_DEFAULT_PAYABLE_PCT", 100.0
            ),
            pricing_default_currency=cls._optional_str(
                "CALMINER_PRICING_DEFAULT_CURRENCY", "USD"
            ),
            pricing_moisture_threshold_pct=cls._float_from_env(
                "CALMINER_PRICING_MOISTURE_THRESHOLD_PCT", 8.0
            ),
            pricing_moisture_penalty_per_pct=cls._float_from_env(
                "CALMINER_PRICING_MOISTURE_PENALTY_PER_PCT", 0.0
            ),
        )

    @staticmethod
    def _int_from_env(name: str, default: int) -> int:
        raw_value = os.getenv(name)
        if raw_value is None:
            return default
        try:
            return int(raw_value)
        except ValueError:
            return default

    @staticmethod
    def _bool_from_env(name: str, default: bool) -> bool:
        raw_value = os.getenv(name)
        if raw_value is None:
            return default
        lowered = raw_value.strip().lower()
        if lowered in {"1", "true", "yes", "on"}:
            return True
        if lowered in {"0", "false", "no", "off"}:
            return False
        return default

    @staticmethod
    def _parse_admin_roles(raw_value: str | None) -> tuple[str, ...]:
        if not raw_value:
            return ("admin",)
        parts = [segment.strip()
                 for segment in raw_value.split(",") if segment.strip()]
        if "admin" not in parts:
            parts.insert(0, "admin")
        seen: set[str] = set()
        ordered: list[str] = []
        for role_name in parts:
            if role_name not in seen:
                ordered.append(role_name)
                seen.add(role_name)
        return tuple(ordered)

    @staticmethod
    def _float_from_env(name: str, default: float) -> float:
        raw_value = os.getenv(name)
        if raw_value is None:
            return default
        try:
            return float(raw_value)
        except ValueError:
            return default

    @staticmethod
    def _optional_str(name: str, default: str | None = None) -> str | None:
        raw_value = os.getenv(name)
        if raw_value is None or raw_value.strip() == "":
            return default
        return raw_value.strip()

    def jwt_settings(self) -> JWTSettings:
        """Build runtime JWT settings compatible with token helpers."""

        return JWTSettings(
            secret_key=self.jwt_secret_key,
            algorithm=self.jwt_algorithm,
            access_token_ttl=timedelta(minutes=self.jwt_access_token_minutes),
            refresh_token_ttl=timedelta(days=self.jwt_refresh_token_days),
        )

    def session_settings(self) -> SessionSettings:
        """Provide transport configuration for session tokens."""

        return SessionSettings(
            access_cookie_name=self.session_access_cookie_name,
            refresh_cookie_name=self.session_refresh_cookie_name,
            cookie_secure=self.session_cookie_secure,
            cookie_domain=self.session_cookie_domain,
            cookie_path=self.session_cookie_path,
            header_name=self.session_header_name,
            header_prefix=self.session_header_prefix,
            allow_header_fallback=self.session_allow_header_fallback,
        )

    def admin_bootstrap_settings(self) -> AdminBootstrapSettings:
        """Return configured admin bootstrap settings."""

        return AdminBootstrapSettings(
            email=self.admin_email,
            username=self.admin_username,
            password=self.admin_password,
            roles=self.admin_roles,
            force_reset=self.admin_force_reset,
        )

    def pricing_metadata(self) -> PricingMetadata:
        """Build pricing metadata defaults."""

        return PricingMetadata(
            default_payable_pct=self.pricing_default_payable_pct,
            default_currency=self.pricing_default_currency,
            moisture_threshold_pct=self.pricing_moisture_threshold_pct,
            moisture_penalty_per_pct=self.pricing_moisture_penalty_per_pct,
        )


@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Return cached application settings."""

    return Settings.from_environment()
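A quick usage sketch for `get_settings`: values are read from the environment once and cached, so tests that mutate `os.environ` need to clear the `lru_cache` first (a sketch under that assumption):

```python
import os

from config.settings import Settings, get_settings

os.environ["CALMINER_JWT_ACCESS_MINUTES"] = "30"
get_settings.cache_clear()  # lru_cache would otherwise keep the first result

settings = get_settings()
assert isinstance(settings, Settings)
assert settings.jwt_access_token_minutes == 30
```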
@@ -1,35 +0,0 @@
# Copy this file to config/setup_production.env and replace values with production secrets

# Container image and runtime configuration
CALMINER_IMAGE=registry.example.com/calminer/api:latest
CALMINER_DOMAIN=calminer.example.com
TRAEFIK_ACME_EMAIL=ops@example.com
CALMINER_API_PORT=8000
UVICORN_WORKERS=4
UVICORN_LOG_LEVEL=info
CALMINER_NETWORK=calminer_backend
API_LIMIT_CPUS=1.0
API_LIMIT_MEMORY=1g
API_RESERVATION_MEMORY=512m
TRAEFIK_LIMIT_CPUS=0.5
TRAEFIK_LIMIT_MEMORY=512m
POSTGRES_LIMIT_CPUS=1.0
POSTGRES_LIMIT_MEMORY=2g
POSTGRES_RESERVATION_MEMORY=1g

# Application database connection
DATABASE_DRIVER=postgresql+psycopg2
DATABASE_HOST=production-db.internal
DATABASE_PORT=5432
DATABASE_NAME=calminer
DATABASE_USER=calminer_app
DATABASE_PASSWORD=ChangeMe123!
DATABASE_SCHEMA=public

# Optional consolidated SQLAlchemy URL (overrides granular settings when set)
# DATABASE_URL=postgresql+psycopg2://calminer_app:ChangeMe123!@production-db.internal:5432/calminer

# Superuser credentials used by scripts/setup_database.py for migrations/seed data
DATABASE_SUPERUSER=postgres
DATABASE_SUPERUSER_PASSWORD=ChangeMeSuper123!
DATABASE_SUPERUSER_DB=postgres
@@ -1,11 +0,0 @@
# Sample environment configuration for staging deployment
DATABASE_HOST=staging-db.internal
DATABASE_PORT=5432
DATABASE_NAME=calminer_staging
DATABASE_USER=calminer_app
DATABASE_PASSWORD=<app-password>

# Admin connection used for provisioning database and roles
DATABASE_SUPERUSER=postgres
DATABASE_SUPERUSER_PASSWORD=<admin-password>
DATABASE_SUPERUSER_DB=postgres
@@ -1,14 +0,0 @@
# Sample environment configuration for running scripts/setup_database.py against a test instance
DATABASE_DRIVER=postgresql
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_NAME=calminer_test
DATABASE_USER=calminer_test
DATABASE_PASSWORD=<test-password>
# optional: specify schema if different from 'public'
#DATABASE_SCHEMA=public

# Admin connection used for provisioning database and roles
DATABASE_SUPERUSER=postgres
DATABASE_SUPERUSER_PASSWORD=<superuser-password>
DATABASE_SUPERUSER_DB=postgres
400
dependencies.py
Normal file
@@ -0,0 +1,400 @@
from __future__ import annotations

from collections.abc import Callable, Iterable, Generator

from fastapi import Depends, HTTPException, Request, status

from config.settings import Settings, get_settings
from models import Project, Role, Scenario, User
from services.authorization import (
    ensure_project_access as ensure_project_access_helper,
    ensure_scenario_access as ensure_scenario_access_helper,
    ensure_scenario_in_project as ensure_scenario_in_project_helper,
)
from services.exceptions import AuthorizationError, EntityNotFoundError
from services.security import JWTSettings
from services.session import (
    AuthSession,
    SessionStrategy,
    SessionTokens,
    build_session_strategy,
    extract_session_tokens,
)
from services.unit_of_work import UnitOfWork
from services.importers import ImportIngestionService
from services.pricing import PricingMetadata
from services.navigation import NavigationService
from services.scenario_evaluation import ScenarioPricingConfig, ScenarioPricingEvaluator
from services.repositories import pricing_settings_to_metadata


def get_unit_of_work() -> Generator[UnitOfWork, None, None]:
    """FastAPI dependency yielding a unit-of-work instance."""

    with UnitOfWork() as uow:
        yield uow


_IMPORT_INGESTION_SERVICE = ImportIngestionService(lambda: UnitOfWork())


def get_import_ingestion_service() -> ImportIngestionService:
    """Provide singleton import ingestion service."""

    return _IMPORT_INGESTION_SERVICE


def get_application_settings() -> Settings:
    """Provide cached application settings instance."""

    return get_settings()


def get_pricing_metadata(
    settings: Settings = Depends(get_application_settings),
    uow: UnitOfWork = Depends(get_unit_of_work),
) -> PricingMetadata:
    """Return pricing metadata defaults sourced from persisted pricing settings."""

    stored = uow.get_pricing_metadata()
    if stored is not None:
        return stored

    fallback = settings.pricing_metadata()
    seed_result = uow.ensure_default_pricing_settings(metadata=fallback)
    return pricing_settings_to_metadata(seed_result.settings)


def get_navigation_service(
    uow: UnitOfWork = Depends(get_unit_of_work),
) -> NavigationService:
    if not uow.navigation:
        raise RuntimeError("Navigation repository is not initialised")
    return NavigationService(uow.navigation)


def get_pricing_evaluator(
    metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> ScenarioPricingEvaluator:
    """Provide a configured scenario pricing evaluator."""

    return ScenarioPricingEvaluator(ScenarioPricingConfig(metadata=metadata))


def get_jwt_settings() -> JWTSettings:
    """Provide JWT runtime configuration derived from settings."""

    return get_settings().jwt_settings()


def get_session_strategy(
    settings: Settings = Depends(get_application_settings),
) -> SessionStrategy:
    """Yield configured session transport strategy."""

    return build_session_strategy(settings.session_settings())


def get_session_tokens(
    request: Request,
    strategy: SessionStrategy = Depends(get_session_strategy),
) -> SessionTokens:
    """Extract raw session tokens from the incoming request."""

    existing = getattr(request.state, "auth_session", None)
    if isinstance(existing, AuthSession):
        return existing.tokens

    tokens = extract_session_tokens(request, strategy)
    request.state.auth_session = AuthSession(tokens=tokens)
    return tokens


def get_auth_session(
    request: Request,
    tokens: SessionTokens = Depends(get_session_tokens),
) -> AuthSession:
    """Provide authentication session context for the current request."""

    existing = getattr(request.state, "auth_session", None)
    if isinstance(existing, AuthSession):
        return existing

    if tokens.is_empty:
        session = AuthSession.anonymous()
    else:
        session = AuthSession(tokens=tokens)
    request.state.auth_session = session
    return session


def get_current_user(
    session: AuthSession = Depends(get_auth_session),
) -> User | None:
    """Return the current authenticated user if present."""

    return session.user


def require_current_user(
    session: AuthSession = Depends(get_auth_session),
) -> User:
    """Ensure that a request is authenticated and return the user context."""

    if session.user is None or session.tokens.is_empty:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Authentication required.",
        )
    return session.user


def require_authenticated_user(
    user: User = Depends(require_current_user),
) -> User:
    """Ensure the current user account is active."""

    if not user.is_active:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="User account is disabled.",
        )
    return user


def require_authenticated_user_html(
    request: Request,
    session: AuthSession = Depends(get_auth_session),
) -> User:
    """HTML-aware authenticated dependency that redirects anonymous sessions."""

    user = session.user
    if user is None or session.tokens.is_empty:
        login_url = str(request.url_for("auth.login_form"))
        raise HTTPException(
            status_code=status.HTTP_303_SEE_OTHER,
            headers={"Location": login_url},
        )

    if not user.is_active:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="User account is disabled.",
        )
    return user


def _user_role_names(user: User) -> set[str]:
    roles: Iterable[Role] = getattr(user, "roles", []) or []
    return {role.name for role in roles}


def require_roles(*roles: str) -> Callable[[User], User]:
    """Dependency factory enforcing membership in one of the given roles."""

    required = tuple(role.strip() for role in roles if role.strip())
    if not required:
        raise ValueError("require_roles requires at least one role name")

    def _dependency(user: User = Depends(require_authenticated_user)) -> User:
        if user.is_superuser:
            return user

        role_names = _user_role_names(user)
        if not any(role in role_names for role in required):
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Insufficient permissions for this action.",
            )
        return user

    return _dependency


def require_any_role(*roles: str) -> Callable[[User], User]:
    """Alias of require_roles for readability in some contexts."""

    return require_roles(*roles)


def require_roles_html(*roles: str) -> Callable[[Request], User]:
    """Ensure user is authenticated for HTML responses; redirect anonymous to login."""

    required = tuple(role.strip() for role in roles if role.strip())
    if not required:
        raise ValueError("require_roles_html requires at least one role name")

    def _dependency(
        request: Request,
        session: AuthSession = Depends(get_auth_session),
    ) -> User:
        user = session.user
        if user is None:
            login_url = str(request.url_for("auth.login_form"))
            raise HTTPException(
                status_code=status.HTTP_303_SEE_OTHER,
                headers={"Location": login_url},
            )

        if user.is_superuser:
            return user

        role_names = _user_role_names(user)
        if not any(role in role_names for role in required):
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Insufficient permissions for this action.",
            )
        return user

    return _dependency


def require_any_role_html(*roles: str) -> Callable[[Request], User]:
    """Alias of require_roles_html for readability."""

    return require_roles_html(*roles)


def require_project_resource(
    *,
    require_manage: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int], Project]:
    """Dependency factory that resolves a project with authorization checks."""

    def _dependency(
        project_id: int,
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Project:
        try:
            return ensure_project_access_helper(
                uow,
                project_id=project_id,
                user=user,
                require_manage=require_manage,
            )
        except EntityNotFoundError as exc:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=str(exc),
            ) from exc
        except AuthorizationError as exc:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=str(exc),
            ) from exc

    return _dependency


def require_scenario_resource(
    *,
    require_manage: bool = False,
    with_children: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int], Scenario]:
    """Dependency factory that resolves a scenario with authorization checks."""

    def _dependency(
        scenario_id: int,
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Scenario:
        try:
            return ensure_scenario_access_helper(
                uow,
                scenario_id=scenario_id,
                user=user,
                require_manage=require_manage,
                with_children=with_children,
            )
        except EntityNotFoundError as exc:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=str(exc),
            ) from exc
        except AuthorizationError as exc:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=str(exc),
            ) from exc

    return _dependency


def require_project_scenario_resource(
    *,
    require_manage: bool = False,
    with_children: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int, int], Scenario]:
    """Dependency factory ensuring a scenario belongs to the given project and is accessible."""

    def _dependency(
        project_id: int,
        scenario_id: int,
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Scenario:
        try:
            return ensure_scenario_in_project_helper(
                uow,
                project_id=project_id,
                scenario_id=scenario_id,
                user=user,
                require_manage=require_manage,
                with_children=with_children,
            )
        except EntityNotFoundError as exc:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=str(exc),
            ) from exc
        except AuthorizationError as exc:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=str(exc),
            ) from exc

    return _dependency


def require_project_resource_html(
    *, require_manage: bool = False
) -> Callable[[int], Project]:
    """HTML-aware project loader that redirects anonymous sessions."""

    return require_project_resource(
        require_manage=require_manage,
        user_dependency=require_authenticated_user_html,
    )


def require_scenario_resource_html(
    *,
    require_manage: bool = False,
    with_children: bool = False,
) -> Callable[[int], Scenario]:
    """HTML-aware scenario loader that redirects anonymous sessions."""

    return require_scenario_resource(
        require_manage=require_manage,
        with_children=with_children,
        user_dependency=require_authenticated_user_html,
    )


def require_project_scenario_resource_html(
    *,
    require_manage: bool = False,
    with_children: bool = False,
) -> Callable[[int, int], Scenario]:
    """HTML-aware project-scenario loader redirecting anonymous sessions."""

    return require_project_scenario_resource(
        require_manage=require_manage,
        with_children=with_children,
        user_dependency=require_authenticated_user_html,
    )
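A short sketch of how routers compose these guards; the route path and role names are illustrative, not taken from the repository:

```python
from fastapi import APIRouter, Depends

from dependencies import require_project_resource, require_roles
from models import Project, User

router = APIRouter(prefix="/projects")


@router.delete("/{project_id}")
def delete_project(
    project: Project = Depends(require_project_resource(require_manage=True)),
    user: User = Depends(require_roles("admin", "project_manager")),
) -> dict:
    # `project` is already loaded and authorised (404/403 raised otherwise);
    # `user` is active and holds at least one of the required roles.
    return {"deleted": project.id}
```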
59
docker-compose.override.yml
Normal file
@@ -0,0 +1,59 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=development
      - DEBUG=true
      - LOG_LEVEL=DEBUG
      # Override database to use local postgres service
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=calminer
      - DATABASE_PASSWORD=calminer_password
      - DATABASE_NAME=calminer_db
      - DATABASE_DRIVER=postgresql
      # Development-specific settings
      - CALMINER_EXPORT_MAX_ROWS=1000
      - CALMINER_IMPORT_MAX_ROWS=10000
    volumes:
      # Mount source code for live reloading (if using --reload)
      - .:/app:ro
      # Override logs volume to local for easier access
      - ./logs:/app/logs
    ports:
      - "8003:8003"
    # Override command for development with reload
    command:
      [
        "main:app",
        "--host",
        "0.0.0.0",
        "--port",
        "8003",
        "--reload",
        "--workers",
        "1",
      ]
    depends_on:
      - postgres
    restart: unless-stopped

  postgres:
    environment:
      - POSTGRES_USER=calminer
      - POSTGRES_PASSWORD=calminer_password
      - POSTGRES_DB=calminer_db
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped

volumes:
  postgres_data:
77
docker-compose.prod.yml
Normal file
@@ -0,0 +1,77 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=production
      - DEBUG=false
      - LOG_LEVEL=WARNING
      # Database configuration - must be provided externally
      - DATABASE_HOST=${DATABASE_HOST}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME}
      - DATABASE_DRIVER=postgresql
      # Production-specific settings
      - CALMINER_EXPORT_MAX_ROWS=100000
      - CALMINER_IMPORT_MAX_ROWS=100000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=3600
    ports:
      - "8003:8003"
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    # Production health checks
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for production
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 1G
        reservations:
          cpus: "0.5"
          memory: 512M

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Production postgres health check
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DATABASE_USER} -d ${DATABASE_NAME}"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for postgres
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 2G
        reservations:
          cpus: "0.5"
          memory: 1G

volumes:
  postgres_data:
62
docker-compose.staging.yml
Normal file
@@ -0,0 +1,62 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=staging
      - DEBUG=false
      - LOG_LEVEL=INFO
      # Database configuration - can be overridden by external env
      - DATABASE_HOST=${DATABASE_HOST:-postgres}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER:-calminer}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME:-calminer_db}
      - DATABASE_DRIVER=postgresql
      # Staging-specific settings
      - CALMINER_EXPORT_MAX_ROWS=50000
      - CALMINER_IMPORT_MAX_ROWS=50000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=600
    ports:
      - "8003:8003"
    depends_on:
      - postgres
    restart: unless-stopped
    # Health check for staging
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER:-calminer}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME:-calminer_db}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Health check for postgres
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "pg_isready -U ${DATABASE_USER:-calminer} -d ${DATABASE_NAME:-calminer_db}",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  postgres_data:
@@ -8,11 +8,13 @@ services:
     ports:
       - "8003:8003"
     environment:
-      - DATABASE_HOST=postgres
-      - DATABASE_PORT=5432
-      - DATABASE_USER=calminer
-      - DATABASE_PASSWORD=calminer_password
-      - DATABASE_NAME=calminer_db
+      # Environment-specific variables should be set in override files
+      - ENVIRONMENT=${ENVIRONMENT:-production}
+      - DATABASE_HOST=${DATABASE_HOST:-postgres}
+      - DATABASE_PORT=${DATABASE_PORT:-5432}
+      - DATABASE_USER=${DATABASE_USER}
+      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
+      - DATABASE_NAME=${DATABASE_NAME}
       - DATABASE_DRIVER=postgresql
     depends_on:
       - postgres
@@ -23,9 +25,9 @@ services:
   postgres:
     image: postgres:17
     environment:
-      - POSTGRES_USER=calminer
-      - POSTGRES_PASSWORD=calminer_password
-      - POSTGRES_DB=calminer_db
+      - POSTGRES_USER=${DATABASE_USER}
+      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
+      - POSTGRES_DB=${DATABASE_NAME}
     ports:
       - "5432:5432"
     volumes:
@@ -1,62 +0,0 @@
---
title: "01 — Introduction and Goals"
description: "System purpose, stakeholders, and high-level goals; project introduction and business/technical goals."
status: draft
---

# 01 — Introduction and Goals

## Purpose

CalMiner aims to provide a comprehensive platform for mining project scenario analysis, enabling stakeholders to make informed decisions based on data-driven insights.

## Stakeholders

- **Project Managers**: Require tools for scenario planning and risk assessment.
- **Data Analysts**: Need access to historical data and simulation results for analysis.
- **Executives**: Seek high-level insights and reporting for strategic decision-making.

## High-Level Goals

1. **Comprehensive Scenario Analysis**: Enable users to create and analyze multiple project scenarios to assess risks and opportunities.
2. **Data-Driven Decision Making**: Provide stakeholders with the insights needed to make informed decisions based on simulation results.
3. **User-Friendly Interface**: Ensure the platform is accessible and easy to use for all stakeholders, regardless of technical expertise.

## System Overview

FastAPI application that collects mining project inputs, persists scenario-specific records, and surfaces aggregated insights. The platform targets Monte Carlo driven planning, with deterministic CRUD features in place and simulation logic staged for future work.

Frontend components are server-rendered Jinja2 templates, with Chart.js powering the dashboard visualization. The backend leverages SQLAlchemy for ORM mapping to a PostgreSQL database.

### Runtime Flow

1. Users navigate to form templates or API clients to manage scenarios, parameters, and operational data.
2. FastAPI routers validate payloads with Pydantic models, then delegate to SQLAlchemy sessions for persistence.
3. Simulation runs (placeholder `services/simulation.py`) will consume stored parameters to emit iteration results via `/api/simulations/run`.
4. Reporting requests POST simulation outputs to `/api/reporting/summary`; the reporting service calculates aggregates (count, min/max, mean, median, percentiles, standard deviation, variance, and tail-risk metrics at the 95% confidence level).
5. `templates/Dashboard.html` fetches summaries, renders metric cards, and plots distribution charts with Chart.js for stakeholder review.

### Current implementation status (summary)

- Currency normalization, simulation scaffold, and reporting service exist; see [quickstart](../quickstart.md) for full status and migration instructions.

## MVP Features (migrated)

The following MVP features and priorities were defined during initial planning.

### Prioritized Features

1. **Scenario Creation and Management** (High Priority): Allow users to create, edit, and delete scenarios. Rationale: Core functionality for what-if analysis.
1. **Parameter Input and Validation** (High Priority): Input process parameters with validation. Rationale: Ensures data integrity for simulations.
1. **Monte Carlo Simulation Run** (High Priority): Execute simulations and store results. Rationale: Key differentiator for risk analysis.
1. **Basic Reporting** (Medium Priority): Display NPV, IRR, EBITDA from simulation results. Rationale: Essential for decision-making.
1. **Cost Tracking Dashboard** (Medium Priority): Visualize CAPEX and OPEX. Rationale: Helps monitor expenses.
1. **Consumption Monitoring** (Low Priority): Track resource consumption. Rationale: Useful for optimization.
1. **User Authentication** (Medium Priority): Basic login/logout. Rationale: Security for multi-user access.
1. **Export Results** (Low Priority): Export simulation data to CSV/PDF. Rationale: For external analysis.

### Rationale for Prioritization

- High: Core simulation and scenario features first.
- Medium: Reporting and auth for usability.
- Low: Nice-to-haves after basics.
@@ -1,127 +0,0 @@
---
title: '02 — Architecture Constraints'
description: 'Document imposed constraints: technical, organizational, regulatory, and environmental constraints that affect architecture decisions.'
status: draft
---

# 02 — Architecture Constraints

## Constraints Overview

- [Technical Constraints](02_constraints/02_01_technical_constraints.md)
- [Organizational Constraints](02_constraints/02_02_organizational_constraints.md)
- [Regulatory Constraints](02_constraints/02_03_regulatory_constraints.md)
- [Environmental Constraints](02_constraints/02_04_environmental_constraints.md)
- [Performance Constraints](02_constraints/02_05_performance_constraints.md)

## Security Constraints

> e.g., authentication mechanisms, data encryption standards.

## Budgetary Constraints

> e.g., licensing costs, infrastructure budgets.

## Time Constraints

> e.g., project deadlines, release schedules.

## Interoperability Constraints

> e.g., integration with existing systems, third-party services.

## Maintainability Constraints

> e.g., code modularity, documentation standards.

## Usability Constraints

> e.g., user interface design principles, accessibility requirements.

## Data Constraints

> e.g., data storage formats, data retention policies.

## Deployment Constraints

> e.g., deployment environments, cloud provider limitations.

## Testing Constraints

> e.g., testing frameworks, test coverage requirements.

## Localization Constraints

> e.g., multi-language support, regional settings.

## Versioning Constraints

> e.g., API versioning strategies, backward compatibility.

## Monitoring Constraints

> e.g., logging standards, performance monitoring tools.

## Backup and Recovery Constraints

> e.g., data backup frequency, disaster recovery plans.

## Development Constraints

> e.g., coding languages, frameworks, libraries to be used or avoided.

## Collaboration Constraints

> e.g., communication tools, collaboration platforms.

## Documentation Constraints

> e.g., documentation tools, style guides.

## Training Constraints

> e.g., training programs, skill development initiatives.

## Support Constraints

> e.g., support channels, response time expectations.

## Legal Constraints

> e.g., compliance requirements, intellectual property considerations.

## Ethical Constraints

> e.g., ethical considerations in data usage, user privacy.

## Environmental Impact Constraints

> e.g., energy consumption considerations, sustainability goals.

## Innovation Constraints

> e.g., limitations on adopting new technologies, risk tolerance for experimentation.

## Cultural Constraints

> e.g., organizational culture, team dynamics affecting development practices.

## Stakeholder Constraints

> e.g., stakeholder expectations, communication preferences.

## Change Management Constraints

> e.g., processes for handling changes, version control practices.

## Resource Constraints

> e.g., availability of hardware, software, and human resources.

## Process Constraints

> e.g., development methodologies (Agile, Scrum), project management tools.

## Quality Constraints

> e.g., code quality standards, testing requirements.
@@ -1,16 +0,0 @@
---
title: '02 — Technical Constraints'
description: 'Technical constraints that affect architecture decisions.'
status: draft
---

# Technical Constraints

> e.g., choice of FastAPI, PostgreSQL, SQLAlchemy, Chart.js, Jinja2 templates.

The architecture of CalMiner is influenced by several technical constraints that shape its design and implementation:

1. **Framework Selection**: The choice of FastAPI as the web framework imposes constraints on how the application handles requests, routing, and middleware. FastAPI's asynchronous capabilities must be leveraged appropriately to ensure optimal performance.
2. **Database Technology**: The use of PostgreSQL as the primary database system dictates the data modeling, querying capabilities, and transaction management strategies. SQLAlchemy ORM is used for database interactions, which requires adherence to its conventions and limitations.
3. **Frontend Technologies**: The decision to use Jinja2 for server-side templating and Chart.js for data visualization influences the structure of the frontend code and the way dynamic content is rendered.
4. **Simulation Logic**: The Monte Carlo simulation logic must be designed to efficiently handle large datasets and perform computations within the constraints of the chosen programming language (Python) and its libraries.
@@ -1,18 +0,0 @@
---
title: '02 — Organizational Constraints'
description: 'Organizational constraints that affect architecture decisions.'
status: draft
---

# Organizational Constraints

> e.g., team skillsets, development workflows, CI/CD pipelines.

Restrictions arising from organizational factors include:

1. **Team Expertise**: The development team’s familiarity with FastAPI, SQLAlchemy, and frontend technologies like Jinja2 and Chart.js influences the architecture choices to ensure maintainability and ease of development.
2. **Development Processes**: The adoption of Agile methodologies and CI/CD pipelines (using Gitea Actions) shapes the architecture to support continuous integration, automated testing, and deployment practices.
3. **Collaboration Tools**: The use of specific collaboration and version control tools (e.g., Gitea) affects how code is managed, reviewed, and integrated, impacting the overall architecture and development workflow.
4. **Documentation Standards**: The requirement for comprehensive documentation (as seen in the `docs/` folder) necessitates an architecture that is well-structured and easy to understand for both current and future team members.
5. **Knowledge Sharing**: The need for effective knowledge sharing and onboarding processes influences the architecture to ensure that it is accessible and understandable for new team members.
6. **Resource Availability**: The availability of hardware, software, and human resources within the organization can impose constraints on the architecture, affecting decisions related to scalability, performance, and feature implementation.
@@ -1,17 +0,0 @@
---
title: '02 — Regulatory Constraints'
description: 'Regulatory constraints that affect architecture decisions.'
status: draft
---

# Regulatory Constraints

> e.g., data privacy laws, industry standards.

Regulatory constraints that impact the architecture of CalMiner include:

1. **Data Privacy Compliance**: The architecture must ensure compliance with data privacy regulations such as GDPR or CCPA, which may dictate how user data is collected, stored, and processed.
2. **Industry Standards**: Adherence to industry-specific standards and best practices may influence the design of data models, security measures, and reporting functionalities.
3. **Auditability**: The system may need to incorporate logging and auditing features to meet regulatory requirements, affecting the architecture of data storage and access controls.
4. **Data Retention Policies**: Regulatory requirements regarding data retention and deletion may impose constraints on how long certain types of data can be stored, influencing database design and data lifecycle management.
5. **Security Standards**: Compliance with security standards (e.g., ISO/IEC 27001) may necessitate the implementation of specific security measures, such as encryption, access controls, and vulnerability management, which impact the overall architecture.
@@ -1,16 +0,0 @@
---
title: '02 — Environmental Constraints'
description: 'Environmental constraints that affect architecture decisions.'
status: draft
---

# Environmental Constraints

> e.g., deployment environments, cloud provider limitations.

Environmental constraints affecting the architecture include:

1. **Deployment Environments**: The architecture must accommodate various deployment environments (development, testing, production) with differing configurations and resource allocations.
2. **Cloud Provider Limitations**: If deployed on a specific cloud provider, the architecture may need to align with the provider's services, limitations, and best practices, such as using managed databases or specific container orchestration tools.
3. **Containerization**: The use of Docker for containerization imposes constraints on how the application is packaged, deployed, and scaled, influencing the architecture to ensure compatibility with container orchestration platforms.
4. **Scalability Requirements**: The architecture must be designed to scale efficiently based on anticipated load and usage patterns, considering the limitations of the chosen infrastructure.
@@ -1,14 +0,0 @@
---
title: '02 — Performance Constraints'
description: 'Performance constraints that affect architecture decisions.'
status: draft
---

# Performance Constraints

> e.g., response time requirements, scalability needs.

Current performance constraints include:

1. **Response Time Requirements**: The architecture must ensure that the system can respond to user requests within a specified time frame, which may impact design decisions related to caching, database queries, and API performance.
2. **Scalability Needs**: The system should be able to handle increased load and user traffic without significant degradation in performance, necessitating a scalable architecture that can grow with demand.
@@ -1,40 +0,0 @@
---
title: "03 — Context and Scope"
description: "Describe system context, external actors, and the scope of the architecture."
status: draft
---

# 03 — Context and Scope

## System Context

The CalMiner system operates within the context of mining project management, providing tools for scenario analysis and decision support. It interacts with various data sources, including historical project data and real-time operational metrics.

## External Actors

- **Project Managers**: Utilize the platform for scenario planning and risk assessment.
- **Data Analysts**: Analyze simulation results and derive insights.
- **Executives**: Review high-level reports and dashboards for strategic decision-making.

## Scope of the Architecture

See [Architecture Scope](03_scope/03_01_architecture_scope.md) for details.

## Diagram

```mermaid
sequenceDiagram
    participant PM as Project Manager
    participant DA as Data Analyst
    participant EX as Executive
    participant CM as CalMiner System

    PM->>CM: Create and manage scenarios
    DA->>CM: Analyze simulation results
    EX->>CM: Review reports and dashboards
    CM->>PM: Provide scenario planning tools
    CM->>DA: Deliver analysis insights
    CM->>EX: Generate high-level reports
```

This diagram illustrates how the CalMiner system interacts with its external actors.
@@ -1,26 +0,0 @@
---
title: '03 — Architecture Scope'
description: 'Key areas encompassed by the architecture.'
status: draft
---

# Architecture Scope

The architecture encompasses the following key areas:

1. **Data Ingestion**: Mechanisms for collecting and processing data from various sources.
2. **Data Storage**: Solutions for storing and managing historical and real-time data.
3. **Simulation Engine**: Core algorithms and models for scenario analysis.
   1. **Modeling Framework**: Tools for defining and managing simulation models.
   2. **Parameter Management**: Systems for handling input parameters and configurations.
   3. **Execution Engine**: Infrastructure for running simulations and processing results.
   4. **Result Storage**: Systems for storing simulation outputs for analysis and reporting.
4. **Financial Reporting**: Tools for generating reports and visualizations based on simulation outcomes.
5. **Risk Assessment**: Frameworks for identifying and evaluating potential project risks.
6. **Profitability Analysis**: Modules for calculating and analyzing project profitability metrics.
7. **User Interface**: Design and implementation of the user-facing components of the system.
8. **Security and Compliance**: Measures to ensure data security and regulatory compliance.
9. **Scalability and Performance**: Strategies for ensuring the system can handle increasing data volumes and user loads.
10. **Integration Points**: Interfaces for integrating with external systems and services.
11. **Monitoring and Logging**: Systems for tracking system performance and user activity.
12. **Maintenance and Support**: Processes for ongoing system maintenance and user support.
@@ -1,16 +0,0 @@
---
title: "04 — Solution Strategy"
description: "High-level solution strategy describing major approaches, technology choices, and trade-offs."
status: draft
---

# 04 — Solution Strategy

This section outlines the high-level solution strategy for implementing the CalMiner system, focusing on major approaches, technology choices, and trade-offs.

## Solution Strategy Overview

- [Client-Server Architecture](04_strategy/04_01_client_server_architecture.md)
- [Technology Choices](04_strategy/04_02_technology_choices.md)
- [Trade-offs](04_strategy/04_03_trade_offs.md)
- [Future Considerations](04_strategy/04_04_future_considerations.md)
@@ -1,10 +0,0 @@
---
title: '04.01 — Client-Server Architecture'
description: 'Details on the client-server architecture of CalMiner.'
---

# 04.01 — Client-Server Architecture

- **Backend**: FastAPI serves as the backend framework, providing RESTful APIs for data management, simulation execution, and reporting. It leverages SQLAlchemy for ORM-based database interactions with PostgreSQL.
- **Frontend**: Server-rendered Jinja2 templates deliver dynamic HTML views, enhanced with Chart.js for interactive data visualizations. This approach balances performance and simplicity, avoiding the complexity of a full SPA.
- **Middleware**: Custom middleware handles JSON validation to ensure data integrity before processing requests.
@@ -1,15 +0,0 @@
---
title: '04.02 — Technology Choices'
description: 'Detailed explanation of technology choices in CalMiner.'
---

# 04.02 — Technology Choices

- **FastAPI**: Chosen for its high performance, ease of use, and modern features like async support and automatic OpenAPI documentation.
- **PostgreSQL**: Selected for its robustness, scalability, and support for complex queries, making it suitable for handling the diverse data needs of mining project management.
- **SQLAlchemy**: Provides a flexible and powerful ORM layer, facilitating database interactions while maintaining code readability and maintainability.
- **Chart.js**: Utilized for its simplicity and effectiveness in rendering interactive charts, enhancing the user experience on the dashboard.
- **Jinja2**: Enables server-side rendering of HTML templates, allowing for dynamic content generation while keeping the frontend lightweight.
- **Pydantic**: Used for data validation and serialization, ensuring that incoming request payloads conform to expected schemas.
- **Docker**: Employed for containerization, ensuring consistent deployment across different environments and simplifying dependency management.
- **Redis**: Used as an in-memory data store to cache frequently accessed data, improving application performance and reducing database load.
@@ -1,14 +0,0 @@
---
title: '04.03 — Trade-offs'
description: 'Discussion of trade-offs made in the CalMiner architecture.'
---

# 04.03 — Trade-offs

- **Server-Rendered vs. SPA**: Opted for server-rendered templates over a single-page application (SPA) to reduce complexity and improve initial load times, at the cost of some interactivity.
- **Synchronous vs. Asynchronous**: While FastAPI supports async operations, the initial implementation focuses on synchronous request handling for simplicity, with plans to introduce async features as needed.
- **Monolithic vs. Microservices**: The initial architecture follows a monolithic approach for ease of development and deployment, with the possibility of refactoring into microservices as the system scales.
- **In-Memory Caching**: Implementing Redis for caching introduces additional infrastructure complexity but significantly enhances performance for read-heavy operations.
- **Database Choice**: PostgreSQL was chosen over NoSQL alternatives due to the structured nature of the data and the need for complex querying capabilities, despite potential scalability challenges.
- **Technology Familiarity**: Selected technologies align with the team's existing skill set to minimize the learning curve and accelerate development, even if some alternatives may offer marginally better performance or features.
- **Extensibility vs. Simplicity**: The architecture is designed to be extensible for future features (e.g., Monte Carlo simulation engine) while maintaining simplicity in the initial implementation to ensure timely delivery of core functionalities.
@@ -1,17 +0,0 @@
---
title: '04.04 — Future Considerations'
description: 'Future considerations for the CalMiner architecture.'
---

# 04.04 — Future Considerations

- **Scalability**: As the user base grows, consider transitioning to a microservices architecture and implementing load balancing strategies.
- **Asynchronous Processing**: Introduce asynchronous task queues (e.g., Celery) for long-running simulations to improve responsiveness.
- **Enhanced Frontend**: Explore the possibility of integrating a frontend framework (e.g., React or Vue.js) for more dynamic user interactions in future iterations.
- **Advanced Analytics**: Plan for integrating advanced analytics and machine learning capabilities to enhance simulation accuracy and reporting insights.
- **Security Enhancements**: Implement robust authentication and authorization mechanisms to protect sensitive data and ensure compliance with industry standards.
- **Continuous Integration/Continuous Deployment (CI/CD)**: Establish CI/CD pipelines to automate testing, building, and deployment processes for faster and more reliable releases.
- **Monitoring and Logging**: Integrate monitoring tools (e.g., Prometheus, Grafana) and centralized logging solutions (e.g., ELK stack) to track application performance and troubleshoot issues effectively.
- **User Feedback Loop**: Implement mechanisms for collecting user feedback to inform future development priorities and improve user experience.
- **Documentation**: Maintain comprehensive documentation for both developers and end-users to facilitate onboarding and effective use of the system.
- **Testing Strategy**: Develop a robust testing strategy, including unit, integration, and end-to-end tests, to ensure code quality and reliability as the system evolves.
@@ -1,13 +0,0 @@
---
title: '05 — Architecture Overview'
description: "This overview complements architecture with a high-level map of CalMiner's module layout and request flow."
status: draft
---

This overview complements [architecture](README.md) with a high-level map of CalMiner's module layout and request flow.

Refer to the detailed architecture chapters in `docs/architecture/`:

- Module map & components: [Building Block View](../05_building_block_view.md)
- Request flow & runtime interactions: [Runtime View](../06_runtime_view.md)
- Simulation roadmap & strategy: [Solution Strategy](../04_solution_strategy.md)
@@ -1,13 +0,0 @@
---
title: '05 — Backend Components'
description: 'Description of the backend components of the CalMiner application.'
status: draft
---

- **FastAPI application** (`main.py`): entry point that configures routers, middleware, and startup/shutdown events.
- **Routers** (`routes/`): modular route handlers for scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting. Each router defines RESTful endpoints, request/response schemas, and orchestrates service calls.
  - Routers leverage a shared dependency module (`routes/dependencies.get_db`) for SQLAlchemy session management (see the sketch after this list).
- **Models** (`models/`): SQLAlchemy ORM models representing database tables and relationships, encapsulating domain entities like Scenario, CapEx, OpEx, Consumption, ProductionOutput, Equipment, Maintenance, and SimulationResult.
- **Services** (`services/`): business logic layer that processes data, performs calculations, and interacts with models. Key services include reporting calculations and Monte Carlo simulation scaffolding.
  - `services/settings.py`: manages application settings backed by the `application_setting` table, including CSS variable defaults, persistence, and environment-driven overrides that surface in both the API and UI.
- **Database** (`config/database.py`): sets up the SQLAlchemy engine and session management for PostgreSQL interactions.
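For illustration, a minimal sketch of the `get_db` dependency pattern, assuming `config/database.py` exposes a `SessionLocal` session factory; the connection URL here is a placeholder, not the actual configuration:

```python
# Illustrative sketch of the shared SQLAlchemy session dependency; the real
# config/database.py and routes/dependencies.py may differ.
from collections.abc import Generator

from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

# Placeholder URL; the application derives this from the DATABASE_* settings.
engine = create_engine("postgresql://calminer:calminer_password@postgres:5432/calminer_db")
SessionLocal = sessionmaker(bind=engine, autoflush=False)


def get_db() -> Generator[Session, None, None]:
    """Yield one session per request and guarantee it is closed afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
```

Route handlers would then declare the session as `db: Session = Depends(get_db)`, keeping session lifecycle management out of the routers themselves.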
@@ -1,11 +0,0 @@
---
title: '05 — Frontend Components'
description: 'Description of the frontend components of the CalMiner application.'
status: draft
---

- **Templates** (`templates/`): Jinja2 templates for server-rendered HTML views, extending a shared base layout with a persistent sidebar for navigation.
- **Static Assets** (`static/`): CSS and JavaScript files for styling and interactivity. Shared CSS variables in `static/css/main.css` define the color palette, while page-specific JS modules in `static/js/` handle dynamic behaviors.
- **Reusable partials** (`templates/partials/components.html`): macro library that standardises select inputs, feedback/empty states, and table wrappers so pages remain consistent while keeping DOM hooks stable for existing JavaScript modules.
- `templates/settings.html`: Settings hub that renders theme controls and environment override tables using metadata provided by `routes/ui.py`.
- `static/js/settings.js`: applies client-side validation, form submission, and live CSS updates for theme changes, respecting environment-managed variables returned by the API.
@@ -1,88 +0,0 @@
# Theming

## Overview

CalMiner uses a centralized theming system based on CSS custom properties (variables) to ensure consistent styling across the application. The theme is stored in the database and can be customized through environment variables or the UI settings page.

## Default Theme Settings

The default theme provides a light, professional color palette suitable for business applications. The colors are defined as CSS custom properties and stored in the `application_setting` table with category "theme".

### Color Palette

| CSS Variable                | Default Value            | Description              |
| --------------------------- | ------------------------ | ------------------------ |
| `--color-background`        | `#f4f5f7`                | Main background color    |
| `--color-surface`           | `#ffffff`                | Surface/card background  |
| `--color-text-primary`      | `#2a1f33`                | Primary text color       |
| `--color-text-secondary`    | `#624769`                | Secondary text color     |
| `--color-text-muted`        | `#64748b`                | Muted text color         |
| `--color-text-subtle`       | `#94a3b8`                | Subtle text color        |
| `--color-text-invert`       | `#ffffff`                | Text on dark backgrounds |
| `--color-text-dark`         | `#0f172a`                | Dark text for contrast   |
| `--color-text-strong`       | `#111827`                | Strong/bold text         |
| `--color-primary`           | `#5f320d`                | Primary brand color      |
| `--color-primary-strong`    | `#7e4c13`                | Stronger primary         |
| `--color-primary-stronger`  | `#837c15`                | Strongest primary        |
| `--color-accent`            | `#bff838`                | Accent/highlight color   |
| `--color-border`            | `#e2e8f0`                | Default border color     |
| `--color-border-strong`     | `#cbd5e1`                | Strong border color      |
| `--color-highlight`         | `#eef2ff`                | Highlight background     |
| `--color-panel-shadow`      | `rgba(15, 23, 42, 0.08)` | Subtle shadow            |
| `--color-panel-shadow-deep` | `rgba(15, 23, 42, 0.12)` | Deeper shadow            |
| `--color-surface-alt`       | `#f8fafc`                | Alternative surface      |
| `--color-success`           | `#047857`                | Success state color      |
| `--color-error`             | `#b91c1c`                | Error state color        |

## Customization

### Environment Variables

Theme colors can be overridden using environment variables with the prefix `CALMINER_THEME_`. For example:

```bash
export CALMINER_THEME_COLOR_BACKGROUND="#000000"
export CALMINER_THEME_COLOR_ACCENT="#ff0000"
```

The variable names are derived by:

1. Removing the `--` prefix
2. Converting to uppercase
3. Replacing `-` with `_`
4. Adding the `CALMINER_THEME_` prefix
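Expressed as a small Python helper (the function name is illustrative, not taken from the codebase):

```python
# Sketch of the CSS-variable → environment-variable name mapping described
# above; css_var_to_env_name is an illustrative name, not from the codebase.
def css_var_to_env_name(css_var: str) -> str:
    """Map a CSS custom property name to its override variable name."""
    name = css_var.removeprefix("--")  # 1. remove the `--` prefix
    name = name.upper()                # 2. convert to uppercase
    name = name.replace("-", "_")      # 3. replace `-` with `_`
    return f"CALMINER_THEME_{name}"    # 4. add the CALMINER_THEME_ prefix


assert css_var_to_env_name("--color-background") == "CALMINER_THEME_COLOR_BACKGROUND"
```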
### Database Storage

Settings are stored in the `application_setting` table with:

- `category`: "theme"
- `value_type`: "color"
- `is_editable`: true

### UI Settings

Users can modify theme colors through the settings page at `/ui/settings`.

## Implementation

The theming system is implemented in:

- `services/settings.py`: Color management and defaults
- `routes/settings.py`: API endpoints for theme settings
- `static/css/main.css`: CSS variable definitions
- `templates/settings.html`: UI for theme customization

## Seeding

Default theme settings are seeded during database setup using the seed script:

```bash
python scripts/seed_data.py --theme
```

Or as part of defaults:

```bash
python scripts/seed_data.py --defaults
```
@@ -1,8 +0,0 @@
---
title: '05 — Middleware & Utilities'
description: 'Description of the middleware and utility components of the CalMiner application.'
status: draft
---

- **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers (see the sketch after this list).
- **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will be wired into CI once the scaffolding is complete.
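A hedged sketch of what such JSON validation can look like as pure ASGI middleware; the actual `middleware/validation.py` implementation may differ:

```python
# Illustrative ASGI middleware that rejects unparseable JSON bodies with a
# 400 response; this is a sketch, not the production validation middleware.
import json

from starlette.types import ASGIApp, Receive, Scope, Send


class JSONValidationMiddleware:
    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return

        headers = {k.decode(): v.decode() for k, v in scope.get("headers", [])}
        if "application/json" not in headers.get("content-type", ""):
            await self.app(scope, receive, send)
            return

        # Buffer the full body so it can be validated and then replayed.
        body = b""
        more = True
        while more:
            message = await receive()
            body += message.get("body", b"")
            more = message.get("more_body", False)

        if body:
            try:
                json.loads(body)
            except json.JSONDecodeError:
                await send({"type": "http.response.start", "status": 400,
                            "headers": [(b"content-type", b"application/json")]})
                await send({"type": "http.response.body",
                            "body": b'{"detail": "Malformed JSON payload"}'})
                return

        async def replay() -> dict:
            # Hand the buffered body to the downstream application.
            return {"type": "http.request", "body": body, "more_body": False}

        await self.app(scope, replay, send)
```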
@@ -1,16 +0,0 @@
---
title: "05 — Building Block View"
description: "Explain the static structure: modules, components, services and their relationships."
status: draft
---

<!-- markdownlint-disable-next-line MD025 -->
# 05 — Building Block View

## Building Block Overview

- [Architecture Overview](05_blocks/05_01_architecture_overview.md)
- [Backend Components](05_blocks/05_02_backend_components.md)
- [Frontend Components](05_blocks/05_03_frontend_components.md)
- [Middleware & Utilities](05_blocks/05_04_middleware_utilities.md)
@@ -1,288 +0,0 @@
---
title: "06 — Runtime View"
description: "Describe runtime aspects: request flows, lifecycle of key interactions, and runtime components."
status: draft
---

# 06 — Runtime View

## Overview

The runtime view focuses on the dynamic behavior of the CalMiner application during execution. It illustrates how various components interact to fulfill user requests, process data, and generate outputs. Key runtime scenarios include scenario management, parameter input handling, cost tracking, consumption tracking, production output recording, equipment management, maintenance logging, Monte Carlo simulations, and reporting.

## Request Flow

1. **User Interaction**: A user interacts with the web application through the UI, triggering actions such as creating a scenario, inputting parameters, or generating reports.
2. **API Request**: The frontend sends HTTP requests (GET, POST, PUT, DELETE) to the appropriate API endpoints defined in the `routes/` directory.
3. **Routing**: The FastAPI framework routes the incoming requests to the corresponding route handlers.
4. **Service Layer**: Route handlers invoke services from the `services/` directory to process the business logic.
5. **Database Interaction**: Services interact with the database via ORM models defined in the `models/` directory to perform CRUD operations.
6. **Response Generation**: After processing, services return data to the route handlers, which format the response (JSON or HTML) and send it back to the frontend.
7. **UI Update**: The frontend updates the UI based on the response, rendering new data or updating existing views.
8. **Reporting Pipeline**: For reporting, data is aggregated from various sources, processed to generate statistics, and presented in the dashboard using Chart.js.
9. **Monte Carlo Simulations**: Stochastic simulations are executed in the backend, generating probabilistic outcomes that are stored temporarily and used for risk analysis in reports.
10. **Error Handling**: Throughout the process, error handling mechanisms ensure that exceptions are caught and appropriate responses are sent back to the user.

Request flow diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant Service
    participant Database

    User->>Frontend: Interact with UI
    Frontend->>API: Send HTTP Request
    API->>Service: Route to Handler
    Service->>Database: Perform CRUD Operation
    Database-->>Service: Return Data
    Service-->>API: Return Processed Data
    API-->>Frontend: Send Response
    Frontend-->>User: Update UI

    participant Reporting

    Service->>Reporting: Aggregate Data
    Reporting-->>Service: Return Report Data
    Service-->>API: Return Report Response
    API-->>Frontend: Send Report Data
    Frontend-->>User: Render Report

    participant Simulation
    Service->>Simulation: Execute Monte Carlo Simulation
    Simulation-->>Service: Return Simulation Results

    Service-->>API: Return Simulation Data
    API-->>Frontend: Send Simulation Data
    Frontend-->>User: Display Simulation Results
```

## Key Runtime Scenarios

### Scenario Management

1. User accesses the scenario list via the UI.
2. The frontend sends a GET request to `/api/scenarios`.
3. The `ScenarioService` retrieves scenarios from the database.
4. The response is rendered in the UI.
5. For scenario creation, the user submits a form, triggering a POST request to `/api/scenarios`, which the `ScenarioService` processes to create a new scenario in the database.
6. The UI updates to reflect the new scenario.

Scenario management diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant ScenarioService
    participant Database

    User->>Frontend: Access Scenario List
    Frontend->>API: GET /api/scenarios
    API->>ScenarioService: Route to Handler
    ScenarioService->>Database: Retrieve Scenarios
    Database-->>ScenarioService: Return Scenarios
    ScenarioService-->>API: Return Scenario Data
    API-->>Frontend: Send Response
    Frontend-->>User: Render Scenario List

    User->>Frontend: Submit New Scenario Form
    Frontend->>API: POST /api/scenarios
    API->>ScenarioService: Route to Handler
    ScenarioService->>Database: Create New Scenario
    Database-->>ScenarioService: Confirm Creation
    ScenarioService-->>API: Return New Scenario Data
    API-->>Frontend: Send Response
    Frontend-->>User: Update UI with New Scenario
```

### Process Parameter Input

1. User navigates to the parameter input form.
2. The frontend fetches existing parameters via a GET request to `/api/parameters`.
3. The `ParameterService` retrieves parameters from the database.
4. The response is rendered in the UI.
5. For parameter updates, the user submits a form, triggering a PUT request to `/api/parameters/:id`, which the `ParameterService` processes to update the parameter in the database.
6. The UI updates to reflect the changes.

Parameter input diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant ParameterService
    participant Database

    User->>Frontend: Navigate to Parameter Input Form
    Frontend->>API: GET /api/parameters
    API->>ParameterService: Route to Handler
    ParameterService->>Database: Retrieve Parameters
    Database-->>ParameterService: Return Parameters
    ParameterService-->>API: Return Parameter Data
    API-->>Frontend: Send Response
    Frontend-->>User: Render Parameter Form

    User->>Frontend: Submit Parameter Update Form
    Frontend->>API: PUT /api/parameters/:id
    API->>ParameterService: Route to Handler
    ParameterService->>Database: Update Parameter
    Database-->>ParameterService: Confirm Update
    ParameterService-->>API: Return Updated Parameter Data
    API-->>Frontend: Send Response
    Frontend-->>User: Update UI with Updated Parameter
```

### Cost Tracking

1. User accesses the cost tracking view.
2. The frontend sends a GET request to `/api/costs` to fetch existing cost records.
3. The `CostService` retrieves cost data from the database.
4. The response is rendered in the UI.
5. For cost updates, the user submits a form, triggering a PUT request to `/api/costs/:id`, which the `CostService` processes to update the cost record in the database.
6. The UI updates to reflect the changes.

Cost tracking diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant CostService
    participant Database

    User->>Frontend: Access Cost Tracking View
    Frontend->>API: GET /api/costs
    API->>CostService: Route to Handler
    CostService->>Database: Retrieve Cost Records
    Database-->>CostService: Return Cost Data
    CostService-->>API: Return Cost Data
    API-->>Frontend: Send Response
    Frontend-->>User: Render Cost Tracking View

    User->>Frontend: Submit Cost Update Form
    Frontend->>API: PUT /api/costs/:id
    API->>CostService: Route to Handler
    CostService->>Database: Update Cost Record
    Database-->>CostService: Confirm Update
    CostService-->>API: Return Updated Cost Data
    API-->>Frontend: Send Response
    Frontend-->>User: Update UI with Updated Cost Data
```

## Reporting Pipeline and UI Integration

1. **Data Sources**

   - Scenario-linked calculations (costs, consumption, production) produce raw figures stored in dedicated tables (`capex`, `opex`, `consumption`, `production_output`).
   - Monte Carlo simulations (currently transient) generate arrays of `{ "result": float }` tuples that the dashboard or downstream tooling passes directly to reporting endpoints.

2. **API Contract**

   - `POST /api/reporting/summary` accepts a JSON array of result objects and validates their shape through `_validate_payload` in `routes/reporting.py`.
   - On success it returns a structured payload (`ReportSummary`) containing count, mean, median, min/max, standard deviation, and percentile values, all as floats.

3. **Service Layer**

   - `services/reporting.generate_report` converts the sanitized payload into descriptive statistics using Python’s standard library (`statistics` module) to avoid external dependencies.
   - The service remains stateless; no database read/write occurs, which keeps summary calculations deterministic and idempotent.
   - Extended KPIs (surfaced in the API and dashboard; see the sketch after this list):
     - `variance`: population variance computed as the square of the population standard deviation.
     - `percentile_5` and `percentile_95`: lower and upper tail interpolated percentiles for sensitivity bounds.
     - `value_at_risk_95`: 5th percentile threshold representing the minimum outcome within a 95% confidence band.
     - `expected_shortfall_95`: mean of all outcomes at or below the `value_at_risk_95`, highlighting tail exposure.

4. **UI Consumption**

   - `templates/Dashboard.html` posts the user-provided dataset to the summary endpoint, renders metric cards for each field, and charts the distribution using Chart.js.
   - `SUMMARY_FIELDS` now includes variance, 5th/10th/90th/95th percentiles, and tail-risk metrics (VaR/Expected Shortfall at 95%); tooltip annotations surface the tail metrics alongside the percentile line chart.
   - Error handling surfaces HTTP failures inline so users can address malformed JSON or backend availability issues without leaving the page.
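To make the contract and KPI definitions above concrete, here is a hedged sketch of the summary computation using only the standard library. The payload shape matches the `{ "result": float }` objects described above, but the function and field names are assumptions rather than the exact `services/reporting.generate_report` implementation, which may differ in interpolation and rounding details:

```python
# Sketch of the descriptive-statistics pipeline; illustrative only.
import statistics


def generate_report(results: list[dict]) -> dict:
    values = sorted(item["result"] for item in results)
    n = len(values)

    def percentile(p: float) -> float:
        # Linear interpolation between closest ranks.
        k = (n - 1) * p
        lower, upper = int(k), min(int(k) + 1, n - 1)
        return values[lower] + (values[upper] - values[lower]) * (k - lower)

    var_95 = percentile(0.05)  # value at risk: 5th percentile threshold
    tail = [v for v in values if v <= var_95]
    return {
        "count": n,
        "mean": statistics.fmean(values),
        "median": statistics.median(values),
        "minimum": values[0],
        "maximum": values[-1],
        "std_dev": statistics.pstdev(values),
        # variance = square of the population standard deviation, as above
        "variance": statistics.pstdev(values) ** 2,
        "percentile_5": percentile(0.05),
        "percentile_95": percentile(0.95),
        "value_at_risk_95": var_95,
        # mean of all outcomes at or below the VaR threshold
        "expected_shortfall_95": statistics.fmean(tail),
    }
```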
Reporting pipeline diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant ReportingService

    User->>Frontend: Input Data for Reporting
    Frontend->>API: POST /api/reporting/summary
    API->>ReportingService: Route to Handler
    ReportingService->>ReportingService: Validate Payload
    ReportingService->>ReportingService: Compute Statistics
    ReportingService-->>API: Return Report Summary
    API-->>Frontend: Send Report Summary
    Frontend-->>User: Render Report Metrics and Charts
```

## Monte Carlo Simulation Execution

1. User initiates a Monte Carlo simulation via the UI.
2. The frontend sends a POST request to `/api/simulations/run` with simulation parameters.
3. The `SimulationService` executes the Monte Carlo logic, generating stochastic results.
4. The results are temporarily stored and returned to the frontend.
5. The UI displays the simulation results and allows users to trigger reporting based on these outcomes.
6. The reporting pipeline processes the simulation results as described above.
7. Error handling ensures that any issues during simulation execution are communicated back to the user.

Monte Carlo simulation diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant SimulationService

    User->>Frontend: Input Simulation Parameters
    Frontend->>API: POST /api/simulations/run
    API->>SimulationService: Route to Handler
    SimulationService->>SimulationService: Execute Monte Carlo Logic
    SimulationService-->>API: Return Simulation Results
    API-->>Frontend: Send Simulation Results
    Frontend-->>User: Render Simulation Results
```
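The execution step is only scaffolded in the backend today; the following minimal sketch shows the general shape of a Monte Carlo run that emits the `{ "result": float }` records consumed by the reporting pipeline. The distribution, parameter names, and function name are illustrative assumptions, not the production `SimulationService`:

```python
# Minimal Monte Carlo sketch; distribution and parameters are assumptions.
import random


def run_monte_carlo(mean: float, std_dev: float, iterations: int = 1000) -> list[dict]:
    rng = random.Random(42)  # fixed seed keeps the sketch reproducible
    return [{"result": rng.gauss(mean, std_dev)} for _ in range(iterations)]


results = run_monte_carlo(mean=100.0, std_dev=15.0, iterations=500)
# These records can be posted directly to POST /api/reporting/summary.
```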
## Error Handling

Throughout the runtime processes, error handling mechanisms are implemented to catch exceptions and provide meaningful feedback to users. Common error scenarios include:

- Invalid input data
- Database connection issues
- Simulation execution errors
- Reporting calculation failures
- API endpoint unavailability
- Timeouts during long-running operations
- Unauthorized access attempts
- Data validation failures
- Resource not found errors

Error handling diagram:

```mermaid
sequenceDiagram
    participant User
    participant Frontend
    participant API
    participant Service

    User->>Frontend: Perform Action
    Frontend->>API: Send Request
    API->>Service: Route to Handler
    Service->>Service: Process Request
    alt Success
        Service-->>API: Return Data
        API-->>Frontend: Send Response
        Frontend-->>User: Update UI
    else Error
        Service-->>API: Return Error
        API-->>Frontend: Send Error Response
        Frontend-->>User: Display Error Message
    end
```
@@ -1,215 +0,0 @@
# Testing, CI and Quality Assurance

This chapter centralizes the project's testing strategy, CI configuration, and quality targets.

## Overview

CalMiner uses a combination of unit, integration, and end-to-end tests to ensure quality.

### Frameworks

- Backend: pytest for unit and integration tests.
- Frontend: pytest with Playwright for E2E tests.
- Database: pytest fixtures with psycopg2 for DB tests.

### Test Types

- Unit Tests: Test individual functions/modules.
- Integration Tests: Test API endpoints and DB interactions.
- E2E Tests: Playwright for full user flows.

### CI/CD

- Use Gitea Actions for CI/CD; workflows live under `.gitea/workflows/`.
- `ci.yml` runs on push and pull requests to `main` and `develop` branches. It provisions a temporary PostgreSQL 15 service, sets up Python 3.11, installs dependencies from `requirements.txt` and `requirements-test.txt`, runs pytest with coverage on all tests, and builds the Docker image.
- Run tests on pull requests to shared branches; enforce coverage target ≥80% (pytest-cov).

### Running Tests

- Unit: `pytest tests/unit/`
- E2E: `pytest tests/e2e/`
- All: `pytest`

### Test Directory Structure

Organize tests under the `tests/` directory mirroring the application structure:

```text
tests/
  unit/
    test_<module>.py
  e2e/
    test_<flow>.py
  fixtures/
    conftest.py
```

### Fixtures and Test Data

- Define reusable fixtures in `tests/fixtures/conftest.py`.
- Use temporary in-memory databases or isolated schemas for DB tests.
- Load sample data via fixtures for consistent test environments.
- Leverage the `seeded_ui_data` fixture in `tests/unit/conftest.py` to populate scenarios with related cost, maintenance, and simulation records for deterministic UI route checks.

### E2E (Playwright) Tests

The E2E test suite, located in `tests/e2e/`, uses Playwright to simulate user interactions in a live browser environment. These tests are designed to catch issues in the UI, frontend-backend integration, and overall application flow.

#### Fixtures

- `live_server`: A session-scoped fixture that launches the FastAPI application in a separate process, making it accessible to the browser (a sketch follows this list).
- `playwright_instance`, `browser`, `page`: Standard `pytest-playwright` fixtures for managing the Playwright instance, browser, and individual pages.
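For orientation, a minimal sketch of how such a session-scoped `live_server` fixture can be wired, assuming the application object is importable as `main:app`; the real fixture may differ in port selection and readiness handling:

```python
# Sketch of a session-scoped live-server fixture; the real fixture in
# tests/e2e/ may differ (port, readiness polling, app import path).
import multiprocessing
import time

import pytest
import uvicorn


def _run_server() -> None:
    # Assumes the FastAPI application object is exposed as main:app.
    uvicorn.run("main:app", host="127.0.0.1", port=8000, log_level="warning")


@pytest.fixture(scope="session")
def live_server():
    process = multiprocessing.Process(target=_run_server, daemon=True)
    process.start()
    time.sleep(2)  # crude readiness wait; a real fixture would poll the port
    yield "http://127.0.0.1:8000"
    process.terminate()
    process.join()
```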
#### Smoke Tests

- UI Page Loading: `test_smoke.py` contains a parameterized test that systematically navigates to all UI routes to ensure they load without errors, have the correct title, and display a primary heading.
- Form Submissions: Each major form in the application has a corresponding test file (e.g., `test_scenarios.py`, `test_costs.py`) that verifies the page loads, creates an item by filling the form, checks for a success message, and confirms the UI updates.

### Running E2E Tests

To run the Playwright tests:

```bash
pytest tests/e2e/
```

To run in headed mode:

```bash
pytest tests/e2e/ --headed
```

### Mocking and Dependency Injection

- Use `unittest.mock` to mock external dependencies.
- Inject dependencies via function parameters or FastAPI's dependency overrides in tests (see the sketch after this list).
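A short sketch of the dependency-override pattern, assuming the shared `get_db` dependency from `routes/dependencies` and an in-memory SQLite session for isolation (the real suite may use an isolated PostgreSQL schema instead; the import paths are assumptions):

```python
# Sketch of FastAPI dependency overrides in a unit test; import paths
# (main.app, models.Base, routes.dependencies.get_db) are assumptions.
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from main import app
from models import Base
from routes.dependencies import get_db

# In-memory SQLite keeps the test hermetic; StaticPool shares the single
# connection so the schema created below stays visible across sessions.
engine = create_engine(
    "sqlite://", connect_args={"check_same_thread": False}, poolclass=StaticPool
)
Base.metadata.create_all(engine)
TestingSession = sessionmaker(bind=engine)


def override_get_db():
    db = TestingSession()
    try:
        yield db
    finally:
        db.close()


app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)
assert client.get("/api/scenarios").status_code == 200
```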
### Code Coverage

- Install `pytest-cov` to generate coverage reports.
- Run with coverage: `pytest --cov --cov-report=term` (use `--cov-report=html` when visualizing hotspots).
- Target 95%+ overall coverage. Focus on historically low modules: `services/simulation.py`, `services/reporting.py`, `middleware/validation.py`, and `routes/ui.py`.
- Latest snapshot (2025-10-21): `pytest --cov=. --cov-report=term-missing` returns **91%** overall coverage.

### CI Integration

`test.yml` encapsulates the steps below:

- Check out the repository and set up Python 3.10.
- Configure the runner's apt proxy (if available), install project dependencies (requirements + test extras), and download Playwright browsers.
- Run `pytest` (extend with `--cov` flags when enforcing coverage).

> The pip cache step is temporarily disabled in `test.yml` until the self-hosted cache service is exposed (see `docs/ci-cache-troubleshooting.md`).

`build-and-push.yml` adds:

- Registry login using repository secrets.
- Docker image build/push with GHA cache storage (`cache-from`/`cache-to` set to `type=gha`).

`deploy.yml` handles:

- SSH into the deployment host.
- Pull the tagged image from the registry.
- Stop, remove, and relaunch the `calminer` container exposing port 8000.

When adding new workflows, mirror this structure to ensure secrets, caching, and deployment steps remain aligned with the production environment.

## Workflow Optimization Opportunities

### `test.yml`

- Run the apt-proxy setup once via a composite action or preconfigured runner image if additional matrix jobs are added.
- Collapse dependency installation into a single `pip install -r requirements-test.txt` call (includes base requirements) once caching is restored.
- Investigate caching or pre-baking Playwright browser binaries to eliminate >650 MB cold downloads per run.

### `build-and-push.yml`

- Skip QEMU setup or explicitly constrain Buildx to linux/amd64 to reduce startup time.
- Enable `cache-from`/`cache-to` settings (registry or `type=gha`) to reuse Docker build layers between runs.

### `deploy.yml`

- Extract the deployment script into a reusable shell script or compose file to minimize inline secrets and ease multi-environment scaling.
- Add a post-deploy health check (e.g., a `curl` readiness probe) before declaring success (see the sketch after this list).
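As a sketch of such a readiness probe, here is a standard-library Python variant that a deploy step could invoke after relaunching the container; the endpoint path and retry budget are assumptions:

```python
# Post-deploy readiness probe sketch; endpoint and retry budget assumed.
import sys
import time
import urllib.request


def wait_healthy(url: str, attempts: int = 10, delay: float = 3.0) -> bool:
    for _ in range(attempts):
        try:
            with urllib.request.urlopen(url, timeout=5) as response:
                if response.status == 200:
                    return True
        except OSError:
            pass  # container may still be starting; retry after a pause
        time.sleep(delay)
    return False


if not wait_healthy("http://localhost:8000/"):
    sys.exit("deployment failed readiness check")
```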
### Priority Overview

1. Restore shared caching for Python wheels and Playwright browsers once infrastructure exposes the cache service (highest impact on runtime and bandwidth; requires coordination with CI owners).
2. Enable Docker layer caching in `build-and-push.yml` to shorten build cycles (medium effort, immediate benefit to release workflows).
3. Add post-deploy health verification to `deploy.yml` (low effort, improves confidence in automation).
4. Streamline redundant setup steps in `test.yml` (medium effort once cache strategy is in place; consider composite actions or base image updates).

### Setup Consolidation Opportunities

- `Run Tests` matrix jobs each execute the apt proxy configuration, pip installs, database wait, and setup scripts. A composite action or shell script wrapper could centralize these routines and parameterize target-specific behavior (unit vs e2e) to avoid copy/paste maintenance as additional jobs (lint, type check) are introduced.
- Both the test and build workflows perform a `checkout` step; while unavoidable per workflow, shared git submodules or sparse checkout rules could be encapsulated in a composite action to keep options consistent.
- The database setup script currently runs twice (dry-run and live) for every matrix leg. Evaluate whether the dry-run remains necessary once migrations stabilize; if retained, consider adding an environment variable toggle to skip redundant seed operations for read-only suites (e.g., lint).

### Proposed Shared Setup Action

- Location: `.gitea/actions/setup-python-env/action.yml` (composite action).
- Inputs:
  - `python-version` (default `3.10`): forwarded to `actions/setup-python`.
  - `install-playwright` (default `false`): when `true`, run `python -m playwright install --with-deps`.
  - `install-requirements` (default `requirements.txt requirements-test.txt`): space-delimited list of requirement files that the pip install step iterates over.
  - `run-db-setup` (default `true`): toggles database wait + setup scripts.
  - `db-dry-run` (default `true`): controls whether the dry-run invocation executes.
- Steps encapsulated:
  1. Set up Python via `actions/setup-python@v5` using the provided version.
  2. Configure the apt proxy via a shared shell snippet (with graceful fallback when the proxy is offline).
  3. Iterate over requirement files and execute `pip install -r <file>`.
  4. If `install-playwright == true`, install browsers.
  5. If `run-db-setup == true`, run the wait-for-Postgres Python snippet and call `scripts/setup_database.py`, honoring the `db-dry-run` toggle (a sketch of the wait snippet follows this list).
- Usage sketch (in `test.yml`):

  ```yaml
  - name: Prepare Python environment
    uses: ./.gitea/actions/setup-python-env
    with:
      install-playwright: ${{ matrix.target == 'e2e' }}
      db-dry-run: true
  ```

- Benefits: centralizes proxy logic and dependency installs, reduces duplication across matrix jobs, and keeps future lint/type-check jobs lightweight by disabling database setup.
- Implementation status: action available at `.gitea/actions/setup-python-env` and consumed by `test.yml`; extend to additional workflows as they adopt the shared routine.
- Obsolete steps removed: individual apt proxy, dependency install, Playwright, and database setup commands were pruned from `test.yml` once the composite action was integrated.
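For reference, a hedged sketch of a wait-for-Postgres snippet in the spirit of step 5, reading the granular `DATABASE_*` variables; the actual snippet used by the workflow may differ:

```python
# Wait-for-Postgres sketch; retry budget and error handling are assumptions.
import os
import time

import psycopg2


def wait_for_postgres(attempts: int = 30, delay: float = 2.0) -> None:
    for _ in range(attempts):
        try:
            conn = psycopg2.connect(
                host=os.environ["DATABASE_HOST"],
                port=os.environ.get("DATABASE_PORT", "5432"),
                user=os.environ["DATABASE_USER"],
                password=os.environ["DATABASE_PASSWORD"],
                dbname=os.environ["DATABASE_NAME"],
            )
            conn.close()
            return
        except psycopg2.OperationalError:
            time.sleep(delay)  # service container still starting; retry
    raise RuntimeError(f"PostgreSQL not reachable after {attempts} attempts")
```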
## CI Owner Coordination Notes

### Key Findings

- Self-hosted runner: ASUS System Product Name chassis with AMD Ryzen 7 7700X (8 physical cores / 16 threads) and 63.2 GB usable RAM; `act_runner` configuration not overridden, so only one workflow job runs concurrently today.
- Unit test matrix job: completes 117 pytest cases in roughly 4.1 seconds after Postgres spins up; Docker services consume ~150 MB for `postgres:16-alpine`, with minimal sustained CPU load once tests begin.
- End-to-end matrix job: `pytest tests/e2e` averages 21-22 seconds of execution, but a cold run downloads ~179 MB of apt packages plus ~470 MB of Playwright browser bundles (Chromium, Firefox, WebKit, FFmpeg), exceeding 650 MB of network transfer and adding several gigabytes of disk writes if caches are absent.
- Both jobs reuse existing Python package caches when available; absent a shared cache service, repeated Playwright installs remain the dominant cost driver for cold executions.

### Open Questions

- Can we raise the runner concurrency above the default single job, or provision an additional runner, so the test matrix can execute without serializing queued workflows?
- Is there a central cache or artifact service available for Python wheels and Playwright browser bundles to avoid ~650 MB downloads on cold starts?
- Are we permitted to bake Playwright browsers into the base runner image, or should we pursue a shared cache/proxy solution instead?

### Outreach Draft

```text
Subject: CalMiner CI parallelization support

Hi <CI Owner>,

We recently updated the CalMiner test workflow to fan out unit and Playwright E2E suites in parallel. While validating the change, we gathered the following:

- Runner host: ASUS System Product Name with AMD Ryzen 7 7700X (8 cores / 16 threads), ~63 GB RAM, default `act_runner` concurrency (1 job at a time).
- Unit job finishes in ~4.1 s once Postgres is ready; light CPU and network usage.
- E2E job finishes in ~22 s, but a cold run pulls ~179 MB of apt packages plus ~470 MB of Playwright browser payloads (>650 MB download, several GB disk writes) because we do not have a shared cache yet.

To move forward, could you help with the following?

1. Confirm whether we can raise the runner concurrency limit or provision an additional runner so parallel jobs do not queue behind one another.
2. Let us know if a central cache (Artifactory, Nexus, etc.) is available for Python wheels and Playwright browser bundles, or if we should consider baking the browsers into the runner image instead.
3. Share any guidance on preferred caching or proxy solutions for large binary installs on self-hosted runners.

Once we have clarity, we can finalize the parallel rollout and update the documentation accordingly.

Thanks,
<Your Name>
```
@@ -1,82 +0,0 @@
# Database Deployment

## Migrations & Baseline

A consolidated baseline migration (`scripts/migrations/000_base.sql`) captures all schema changes required for a fresh installation. The script is idempotent: it creates the `currency` and `measurement_unit` reference tables, provisions the `application_setting` store for configurable UI/system options, ensures consumption and production records expose unit metadata, and enforces the foreign keys used by CAPEX and OPEX.

Configure granular database settings in your PowerShell session before running migrations:

```powershell
$env:DATABASE_DRIVER = 'postgresql'
$env:DATABASE_HOST = 'localhost'
$env:DATABASE_PORT = '5432'
$env:DATABASE_USER = 'calminer'
$env:DATABASE_PASSWORD = 's3cret'
$env:DATABASE_NAME = 'calminer'
$env:DATABASE_SCHEMA = 'public'
python scripts/setup_database.py --run-migrations --seed-data --dry-run
python scripts/setup_database.py --run-migrations --seed-data
```

The dry-run invocation reports which steps would execute without making changes. The live run applies the baseline (if not already recorded in `schema_migrations`) and seeds the reference data relied upon by the UI and API.

> ℹ️ When `--seed-data` is supplied without `--run-migrations`, the bootstrap script automatically applies any pending SQL migrations first so the `application_setting` table (and future settings-backed features) are present before seeding.
>
> ℹ️ The application still accepts `DATABASE_URL` as a fallback if the granular variables are not set.

## Database bootstrap workflow

Provision or refresh a database instance with `scripts/setup_database.py`. Populate the required environment variables (an example lives at `config/setup_test.env.example`) and run:

```powershell
# Load test credentials (PowerShell)
Get-Content .\config\setup_test.env.example |
    ForEach-Object {
        if ($_ -and -not $_.StartsWith('#')) {
            $name, $value = $_ -split '=', 2
            Set-Item -Path Env:$name -Value $value
        }
    }

# Dry-run to inspect the planned actions
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v

# Execute the full workflow
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v
```

Typical log output confirms:

- Admin and application connections succeed for the supplied credentials.
- Database and role creation are idempotent (`already present` when rerun).
- SQLAlchemy metadata either reports missing tables or `All tables already exist`.
- Migrations list pending files and finish with `Applied N migrations` (a new database reports `Applied 1 migrations` for `000_base.sql`).

After a successful run the target database contains all application tables plus `schema_migrations`, and that table records each applied migration file. New installations only record `000_base.sql`; upgraded environments retain historical entries alongside the baseline.

### Seeding reference data

`scripts/seed_data.py` provides targeted control over the baseline datasets when the full setup script is not required:

```powershell
python scripts/seed_data.py --currencies --units --dry-run
python scripts/seed_data.py --currencies --units
```

The seeder upserts the canonical currency catalog (`USD`, `EUR`, `CLP`, `RMB`, `GBP`, `CAD`, `AUD`) using ASCII-safe symbols (`USD$`, `EUR`, etc.) and the measurement units referenced by the UI (`tonnes`, `kilograms`, `pounds`, `liters`, `cubic_meters`, `kilowatt_hours`). The setup script invokes the same seeder when `--seed-data` is provided and verifies the expected rows afterward, warning if any are missing or inactive.

### Rollback guidance

`scripts/setup_database.py` now tracks compensating actions when it creates the database or application role. If a later step fails, the script replays those rollback actions (dropping the newly created database or role and revoking grants) before exiting. Dry runs never register rollback steps and remain read-only.
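A minimal sketch of this compensating-action pattern; the actual bookkeeping in `scripts/setup_database.py` may differ:

```python
# Sketch of a rollback stack: each setup step registers a compensating
# action, replayed in reverse order if a later step fails. Illustrative only.
from typing import Callable

rollback_steps: list[tuple[str, Callable[[], None]]] = []


def register_rollback(description: str, action: Callable[[], None]) -> None:
    rollback_steps.append((description, action))


def run_setup(steps: list[Callable[[], None]]) -> None:
    try:
        for step in steps:
            step()  # a step such as create_database registers its own rollback
    except Exception:
        # Replay compensating actions in reverse order before exiting.
        for description, action in reversed(rollback_steps):
            try:
                action()
            except Exception:
                print(f"rollback step failed, clean up manually: {description}")
        raise
```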
If the script reports that some rollback steps could not complete (for example, because a connection cannot be established), rerun the script with `--dry-run` to confirm the desired end state and then apply the outstanding cleanup manually:

```powershell
python scripts/setup_database.py --ensure-database --ensure-role --dry-run -v

# Manual cleanup examples when automation cannot connect
psql -d postgres -c "DROP DATABASE IF EXISTS calminer"
psql -d postgres -c "DROP ROLE IF EXISTS calminer"
```

After a failure and rollback, rerun the full setup once the environment issues are resolved.

@@ -1,152 +0,0 @@

# Gitea Action Runner Setup

This guide describes how to provision, configure, and maintain self-hosted runners for CalMiner's Gitea-based CI/CD pipelines.

## 1. Purpose and Scope

- Explain the role runners play in executing GitHub Actions–compatible workflows inside our private Gitea instance.
- Define supported environments (Windows hosts running Docker for Linux containers today, Alpine or other Linux variants as future additions).
- Provide repeatable steps so additional runners can be brought online quickly and consistently.

## 2. Prerequisites

- **Hardware**: Minimum 8 vCPU, 16 GB RAM, and 50 GB free disk. For Playwright-heavy suites, plan for ≥60 GB free to absorb browser caches.
- **Operating system**: Current runner uses Windows 11 Pro (10.0.26100, 64-bit). Linux instructions mirror the same flow; see section 7 for Alpine specifics.
- **Container engine**: Docker Desktop (Windows) or Docker Engine (Linux) with pull access to `docker.gitea.com/runner-images` and `postgres:16-alpine`.
- **Dependencies**: `curl`, `tar`, PowerShell 7+ (Windows), or standard GNU utilities (Linux) to unpack releases.
- **Gitea access**: Repository admin or site admin token with permission to register self-hosted runners (`Settings → Runners → New Runner`).

### Current Runner Inventory (October 2025)

- Hostname `DESKTOP-GLB3A15`; ASUS System Product Name chassis with AMD Ryzen 7 7700X (8C/16T) and ~63 GB usable RAM.
- Windows 11 Pro 10.0.26100 (64-bit) hosting Docker containers for Ubuntu-based job images.
- `act_runner` version `v0.2.13`; no `act_runner.yaml` present, so defaults apply (single concurrency, no custom labels beyond registration).
- Registered against `http://192.168.88.30:3000` with labels:
  - `ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest`
  - `ubuntu-24.04:docker://docker.gitea.com/runner-images:ubuntu-24.04`
  - `ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04`
- Runner metadata stored in `.runner`; removing this file forces re-registration and should only be done intentionally.

## 3. Runner Installation

### 3.1 Download and Extract

```powershell
$runnerVersion = "v0.2.13"
$downloadUrl = "https://gitea.com/gitea/act_runner/releases/download/$runnerVersion/act_runner_${runnerVersion}_windows_amd64.zip"
Invoke-WebRequest -Uri $downloadUrl -OutFile act_runner.zip
Expand-Archive act_runner.zip -DestinationPath C:\Tools\act-runner -Force
```

For Linux, download the `linux_amd64.tar.gz` artifact and extract with `tar -xzf` into `/opt/act-runner`.

### 3.2 Configure Working Directory

```powershell
Set-Location C:\Tools\act-runner
New-Item -ItemType Directory -Path logs -Force | Out-Null
```

Ensure the directory is writable by the service account that will execute the runner.

### 3.3 Register With Gitea

1. In Gitea, navigate to the repository or organization **Settings → Runners → New Runner**.
2. Copy the registration token and instance URL.
3. Execute the registration wizard:

   ```powershell
   .\act_runner.exe register --instance http://192.168.88.30:3000 --token <TOKEN> --labels "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest" "ubuntu-24.04:docker://docker.gitea.com/runner-images:ubuntu-24.04" "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
   ```

Linux syntax is identical using `./act_runner register`.

This command populates `.runner` with the runner ID, UUID, and labels.

## 4. Service Configuration

### 4.1 Windows Service

Act Runner provides a built-in service helper:

```powershell
.\act_runner.exe install
.\act_runner.exe start
```

The service runs under `LocalSystem` by default. Use `.\act_runner.exe install --user <DOMAIN\User> --password <Secret>` if isolation is required.

### 4.2 Linux systemd Unit

Create `/etc/systemd/system/act-runner.service`:

```ini
[Unit]
Description=Gitea Act Runner
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/opt/act-runner
ExecStart=/opt/act-runner/act_runner daemon
Restart=always
RestartSec=10
Environment="HTTP_PROXY=http://apt-cacher:3142" "HTTPS_PROXY=http://apt-cacher:3142"

[Install]
WantedBy=multi-user.target
```

Enable and start:

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now act-runner.service
```

### 4.3 Environment Variables and Proxy Settings

- Configure `HTTP_PROXY`, `HTTPS_PROXY`, and their lowercase variants to leverage the shared apt cache (`http://apt-cacher:3142`); a Windows sketch follows this list.
- Persist Docker registry credentials (for `docker.gitea.com`) in the service user profile using `docker login`; workflows rely on cached authentication for builds.
- To expose pip caching once infrastructure is available, set `PIP_INDEX_URL` and `PIP_EXTRA_INDEX_URL` at the service level.
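
On Windows the proxy variables must be visible to the service account rather than just the current shell. A minimal PowerShell sketch, assuming machine-scoped variables are acceptable on this host:

```powershell
# Hedged sketch: set machine-scoped proxy variables so the runner service
# (running as LocalSystem) inherits them after a restart.
[Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://apt-cacher:3142", "Machine")
[Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://apt-cacher:3142", "Machine")
[Environment]::SetEnvironmentVariable("http_proxy", "http://apt-cacher:3142", "Machine")
[Environment]::SetEnvironmentVariable("https_proxy", "http://apt-cacher:3142", "Machine")

# Restart the runner service so the new environment is picked up.
Restart-Service act_runner   # service name is an assumption; confirm with Get-Service
```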

### 4.4 Logging

- Windows services write to `%ProgramData%\act-runner\logs`. Redirect or forward to centralized logging if required.
- Linux installations can leverage `journalctl -u act-runner` and logrotate rules for `/opt/act-runner/logs`; a sample rule follows.
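
A minimal logrotate sketch for the Linux layout above, assuming log files accumulate under `/opt/act-runner/logs` and that daily rotation with a week of history is enough:

```text
# /etc/logrotate.d/act-runner (hedged sketch; tune retention to taste)
/opt/act-runner/logs/*.log {
    daily
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    copytruncate
}
```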

## 5. Network and Security

- **Outbound**: Allow HTTPS traffic to the Gitea instance, Docker Hub, docker.gitea.com, npm (for Playwright), PyPI, and the apt cache proxy.
- **Inbound**: No inbound ports are required; block unsolicited traffic on internet-facing hosts.
- **Credentials**: Store deployment SSH keys and registry credentials in Gitea secrets, not on the runner host.
- **Least privilege**: Run the service under a dedicated account with access only to Docker and required directories.

## 6. Maintenance and Upgrades

- **Version checks**: Monitor `https://gitea.com/gitea/act_runner/releases` and schedule upgrades quarterly or when security fixes drop.
- **Upgrade procedure**: Stop the service, replace the `act_runner` binary, and restart; a sketch follows this list. Re-registration is not required as long as `.runner` remains intact.
- **Health checks**: Periodically validate connectivity with `act_runner exec --detect-event -W .gitea/workflows/test.yml` and inspect workflow durations to catch regressions.
- **Cleanup**: Purge Docker images and volumes monthly (`docker system prune -af`) to reclaim disk space.
- **Troubleshooting**: Use `act_runner diagnose` (if available in newer versions) or review logs for repeated failures; reset by stopping the service, deleting stale job containers (`docker ps -a`), and restarting.
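
A hedged Windows upgrade sketch following that procedure; the target version is a placeholder and the download URL mirrors the shape used in section 3.1:

```powershell
# Hedged upgrade sketch: stop, swap the binary, restart.
$newVersion = "v0.2.14"   # placeholder; pick the release you validated
$zip = "act_runner_${newVersion}_windows_amd64.zip"
Invoke-WebRequest -Uri "https://gitea.com/gitea/act_runner/releases/download/$newVersion/$zip" -OutFile $zip

Stop-Service act_runner   # service name is an assumption; confirm with Get-Service
Expand-Archive $zip -DestinationPath C:\Tools\act-runner -Force   # overwrites act_runner.exe
Start-Service act_runner

# .runner is untouched, so no re-registration is needed.
```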

## 7. Alpine-based Runner Notes

- Install baseline packages: `apk add docker bash curl coreutils nodejs npm python3 py3-pip libstdc++`.
- Playwright requirements: add `apk add chromium nss freetype harfbuzz ca-certificates mesa-gl` or install Playwright browsers via `npx playwright install --with-deps` using the Alpine bundle.
- Musl vs glibc: when workflows require glibc (e.g., certain Python wheels), include `apk add gcompat` or base images on `frolvlad/alpine-glibc`.
- Systemd alternative: use `rc-service` or `supervisord` to manage `act_runner daemon` on Alpine since systemd is absent; an OpenRC sketch follows this list.
- Storage: mount `/var/lib/docker` to persistent storage if running inside a VM, ensuring browser downloads and layer caches survive restarts.
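
A minimal OpenRC service sketch for Alpine, assuming the binary lives in `/opt/act-runner` as in the Linux install path above; save it as `/etc/init.d/act-runner` and mark it executable:

```sh
#!/sbin/openrc-run
# Hedged OpenRC sketch for running act_runner on Alpine.

name="act-runner"
command="/opt/act-runner/act_runner"
command_args="daemon"
command_background="yes"
pidfile="/run/act-runner.pid"
directory="/opt/act-runner"

depend() {
    need docker
}
```

Enable it at boot with `rc-update add act-runner default` and start it with `rc-service act-runner start`.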

## 8. Appendix

- **Troubleshooting checklist**:
  - Verify Docker daemon is healthy (`docker info`).
  - Confirm `.runner` file exists and lists expected labels.
  - Re-run `act_runner register` if the runner no longer appears in Gitea.
  - Check proxy endpoints are reachable before jobs start downloading dependencies.

- **Related documentation**:
  - `docs/architecture/07_deployment/07_01_testing_ci.md` (workflow architecture and CI owner coordination).
  - `docs/ci-cache-troubleshooting.md` (pip caching status and known issues).
  - `.gitea/actions/setup-python-env/action.yml` (shared job preparation logic referenced in workflows).

@@ -1,165 +0,0 @@

---
title: '07 — Deployment View'
description: 'Describe deployment topology, infrastructure components, and environments (dev/stage/prod).'
status: draft
---

<!-- markdownlint-disable-next-line MD025 -->
# 07 — Deployment View

## Deployment Topology

The CalMiner application is deployed using a multi-tier architecture consisting of the following layers:

1. **Client Layer**: This layer consists of web browsers that interact with the application through a user interface rendered by Jinja2 templates and enhanced with JavaScript (Chart.js for dashboards).
2. **Web Application Layer**: This layer hosts the FastAPI application, which handles API requests, business logic, and serves HTML templates. It communicates with the database layer for data persistence.
3. **Database Layer**: This layer consists of a PostgreSQL database that stores all application data, including scenarios, parameters, costs, consumption, production outputs, equipment, maintenance logs, and simulation results.

```mermaid
graph TD
    A[Client Layer] --> B[Web Application Layer]
    B --> C[Database Layer]
```

## Infrastructure Components

The infrastructure components for the application include:

- **Reverse Proxy (optional)**: An Nginx or Apache server can be used as a reverse proxy.
- **Containerization**: Docker images are generated via the repository `Dockerfile`, using a multi-stage build to keep the final runtime minimal.
- **CI/CD Pipeline**: Automated pipelines (Gitea Actions) run tests, build/push Docker images, and trigger deployments.
- **Gitea Actions Workflows**: Located under `.gitea/workflows/`, these workflows handle testing, building, pushing, and deploying the application.
- **Gitea Action Runners**: Self-hosted runners execute the CI/CD workflows.
- **Testing and Continuous Integration**: Automated tests ensure code quality before deployment, also documented in [Testing & CI](07_deployment/07_01_testing_ci.md).
- **Docker Infrastructure**: Docker is used to containerize the application for consistent deployment across environments.
- **Portainer**: Production deployment environment for managing Docker containers.
- **Web Server**: Hosts the FastAPI application and serves API endpoints.
- **Database Server**: PostgreSQL database for persisting application data.
- **Static File Server**: Serves static assets such as CSS, JavaScript, and image files.
- **Cloud Infrastructure (optional)**: The application can be deployed on cloud platforms.

```mermaid
graph TD
    G[Git Repository] --> C[CI/CD Pipeline]
    C --> GAW[Gitea Action Workflows]
    GAW --> GAR[Gitea Action Runners]
    GAR --> T[Testing]
    GAR --> CI[Continuous Integration]
    T --> G
    CI --> G

    W[Web Server] --> DB[Database Server]
    RP[Reverse Proxy] --> W
    I((Internet)) <--> RP
    PO[Containerization] --> W
    C --> PO
    W --> S[Static File Server]
    S --> RP
    PO --> DB
    PO --> S
```

## Environments

The application can be deployed in multiple environments to support development, testing, and production.

```mermaid
graph TD
    R[Repository] --> DEV[Development Environment]
    R --> TEST[Testing Environment]
    R --> PROD[Production Environment]

    DEV --> W_DEV[Web Server - Dev]
    DEV --> DB_DEV[Database Server - Dev]
    TEST --> W_TEST[Web Server - Test]
    TEST --> DB_TEST[Database Server - Test]
    PROD --> W_PROD[Web Server - Prod]
    PROD --> DB_PROD[Database Server - Prod]
```

### Development Environment

The development environment is set up for local development and testing. It includes:

- Local PostgreSQL instance (Docker Compose recommended; a compose file is available at `docker-compose.postgres.yml`)
- FastAPI server running in debug mode

`docker-compose.dev.yml` encapsulates this topology:

- `api` service mounts the repository for live reloads (`uvicorn --reload`) and depends on the database health check.
- `db` service uses the Debian-based `postgres:16` image with UTF-8 locale configuration and persists data in `pg_data_dev`.
- A shared `calminer_backend` bridge network keeps traffic contained; ports 8000/5432 are published for local tooling.

See [docs/quickstart.md](../quickstart.md#compose-driven-development-stack) for command examples and volume maintenance tips.

### Testing Environment

The testing environment is set up for automated testing and quality assurance. It includes:

- Staging PostgreSQL instance
- FastAPI server running in testing mode
- Automated test suite (e.g., pytest) for running unit and integration tests

`docker-compose.test.yml` provisions an ephemeral CI-like stack:

- `tests` service builds the application image, installs `requirements-test.txt`, runs the database setup script (dry-run + apply), then executes pytest.
- `api` service is available on port 8001 for manual verification against the test database.
- `postgres` service seeds a disposable Postgres 16 instance with health checks and named volumes (`pg_data_test`, `pip_cache_test`).

Typical commands mirror the CI workflow (`docker compose -f docker-compose.test.yml run --rm tests`); the [quickstart](../quickstart.md#compose-driven-test-stack) lists variations and teardown steps.

### Production Environment

The production environment is set up for serving live traffic and includes:

- Production PostgreSQL instance
- FastAPI server running in production mode
- Load balancer (Traefik) for distributing incoming requests
- Monitoring and logging tools for tracking application performance

#### Production docker compose topology

- `docker-compose.prod.yml` defines the runtime topology for operator-managed deployments.
- `api` service runs the FastAPI image with resource limits (`API_LIMIT_CPUS`, `API_LIMIT_MEMORY`) and a `/health` probe consumed by Traefik and the Compose health check.
- `traefik` service (enabled via the `reverse-proxy` profile) terminates TLS using the ACME resolver configured by `TRAEFIK_ACME_EMAIL` and routes `CALMINER_DOMAIN` traffic to the API.
- `postgres` service (enabled via the `local-db` profile) exists for edge deployments without managed PostgreSQL and persists data in the `pg_data_prod` volume while mounting `./backups` for operator snapshots.
- All services join the configurable `CALMINER_NETWORK` (defaults to `calminer_backend`) to keep traffic isolated from host networks.

Deployment workflow:

1. Copy `config/setup_production.env.example` to `config/setup_production.env` and populate domain, registry image tag, database credentials, and resource budgets.
2. Launch the stack with `docker compose --env-file config/setup_production.env -f docker-compose.prod.yml --profile reverse-proxy up -d` (append `--profile local-db` when hosting Postgres locally).
3. Run database migrations and seeding using `docker compose --env-file config/setup_production.env -f docker-compose.prod.yml run --rm api python scripts/setup_database.py --run-migrations --seed-data`.
4. Monitor container health via `docker compose -f docker-compose.prod.yml ps` or Traefik dashboards; the API health endpoint returns `{ "status": "ok" }` when ready.
5. Shut down with `docker compose -f docker-compose.prod.yml down` (volumes persist unless `-v` is supplied).

## Containerized Deployment Flow

The Docker-based deployment path aligns with the solution strategy documented in [Solution Strategy](04_solution_strategy.md) and the CI practices captured in [Testing & CI](07_deployment/07_01_testing_ci.md).

### Image Build

- The multi-stage `Dockerfile` installs dependencies in a builder layer (including system compilers and Python packages) and copies only the required runtime artifacts to the final image.
- Build arguments are minimal; database configuration is supplied at runtime via granular variables (`DATABASE_DRIVER`, `DATABASE_HOST`, `DATABASE_PORT`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_NAME`, optional `DATABASE_SCHEMA`). Secrets and configuration should be passed via environment variables or an orchestrator.
- The resulting image exposes port `8000` and starts `uvicorn main:app` (see the main [README.md](../../README.md)).

### Runtime Environment

- For single-node deployments, run the container alongside PostgreSQL/Redis using Docker Compose or an equivalent orchestrator.
- A reverse proxy (Traefik) terminates TLS and forwards traffic to the container on port `8000`.
- Migrations must be applied prior to rolling out a new image; automation can hook into the deploy step to run `scripts/run_migrations.py`, as sketched below.
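
A hedged example of that hook, reusing the production compose file from the previous section; the exact script invocation is an assumption, so confirm the flags against `scripts/`:

```bash
# Hedged deploy-step sketch: apply migrations before swapping in the new image.
docker compose --env-file config/setup_production.env -f docker-compose.prod.yml pull api
docker compose --env-file config/setup_production.env -f docker-compose.prod.yml run --rm api \
    python scripts/run_migrations.py   # assumed invocation; check the script's flags
docker compose --env-file config/setup_production.env -f docker-compose.prod.yml up -d api
```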

### CI/CD Integration

- Gitea Actions workflows reside under `.gitea/workflows/`.
- `test.yml` executes the pytest suite using cached pip dependencies.
- `build-and-push.yml` logs into the container registry, rebuilds the Docker image using GitHub Actions cache-backed layers, and pushes `latest` (and additional tags as required).
- `deploy.yml` connects to the target host via SSH, pulls the pushed tag, stops any existing container, and launches the new version; a sketch of that job's shape follows this list.
- Required secrets: `REGISTRY_URL`, `REGISTRY_USERNAME`, `REGISTRY_PASSWORD`, `SSH_HOST`, `SSH_USERNAME`, `SSH_PRIVATE_KEY`.
- Extend these workflows when introducing staging/blue-green deployments; keep cross-links with [Testing & CI](07_deployment/07_01_testing_ci.md) up to date.
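
A hedged sketch of the deploy job's shape, not the repository's actual `deploy.yml`; the `appleboy/ssh-action` helper, the trigger, and the container/image names are illustrative assumptions:

```yaml
# Illustrative sketch only; compare against .gitea/workflows/deploy.yml.
name: deploy
on:
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Deploy over SSH
        uses: appleboy/ssh-action@v1   # assumed helper; any SSH step works
        with:
          host: ${{ secrets.SSH_HOST }}
          username: ${{ secrets.SSH_USERNAME }}
          key: ${{ secrets.SSH_PRIVATE_KEY }}
          script: |
            docker login ${{ secrets.REGISTRY_URL }} -u ${{ secrets.REGISTRY_USERNAME }} -p ${{ secrets.REGISTRY_PASSWORD }}
            docker pull ${{ secrets.REGISTRY_URL }}/calminer:latest
            docker stop calminer || true && docker rm calminer || true
            docker run -d --name calminer -p 8000:8000 ${{ secrets.REGISTRY_URL }}/calminer:latest
```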

## Integrations and Future Work (deployment-related)

- **Persistence of results**: `/api/simulations/run` currently returns in-memory results; next iteration should persist to `simulation_result` and reference scenarios.
- **Deployment**: implement infrastructure-as-code (e.g., Terraform/Ansible) to provision the hosting environment and maintain parity across dev/stage/prod.

@@ -1,64 +0,0 @@

---
title: "08 — Concepts"
description: "Document key concepts, domain models, and terminology used throughout the architecture documentation."
status: draft
---

# 08 — Concepts

## Key Concepts

### Scenario

A `scenario` represents a distinct mining project configuration, encapsulating all relevant parameters, costs, consumption, production outputs, equipment, maintenance logs, and simulation results. Each scenario is independent, allowing users to model and analyze different mining strategies.

### Parameterization

Parameters are defined for each scenario to capture inputs such as resource consumption rates, production targets, cost factors, and equipment specifications. Parameters can have fixed values or be linked to probability distributions for stochastic simulations.

### Monte Carlo Simulation

The Monte Carlo simulation engine allows users to perform risk analysis by running multiple iterations of a scenario with varying input parameters based on defined probability distributions. This helps in understanding the range of possible outcomes and their associated probabilities.
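
A minimal Python sketch of the idea, not the engine's actual implementation; the normal/triangular parameter shapes and the cost formula are illustrative assumptions:

```python
import random

def run_monte_carlo(parameters: dict, iterations: int = 10_000) -> list[float]:
    """Sample each stochastic parameter per iteration and collect an output metric."""
    results = []
    for _ in range(iterations):
        # Draw one value per parameter from its assumed distribution.
        ore_tonnage = random.normalvariate(parameters["tonnage_mean"], parameters["tonnage_std"])
        unit_cost = random.triangular(parameters["cost_low"], parameters["cost_high"], parameters["cost_mode"])
        # Illustrative outcome: total cost for the sampled inputs.
        results.append(ore_tonnage * unit_cost)
    return results

# Usage: the keys mirror what `parameter.distribution_parameters` might hold.
samples = run_monte_carlo({
    "tonnage_mean": 1_000.0, "tonnage_std": 100.0,
    "cost_low": 40.0, "cost_high": 60.0, "cost_mode": 48.0,
})
print(min(samples), sum(samples) / len(samples), max(samples))
```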

## Domain Model

The domain model consists of the following key entities:

- `Scenario`: Represents a mining project configuration.
- `Parameter`: Input values for scenarios, which can be fixed or probabilistic.
- `Cost`: Tracks capital and operational expenditures.
- `Consumption`: Records resource usage.
- `ProductionOutput`: Captures production metrics.
- `Equipment`: Represents mining equipment associated with a scenario.
- `Maintenance`: Logs maintenance events for equipment.
- `SimulationResult`: Stores results from Monte Carlo simulations.
- `Distribution`: Defines probability distributions for stochastic parameters.
- `User`: Represents application users and their roles.
- `Report`: Generated reports summarizing scenario analyses.
- `Dashboard`: Visual representation of key performance indicators and metrics.
- `AuditLog`: Tracks changes and actions performed within the application.
- `Notification`: Alerts and messages related to scenario events and updates.
- `Tag`: Labels for categorizing scenarios and other entities.
- `Attachment`: Files associated with scenarios, such as documents or images.
- `Version`: Tracks different versions of scenarios and their configurations.

### Detailed Domain Models

See the [Domain Models](08_concepts/08_01_domain_models.md) document for detailed class diagrams and entity relationships.

## Data Model Highlights

- `scenario`: central entity describing a mining scenario; owns relationships to cost, consumption, production, equipment, and maintenance tables.
- `capex`, `opex`: monetary tracking linked to scenarios.
- `consumption`: resource usage entries parameterized by scenario and description.
- `parameter`: scenario inputs with base `value` and optional distribution linkage via `distribution_id`, `distribution_type`, and JSON `distribution_parameters` to support simulation sampling.
- `production_output`: production metrics per scenario.
- `equipment` and `maintenance`: equipment inventory and maintenance events with dates/costs.
- `simulation_result`: staging table for future Monte Carlo outputs (not yet populated by `run_simulation`).
- `application_setting`: centralized key/value store for UI and system configuration, supporting typed values, categories, and editability flags so administrators can manage theme variables and future global options without code changes.

Foreign keys secure referential integrity between domain tables and their scenarios, enabling per-scenario analytics.

### Detailed Data Models

See the [Data Models](08_concepts/08_02_data_models.md) document for detailed ER diagrams and table descriptions.

@@ -1,36 +0,0 @@

# User Roles and Permissions Model

This document outlines the proposed user roles and permissions model for the CalMiner application.

## User Roles

- **Admin:** Full access to all features, including user management, application settings, and all data.
- **Analyst:** Can create, view, edit, and delete scenarios, run simulations, and view reports. Cannot modify application settings or manage users.
- **Viewer:** Can view scenarios, simulations, and reports. Cannot create, edit, or delete anything.

## Permissions (examples)

- `users:manage`: Admin only.
- `settings:manage`: Admin only.
- `scenarios:create`: Admin, Analyst.
- `scenarios:view`: Admin, Analyst, Viewer.
- `scenarios:edit`: Admin, Analyst.
- `scenarios:delete`: Admin, Analyst.
- `simulations:run`: Admin, Analyst.
- `simulations:view`: Admin, Analyst, Viewer.
- `reports:view`: Admin, Analyst, Viewer.
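
A hedged sketch of how these permission strings could gate FastAPI routes, assuming the `get_current_user` dependency described below and a `roles` attribute on the user model; the import path and role-to-permission table are illustrative:

```python
from fastapi import Depends, HTTPException, status

from app.auth import get_current_user  # hypothetical import path for the existing dependency

# Illustrative mapping derived from the examples above.
ROLE_PERMISSIONS = {
    "admin": {"users:manage", "settings:manage", "scenarios:create", "scenarios:view",
              "scenarios:edit", "scenarios:delete", "simulations:run",
              "simulations:view", "reports:view"},
    "analyst": {"scenarios:create", "scenarios:view", "scenarios:edit",
                "scenarios:delete", "simulations:run", "simulations:view", "reports:view"},
    "viewer": {"scenarios:view", "simulations:view", "reports:view"},
}

def require_permission(permission: str):
    """Dependency factory: reject the request unless one of the user's roles grants it."""
    def checker(user=Depends(get_current_user)):
        granted = set().union(*(ROLE_PERMISSIONS.get(role, set()) for role in user.roles))
        if permission not in granted:
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Insufficient permissions")
        return user
    return checker

# Usage on a route:
# @router.post("/api/scenarios/", dependencies=[Depends(require_permission("scenarios:create"))])
```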

## Authentication System

The authentication system uses JWT (JSON Web Tokens) for securing API endpoints. Users can register with a username, email, and password. Passwords are hashed using a `passlib` CryptContext for secure, configurable hashing. Upon successful login, an access token is issued, which must be included in subsequent requests for protected resources.

## Key Components

- **Password Hashing:** `passlib.context.CryptContext` with `bcrypt` scheme.
- **Token Creation & Verification:** `jose.jwt` for encoding and decoding JWTs.
- **Authentication Flow:**
  1. User registers via `/users/register`.
  2. User logs in via `/users/login` to obtain an access token.
  3. The access token is sent in the `Authorization` header (Bearer token) for protected routes.
  4. The `get_current_user` dependency verifies the token and retrieves the authenticated user.
- **Password Reset:** A placeholder `forgot_password` endpoint is available, and a `reset_password` endpoint allows users to set a new password with a valid token (token generation and email sending are not yet implemented).
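
A minimal sketch of the hashing and token helpers these components imply, using the libraries named above; the secret key, algorithm, and expiry are illustrative placeholders:

```python
from datetime import datetime, timedelta, timezone

from jose import jwt
from passlib.context import CryptContext

SECRET_KEY = "change-me"    # placeholder; load from configuration in practice
ALGORITHM = "HS256"         # assumed signing algorithm
ACCESS_TOKEN_TTL = timedelta(minutes=30)

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

def hash_password(password: str) -> str:
    return pwd_context.hash(password)

def verify_password(plain: str, hashed: str) -> bool:
    return pwd_context.verify(plain, hashed)

def create_access_token(username: str) -> str:
    """Issue a short-lived JWT whose subject is the username."""
    claims = {"sub": username, "exp": datetime.now(timezone.utc) + ACCESS_TOKEN_TTL}
    return jwt.encode(claims, SECRET_KEY, algorithm=ALGORITHM)

def decode_access_token(token: str) -> str:
    """Return the username embedded in a valid token; raises JWTError otherwise."""
    payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    return payload["sub"]
```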

@@ -1,106 +0,0 @@

# Data Models

## Data Model Highlights

- `scenario`: central entity describing a mining scenario; owns relationships to cost, consumption, production, equipment, and maintenance tables.
- `capex`, `opex`: monetary tracking linked to scenarios.
- `consumption`: resource usage entries parameterized by scenario and description.
- `parameter`: scenario inputs with base `value` and optional distribution linkage via `distribution_id`, `distribution_type`, and JSON `distribution_parameters` to support simulation sampling.
- `production_output`: production metrics per scenario.
- `equipment` and `maintenance`: equipment inventory and maintenance events with dates/costs.
- `simulation_result`: staging table for future Monte Carlo outputs (not yet populated by `run_simulation`).

Foreign keys secure referential integrity between domain tables and their scenarios, enabling per-scenario analytics.

## Schema Diagrams

```mermaid
erDiagram
    SCENARIO ||--o{ CAPEX : has
    SCENARIO ||--o{ OPEX : has
    SCENARIO ||--o{ CONSUMPTION : has
    SCENARIO ||--o{ PARAMETER : has
    SCENARIO ||--o{ PRODUCTION_OUTPUT : has
    SCENARIO ||--o{ EQUIPMENT : has
    EQUIPMENT ||--o{ MAINTENANCE : has
    SCENARIO ||--o{ SIMULATION_RESULT : has

    SCENARIO {
        int id PK
        string name
        string description
        datetime created_at
        datetime updated_at
    }
    CAPEX {
        int id PK
        int scenario_id FK
        float amount
        string description
        datetime created_at
        datetime updated_at
    }
    OPEX {
        int id PK
        int scenario_id FK
        float amount
        string description
        datetime created_at
        datetime updated_at
    }
    CONSUMPTION {
        int id PK
        int scenario_id FK
        string resource_type
        float quantity
        string description
        datetime created_at
        datetime updated_at
    }

    PRODUCTION_OUTPUT {
        int id PK
        int scenario_id FK
        float tonnage
        float recovery_rate
        float revenue
        datetime created_at
        datetime updated_at
    }
    EQUIPMENT {
        int id PK
        int scenario_id FK
        string name
        string type
        datetime created_at
        datetime updated_at
    }
    MAINTENANCE {
        int id PK
        int equipment_id FK
        date maintenance_date
        float cost
        string description
        datetime created_at
        datetime updated_at
    }
    SIMULATION_RESULT {
        int id PK
        int scenario_id FK
        json result_data
        datetime created_at
        datetime updated_at
    }
    PARAMETER {
        int id PK
        int scenario_id FK
        string name
        float value
        int distribution_id FK
        string distribution_type
        json distribution_parameters
        datetime created_at
        datetime updated_at
    }
```

@@ -1,5 +0,0 @@

# 09 — Architecture Decisions

Status: skeleton

Record important architectural decisions, their rationale, and alternatives considered.

@@ -1,5 +0,0 @@

# 10 — Quality Requirements

Status: skeleton

List non-functional requirements (performance, scalability, reliability, security) and measurable acceptance criteria.

@@ -1,5 +0,0 @@

# 11 — Technical Risks

Status: skeleton

Document potential technical risks, mitigation strategies, and monitoring suggestions.

@@ -1,5 +0,0 @@

# 12 — Glossary

Status: skeleton

Project glossary and definitions for domain-specific terms.

@@ -1,42 +0,0 @@

---
title: 'CalMiner Architecture Documentation'
description: 'arc42-based architecture documentation for the CalMiner project'
---

# Architecture documentation (arc42 mapping)

This folder mirrors the arc42 chapter structure (adapted to Markdown).

## Files

- [01 Introduction and Goals](01_introduction_and_goals.md)
- [02 Architecture Constraints](02_architecture_constraints.md)
  - [02_01 Technical Constraints](02_constraints/02_01_technical_constraints.md)
  - [02_02 Organizational Constraints](02_constraints/02_02_organizational_constraints.md)
  - [02_03 Regulatory Constraints](02_constraints/02_03_regulatory_constraints.md)
  - [02_04 Environmental Constraints](02_constraints/02_04_environmental_constraints.md)
  - [02_05 Performance Constraints](02_constraints/02_05_performance_constraints.md)
- [03 Context and Scope](03_context_and_scope.md)
  - [03_01 Architecture Scope](03_scope/03_01_architecture_scope.md)
- [04 Solution Strategy](04_solution_strategy.md)
  - [04_01 Client-Server Architecture](04_strategy/04_01_client_server_architecture.md)
  - [04_02 Technology Choices](04_strategy/04_02_technology_choices.md)
  - [04_03 Trade-offs](04_strategy/04_03_trade_offs.md)
  - [04_04 Future Considerations](04_strategy/04_04_future_considerations.md)
- [05 Building Block View](05_building_block_view.md)
  - [05_01 Architecture Overview](05_blocks/05_01_architecture_overview.md)
  - [05_02 Backend Components](05_blocks/05_02_backend_components.md)
  - [05_03 Frontend Components](05_blocks/05_03_frontend_components.md)
  - [05_03 Theming](05_blocks/05_03_theming.md)
  - [05_04 Middleware & Utilities](05_blocks/05_04_middleware_utilities.md)
- [06 Runtime View](06_runtime_view.md)
- [07 Deployment View](07_deployment_view.md)
  - [07_01 Testing & CI](07_deployment/07_01_testing_ci.md)
  - [07_02 Database](07_deployment/07_02_database.md)
- [08 Concepts](08_concepts.md)
  - [08_01 Security](08_concepts/08_01_security.md)
  - [08_02 Data Models](08_concepts/08_02_data_models.md)
- [09 Architecture Decisions](09_architecture_decisions.md)
- [10 Quality Requirements](10_quality_requirements.md)
- [11 Technical Risks](11_technical_risks.md)
- [12 Glossary](12_glossary.md)

@@ -1,27 +0,0 @@

# CI Cache Troubleshooting

## Background

The test workflow (`.gitea/workflows/test.yml`) uses the `actions/cache` action to reuse the pip download cache located at `~/.cache/pip`. The cache key now hashes both `requirements.txt` and `requirements-test.txt` so the cache stays aligned with dependency changes.

## Current Observation

Recent CI runs report the following warning when the cache step executes:

```text
::warning::Failed to restore: getCacheEntry failed: connect ETIMEDOUT 172.17.0.5:40181
Cache not found for input keys: Linux-pip-<hash>, Linux-pip-
```

The timeout indicates the runner cannot reach the cache backend rather than a normal cache miss.

## Recommended Follow-Up

- Confirm that the Actions cache service is enabled for the CI environment (Gitea runners require the cache server URL to be provided via `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_URL`); a configuration sketch follows this list.
- Verify network connectivity from the runner to the cache service endpoint and ensure required ports are open.
- After connectivity is restored, rerun the workflow to allow the cache to be populated and confirm subsequent runs restore the cache without warnings.
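
A hedged sketch of wiring those variables into the systemd unit from the runner setup guide; the endpoint is a placeholder that must match the actual cache service host and port:

```ini
# Hedged addition to /etc/systemd/system/act-runner.service; placeholder endpoint.
[Service]
Environment="ACTIONS_CACHE_URL=http://cache-server.internal:8088/"
Environment="ACTIONS_RUNTIME_URL=http://cache-server.internal:8088/"
```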

## Interim Guidance

- The workflow will proceed without cached dependencies, but package installs may take longer.
- Keep the cache step in place so it begins working automatically once the infrastructure is configured.

@@ -1,104 +0,0 @@

# Development Environment Setup

This document outlines the local development environment and steps to get the project running.

## Prerequisites

- Python (version 3.11+)
- PostgreSQL (version 13+)
- Git
- Docker and Docker Compose (optional, for containerized development)

## Clone and Project Setup

```powershell
# Clone the repository
git clone https://git.allucanget.biz/allucanget/calminer.git
cd calminer
```

## Development with Docker Compose (Recommended)

For a quick setup without installing PostgreSQL locally, use Docker Compose:

```powershell
# Start services
docker-compose up

# The app will be available at http://localhost:8000
# Database is automatically set up
```

To run in background:

```powershell
docker-compose up -d
```

To stop:

```powershell
docker-compose down
```

## Manual Development Setup

### Virtual Environment

```powershell
# Create and activate a virtual environment
python -m venv .venv
.\.venv\Scripts\Activate.ps1
```

### Install Dependencies

```powershell
pip install -r requirements.txt
```

### Database Setup

1. Create database user:

   ```sql
   CREATE USER calminer_user WITH PASSWORD 'your_password';
   ```

1. Create database:

   ```sql
   CREATE DATABASE calminer;
   ```
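
The new role also needs privileges on the database; a hedged SQL sketch (finer-grained grants or ownership transfer work equally well):

```sql
-- Hedged sketch: give the application role full use of the new database.
GRANT ALL PRIVILEGES ON DATABASE calminer TO calminer_user;
-- On PostgreSQL 15+, also allow object creation in the public schema
-- (run this while connected to the calminer database):
GRANT ALL ON SCHEMA public TO calminer_user;
```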

### Environment Variables

1. Copy `.env.example` to `.env` at project root.
1. Edit `.env` to set database connection details:

   ```dotenv
   DATABASE_DRIVER=postgresql
   DATABASE_HOST=localhost
   DATABASE_PORT=5432
   DATABASE_USER=calminer_user
   DATABASE_PASSWORD=your_password
   DATABASE_NAME=calminer
   DATABASE_SCHEMA=public
   ```

1. The application uses `python-dotenv` to load these variables. A legacy `DATABASE_URL` value is still accepted if the granular keys are omitted.

### Running the Application

```powershell
# Start the FastAPI server
uvicorn main:app --reload
```

## Testing

```powershell
pytest
```

E2E tests use Playwright and a session-scoped `live_server` fixture that starts the app at `http://localhost:8001` for browser-driven tests, along the lines of the sketch below.
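
A hedged sketch of what such a fixture can look like, not the repository's actual conftest; the uvicorn-in-a-thread approach and the port are assumptions:

```python
import threading
import time

import pytest
import uvicorn

from main import app  # the FastAPI application

@pytest.fixture(scope="session")
def live_server():
    """Serve the app on http://localhost:8001 for the whole test session."""
    config = uvicorn.Config(app, host="127.0.0.1", port=8001, log_level="warning")
    server = uvicorn.Server(config)
    thread = threading.Thread(target=server.run, daemon=True)
    thread.start()
    while not server.started:  # wait until uvicorn reports readiness
        time.sleep(0.05)
    yield "http://localhost:8001"
    server.should_exit = True
    thread.join(timeout=5)
```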

@@ -1,100 +0,0 @@

# Staging Environment Setup

This guide outlines how to provision and validate the CalMiner staging database using `scripts/setup_database.py`. It complements the local and CI-focused instructions in `docs/quickstart.md`.

## Prerequisites

- Network access to the staging infrastructure (VPN or bastion, as required by ops).
- Provisioned PostgreSQL instance with superuser or delegated admin credentials for maintenance.
- Application credentials (role + password) dedicated to CalMiner staging.
- The application repository checked out with Python dependencies installed (`pip install -r requirements.txt`).
- Optional but recommended: a writable directory (for example `reports/`) to capture setup logs.

> Replace the placeholder values in the examples below with the actual host, port, and credential details supplied by ops.

## Environment Configuration

Populate the following environment variables before invoking the setup script. Store them in a secure location such as `config/setup_staging.env` (excluded from source control) and load them with `dotenv` or your shell profile.

| Variable | Description |
| --- | --- |
| `DATABASE_HOST` | Staging PostgreSQL hostname or IP (for example `staging-db.internal`). |
| `DATABASE_PORT` | Port exposed by the staging PostgreSQL service (default `5432`). |
| `DATABASE_NAME` | CalMiner staging database name (for example `calminer_staging`). |
| `DATABASE_USER` | Application role used by the FastAPI app (for example `calminer_app`). |
| `DATABASE_PASSWORD` | Password for the application role. |
| `DATABASE_SCHEMA` | Optional non-public schema; omit or set to `public` otherwise. |
| `DATABASE_SUPERUSER` | Administrative role with rights to create roles/databases (for example `calminer_admin`). |
| `DATABASE_SUPERUSER_PASSWORD` | Password for the administrative role. |
| `DATABASE_SUPERUSER_DB` | Database to connect to for admin tasks (default `postgres`). |
| `DATABASE_ADMIN_URL` | Optional DSN that overrides the granular admin settings above. |

You may also set `DATABASE_URL` for application runtime convenience, but the setup script only requires the values listed in the table.

### Loading Variables (PowerShell example)

```powershell
$env:DATABASE_HOST = "staging-db.internal"
$env:DATABASE_PORT = "5432"
$env:DATABASE_NAME = "calminer_staging"
$env:DATABASE_USER = "calminer_app"
$env:DATABASE_PASSWORD = "<app-password>"
$env:DATABASE_SUPERUSER = "calminer_admin"
$env:DATABASE_SUPERUSER_PASSWORD = "<admin-password>"
$env:DATABASE_SUPERUSER_DB = "postgres"
```

For bash shells, export the same variables using `export VARIABLE=value` or load them through `dotenv`.
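
The bash equivalent of the PowerShell block above:

```bash
export DATABASE_HOST="staging-db.internal"
export DATABASE_PORT="5432"
export DATABASE_NAME="calminer_staging"
export DATABASE_USER="calminer_app"
export DATABASE_PASSWORD="<app-password>"
export DATABASE_SUPERUSER="calminer_admin"
export DATABASE_SUPERUSER_PASSWORD="<admin-password>"
export DATABASE_SUPERUSER_DB="postgres"
```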

## Setup Workflow

Run the setup script in three phases to validate idempotency and capture diagnostics:

1. **Dry run (diagnostic):**

   ```powershell
   python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v `
       2>&1 | Tee-Object -FilePath reports/setup_staging_dry_run.log
   ```

   Confirm that the script reports planned actions without failures. If the application role is missing, a dry run will log skip messages until a live run creates the role.

2. **Apply changes:**

   ```powershell
   python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v `
       2>&1 | Tee-Object -FilePath reports/setup_staging_apply.log
   ```

   Verify the log for successful database creation, role grants, migration execution, and seed verification.

3. **Post-apply dry run:**

   ```powershell
   python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v `
       2>&1 | Tee-Object -FilePath reports/setup_staging_post_apply.log
   ```

   This run should confirm that all schema objects, migrations, and seed data are already in place.

## Validation Checklist

- [ ] Confirm the staging application can connect using the application DSN (for example, run `pytest tests/e2e/test_smoke.py` against staging or trigger a smoke test workflow).
- [ ] Inspect `schema_migrations` to ensure the baseline migration (`000_base.sql`) is recorded.
- [ ] Spot-check seeded reference data (`currency`, `measurement_unit`) for correctness.
- [ ] Capture and archive the three setup logs in a shared location for audit purposes.

## Troubleshooting

- If the dry run reports skipped actions because the application role does not exist, proceed with the live run; subsequent dry runs will validate as expected.
- Connection errors usually stem from network restrictions or incorrect credentials. Validate reachability with `psql` or `pg_isready` using the same host/port and credentials.
- For permission issues during migrations or seeding, confirm the admin role has rights on the target database and that the application role inherits the expected privileges.

## Rollback Guidance

- Database creation and role grants register rollback actions when not running in dry-run mode. If a later step fails, rerun the script without `--dry-run`; it will automatically revoke grants or drop newly created resources as part of the rollback routine.
- For staged environments where manual intervention is required, coordinate with ops before dropping databases or roles.

## Next Steps

- Keep this document updated as staging infrastructure evolves (for example, when migrating to managed services or rotating credentials).

@@ -1,124 +0,0 @@

# UI, templates and styling

This document outlines the UI structure, template components, CSS variable conventions, and per-page data/actions for the CalMiner application.

## Reusable Template Components

To reduce duplication across form-centric pages, shared Jinja macros live in `templates/partials/components.html`.

- `select_field(...)`: renders labeled `<select>` controls with consistent placeholder handling and optional preselection. Existing JavaScript modules continue to target the generated IDs, so template calls must pass the same identifiers (`consumption-form-scenario`, etc.).
- `feedback(...)` and `empty_state(...)`: wrap status messages in standard classes (`feedback`, `empty-state`) with optional `hidden` toggles so scripts can control visibility without reimplementing markup.
- `table_container(...)`: provides a semantic wrapper and optional heading around tabular content; the `{% call %}` body supplies the `<thead>`, `<tbody>`, and `<tfoot>` elements while the macro applies the `table-container` class and manages hidden state.

Pages like `templates/consumption.html` and `templates/costs.html` already consume these helpers to keep markup aligned while preserving existing JavaScript selectors.

Import macros via:

```jinja
{% from "partials/components.html" import select_field, feedback, table_container with context %}
```
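
A hedged usage sketch of the `table_container` call pattern described above; the argument names (`heading`, `id`) are illustrative assumptions, so check the macro signature in `templates/partials/components.html`:

```jinja
{# Illustrative only: argument names are assumed, verify against the macro. #}
{% call table_container(heading="Consumption entries", id="consumption-table") %}
  <thead>
    <tr><th>Scenario</th><th>Resource</th><th>Quantity</th></tr>
  </thead>
  <tbody id="consumption-table-body"></tbody>
{% endcall %}
```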

## Styling Audit Notes (2025-10-21)

- **Spacing**: Panels (`section.panel`) sometimes lack consistent vertical rhythm between headings, form grids, and tables. Extra top/bottom margin utilities would help align content.
- **Typography**: Headings rely on browser defaults; the font-size scale is uneven between `<h2>` and `<h3>`. Define explicit scale tokens (e.g., `--font-size-lg`) for predictable sizing.
- **Forms**: `.form-grid` uses fixed column gaps that collapse on small screens; introduce responsive grid rules to stack gracefully below ~768px.
- **Tables**: `.table-container` wrappers need overflow handling for narrow viewports; consider `overflow-x: auto` with padding adjustments.
- **Feedback/Empty states**: Messages use default font weight and spacing; a utility class for margin/padding would ensure consistent separation from forms or tables.

## CSS Variable Naming Conventions

The project adheres to a clear and descriptive naming convention for CSS variables, primarily defined in `static/css/main.css`.

### Naming Structure

Variables are prefixed based on their category:

- `--color-`: For all color-related variables (e.g., `--color-primary`, `--color-background`, `--color-text-primary`).
- `--space-`: For spacing and layout-related variables (e.g., `--space-sm`, `--space-md`, `--space-lg`).
- `--font-size-`: For font size variables (e.g., `--font-size-base`, `--font-size-lg`).
- Other specific prefixes for components or properties (e.g., `--panel-radius`, `--table-radius`).

### Descriptive Names

Color names are chosen to be semantically meaningful rather than literal color values, allowing for easier theme changes. For example:

- `--color-primary`: Represents the main brand color.
- `--color-accent`: Represents an accent color used for highlights.
- `--color-text-primary`: The main text color.
- `--color-text-muted`: A lighter text color for less emphasis.
- `--color-surface`: The background color for UI elements like cards or panels.
- `--color-background`: The overall page background color.

This approach ensures that the CSS variables are intuitive, maintainable, and easily adaptable for future theme customizations.

## Per-page data & actions

Short reference of per-page APIs and primary actions used by templates and scripts.

- Scenarios (`templates/ScenarioForm.html`):
  - Data: `GET /api/scenarios/`
  - Actions: `POST /api/scenarios/`

- Parameters (`templates/ParameterInput.html`):
  - Data: `GET /api/scenarios/`, `GET /api/parameters/`
  - Actions: `POST /api/parameters/`

- Costs (`templates/costs.html`):
  - Data: `GET /api/costs/capex`, `GET /api/costs/opex`
  - Actions: `POST /api/costs/capex`, `POST /api/costs/opex`

- Consumption (`templates/consumption.html`):
  - Data: `GET /api/consumption/`
  - Actions: `POST /api/consumption/`

- Production (`templates/production.html`):
  - Data: `GET /api/production/`
  - Actions: `POST /api/production/`

- Equipment (`templates/equipment.html`):
  - Data: `GET /api/equipment/`
  - Actions: `POST /api/equipment/`

- Maintenance (`templates/maintenance.html`):
  - Data: `GET /api/maintenance/` (pagination support)
  - Actions: `POST /api/maintenance/`, `PUT /api/maintenance/{id}`, `DELETE /api/maintenance/{id}`

- Simulations (`templates/simulations.html`):
  - Data: `GET /api/scenarios/`, `GET /api/parameters/`
  - Actions: `POST /api/simulations/run`

- Reporting (`templates/reporting.html` and `templates/Dashboard.html`):
  - Data: `POST /api/reporting/summary` (accepts arrays of `{ "result": float }` objects)
  - Actions: Trigger summary refreshes and export/download actions.

## Navigation Structure

The application uses a sidebar navigation menu organized into the following top-level categories:

- **Dashboard**: Main overview page.
- **Overview**: Sub-menu for core scenario inputs.
  - Parameters: Process parameters configuration.
  - Costs: Capital and operating costs.
  - Consumption: Resource consumption tracking.
  - Production: Production output settings.
  - Equipment: Equipment inventory (with Maintenance sub-item).
- **Simulations**: Monte Carlo simulation runs.
- **Analytics**: Reporting and analytics.
- **Settings**: Administrative settings (with Themes and Currency Management sub-items).

## UI Template Audit (2025-10-20)

- Existing HTML templates: `ScenarioForm.html`, `ParameterInput.html`, and `Dashboard.html` (reporting summary view).
- Coverage gaps remain for costs, consumption, production, equipment, maintenance, and simulation workflows—no dedicated templates yet.
- Shared layout primitives (navigation/header/footer) are absent; current pages duplicate boilerplate markup.
- Dashboard currently covers reporting metrics but should be wired to a central `/` route once the shared layout lands.
- Next steps: introduce a `base.html`, refactor existing templates to extend it, and scaffold placeholder pages for the remaining features.

@@ -1,53 +0,0 @@

# Consolidated Migration Baseline Plan

This note outlines the content and structure of the planned baseline migration (`scripts/migrations/000_base.sql`). The objective is to capture the currently required schema changes in a single idempotent script so that fresh environments only need to apply one SQL file before proceeding with incremental migrations.

## Guiding Principles

1. **Idempotent DDL**: Every `CREATE` or `ALTER` statement must tolerate repeated execution. Use `IF NOT EXISTS` guards or existence checks (`information_schema`) where necessary.
2. **Order of Operations**: Create reference tables first, then update dependent tables, finally enforce foreign keys and constraints.
3. **Data Safety**: Default data seeded by migrations should be minimal and in ASCII-only form to avoid encoding issues in various shells and CI logs.
4. **Compatibility**: The baseline must reflect the schema shape expected by the current SQLAlchemy models, API routes, and seeding scripts.

## Schema Elements to Include

### 1. `currency` Table

- Columns: `id SERIAL PRIMARY KEY`, `code VARCHAR(3) UNIQUE NOT NULL`, `name VARCHAR(128) NOT NULL`, `symbol VARCHAR(8)`, `is_active BOOLEAN NOT NULL DEFAULT TRUE`.
- Index: implicit via unique constraint on `code`.
- Seed rows matching `scripts.seed_data.CURRENCY_SEEDS` (ASCII-only symbols such as `USD$`, `CAD$`).
- Upsert logic using `ON CONFLICT (code) DO UPDATE` to keep names/symbols in sync when rerun; a sketch follows this list.
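
A hedged sketch of that upsert shape, using a single seed row as an example; the display name is an assumption, and the full seed list lives in `scripts.seed_data.CURRENCY_SEEDS`:

```sql
-- Hedged sketch of the planned idempotent create-and-seed step.
CREATE TABLE IF NOT EXISTS currency (
    id SERIAL PRIMARY KEY,
    code VARCHAR(3) UNIQUE NOT NULL,
    name VARCHAR(128) NOT NULL,
    symbol VARCHAR(8),
    is_active BOOLEAN NOT NULL DEFAULT TRUE
);

INSERT INTO currency (code, name, symbol, is_active)
VALUES ('USD', 'US Dollar', 'USD$', TRUE)   -- name is illustrative
ON CONFLICT (code) DO UPDATE
SET name = EXCLUDED.name,
    symbol = EXCLUDED.symbol,
    is_active = EXCLUDED.is_active;
```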

### 2. Currency Integration for CAPEX/OPEX

- Add `currency_id INTEGER` columns with `IF NOT EXISTS` guards.
- Populate `currency_id` from the legacy `currency_code` column if it exists.
- Default null `currency_id` values to the USD row, then `ALTER` to `SET NOT NULL`.
- Create `fk_capex_currency` and `fk_opex_currency` constraints with `ON DELETE RESTRICT` semantics.
- Drop the legacy `currency_code` column if it exists (safe because the new column holds the data).

### 3. Measurement Metadata on Consumption/Production

- Ensure `consumption` and `production_output` tables have `unit_name VARCHAR(64)` and `unit_symbol VARCHAR(16)` columns with `IF NOT EXISTS` guards.

### 4. `measurement_unit` Reference Table

- Columns: `id SERIAL PRIMARY KEY`, `code VARCHAR(64) UNIQUE NOT NULL`, `name VARCHAR(128) NOT NULL`, `symbol VARCHAR(16)`, `unit_type VARCHAR(32) NOT NULL`, `is_active BOOLEAN NOT NULL DEFAULT TRUE`, `created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()`, `updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()`.
- A trigger to maintain `updated_at` is deferred: automate it via the application layer later and omit the trigger for now.
- Seed rows matching `MEASUREMENT_UNIT_SEEDS` (ASCII names/symbols). Use `ON CONFLICT (code) DO UPDATE` to keep descriptive fields aligned.

### 5. Transaction Handling

- Wrap the main operations in a single `BEGIN; ... COMMIT;` block.
- Use subtransactions (`DO $$ ... $$;`) only where conditional logic is required (e.g., checking column existence before backfill).

## Migration Tracking Alignment

- The baseline file will be named `000_base.sql`. After execution, insert a row into `schema_migrations` with filename `000_base.sql` to keep the tracking table aligned.
- Existing migrations (`20251021_add_currency_and_unit_fields.sql`, `20251022_create_currency_table_and_fks.sql`) remain for historical reference but will no longer be applied to new environments once the baseline is present.

## Next Steps

1. Draft `000_base.sql` reflecting the steps above.
2. Update `run_migrations` to recognise the baseline file and mark older migrations as applied when the baseline exists.
3. Provide documentation in `docs/quickstart.md` explaining how to reset an environment using the baseline plus seeds.
@@ -1,88 +0,0 @@
# Developer Quickstart

- [Developer Quickstart](#developer-quickstart)
  - [Development](#development)
    - [User Interface](#user-interface)
    - [Testing](#testing)
  - [Staging](#staging)
  - [Deployment](#deployment)
    - [Using Docker Compose](#using-docker-compose)
    - [Manual Docker Deployment](#manual-docker-deployment)
    - [Database Deployment \& Migrations](#database-deployment--migrations)
  - [Usage Overview](#usage-overview)
    - [Theme configuration](#theme-configuration)
  - [Where to look next](#where-to-look-next)

This document provides a quickstart guide for developers to set up and run the CalMiner application locally.

## Development

See [Development Setup](docs/developer/development_setup.md).

### User Interface

There is a dedicated [UI and Style](docs/developer/ui_and_style.md) guide for frontend contributors.

### Testing

Testing is described in the [Testing CI](docs/architecture/07_deployment/07_01_testing_ci.md) document.

## Staging

Staging environment setup is covered in [Staging Environment Setup](docs/developer/staging_environment_setup.md).

## Deployment

The application can be deployed using Docker containers.

### Using Docker Compose

For production deployment, use the provided `docker-compose.yml`:

```bash
docker-compose up -d
```

This starts the FastAPI app and the PostgreSQL database.

### Manual Docker Deployment

Build and run the container manually:

```bash
docker build -t calminer .
docker run -d -p 8000:8000 \
  -e DATABASE_HOST=your-postgres-host \
  -e DATABASE_USER=calminer \
  -e DATABASE_PASSWORD=your-password \
  -e DATABASE_NAME=calminer_db \
  calminer
```

Ensure the database is set up and migrated before running.

### Database Deployment & Migrations

See the [Database Deployment & Migrations](docs/architecture/07_deployment/07_02_database_deployment_migrations.md) document for details on database deployment and migration strategies.

## Usage Overview

- **Run the application**: Follow the [Development Setup](docs/developer/development_setup.md) to get the application running locally.
- **Access the UI**: Open your web browser and navigate to `http://localhost:8000/ui` to access the user interface.
- **API base URL**: `http://localhost:8000/api`
  - Key routes include creating scenarios, parameters, costs, consumption, production, equipment, maintenance, and reporting summaries. See the `routes/` directory for full details.
- **UI base URL**: `http://localhost:8000/ui`

### Theme configuration

Theming is laid out in [Theming](docs/architecture/05_03_theming.md).

## Where to look next

- Architecture overview & chapters: [architecture](architecture/README.md) (per-chapter files under `docs/architecture/`)
  - [Testing & CI](architecture/07_deployment/07_01_testing_ci.md)
  - [Development setup](developer/development_setup.md)
- Implementation plan & roadmap: [Solution strategy](architecture/04_solution_strategy.md)
- Routes: [routes](../routes/)
- Services: [services](../services/)
- Scripts: [scripts](../scripts/) (migrations and backfills)
@@ -1,37 +0,0 @@
# Roadmap

## Overview

## Scenario Enhancements

For each scenario, the goal is to evaluate financial viability, operational efficiency, and risk factors associated with the mining project. This data is used to perform calculations, generate reports, and visualize results through charts and dashboards, enabling users to make informed decisions based on comprehensive analysis.

### Scenario & Data Management

Scenarios are the core organizational unit within CalMiner, allowing users to create, manage, and analyze different mining project configurations. Each scenario encapsulates a unique set of parameters and data inputs that define the mining operation being modeled.

#### Scenario Creation

Users can create new scenarios by providing a unique name and description. The system will generate a new scenario with default parameters, which can be customized later.

#### Scenario Management

Users can manage existing scenarios by modifying their parameters, adding new data inputs, or deleting them as needed.

#### Data Inputs

Users can define and manage various data inputs for each scenario, including:

- **Geological Data**: Input data related to the geological characteristics of the mining site.
- **Operational Parameters**: Define parameters such as mining methods, equipment specifications, and workforce details.
- **Financial Data**: Input cost structures, revenue models, and financial assumptions.
- **Environmental Data**: Include data related to environmental impact, regulations, and sustainability practices.
- **Technical Data**: Specify technical parameters such as ore grades, recovery rates, and processing methods.
- **Social Data**: Incorporate social impact assessments, community engagement plans, and stakeholder analysis.
- **Regulatory Data**: Include data related to legal and regulatory requirements, permits, and compliance measures.
- **Market Data**: Input market conditions, commodity prices, and economic indicators that may affect the mining operation.
- **Risk Data**: Define risk factors, probabilities, and mitigation strategies for the mining project.
- **Logistical Data**: Include data related to transportation, supply chain management, and infrastructure requirements.
- **Maintenance Data**: Input maintenance schedules, costs, and equipment reliability metrics.
- **Human Resources Data**: Define workforce requirements, training programs, and labor costs.
- **Health and Safety Data**: Include data related to workplace safety protocols, incident rates, and health programs.

14
k8s/configmap.yaml
Normal file

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: calminer-config
data:
  DATABASE_HOST: "calminer-db"
  DATABASE_PORT: "5432"
  DATABASE_USER: "calminer"
  DATABASE_NAME: "calminer_db"
  DATABASE_DRIVER: "postgresql"
  CALMINER_EXPORT_MAX_ROWS: "10000"
  CALMINER_EXPORT_METADATA: "true"
  CALMINER_IMPORT_STAGING_TTL: "300"
  CALMINER_IMPORT_MAX_ROWS: "50000"

54
k8s/deployment.yaml
Normal file

@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calminer-app
  labels:
    app: calminer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: calminer
  template:
    metadata:
      labels:
        app: calminer
    spec:
      containers:
        - name: calminer
          image: registry.example.com/calminer:latest
          ports:
            - containerPort: 8003
          envFrom:
            - configMapRef:
                name: calminer-config
            - secretRef:
                name: calminer-secrets
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 5
            periodSeconds: 5
      initContainers:
        - name: wait-for-db
          image: postgres:17
          command:
            [
              "sh",
              "-c",
              "until pg_isready -h calminer-db -p 5432; do echo waiting for database; sleep 2; done;",
            ]

18
k8s/ingress.yaml
Normal file

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: calminer-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - host: calminer.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: calminer-service
                port:
                  number: 80

13
k8s/postgres-service.yaml
Normal file

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-db
  labels:
    app: calminer-db
spec:
  selector:
    app: calminer-db
  ports:
    - port: 5432
      targetPort: 5432
  clusterIP: None # Headless service for StatefulSet

48
k8s/postgres.yaml
Normal file

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: calminer-db
spec:
  serviceName: calminer-db
  replicas: 1
  selector:
    matchLabels:
      app: calminer-db
  template:
    metadata:
      labels:
        app: calminer-db
    spec:
      containers:
        - name: postgres
          image: postgres:17
          ports:
            - containerPort: 5432
          env:
            - name: POSTGRES_USER
              value: "calminer"
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: calminer-secrets
                  key: DATABASE_PASSWORD
            - name: POSTGRES_DB
              value: "calminer_db"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          volumeMounts:
            - name: postgres-storage
              mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
    - metadata:
        name: postgres-storage
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi

8
k8s/secret.yaml
Normal file

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: calminer-secrets
type: Opaque
data:
  DATABASE_PASSWORD: Y2FsbWluZXJfcGFzc3dvcmQ= # base64 encoded 'calminer_password'
  CALMINER_SEED_ADMIN_PASSWORD: Q2hhbmdlTWUxMjMh # base64 encoded 'ChangeMe123!'

14
k8s/service.yaml
Normal file

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-service
  labels:
    app: calminer
spec:
  selector:
    app: calminer
  ports:
    - port: 80
      targetPort: 8003
      protocol: TCP
  type: ClusterIP

125
main.py

@@ -1,28 +1,88 @@
-from routes.distributions import router as distributions_router
-from routes.ui import router as ui_router
-from routes.parameters import router as parameters_router
+import logging
+from contextlib import asynccontextmanager
+
 from typing import Awaitable, Callable
 
 from fastapi import FastAPI, Request, Response
 from fastapi.staticfiles import StaticFiles
+from fastapi.responses import FileResponse
+
+from config.settings import get_settings
+from middleware.auth_session import AuthSessionMiddleware
+from middleware.metrics import MetricsMiddleware
 from middleware.validation import validate_json
-from config.database import Base, engine
+from routes.auth import router as auth_router
+from routes.dashboard import router as dashboard_router
+from routes.calculations import router as calculations_router
+from routes.imports import router as imports_router
+from routes.exports import router as exports_router
+from routes.projects import router as projects_router
+from routes.reports import router as reports_router
 from routes.scenarios import router as scenarios_router
-from routes.costs import router as costs_router
-from routes.consumption import router as consumption_router
-from routes.production import router as production_router
-from routes.equipment import router as equipment_router
-from routes.reporting import router as reporting_router
-from routes.currencies import router as currencies_router
-from routes.simulations import router as simulations_router
-from routes.maintenance import router as maintenance_router
-from routes.settings import router as settings_router
-from routes.users import router as users_router
+from routes.ui import router as ui_router
+from routes.navigation import router as navigation_router
+from monitoring import router as monitoring_router
+from services.bootstrap import bootstrap_admin, bootstrap_pricing_settings
+from scripts.init_db import init_db as init_db_script
 
-# Initialize database schema
-Base.metadata.create_all(bind=engine)
+logger = logging.getLogger(__name__)
 
-app = FastAPI()
 
+async def _bootstrap_startup() -> None:
+    settings = get_settings()
+    admin_settings = settings.admin_bootstrap_settings()
+    pricing_metadata = settings.pricing_metadata()
+    try:
+        try:
+            init_db_script()
+        except Exception:
+            logger.exception(
+                "DB initializer failed; continuing to bootstrap (non-fatal)")
+
+        role_result, admin_result = bootstrap_admin(settings=admin_settings)
+        pricing_result = bootstrap_pricing_settings(metadata=pricing_metadata)
+        logger.info(
+            "Admin bootstrap completed: roles=%s created=%s updated=%s rotated=%s assigned=%s",
+            role_result.ensured,
+            admin_result.created_user,
+            admin_result.updated_user,
+            admin_result.password_rotated,
+            admin_result.roles_granted,
+        )
+        try:
+            seed = pricing_result.seed
+            slug = getattr(seed.settings, "slug", None) if seed and getattr(
+                seed, "settings", None) else None
+            created = getattr(seed, "created", None)
+            updated_fields = getattr(seed, "updated_fields", None)
+            impurity_upserts = getattr(seed, "impurity_upserts", None)
+            logger.info(
+                "Pricing settings bootstrap completed: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
+                slug,
+                created,
+                updated_fields,
+                impurity_upserts,
+                pricing_result.projects_assigned,
+            )
+        except Exception:
+            logger.info(
+                "Pricing settings bootstrap completed (partial): projects_assigned=%s",
+                pricing_result.projects_assigned,
+            )
+    except Exception:  # pragma: no cover - defensive logging
+        logger.exception(
+            "Failed to bootstrap administrator or pricing settings")
+
+
+@asynccontextmanager
+async def app_lifespan(_: FastAPI):
+    await _bootstrap_startup()
+    yield
+
+
+app = FastAPI(lifespan=app_lifespan)
+
+app.add_middleware(AuthSessionMiddleware)
+app.add_middleware(MetricsMiddleware)
+
 
 @app.middleware("http")
@@ -37,20 +97,23 @@ async def health() -> dict[str, str]:
     return {"status": "ok"}
 
 
-app.mount("/static", StaticFiles(directory="static"), name="static")
+@app.get("/favicon.ico", include_in_schema=False)
+async def favicon() -> Response:
+    static_directory = "static"
+    favicon_img = "favicon.ico"
+    return FileResponse(f"{static_directory}/{favicon_img}")
+
 
-# Include API routers
+app.include_router(dashboard_router)
+app.include_router(calculations_router)
+app.include_router(auth_router)
+app.include_router(imports_router)
+app.include_router(exports_router)
+app.include_router(projects_router)
 app.include_router(scenarios_router)
-app.include_router(parameters_router)
-app.include_router(distributions_router)
-app.include_router(costs_router)
-app.include_router(consumption_router)
-app.include_router(simulations_router)
-app.include_router(production_router)
-app.include_router(equipment_router)
-app.include_router(maintenance_router)
-app.include_router(reporting_router)
-app.include_router(currencies_router)
-app.include_router(settings_router)
+app.include_router(reports_router)
 app.include_router(ui_router)
-app.include_router(users_router)
+app.include_router(monitoring_router)
+app.include_router(navigation_router)
+
+app.mount("/static", StaticFiles(directory="static"), name="static")

218
middleware/auth_session.py
Normal file

@@ -0,0 +1,218 @@
from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import Callable, Iterable, Optional

from fastapi import Request, Response
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.types import ASGIApp

from config.settings import Settings, get_settings
from sqlalchemy.orm.exc import DetachedInstanceError
from models import User
from monitoring.metrics import ACTIVE_CONNECTIONS
from services.exceptions import EntityNotFoundError
from services.security import (
    JWTSettings,
    TokenDecodeError,
    TokenError,
    TokenExpiredError,
    TokenTypeMismatchError,
    create_access_token,
    create_refresh_token,
    decode_access_token,
    decode_refresh_token,
)
from services.session import (
    AuthSession,
    SessionStrategy,
    SessionTokens,
    build_session_strategy,
    clear_session_cookies,
    extract_session_tokens,
    set_session_cookies,
)
from services.unit_of_work import UnitOfWork

_AUTH_SCOPE = "auth"


@dataclass(slots=True)
class _ResolutionResult:
    session: AuthSession
    strategy: SessionStrategy
    jwt_settings: JWTSettings


class AuthSessionMiddleware(BaseHTTPMiddleware):
    """Resolve authenticated users from session cookies and refresh tokens."""

    _active_sessions: int = 0

    def __init__(
        self,
        app: ASGIApp,
        *,
        settings_provider: Callable[[], Settings] = get_settings,
        unit_of_work_factory: Callable[[], UnitOfWork] = UnitOfWork,
        refresh_scopes: Iterable[str] | None = None,
    ) -> None:
        super().__init__(app)
        self._settings_provider = settings_provider
        self._unit_of_work_factory = unit_of_work_factory
        self._refresh_scopes = tuple(
            refresh_scopes) if refresh_scopes else (_AUTH_SCOPE,)

    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
        resolved = self._resolve_session(request)

        # Track active sessions for authenticated users
        try:
            user_active = bool(resolved.session.user and getattr(
                resolved.session.user, "is_active", False))
        except DetachedInstanceError:
            user_active = False

        if user_active:
            AuthSessionMiddleware._active_sessions += 1
            ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

        response: Response | None = None
        try:
            response = await call_next(request)
            return response
        finally:
            # Always decrement the active sessions counter if we incremented it.
            if user_active:
                AuthSessionMiddleware._active_sessions = max(
                    0, AuthSessionMiddleware._active_sessions - 1)
                ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

            # Only apply session cookies if a response was produced by the
            # downstream application. If an exception occurred before a
            # response was created we avoid raising another error here.
            if response is not None:
                try:
                    self._apply_session(response, resolved)
                except Exception:
                    logging.getLogger(__name__).exception(
                        "Failed to apply session cookies to response"
                    )
            else:
                logging.getLogger(__name__).debug(
                    "AuthSessionMiddleware: no response produced by downstream app (response is None)"
                )

    def _resolve_session(self, request: Request) -> _ResolutionResult:
        settings = self._settings_provider()
        jwt_settings = settings.jwt_settings()
        strategy = build_session_strategy(settings.session_settings())

        tokens = extract_session_tokens(request, strategy)
        session = AuthSession(tokens=tokens)
        request.state.auth_session = session

        if tokens.access_token:
            if self._try_access_token(session, tokens, jwt_settings):
                return _ResolutionResult(session=session, strategy=strategy, jwt_settings=jwt_settings)

        if tokens.refresh_token:
            self._try_refresh_token(
                session, tokens.refresh_token, jwt_settings)

        return _ResolutionResult(session=session, strategy=strategy, jwt_settings=jwt_settings)

    def _try_access_token(
        self,
        session: AuthSession,
        tokens: SessionTokens,
        jwt_settings: JWTSettings,
    ) -> bool:
        try:
            payload = decode_access_token(
                tokens.access_token or "", jwt_settings)
        except TokenExpiredError:
            return False
        except (TokenDecodeError, TokenTypeMismatchError, TokenError):
            session.mark_cleared()
            return False

        user = self._load_user(payload.sub)
        if not user or not user.is_active or _AUTH_SCOPE not in payload.scopes:
            session.mark_cleared()
            return False

        session.user = user
        session.scopes = tuple(payload.scopes)
        session.set_role_slugs(role.name for role in getattr(user, "roles", []) if role)
        return True

    def _try_refresh_token(
        self,
        session: AuthSession,
        refresh_token: str,
        jwt_settings: JWTSettings,
    ) -> None:
        try:
            payload = decode_refresh_token(refresh_token, jwt_settings)
        except (TokenExpiredError, TokenDecodeError, TokenTypeMismatchError, TokenError):
            session.mark_cleared()
            return

        user = self._load_user(payload.sub)
        if not user or not user.is_active or not self._is_refresh_scope_allowed(payload.scopes):
            session.mark_cleared()
            return

        session.user = user
        session.scopes = tuple(payload.scopes)
        session.set_role_slugs(role.name for role in getattr(user, "roles", []) if role)

        access_token = create_access_token(
            str(user.id),
            jwt_settings,
            scopes=payload.scopes,
        )
        new_refresh = create_refresh_token(
            str(user.id),
            jwt_settings,
            scopes=payload.scopes,
        )
        session.issue_tokens(access_token=access_token,
                             refresh_token=new_refresh)

    def _is_refresh_scope_allowed(self, scopes: Iterable[str]) -> bool:
        candidate_scopes = set(scopes)
        return any(scope in candidate_scopes for scope in self._refresh_scopes)

    def _load_user(self, subject: str) -> Optional[User]:
        try:
            user_id = int(subject)
        except ValueError:
            return None

        with self._unit_of_work_factory() as uow:
            if not uow.users:
                return None
            try:
                user = uow.users.get(user_id, with_roles=True)
            except EntityNotFoundError:
                return None
            return user

    def _apply_session(self, response: Response, resolved: _ResolutionResult) -> None:
        session = resolved.session
        if session.clear_cookies:
            clear_session_cookies(response, resolved.strategy)
            return

        if session.issued_access_token:
            refresh_token = session.issued_refresh_token or session.tokens.refresh_token
            set_session_cookies(
                response,
                access_token=session.issued_access_token,
                refresh_token=refresh_token,
                strategy=resolved.strategy,
                jwt_settings=resolved.jwt_settings,
            )

58
middleware/metrics.py
Normal file

@@ -0,0 +1,58 @@
from __future__ import annotations

import time
from typing import Callable

from fastapi import Request, Response
from starlette.middleware.base import BaseHTTPMiddleware

from monitoring.metrics import observe_request
from services.metrics import get_metrics_service


class MetricsMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: Callable[[Request], Response]) -> Response:
        start_time = time.time()
        response = await call_next(request)
        process_time = time.time() - start_time

        observe_request(
            method=request.method,
            endpoint=request.url.path,
            status=response.status_code,
            seconds=process_time,
        )

        # Store in database asynchronously
        background_tasks = getattr(request.state, "background_tasks", None)
        if background_tasks:
            background_tasks.add_task(
                store_request_metric,
                method=request.method,
                endpoint=request.url.path,
                status_code=response.status_code,
                duration_seconds=process_time,
            )

        return response


async def store_request_metric(
    method: str, endpoint: str, status_code: int, duration_seconds: float
) -> None:
    """Store request metric in database."""
    try:
        service = get_metrics_service()
        service.store_metric(
            metric_name="http_request",
            value=duration_seconds,
            labels={"method": method, "endpoint": endpoint,
                    "status": status_code},
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
    except Exception:
        # Swallow errors so metrics storage never fails the request
        pass
@@ -10,10 +10,14 @@ async def validate_json(
 ) -> Response:
     # Only validate JSON for requests with a body
     if request.method in ("POST", "PUT", "PATCH"):
-        try:
-            # attempt to parse json body
-            await request.json()
-        except Exception:
-            raise HTTPException(status_code=400, detail="Invalid JSON payload")
+        # Only attempt JSON parsing when the client indicates a JSON content type.
+        content_type = (request.headers.get("content-type") or "").lower()
+        if "json" in content_type:
+            try:
+                # attempt to parse json body
+                await request.json()
+            except Exception:
+                raise HTTPException(
+                    status_code=400, detail="Invalid JSON payload")
     response = await call_next(request)
     return response
@@ -1,10 +1,72 @@
-"""
-models package initializer. Import key models so they're registered
-with the shared Base.metadata when the package is imported by tests.
-"""
-
-from . import application_setting  # noqa: F401
-from . import currency  # noqa: F401
-from . import role  # noqa: F401
-from . import user  # noqa: F401
-from . import theme_setting  # noqa: F401
+"""Database models and shared metadata for the CalMiner domain."""
+
+from .financial_input import FinancialInput
+from .metadata import (
+    COST_BUCKET_METADATA,
+    RESOURCE_METADATA,
+    STOCHASTIC_VARIABLE_METADATA,
+    ResourceDescriptor,
+    StochasticVariableDescriptor,
+)
+from .performance_metric import PerformanceMetric
+from .pricing_settings import (
+    PricingImpuritySettings,
+    PricingMetalSettings,
+    PricingSettings,
+)
+from .enums import (
+    CostBucket,
+    DistributionType,
+    FinancialCategory,
+    MiningOperationType,
+    ResourceType,
+    ScenarioStatus,
+    StochasticVariable,
+)
+from .project import Project
+from .scenario import Scenario
+from .simulation_parameter import SimulationParameter
+from .user import Role, User, UserRole, password_context
+from .navigation import NavigationGroup, NavigationLink
+
+from .profitability_snapshot import ProjectProfitability, ScenarioProfitability
+from .capex_snapshot import ProjectCapexSnapshot, ScenarioCapexSnapshot
+from .opex_snapshot import (
+    ProjectOpexSnapshot,
+    ScenarioOpexSnapshot,
+)
+
+__all__ = [
+    "FinancialCategory",
+    "FinancialInput",
+    "MiningOperationType",
+    "Project",
+    "ProjectProfitability",
+    "ProjectCapexSnapshot",
+    "ProjectOpexSnapshot",
+    "PricingSettings",
+    "PricingMetalSettings",
+    "PricingImpuritySettings",
+    "Scenario",
+    "ScenarioProfitability",
+    "ScenarioCapexSnapshot",
+    "ScenarioOpexSnapshot",
+    "ScenarioStatus",
+    "DistributionType",
+    "SimulationParameter",
+    "ResourceType",
+    "CostBucket",
+    "StochasticVariable",
+    "RESOURCE_METADATA",
+    "COST_BUCKET_METADATA",
+    "STOCHASTIC_VARIABLE_METADATA",
+    "ResourceDescriptor",
+    "StochasticVariableDescriptor",
+    "User",
+    "Role",
+    "UserRole",
+    "password_context",
+    "PerformanceMetric",
+    "NavigationGroup",
+    "NavigationLink",
+]
@@ -1,38 +0,0 @@
from datetime import datetime
from typing import Optional

from sqlalchemy import Boolean, DateTime, String, Text
from sqlalchemy.orm import Mapped, mapped_column
from sqlalchemy.sql import func

from config.database import Base


class ApplicationSetting(Base):
    __tablename__ = "application_setting"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    key: Mapped[str] = mapped_column(String(128), unique=True, nullable=False)
    value: Mapped[str] = mapped_column(Text, nullable=False)
    value_type: Mapped[str] = mapped_column(
        String(32), nullable=False, default="string"
    )
    category: Mapped[str] = mapped_column(
        String(32), nullable=False, default="general"
    )
    description: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    is_editable: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        server_default=func.now(),
        onupdate=func.now(),
        nullable=False,
    )

    def __repr__(self) -> str:
        return f"<ApplicationSetting key={self.key} category={self.category}>"
@@ -1,71 +0,0 @@
from sqlalchemy import event, text
from sqlalchemy import Column, Integer, Float, String, ForeignKey
from sqlalchemy.orm import relationship
from config.database import Base


class Capex(Base):
    __tablename__ = "capex"

    id = Column(Integer, primary_key=True, index=True)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    amount = Column(Float, nullable=False)
    description = Column(String, nullable=True)
    currency_id = Column(Integer, ForeignKey("currency.id"), nullable=False)

    scenario = relationship("Scenario", back_populates="capex_items")
    currency = relationship("Currency", back_populates="capex_items")

    def __repr__(self):
        return (
            f"<Capex id={self.id} scenario_id={self.scenario_id} "
            f"amount={self.amount} currency_id={self.currency_id}>"
        )

    @property
    def currency_code(self) -> str:
        return self.currency.code if self.currency else None

    @currency_code.setter
    def currency_code(self, value: str) -> None:
        # store pending code so application code or migrations can pick it up
        setattr(
            self, "_currency_code_pending", (value or "USD").strip().upper()
        )


# SQLAlchemy event handlers to ensure currency_id is set before insert/update


def _resolve_currency(mapper, connection, target):
    # If currency_id already set, nothing to do
    if getattr(target, "currency_id", None):
        return
    code = getattr(target, "_currency_code_pending", None) or "USD"
    # Try to find existing currency id
    row = connection.execute(
        text("SELECT id FROM currency WHERE code = :code"), {"code": code}
    ).fetchone()
    if row:
        cid = row[0]
    else:
        # Insert new currency and attempt to get lastrowid
        res = connection.execute(
            text(
                "INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"
            ),
            {"code": code, "name": code, "symbol": None, "active": True},
        )
        try:
            cid = res.lastrowid
        except Exception:
            # fallback: select after insert
            cid = connection.execute(
                text("SELECT id FROM currency WHERE code = :code"),
                {"code": code},
            ).scalar()
    target.currency_id = cid


event.listen(Capex, "before_insert", _resolve_currency)
event.listen(Capex, "before_update", _resolve_currency)

111
models/capex_snapshot.py
Normal file

@@ -0,0 +1,111 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectCapexSnapshot(Base):
    """Snapshot of aggregated capex metrics at the project level."""

    __tablename__ = "project_capex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    total_capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    contingency_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    contingency_amount: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    total_with_contingency: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="capex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectCapexSnapshot(id={id!r}, project_id={project_id!r}, total_capex={total_capex!r})".format(
                id=self.id, project_id=self.project_id, total_capex=self.total_capex
            )
        )


class ScenarioCapexSnapshot(Base):
    """Snapshot of capex metrics for an individual scenario."""

    __tablename__ = "scenario_capex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    total_capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    contingency_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    contingency_amount: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    total_with_contingency: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="capex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioCapexSnapshot(id={id!r}, scenario_id={scenario_id!r}, total_capex={total_capex!r})".format(
                id=self.id, scenario_id=self.scenario_id, total_capex=self.total_capex
            )
        )
@@ -1,22 +0,0 @@
from sqlalchemy import Column, Integer, Float, String, ForeignKey
from sqlalchemy.orm import relationship
from config.database import Base


class Consumption(Base):
    __tablename__ = "consumption"

    id = Column(Integer, primary_key=True, index=True)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    amount = Column(Float, nullable=False)
    description = Column(String, nullable=True)
    unit_name = Column(String(64), nullable=True)
    unit_symbol = Column(String(16), nullable=True)

    scenario = relationship("Scenario", back_populates="consumption_items")

    def __repr__(self):
        return (
            f"<Consumption id={self.id} scenario_id={self.scenario_id} "
            f"amount={self.amount} unit={self.unit_symbol or self.unit_name}>"
        )
@@ -1,24 +0,0 @@
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import relationship
from config.database import Base


class Currency(Base):
    __tablename__ = "currency"

    id = Column(Integer, primary_key=True, index=True)
    code = Column(String(3), nullable=False, unique=True, index=True)
    name = Column(String(128), nullable=False)
    symbol = Column(String(8), nullable=True)
    is_active = Column(Boolean, nullable=False, default=True)

    # reverse relationships (optional)
    capex_items = relationship(
        "Capex", back_populates="currency", lazy="select"
    )
    opex_items = relationship("Opex", back_populates="currency", lazy="select")

    def __repr__(self):
        return (
            f"<Currency code={self.code} name={self.name} symbol={self.symbol}>"
        )
@@ -1,14 +0,0 @@
from sqlalchemy import Column, Integer, String, JSON
from config.database import Base


class Distribution(Base):
    __tablename__ = "distribution"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, nullable=False)
    distribution_type = Column(String, nullable=False)
    parameters = Column(JSON, nullable=True)

    def __repr__(self):
        return f"<Distribution id={self.id} name={self.name} type={self.distribution_type}>"

96
models/enums.py
Normal file

@@ -0,0 +1,96 @@
from __future__ import annotations

from enum import Enum
from typing import Type

from sqlalchemy import Enum as SQLEnum


def sql_enum(enum_cls: Type[Enum], *, name: str) -> SQLEnum:
    """Build a SQLAlchemy Enum that maps using the enum member values."""

    return SQLEnum(
        enum_cls,
        name=name,
        create_type=False,
        validate_strings=True,
        values_callable=lambda enum_cls: [member.value for member in enum_cls],
    )


class MiningOperationType(str, Enum):
    """Supported mining operation categories."""

    OPEN_PIT = "open_pit"
    UNDERGROUND = "underground"
    IN_SITU_LEACH = "in_situ_leach"
    PLACER = "placer"
    QUARRY = "quarry"
    MOUNTAINTOP_REMOVAL = "mountaintop_removal"
    OTHER = "other"


class ScenarioStatus(str, Enum):
    """Lifecycle states for project scenarios."""

    DRAFT = "draft"
    ACTIVE = "active"
    ARCHIVED = "archived"


class FinancialCategory(str, Enum):
    """Enumeration of cost and revenue classifications."""

    CAPITAL_EXPENDITURE = "capex"
    OPERATING_EXPENDITURE = "opex"
    REVENUE = "revenue"
    CONTINGENCY = "contingency"
    OTHER = "other"


class DistributionType(str, Enum):
    """Supported stochastic distribution families for simulations."""

    NORMAL = "normal"
    TRIANGULAR = "triangular"
    UNIFORM = "uniform"
    LOGNORMAL = "lognormal"
    CUSTOM = "custom"


class ResourceType(str, Enum):
    """Primary consumables and resources used in mining operations."""

    DIESEL = "diesel"
    ELECTRICITY = "electricity"
    WATER = "water"
    EXPLOSIVES = "explosives"
    REAGENTS = "reagents"
    LABOR = "labor"
    EQUIPMENT_HOURS = "equipment_hours"
    TAILINGS_CAPACITY = "tailings_capacity"


class CostBucket(str, Enum):
    """Granular cost buckets aligned with project accounting."""

    CAPITAL_INITIAL = "capital_initial"
    CAPITAL_SUSTAINING = "capital_sustaining"
    OPERATING_FIXED = "operating_fixed"
    OPERATING_VARIABLE = "operating_variable"
    MAINTENANCE = "maintenance"
    RECLAMATION = "reclamation"
    ROYALTIES = "royalties"
    GENERAL_ADMIN = "general_admin"


class StochasticVariable(str, Enum):
    """Domain variables that typically require probabilistic modelling."""

    ORE_GRADE = "ore_grade"
    RECOVERY_RATE = "recovery_rate"
    METAL_PRICE = "metal_price"
    OPERATING_COST = "operating_cost"
    CAPITAL_COST = "capital_cost"
    DISCOUNT_RATE = "discount_rate"
    THROUGHPUT = "throughput"
@@ -1,17 +0,0 @@
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from config.database import Base


class Equipment(Base):
    __tablename__ = "equipment"

    id = Column(Integer, primary_key=True, index=True)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    name = Column(String, nullable=False)
    description = Column(String, nullable=True)

    scenario = relationship("Scenario", back_populates="equipment_items")

    def __repr__(self):
        return f"<Equipment id={self.id} scenario_id={self.scenario_id} name={self.name}>"

62
models/financial_input.py
Normal file

@@ -0,0 +1,62 @@
from __future__ import annotations

from datetime import date, datetime
from typing import TYPE_CHECKING

from sqlalchemy import (
    Date,
    DateTime,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func

from config.database import Base
from .enums import CostBucket, FinancialCategory, sql_enum
from services.currency import normalise_currency

if TYPE_CHECKING:  # pragma: no cover
    from .scenario import Scenario


class FinancialInput(Base):
    """Line-item financial assumption attached to a scenario."""

    __tablename__ = "financial_inputs"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    category: Mapped[FinancialCategory] = mapped_column(
        sql_enum(FinancialCategory, name="financialcategory"), nullable=False
    )
    cost_bucket: Mapped[CostBucket | None] = mapped_column(
        sql_enum(CostBucket, name="costbucket"), nullable=True
    )
    amount: Mapped[float] = mapped_column(Numeric(18, 2), nullable=False)
    currency: Mapped[str | None] = mapped_column(String(3), nullable=True)
    effective_date: Mapped[date | None] = mapped_column(Date, nullable=True)
    notes: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped["Scenario"] = relationship(
        "Scenario", back_populates="financial_inputs")

    @validates("currency")
    def _validate_currency(self, key: str, value: str | None) -> str | None:
        return normalise_currency(value)

    def __repr__(self) -> str:  # pragma: no cover
        return f"FinancialInput(id={self.id!r}, scenario_id={self.scenario_id!r}, name={self.name!r})"

31
models/import_export_log.py
Normal file

@@ -0,0 +1,31 @@
from __future__ import annotations

from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.sql import func

from config.database import Base


class ImportExportLog(Base):
    """Audit log for import and export operations."""

    __tablename__ = "import_export_logs"

    id = Column(Integer, primary_key=True, index=True)
    action = Column(String(32), nullable=False)  # preview, commit, export
    dataset = Column(String(32), nullable=False)  # projects, scenarios, etc.
    status = Column(String(16), nullable=False)  # success, failure
    filename = Column(String(255), nullable=True)
    row_count = Column(Integer, nullable=True)
    detail = Column(Text, nullable=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=True)
    created_at = Column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    def __repr__(self) -> str:  # pragma: no cover
        return (
            f"ImportExportLog(id={self.id}, action={self.action}, "
            f"dataset={self.dataset}, status={self.status})"
        )
@@ -1,23 +0,0 @@
from sqlalchemy import Column, Date, Float, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from config.database import Base


class Maintenance(Base):
    __tablename__ = "maintenance"

    id = Column(Integer, primary_key=True, index=True)
    equipment_id = Column(Integer, ForeignKey("equipment.id"), nullable=False)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    maintenance_date = Column(Date, nullable=False)
    description = Column(String, nullable=True)
    cost = Column(Float, nullable=False)

    equipment = relationship("Equipment")
    scenario = relationship("Scenario", back_populates="maintenance_items")

    def __repr__(self) -> str:
        return (
            f"<Maintenance id={self.id} equipment_id={self.equipment_id} "
            f"scenario_id={self.scenario_id} date={self.maintenance_date} cost={self.cost}>"
        )
108
models/metadata.py
Normal file
@@ -0,0 +1,108 @@
from __future__ import annotations

from dataclasses import dataclass

from .enums import ResourceType, CostBucket, StochasticVariable


@dataclass(frozen=True)
class ResourceDescriptor:
    """Describes canonical metadata for a resource type."""

    unit: str
    description: str


RESOURCE_METADATA: dict[ResourceType, ResourceDescriptor] = {
    ResourceType.DIESEL: ResourceDescriptor(unit="L", description="Diesel fuel consumption"),
    ResourceType.ELECTRICITY: ResourceDescriptor(unit="kWh", description="Electrical power usage"),
    ResourceType.WATER: ResourceDescriptor(unit="m3", description="Process and dust suppression water"),
    ResourceType.EXPLOSIVES: ResourceDescriptor(unit="kg", description="Blasting agent consumption"),
    ResourceType.REAGENTS: ResourceDescriptor(unit="kg", description="Processing reagents"),
    ResourceType.LABOR: ResourceDescriptor(unit="hours", description="Direct labor hours"),
    ResourceType.EQUIPMENT_HOURS: ResourceDescriptor(unit="hours", description="Mobile equipment operating hours"),
    ResourceType.TAILINGS_CAPACITY: ResourceDescriptor(unit="m3", description="Tailings storage usage"),
}


@dataclass(frozen=True)
class CostBucketDescriptor:
    """Describes reporting label and guidance for a cost bucket."""

    label: str
    description: str


COST_BUCKET_METADATA: dict[CostBucket, CostBucketDescriptor] = {
    CostBucket.CAPITAL_INITIAL: CostBucketDescriptor(
        label="Initial Capital",
        description="Pre-production capital required to construct the mine",
    ),
    CostBucket.CAPITAL_SUSTAINING: CostBucketDescriptor(
        label="Sustaining Capital",
        description="Ongoing capital investments to maintain operations",
    ),
    CostBucket.OPERATING_FIXED: CostBucketDescriptor(
        label="Fixed Operating",
        description="Fixed operating costs independent of production rate",
    ),
    CostBucket.OPERATING_VARIABLE: CostBucketDescriptor(
        label="Variable Operating",
        description="Costs that scale with throughput or production",
    ),
    CostBucket.MAINTENANCE: CostBucketDescriptor(
        label="Maintenance",
        description="Maintenance and repair expenditures",
    ),
    CostBucket.RECLAMATION: CostBucketDescriptor(
        label="Reclamation",
        description="Mine closure and reclamation liabilities",
    ),
    CostBucket.ROYALTIES: CostBucketDescriptor(
        label="Royalties",
        description="Royalty and streaming obligations",
    ),
    CostBucket.GENERAL_ADMIN: CostBucketDescriptor(
        label="G&A",
        description="Corporate and site general and administrative costs",
    ),
}


@dataclass(frozen=True)
class StochasticVariableDescriptor:
    """Metadata describing how a stochastic variable is typically modelled."""

    unit: str
    description: str


STOCHASTIC_VARIABLE_METADATA: dict[StochasticVariable, StochasticVariableDescriptor] = {
    StochasticVariable.ORE_GRADE: StochasticVariableDescriptor(
        unit="g/t",
        description="Head grade variability across the ore body",
    ),
    StochasticVariable.RECOVERY_RATE: StochasticVariableDescriptor(
        unit="%",
        description="Metallurgical recovery uncertainty",
    ),
    StochasticVariable.METAL_PRICE: StochasticVariableDescriptor(
        unit="$/unit",
        description="Commodity price fluctuations",
    ),
    StochasticVariable.OPERATING_COST: StochasticVariableDescriptor(
        unit="$/t",
        description="Operating cost per tonne volatility",
    ),
    StochasticVariable.CAPITAL_COST: StochasticVariableDescriptor(
        unit="$",
        description="Capital cost overrun/underrun potential",
    ),
    StochasticVariable.DISCOUNT_RATE: StochasticVariableDescriptor(
        unit="%",
        description="Discount rate sensitivity",
    ),
    StochasticVariable.THROUGHPUT: StochasticVariableDescriptor(
        unit="t/d",
        description="Plant throughput variability",
    ),
}
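For context (not part of the diff): these registries are plain dictionaries keyed by enum member, so consumers can resolve display units and descriptions directly. A minimal lookup sketch, assuming the package is importable as `models`:

    # Hypothetical lookup sketch; not part of the committed diff.
    from models.metadata import RESOURCE_METADATA
    from models.enums import ResourceType

    descriptor = RESOURCE_METADATA[ResourceType.DIESEL]
    print(f"{descriptor.description} ({descriptor.unit})")  # Diesel fuel consumption (L)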
125
models/navigation.py
Normal file
@@ -0,0 +1,125 @@
from __future__ import annotations

from datetime import datetime
from typing import List, Optional

from sqlalchemy import (
    Boolean,
    CheckConstraint,
    DateTime,
    ForeignKey,
    Index,
    Integer,
    String,
    UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
from sqlalchemy.ext.mutable import MutableList
from sqlalchemy import JSON

from config.database import Base


class NavigationGroup(Base):
    __tablename__ = "navigation_groups"
    __table_args__ = (
        UniqueConstraint("slug", name="uq_navigation_groups_slug"),
        Index("ix_navigation_groups_sort_order", "sort_order"),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    slug: Mapped[str] = mapped_column(String(64), nullable=False)
    label: Mapped[str] = mapped_column(String(128), nullable=False)
    sort_order: Mapped[int] = mapped_column(
        Integer, nullable=False, default=100)
    icon: Mapped[Optional[str]] = mapped_column(String(64))
    tooltip: Mapped[Optional[str]] = mapped_column(String(255))
    is_enabled: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    links: Mapped[List["NavigationLink"]] = relationship(
        "NavigationLink",
        back_populates="group",
        cascade="all, delete-orphan",
        order_by="NavigationLink.sort_order",
    )

    def __repr__(self) -> str:  # pragma: no cover
        return f"NavigationGroup(id={self.id!r}, slug={self.slug!r})"


class NavigationLink(Base):
    __tablename__ = "navigation_links"
    __table_args__ = (
        UniqueConstraint("group_id", "slug",
                         name="uq_navigation_links_group_slug"),
        Index("ix_navigation_links_group_sort", "group_id", "sort_order"),
        Index("ix_navigation_links_parent_sort",
              "parent_link_id", "sort_order"),
        CheckConstraint(
            "(route_name IS NOT NULL) OR (href_override IS NOT NULL)",
            name="ck_navigation_links_route_or_href",
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    group_id: Mapped[int] = mapped_column(
        ForeignKey("navigation_groups.id", ondelete="CASCADE"), nullable=False
    )
    parent_link_id: Mapped[Optional[int]] = mapped_column(
        ForeignKey("navigation_links.id", ondelete="CASCADE")
    )
    slug: Mapped[str] = mapped_column(String(64), nullable=False)
    label: Mapped[str] = mapped_column(String(128), nullable=False)
    route_name: Mapped[Optional[str]] = mapped_column(String(128))
    href_override: Mapped[Optional[str]] = mapped_column(String(512))
    match_prefix: Mapped[Optional[str]] = mapped_column(String(512))
    sort_order: Mapped[int] = mapped_column(
        Integer, nullable=False, default=100)
    icon: Mapped[Optional[str]] = mapped_column(String(64))
    tooltip: Mapped[Optional[str]] = mapped_column(String(255))
    required_roles: Mapped[list[str]] = mapped_column(
        MutableList.as_mutable(JSON), nullable=False, default=list
    )
    is_enabled: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    is_external: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    group: Mapped[NavigationGroup] = relationship(
        NavigationGroup,
        back_populates="links",
    )
    parent: Mapped[Optional["NavigationLink"]] = relationship(
        "NavigationLink",
        remote_side="NavigationLink.id",
        back_populates="children",
    )
    children: Mapped[List["NavigationLink"]] = relationship(
        "NavigationLink",
        back_populates="parent",
        cascade="all, delete-orphan",
        order_by="NavigationLink.sort_order",
    )

    def is_visible_for_roles(self, roles: list[str]) -> bool:
        if not self.required_roles:
            return True
        role_set = set(roles)
        return any(role in role_set for role in self.required_roles)

    def __repr__(self) -> str:  # pragma: no cover
        return f"NavigationLink(id={self.id!r}, slug={self.slug!r})"
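For context (not part of the diff): `is_visible_for_roles` treats an empty `required_roles` list as publicly visible, so a menu renderer only needs one filter pass. A sketch, assuming `group` is a loaded NavigationGroup and `user_roles` is a list of role slugs:

    # Hypothetical filtering sketch; not part of the committed diff.
    visible_links = [
        link
        for link in group.links
        if link.is_enabled and link.is_visible_for_roles(user_roles)
    ]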
@@ -1,63 +0,0 @@
from sqlalchemy import event, text
from sqlalchemy import Column, Integer, Float, String, ForeignKey
from sqlalchemy.orm import relationship
from config.database import Base


class Opex(Base):
    __tablename__ = "opex"

    id = Column(Integer, primary_key=True, index=True)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    amount = Column(Float, nullable=False)
    description = Column(String, nullable=True)
    currency_id = Column(Integer, ForeignKey("currency.id"), nullable=False)

    scenario = relationship("Scenario", back_populates="opex_items")
    currency = relationship("Currency", back_populates="opex_items")

    def __repr__(self):
        return (
            f"<Opex id={self.id} scenario_id={self.scenario_id} "
            f"amount={self.amount} currency_id={self.currency_id}>"
        )

    @property
    def currency_code(self) -> str:
        return self.currency.code if self.currency else None

    @currency_code.setter
    def currency_code(self, value: str) -> None:
        setattr(
            self, "_currency_code_pending", (value or "USD").strip().upper()
        )


def _resolve_currency_opex(mapper, connection, target):
    if getattr(target, "currency_id", None):
        return
    code = getattr(target, "_currency_code_pending", None) or "USD"
    row = connection.execute(
        text("SELECT id FROM currency WHERE code = :code"), {"code": code}
    ).fetchone()
    if row:
        cid = row[0]
    else:
        res = connection.execute(
            text(
                "INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"
            ),
            {"code": code, "name": code, "symbol": None, "active": True},
        )
        try:
            cid = res.lastrowid
        except Exception:
            cid = connection.execute(
                text("SELECT id FROM currency WHERE code = :code"),
                {"code": code},
            ).scalar()
    target.currency_id = cid


event.listen(Opex, "before_insert", _resolve_currency_opex)
event.listen(Opex, "before_update", _resolve_currency_opex)
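For context (not part of the diff): before its removal, this listener let callers assign a bare currency code and have it resolved to (or lazily created as) a `currency` row at flush time. A sketch of the removed behaviour, assuming a SQLAlchemy Session `db`:

    # Hypothetical sketch of the removed behaviour; not part of the diff.
    item = Opex(scenario_id=1, amount=125_000.0)
    item.currency_code = "eur"   # stored as pending "EUR"
    db.add(item)
    db.commit()                  # before_insert resolves or creates the currency row
    print(item.currency_id)      # now populated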
123
models/opex_snapshot.py
Normal file
@@ -0,0 +1,123 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, Boolean, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectOpexSnapshot(Base):
    """Snapshot of recurring opex metrics at the project level."""

    __tablename__ = "project_opex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    overall_annual: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    escalated_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    annual_average: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    evaluation_horizon_years: Mapped[int | None] = mapped_column(
        Integer, nullable=True)
    escalation_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    apply_escalation: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="opex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectOpexSnapshot(id={id!r}, project_id={project_id!r}, overall_annual={overall_annual!r})".format(
                id=self.id,
                project_id=self.project_id,
                overall_annual=self.overall_annual,
            )
        )


class ScenarioOpexSnapshot(Base):
    """Snapshot of opex metrics for an individual scenario."""

    __tablename__ = "scenario_opex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    overall_annual: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    escalated_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    annual_average: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    evaluation_horizon_years: Mapped[int | None] = mapped_column(
        Integer, nullable=True)
    escalation_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    apply_escalation: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="opex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioOpexSnapshot(id={id!r}, scenario_id={scenario_id!r}, overall_annual={overall_annual!r})".format(
                id=self.id,
                scenario_id=self.scenario_id,
                overall_annual=self.overall_annual,
            )
        )
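For context (not part of the diff): a sketch of persisting a snapshot after a calculation run, with the full calculation detail kept in the JSON `payload`. The Session `db`, `project`, and `result` dict are assumptions:

    # Hypothetical persistence sketch; not part of the committed diff.
    snapshot = ProjectOpexSnapshot(
        project_id=project.id,
        calculation_source="opex_calculator",
        currency_code="USD",
        overall_annual=result["overall_annual"],
        evaluation_horizon_years=10,
        payload=result,  # full calculation detail kept as JSON
    )
    db.add(snapshot)
    db.commit()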
@@ -1,29 +0,0 @@
from typing import Any, Dict, Optional

from sqlalchemy import ForeignKey, JSON
from sqlalchemy.orm import Mapped, mapped_column, relationship
from config.database import Base


class Parameter(Base):
    __tablename__ = "parameter"

    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenario.id"), nullable=False
    )
    name: Mapped[str] = mapped_column(nullable=False)
    value: Mapped[float] = mapped_column(nullable=False)
    distribution_id: Mapped[Optional[int]] = mapped_column(
        ForeignKey("distribution.id"), nullable=True
    )
    distribution_type: Mapped[Optional[str]] = mapped_column(nullable=True)
    distribution_parameters: Mapped[Optional[Dict[str, Any]]] = mapped_column(
        JSON, nullable=True
    )

    scenario = relationship("Scenario", back_populates="parameters")
    distribution = relationship("Distribution")

    def __repr__(self):
        return f"<Parameter id={self.id} name={self.name} value={self.value}>"
24
models/performance_metric.py
Normal file
@@ -0,0 +1,24 @@
from __future__ import annotations

from datetime import datetime

from sqlalchemy import Column, DateTime, Float, Integer, String

from config.database import Base


class PerformanceMetric(Base):
    __tablename__ = "performance_metrics"

    id = Column(Integer, primary_key=True, index=True)
    timestamp = Column(DateTime, default=datetime.utcnow, index=True)
    metric_name = Column(String, index=True)
    value = Column(Float)
    labels = Column(String)  # JSON string of labels
    endpoint = Column(String, index=True, nullable=True)
    method = Column(String, nullable=True)
    status_code = Column(Integer, nullable=True)
    duration_seconds = Column(Float, nullable=True)

    def __repr__(self) -> str:
        return f"<PerformanceMetric(id={self.id}, name={self.metric_name}, value={self.value})>"
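For context (not part of the diff): since `labels` is a plain string column rather than a JSON column, callers serialise labels themselves. A sketch of how request-timing middleware might record a row, assuming a SQLAlchemy Session `db`:

    # Hypothetical instrumentation sketch; not part of the committed diff.
    import json

    metric = PerformanceMetric(
        metric_name="http_request_duration",
        value=0.042,
        labels=json.dumps({"route": "/api/projects"}),  # serialised by the caller
        endpoint="/api/projects",
        method="GET",
        status_code=200,
        duration_seconds=0.042,
    )
    db.add(metric)
    db.commit()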
176
models/pricing_settings.py
Normal file
@@ -0,0 +1,176 @@
"""Database models for persisted pricing configuration settings."""

from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import (
    JSON,
    DateTime,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
    UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func

from config.database import Base
from services.currency import normalise_currency

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project


class PricingSettings(Base):
    """Persisted pricing defaults applied to scenario evaluations."""

    __tablename__ = "pricing_settings"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(128), nullable=False, unique=True)
    slug: Mapped[str] = mapped_column(String(64), nullable=False, unique=True)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    default_currency: Mapped[str | None] = mapped_column(
        String(3), nullable=True)
    default_payable_pct: Mapped[float] = mapped_column(
        Numeric(5, 2), nullable=False, default=100.0
    )
    moisture_threshold_pct: Mapped[float] = mapped_column(
        Numeric(5, 2), nullable=False, default=8.0
    )
    moisture_penalty_per_pct: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0
    )
    metadata_payload: Mapped[dict | None] = mapped_column(
        "metadata", JSON, nullable=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    metal_overrides: Mapped[list["PricingMetalSettings"]] = relationship(
        "PricingMetalSettings",
        back_populates="pricing_settings",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    impurity_overrides: Mapped[list["PricingImpuritySettings"]] = relationship(
        "PricingImpuritySettings",
        back_populates="pricing_settings",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    projects: Mapped[list["Project"]] = relationship(
        "Project",
        back_populates="pricing_settings",
        cascade="all",
    )

    @validates("slug")
    def _normalise_slug(self, key: str, value: str) -> str:
        return value.strip().lower()

    @validates("default_currency")
    def _validate_currency(self, key: str, value: str | None) -> str | None:
        return normalise_currency(value)

    def __repr__(self) -> str:  # pragma: no cover
        return f"PricingSettings(id={self.id!r}, slug={self.slug!r})"


class PricingMetalSettings(Base):
    """Contract-specific overrides for a particular metal."""

    __tablename__ = "pricing_metal_settings"
    __table_args__ = (
        UniqueConstraint(
            "pricing_settings_id", "metal_code", name="uq_pricing_metal_settings_code"
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    pricing_settings_id: Mapped[int] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
    )
    metal_code: Mapped[str] = mapped_column(String(32), nullable=False)
    payable_pct: Mapped[float | None] = mapped_column(
        Numeric(5, 2), nullable=True)
    moisture_threshold_pct: Mapped[float | None] = mapped_column(
        Numeric(5, 2), nullable=True)
    moisture_penalty_per_pct: Mapped[float | None] = mapped_column(
        Numeric(14, 4), nullable=True
    )
    data: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    pricing_settings: Mapped["PricingSettings"] = relationship(
        "PricingSettings", back_populates="metal_overrides"
    )

    @validates("metal_code")
    def _normalise_metal_code(self, key: str, value: str) -> str:
        return value.strip().lower()

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "PricingMetalSettings("  # noqa: ISC001
            f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
            f"metal_code={self.metal_code!r})"
        )


class PricingImpuritySettings(Base):
    """Impurity penalty thresholds associated with pricing settings."""

    __tablename__ = "pricing_impurity_settings"
    __table_args__ = (
        UniqueConstraint(
            "pricing_settings_id",
            "impurity_code",
            name="uq_pricing_impurity_settings_code",
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    pricing_settings_id: Mapped[int] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
    )
    impurity_code: Mapped[str] = mapped_column(String(32), nullable=False)
    threshold_ppm: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0)
    penalty_per_ppm: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0)
    notes: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    pricing_settings: Mapped["PricingSettings"] = relationship(
        "PricingSettings", back_populates="impurity_overrides"
    )

    @validates("impurity_code")
    def _normalise_impurity_code(self, key: str, value: str) -> str:
        return value.strip().upper()

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "PricingImpuritySettings("  # noqa: ISC001
            f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
            f"impurity_code={self.impurity_code!r})"
        )
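For context (not part of the diff): the `@validates` hooks normalise identifiers on assignment, including constructor keywords, so callers need not pre-clean input. Note the asymmetry: slugs and metal codes are lower-cased, impurity codes upper-cased, and currencies go through `services.currency.normalise_currency` (whose exact output is defined elsewhere in the repo). A sketch of the deterministic cases:

    # Hypothetical normalisation sketch; not part of the committed diff.
    settings = PricingSettings(name="Default", slug="  Default-Contract  ")
    metal = PricingMetalSettings(metal_code="  CU ")
    impurity = PricingImpuritySettings(impurity_code="as", threshold_ppm=200.0)

    print(settings.slug)           # "default-contract"
    print(metal.metal_code)        # "cu"
    print(impurity.impurity_code)  # "AS"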
@@ -1,24 +0,0 @@
from sqlalchemy import Column, Integer, Float, String, ForeignKey
from sqlalchemy.orm import relationship
from config.database import Base


class ProductionOutput(Base):
    __tablename__ = "production_output"

    id = Column(Integer, primary_key=True, index=True)
    scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
    amount = Column(Float, nullable=False)
    description = Column(String, nullable=True)
    unit_name = Column(String(64), nullable=True)
    unit_symbol = Column(String(16), nullable=True)

    scenario = relationship(
        "Scenario", back_populates="production_output_items"
    )

    def __repr__(self):
        return (
            f"<ProductionOutput id={self.id} scenario_id={self.scenario_id} "
            f"amount={self.amount} unit={self.unit_symbol or self.unit_name}>"
        )
133
models/profitability_snapshot.py
Normal file
@@ -0,0 +1,133 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectProfitability(Base):
    """Snapshot of aggregated profitability metrics at the project level."""

    __tablename__ = "project_profitability_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    npv: Mapped[float | None] = mapped_column(Numeric(18, 2), nullable=True)
    irr_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    payback_period_years: Mapped[float | None] = mapped_column(
        Numeric(12, 4), nullable=True
    )
    margin_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    revenue_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    opex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    sustaining_capex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    net_cash_flow_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="profitability_snapshots")
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectProfitability(id={id!r}, project_id={project_id!r}, npv={npv!r})".format(
                id=self.id, project_id=self.project_id, npv=self.npv
            )
        )


class ScenarioProfitability(Base):
    """Snapshot of profitability metrics for an individual scenario."""

    __tablename__ = "scenario_profitability_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    npv: Mapped[float | None] = mapped_column(Numeric(18, 2), nullable=True)
    irr_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    payback_period_years: Mapped[float | None] = mapped_column(
        Numeric(12, 4), nullable=True
    )
    margin_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    revenue_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    opex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    sustaining_capex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    net_cash_flow_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="profitability_snapshots")
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioProfitability(id={id!r}, scenario_id={scenario_id!r}, npv={npv!r})".format(
                id=self.id, scenario_id=self.scenario_id, npv=self.npv
            )
        )
104
models/project.py
Normal file
@@ -0,0 +1,104 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING, List

from .enums import MiningOperationType, sql_enum
from .profitability_snapshot import ProjectProfitability
from .capex_snapshot import ProjectCapexSnapshot
from .opex_snapshot import ProjectOpexSnapshot

from sqlalchemy import DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .scenario import Scenario
    from .pricing_settings import PricingSettings


class Project(Base):
    """Top-level mining project grouping multiple scenarios."""

    __tablename__ = "projects"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
    location: Mapped[str | None] = mapped_column(String(255), nullable=True)
    operation_type: Mapped[MiningOperationType] = mapped_column(
        sql_enum(MiningOperationType, name="miningoperationtype"),
        nullable=False,
        default=MiningOperationType.OTHER,
    )
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    pricing_settings_id: Mapped[int | None] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="SET NULL"),
        nullable=True,
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenarios: Mapped[List["Scenario"]] = relationship(
        "Scenario",
        back_populates="project",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    pricing_settings: Mapped["PricingSettings | None"] = relationship(
        "PricingSettings",
        back_populates="projects",
    )
    profitability_snapshots: Mapped[List["ProjectProfitability"]] = relationship(
        "ProjectProfitability",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectProfitability.calculated_at.desc(),
        passive_deletes=True,
    )
    capex_snapshots: Mapped[List["ProjectCapexSnapshot"]] = relationship(
        "ProjectCapexSnapshot",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectCapexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )
    opex_snapshots: Mapped[List["ProjectOpexSnapshot"]] = relationship(
        "ProjectOpexSnapshot",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectOpexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )

    @property
    def latest_profitability(self) -> "ProjectProfitability | None":
        """Return the most recent profitability snapshot, if any."""

        if not self.profitability_snapshots:
            return None
        return self.profitability_snapshots[0]

    @property
    def latest_capex(self) -> "ProjectCapexSnapshot | None":
        """Return the most recent capex snapshot, if any."""

        if not self.capex_snapshots:
            return None
        return self.capex_snapshots[0]

    @property
    def latest_opex(self) -> "ProjectOpexSnapshot | None":
        """Return the most recent opex snapshot, if any."""

        if not self.opex_snapshots:
            return None
        return self.opex_snapshots[0]

    def __repr__(self) -> str:  # pragma: no cover - helpful for debugging
        return f"Project(id={self.id!r}, name={self.name!r})"
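For context (not part of the diff): because each snapshot relationship is ordered by `calculated_at` descending, the `latest_*` properties are simple head-of-list reads. A sketch, assuming a SQLAlchemy Session `db` and an existing project with id 1:

    # Hypothetical read sketch; not part of the committed diff.
    project = db.get(Project, 1)
    latest = project.latest_profitability
    if latest is not None:
        print(f"{project.name}: NPV {latest.npv} {latest.currency_code}")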
Some files were not shown because too many files have changed in this diff.