Compare commits: b1a0153a8d...develop (86 commits)
| SHA1 |
|---|
| 4e60168837 |
| dae3b59af9 |
| 839399363e |
| fa8a065138 |
| cd0c0ab416 |
| 854b1ac713 |
| 25fd13ce69 |
| 0fec805db1 |
| 3746062819 |
| 958c165721 |
| 6e835c83eb |
| 75924fca84 |
| ac9ffddbde |
| 4e5a4c645d |
| e9678b6736 |
| e5e346b26a |
| b0e623d68e |
| 30dbc13fae |
| 31b9a1058a |
| bcd993d57c |
| 1262a4a63f |
| fb6816de00 |
| 4d0e1a9989 |
| ed8e05147c |
| 522b1e4105 |
| 4f00bf0d3c |
| 3551b0356d |
| 521a8abc2d |
| 1feae7ff85 |
| 1240b08740 |
| d9fd82b2e3 |
| 6c1570a254 |
| b1a6df9f90 |
| 6d496a599e |
| 1199813da0 |
| acf6f50bbd |
| ad306bd0aa |
| ed4187970c |
| 0fbe9f543e |
| 80825c2c5d |
| 44a3bfc1bf |
| 1f892ebdbb |
| bcdc9e861e |
| 23523f70f1 |
| 8ef6724960 |
| 6e466a3fd2 |
| 9d4c807475 |
| 9cd555e134 |
| e72e297c61 |
| 101d9309fd |
| 9556f9e1f1 |
| 4488cacdc9 |
| e06a6ae068 |
| 3bdae3c54c |
| d89b09fa80 |
| 2214bbe64f |
| 5d6592d657 |
| 3988171b46 |
| 1520724cab |
| 014d96c105 |
| 55fa1f56c1 |
| edf86a5447 |
| 53eacc352e |
| 2bfa498624 |
| 4cfc5d9ffa |
| ce7f4aa776 |
| e0497f58f0 |
| 60410fd71d |
| f55c77312d |
| 63ec4a6953 |
| b0ff79ae9c |
| 0670d05722 |
| 0694d4ec4b |
| ce9c174b53 |
| f68321cd04 |
| 44ff4d0e62 |
| 4364927965 |
| 795a9f99f4 |
| 032e6d2681 |
| 51c0fcec95 |
| 3051f91ab0 |
| e2465188c2 |
| 43b1e53837 |
| 4b33a5dba3 |
| 5f183faa63 |
| 1a7581cda0 |
25  .env.development  Normal file
@@ -0,0 +1,25 @@
# Development Environment Configuration
ENVIRONMENT=development
DEBUG=true
LOG_LEVEL=DEBUG

# Database Configuration
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer
DATABASE_PASSWORD=calminer_password
DATABASE_NAME=calminer_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=1000
CALMINER_IMPORT_MAX_ROWS=10000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=300

# Admin Seeding (for development)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.local
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=ChangeMe123!
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
25  .env.production  Normal file
@@ -0,0 +1,25 @@
# Production Environment Configuration
ENVIRONMENT=production
DEBUG=false
LOG_LEVEL=WARNING

# Database Configuration (MUST be set externally - no defaults)
DATABASE_HOST=
DATABASE_PORT=5432
DATABASE_USER=
DATABASE_PASSWORD=
DATABASE_NAME=
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=100000
CALMINER_IMPORT_MAX_ROWS=100000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=3600

# Admin Seeding (for production - set strong password)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_VERY_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
25  .env.staging  Normal file
@@ -0,0 +1,25 @@
# Staging Environment Configuration
ENVIRONMENT=staging
DEBUG=false
LOG_LEVEL=INFO

# Database Configuration (override with actual staging values)
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer_staging
DATABASE_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
DATABASE_NAME=calminer_staging_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=50000
CALMINER_IMPORT_MAX_ROWS=50000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=600

# Admin Seeding (for staging)
CALMINER_SEED_ADMIN_EMAIL=admin@staging.calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
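The three environment files above differ only in values, so switching environments is a matter of pointing the loader at a different file. A minimal sketch of how they might be consumed locally follows; the compose invocation is an assumption for illustration, not part of this change set.

```bash
# Load one of the committed env files into the current shell (assumed usage).
# set -a exports every variable sourced below so child processes inherit them.
set -a
. ./.env.development
set +a

# Or hand a file to docker compose explicitly; whether the project is run via
# compose like this is an assumption, not something this diff establishes.
docker compose --env-file .env.staging up -d
```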
3  .gitattributes  vendored  Normal file
@@ -0,0 +1,3 @@
* text=auto

Dockerfile text eol=lf
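With `* text=auto`, Git normalizes text files on check-in, and the `Dockerfile text eol=lf` override pins LF line endings in working trees, which keeps the shell heredocs later in the Dockerfile from breaking on CRLF checkouts. A quick way to confirm the attributes resolve as intended:

```bash
# Ask Git which text/eol attributes apply to a given path.
git check-attr text eol -- Dockerfile
# Expected output once .gitattributes is in place:
#   Dockerfile: text: set
#   Dockerfile: eol: lf
```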
232  .gitea/workflows/ci-build.yml  Normal file
@@ -0,0 +1,232 @@
name: CI - Build

on:
  workflow_call:
  workflow_dispatch:

jobs:
  build:
    outputs:
      allow_push: ${{ steps.meta.outputs.allow_push }}
      ref_name: ${{ steps.meta.outputs.ref_name }}
      event_name: ${{ steps.meta.outputs.event_name }}
      sha: ${{ steps.meta.outputs.sha }}
    runs-on: ubuntu-latest
    env:
      DEFAULT_BRANCH: main
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
      REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
      REGISTRY_CONTAINER_NAME: calminer
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Collect workflow metadata
        id: meta
        shell: bash
        env:
          DEFAULT_BRANCH: ${{ env.DEFAULT_BRANCH }}
        run: |
          git_ref="${GITEA_REF:-${GITHUB_REF:-}}"
          ref_name="${GITEA_REF_NAME:-${GITHUB_REF_NAME:-}}"
          if [ -z "$ref_name" ] && [ -n "$git_ref" ]; then
            ref_name="${git_ref##*/}"
          fi
          event_name="${GITEA_EVENT_NAME:-${GITHUB_EVENT_NAME:-}}"
          sha="${GITEA_SHA:-${GITHUB_SHA:-}}"
          if [ -z "$sha" ]; then
            sha="$(git rev-parse HEAD)"
          fi

          if [ "$ref_name" = "${DEFAULT_BRANCH:-main}" ] && [ "$event_name" != "pull_request" ]; then
            echo "allow_push=true" >> "$GITHUB_OUTPUT"
          else
            echo "allow_push=false" >> "$GITHUB_OUTPUT"
          fi

          echo "ref_name=$ref_name" >> "$GITHUB_OUTPUT"
          echo "event_name=$event_name" >> "$GITHUB_OUTPUT"
          echo "sha=$sha" >> "$GITHUB_OUTPUT"

      - name: Validate registry configuration
        shell: bash
        run: |
          set -euo pipefail
          if [ -z "${REGISTRY_URL}" ]; then
            echo "::error::REGISTRY_URL secret not configured. Configure it with your Gitea container registry host." >&2
            exit 1
          fi
          server_url="${GITEA_SERVER_URL:-${GITHUB_SERVER_URL:-}}"
          server_host="${server_url#http://}"
          server_host="${server_host#https://}"
          server_host="${server_host%%/*}"
          server_host="${server_host%%:*}"
          registry_host="${REGISTRY_URL#http://}"
          registry_host="${registry_host#https://}"
          registry_host="${registry_host%%/*}"
          registry_host="${registry_host%%:*}"
          if [ -n "${server_host}" ] && ! printf '%s' "${registry_host}" | grep -qi "${server_host}"; then
            echo "::warning::REGISTRY_URL (${REGISTRY_URL}) does not match current Gitea host (${server_host}). Ensure this registry endpoint is managed by Gitea." >&2
          fi
          registry_repository="${registry_host}/allucanget/${REGISTRY_CONTAINER_NAME}"
          echo "REGISTRY_HOST=${registry_host}" >> "$GITHUB_ENV"
          echo "REGISTRY_REPOSITORY=${registry_repository}" >> "$GITHUB_ENV"

      - name: Set up QEMU and Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to gitea registry
        if: ${{ steps.meta.outputs.allow_push == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY_HOST }}
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}

      - name: Build image
        id: build-image
        env:
          REGISTRY_REPOSITORY: ${{ env.REGISTRY_REPOSITORY }}
          REGISTRY_CONTAINER_NAME: ${{ env.REGISTRY_CONTAINER_NAME }}
          SHA_TAG: ${{ steps.meta.outputs.sha }}
          PUSH_IMAGE: ${{ steps.meta.outputs.allow_push == 'true' && env.REGISTRY_HOST != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}
        run: |
          set -eo pipefail
          LOG_FILE=build.log
          if [ "${PUSH_IMAGE}" = "true" ]; then
            docker buildx build \
              --load \
              --tag "${REGISTRY_REPOSITORY}:latest" \
              --tag "${REGISTRY_REPOSITORY}:${SHA_TAG}" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          else
            docker buildx build \
              --load \
              --tag "${REGISTRY_CONTAINER_NAME}:ci" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          fi

      - name: Push image
        if: ${{ steps.meta.outputs.allow_push == 'true' }}
        env:
          REGISTRY_REPOSITORY: ${{ env.REGISTRY_REPOSITORY }}
          SHA_TAG: ${{ steps.meta.outputs.sha }}
        run: |
          set -euo pipefail
          if [ -z "${REGISTRY_REPOSITORY}" ]; then
            echo "::error::REGISTRY_REPOSITORY not defined; cannot push image" >&2
            exit 1
          fi
          docker push "${REGISTRY_REPOSITORY}:${SHA_TAG}"
          docker push "${REGISTRY_REPOSITORY}:latest"

      - name: Upload docker build logs
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: docker-build-logs
          path: build.log

  deploy:
    needs: build
    if: needs.build.outputs.allow_push == 'true'
    runs-on: ubuntu-latest
    env:
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_CONTAINER_NAME: calminer
      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
      STAGING_KUBE_CONFIG: ${{ secrets.STAGING_KUBE_CONFIG }}
      PROD_KUBE_CONFIG: ${{ secrets.PROD_KUBE_CONFIG }}
      K8S_DEPLOY_ENABLED: ${{ secrets.K8S_DEPLOY_ENABLED }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Resolve registry repository
        run: |
          set -euo pipefail
          if [ -z "${REGISTRY_URL}" ]; then
            echo "::error::REGISTRY_URL secret not configured. Configure it with your Gitea container registry host." >&2
            exit 1
          fi
          registry_host="${REGISTRY_URL#http://}"
          registry_host="${registry_host#https://}"
          registry_host="${registry_host%%/*}"
          registry_host="${registry_host%%:*}"
          registry_repository="${registry_host}/allucanget/${REGISTRY_CONTAINER_NAME}"
          echo "REGISTRY_HOST=${registry_host}" >> "$GITHUB_ENV"
          echo "REGISTRY_REPOSITORY=${registry_repository}" >> "$GITHUB_ENV"

      - name: Report Kubernetes deployment toggle
        run: |
          set -euo pipefail
          enabled="${K8S_DEPLOY_ENABLED:-}"
          if [ "${enabled}" = "true" ]; then
            echo "Kubernetes deployment is enabled for this run."
          else
            echo "::notice::Kubernetes deployment steps are disabled (set secrets.K8S_DEPLOY_ENABLED to 'true' to enable)."
          fi

      - name: Capture commit metadata
        id: commit_meta
        run: |
          set -euo pipefail
          message="$(git log -1 --pretty=%B | tr '\n' ' ')"
          echo "message=$message" >> "$GITHUB_OUTPUT"

      - name: Set up kubectl for staging
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy staging]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.STAGING_KUBE_CONFIG }}

      - name: Set up kubectl for production
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy production]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.PROD_KUBE_CONFIG }}

      - name: Deploy to staging
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy staging]')
        run: |
          kubectl set image deployment/calminer-app calminer=${REGISTRY_REPOSITORY}:latest
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          kubectl rollout status deployment/calminer-app

      - name: Collect staging deployment logs
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy staging]')
        run: |
          mkdir -p logs/deployment/staging
          kubectl get pods -o wide > logs/deployment/staging/pods.txt
          kubectl get deployment calminer-app -o yaml > logs/deployment/staging/deployment.yaml
          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/staging/calminer-app.log

      - name: Deploy to production
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy production]')
        run: |
          kubectl set image deployment/calminer-app calminer=${REGISTRY_REPOSITORY}:latest
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          kubectl rollout status deployment/calminer-app

      - name: Collect production deployment logs
        if: env.K8S_DEPLOY_ENABLED == 'true' && contains(steps.commit_meta.outputs.message, '[deploy production]')
        run: |
          mkdir -p logs/deployment/production
          kubectl get pods -o wide > logs/deployment/production/pods.txt
          kubectl get deployment calminer-app -o yaml > logs/deployment/production/deployment.yaml
          kubectl logs deployment/calminer-app --all-containers=true --tail=500 > logs/deployment/production/calminer-app.log

      - name: Upload deployment logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: deployment-logs
          path: logs/deployment
          if-no-files-found: ignore
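The registry validation step reduces a URL to its bare host with plain POSIX parameter expansion, so it needs no external tools. The same idiom can be exercised in isolation; the URL below is a hypothetical example, not a value from this repository:

```bash
# Strip scheme, path, and port from a registry URL using POSIX expansions,
# mirroring the normalization the build workflow performs.
url="https://git.example.com:3000/allucanget"
host="${url#http://}"    # drop a leading http:// if present
host="${host#https://}"  # drop a leading https:// if present
host="${host%%/*}"       # drop everything from the first slash onward
host="${host%%:*}"       # drop a trailing :port
echo "$host"             # prints: git.example.com
```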
44  .gitea/workflows/ci-lint.yml  Normal file
@@ -0,0 +1,44 @@
name: CI - Lint

on:
  workflow_call:
  workflow_dispatch:

jobs:
  lint:
    runs-on: ubuntu-latest
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"

      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Run Ruff
        run: ruff check .

      - name: Run Black
        run: black --check .

      - name: Run Bandit
        run: bandit -c pyproject.toml -r tests
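The lint job runs three independent checkers, so it can be reproduced locally before a push with the same commands the workflow invokes; the assumption here is that the linters are pinned in `requirements-test.txt` as the install step above suggests:

```bash
# Run the same checks as the CI - Lint workflow from the repository root.
pip install -r requirements-test.txt
ruff check .                        # lint rules, no rewrite
black --check .                     # formatting check only
bandit -c pyproject.toml -r tests   # security scan, scoped exactly as in CI
```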
73  .gitea/workflows/ci-test.yml  Normal file
@@ -0,0 +1,73 @@
name: CI - Test

on:
  workflow_call:
  workflow_dispatch:

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
      DB_DRIVER: postgresql+psycopg2
      DB_HOST: 192.168.88.35
      DB_NAME: calminer_test
      DB_USER: calminer
      DB_PASSWORD: calminer_password
    services:
      postgres:
        image: postgres:17
        env:
          POSTGRES_USER: ${{ env.DB_USER }}
          POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
          POSTGRES_DB: ${{ env.DB_NAME }}
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"

      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Run tests
        env:
          DATABASE_DRIVER: ${{ env.DB_DRIVER }}
          DATABASE_HOST: postgres
          DATABASE_PORT: 5432
          DATABASE_USER: ${{ env.DB_USER }}
          DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
          DATABASE_NAME: ${{ env.DB_NAME }}
        run: |
          pytest --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80 --junitxml=pytest-report.xml

      - name: Upload test artifacts
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-artifacts
          path: |
            coverage.xml
            pytest-report.xml
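The test job reaches the `postgres` service container by hostname. A rough local equivalent, sketched below with an assumed container name and published port, is to start a throwaway Postgres with the same credentials and export the matching `DATABASE_*` variables before running pytest:

```bash
# Approximate the CI - Test environment locally (a sketch, not part of this diff).
docker run -d --name calminer-test-db -p 5432:5432 \
  -e POSTGRES_USER=calminer \
  -e POSTGRES_PASSWORD=calminer_password \
  -e POSTGRES_DB=calminer_test \
  postgres:17

export DATABASE_DRIVER=postgresql+psycopg2
export DATABASE_HOST=localhost   # CI uses the service hostname "postgres"
export DATABASE_PORT=5432
export DATABASE_USER=calminer
export DATABASE_PASSWORD=calminer_password
export DATABASE_NAME=calminer_test

pytest --cov=. --cov-report=term-missing --cov-fail-under=80
```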
30  .gitea/workflows/ci.yml  Normal file
@@ -0,0 +1,30 @@
name: CI

on:
  push:
    branches:
      - main
      - develop
      - v2
  pull_request:
    branches:
      - main
      - develop
  workflow_dispatch:

jobs:
  lint:
    uses: ./.gitea/workflows/ci-lint.yml
    secrets: inherit

  test:
    needs: lint
    uses: ./.gitea/workflows/ci-test.yml
    secrets: inherit

  build:
    needs:
      - lint
      - test
    uses: ./.gitea/workflows/ci-build.yml
    secrets: inherit
@@ -1,141 +0,0 @@
name: CI

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  test:
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
      DB_DRIVER: postgresql+psycopg2
      DB_HOST: 192.168.88.35
      DB_NAME: calminer_test
      DB_USER: calminer
      DB_PASSWORD: calminer_password
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:17
        env:
          POSTGRES_USER: ${{ env.DB_USER }}
          POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
          POSTGRES_DB: ${{ env.DB_NAME }}
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Get pip cache dir
        id: pip-cache
        run: |
          echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
          echo "Pip cache dir: $(pip cache dir)"

      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.path }}
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Update apt-cacher-ng config
        run: |-
          echo 'Acquire::http::Proxy "{{ env.APT_CACHER_NG }}";' | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          apt-get update

      - name: Update system packages
        run: apt-get upgrade -y

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Install Playwright system dependencies
        run: playwright install-deps

      - name: Install Playwright browsers
        run: playwright install

      - name: Run tests
        env:
          DATABASE_DRIVER: ${{ env.DB_DRIVER }}
          DATABASE_HOST: postgres
          DATABASE_PORT: 5432
          DATABASE_USER: ${{ env.DB_USER }}
          DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
          DATABASE_NAME: ${{ env.DB_NAME }}
        run: |
          pytest tests/ --cov=.

      - name: Build Docker image
        run: |
          docker build -t calminer .

  build:
    runs-on: ubuntu-latest
    needs: test
    env:
      DEFAULT_BRANCH: main
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
      REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
      REGISTRY_CONTAINER_NAME: calminer
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Collect workflow metadata
        id: meta
        shell: bash
        run: |
          ref_name="${GITHUB_REF_NAME:-${GITHUB_REF##*/}}"
          event_name="${GITHUB_EVENT_NAME:-}"
          sha="${GITHUB_SHA:-}"

          if [ "$ref_name" = "${DEFAULT_BRANCH:-main}" ]; then
            echo "on_default=true" >> "$GITHUB_OUTPUT"
          else
            echo "on_default=false" >> "$GITHUB_OUTPUT"
          fi

          echo "ref_name=$ref_name" >> "$GITHUB_OUTPUT"
          echo "event_name=$event_name" >> "$GITHUB_OUTPUT"
          echo "sha=$sha" >> "$GITHUB_OUTPUT"

      - name: Set up QEMU and Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to gitea registry
        if: ${{ steps.meta.outputs.on_default == 'true' }}
        uses: docker/login-action@v3
        continue-on-error: true
        with:
          registry: ${{ env.REGISTRY_URL }}
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}

      - name: Build and push image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile
          push: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && (env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '') }}
          tags: |
            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:latest
            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:${{ steps.meta.outputs.sha }}
105  .gitea/workflows/deploy-coolify.yml  Normal file
@@ -0,0 +1,105 @@
name: Deploy - Coolify

on:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    env:
      COOLIFY_BASE_URL: ${{ secrets.COOLIFY_BASE_URL }}
      COOLIFY_API_TOKEN: ${{ secrets.COOLIFY_API_TOKEN }}
      COOLIFY_APPLICATION_ID: ${{ secrets.COOLIFY_APPLICATION_ID }}
      COOLIFY_DEPLOY_ENV: ${{ secrets.COOLIFY_DEPLOY_ENV }}
      DOCKER_COMPOSE_PATH: docker-compose.prod.yml
      ENV_FILE_PATH: deploy/.env
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Capture deployment context
        id: context
        run: |
          set -euo pipefail
          repo="${GITEA_REPOSITORY:-${GITHUB_REPOSITORY:-}}"
          if [ -z "$repo" ]; then
            repo="$(git remote get-url origin | sed 's#.*/\(.*\)\.git#\1#')"
          fi
          ref_name="${GITEA_REF_NAME:-${GITHUB_REF_NAME:-}}"
          full_ref="${GITEA_REF:-${GITHUB_REF:-}}"
          if [ -z "$ref_name" ] && [ -n "$full_ref" ]; then
            ref_name="${full_ref##*/}"
          fi
          if [ -z "$ref_name" ]; then
            ref_name="$(git rev-parse --abbrev-ref HEAD)"
          fi
          sha="${GITEA_SHA:-${GITHUB_SHA:-}}"
          if [ -z "$sha" ]; then
            sha="$(git rev-parse HEAD)"
          fi

          echo "repository=$repo" >> "$GITHUB_OUTPUT"
          echo "ref=${ref_name:-main}" >> "$GITHUB_OUTPUT"
          echo "sha=$sha" >> "$GITHUB_OUTPUT"

      - name: Prepare compose bundle
        run: |
          set -euo pipefail
          mkdir -p deploy
          cp "$DOCKER_COMPOSE_PATH" deploy/docker-compose.yml
          if [ -n "$COOLIFY_DEPLOY_ENV" ]; then
            printf '%s\n' "$COOLIFY_DEPLOY_ENV" > "$ENV_FILE_PATH"
          elif [ ! -f "$ENV_FILE_PATH" ]; then
            echo "::error::COOLIFY_DEPLOY_ENV secret not configured and deploy/.env missing" >&2
            exit 1
          fi

      - name: Validate Coolify secrets
        run: |
          set -euo pipefail
          missing=0
          for var in COOLIFY_BASE_URL COOLIFY_API_TOKEN COOLIFY_APPLICATION_ID; do
            if [ -z "${!var}" ]; then
              echo "::error::Missing required secret: $var"
              missing=1
            fi
          done
          if [ "$missing" -eq 1 ]; then
            exit 1
          fi

      - name: Trigger deployment via Coolify API
        env:
          HEAD_SHA: ${{ steps.context.outputs.sha }}
        run: |
          set -euo pipefail
          api_url="$COOLIFY_BASE_URL/api/v1/applications/${COOLIFY_APPLICATION_ID}/deploy"
          payload=$(jq -n --arg sha "$HEAD_SHA" '{ commitSha: $sha }')
          response=$(curl -sS -w '\n%{http_code}' \
            -X POST "$api_url" \
            -H "Authorization: Bearer $COOLIFY_API_TOKEN" \
            -H "Content-Type: application/json" \
            -d "$payload")
          body=$(echo "$response" | head -n -1)
          status=$(echo "$response" | tail -n1)
          echo "Deploy response status: $status"
          echo "$body"
          printf '%s' "$body" > deploy/coolify-response.json
          if [ "$status" -ge 400 ]; then
            echo "::error::Deployment request failed"
            exit 1
          fi

      - name: Upload deployment bundle
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: coolify-deploy-bundle
          path: |
            deploy/docker-compose.yml
            deploy/.env
            deploy/coolify-response.json
          if-no-files-found: warn
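Because the deploy step is a single authenticated POST, it can also be exercised from a workstation. The sketch below reuses the exact endpoint path and payload from the workflow above; all three exported values are placeholders for the real secrets:

```bash
# Manually trigger the same Coolify deployment the workflow performs.
# The exported values are placeholders, not real configuration.
export COOLIFY_BASE_URL="https://coolify.example.com"
export COOLIFY_API_TOKEN="***"
export COOLIFY_APPLICATION_ID="app-id"

sha="$(git rev-parse HEAD)"
curl -sS -X POST \
  "$COOLIFY_BASE_URL/api/v1/applications/$COOLIFY_APPLICATION_ID/deploy" \
  -H "Authorization: Bearer $COOLIFY_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d "$(jq -n --arg sha "$sha" '{ commitSha: $sha }')"
```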
3  .gitignore  vendored
@@ -17,6 +17,7 @@ env/
# environment variables
.env
*.env
.env.*
# except example files
!config/*.env.example

@@ -46,8 +47,10 @@ htmlcov/
logs/

# SQLite database
data/
*.sqlite3
test*.db
local*.db

# Act runner files
.runner
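The new `.env.*` pattern plus the `!config/*.env.example` negation keeps concrete env files untracked while example files stay versioned; note that files already committed, like the env files added earlier in this diff, are unaffected by ignore rules. `git check-ignore` shows which rule wins for a given path (the example filename below is hypothetical):

```bash
# Verify the ignore rules: -v prints the matching pattern and its source line.
git check-ignore -v .env.local                # matched by .env.*
git check-ignore -v config/app.env.example    # exits 1: un-ignored by the negation
```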
13  .pre-commit-config.yaml  Normal file
@@ -0,0 +1,13 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.1
    hooks:
      - id: ruff
  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 24.8.0
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.9
    hooks:
      - id: bandit
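The hook set mirrors the CI lint job (Ruff, Black, Bandit), so failures surface before a push rather than in the pipeline. Typical one-time setup:

```bash
# Install the git hook once, then run the full suite against the tree.
pip install pre-commit
pre-commit install              # registers the hook under .git/hooks
pre-commit run --all-files      # same ruff/black/bandit set as CI lint
```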
42  Dockerfile
@@ -41,8 +41,25 @@ if url:
    finally:
        sock.close()
PY
APT_PROXY_CONFIG=/etc/apt/apt.conf.d/01proxy

apt_update_with_fallback() {
  if ! apt-get update; then
    rm -f "$APT_PROXY_CONFIG"
    apt-get update
    apt-get install -y --no-install-recommends build-essential gcc libpq-dev
  fi
}

apt_install_with_fallback() {
  if ! apt-get install -y --no-install-recommends "$@"; then
    rm -f "$APT_PROXY_CONFIG"
    apt-get update
    apt-get install -y --no-install-recommends "$@"
  fi
}

apt_update_with_fallback
apt_install_with_fallback build-essential gcc libpq-dev
pip install --upgrade pip
pip wheel --no-deps --wheel-dir /wheels -r requirements.txt
apt-get purge -y --auto-remove build-essential gcc

@@ -88,8 +105,25 @@ if url:
    finally:
        sock.close()
PY
APT_PROXY_CONFIG=/etc/apt/apt.conf.d/01proxy

apt_update_with_fallback() {
  if ! apt-get update; then
    rm -f "$APT_PROXY_CONFIG"
    apt-get update
    apt-get install -y --no-install-recommends libpq5
  fi
}

apt_install_with_fallback() {
  if ! apt-get install -y --no-install-recommends "$@"; then
    rm -f "$APT_PROXY_CONFIG"
    apt-get update
    apt-get install -y --no-install-recommends "$@"
  fi
}

apt_update_with_fallback
apt_install_with_fallback libpq5
rm -rf /var/lib/apt/lists/*
EOF

@@ -108,4 +142,6 @@ USER appuser

EXPOSE 8003

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]
ENTRYPOINT ["uvicorn"]

CMD ["main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]
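The last hunk splits the old single `CMD` into `ENTRYPOINT ["uvicorn"]` plus a `CMD` of arguments, which keeps the server binary fixed while letting `docker run` replace only the arguments. A hedged example; the image tag `calminer` is an assumption taken from the old workflow's `docker build -t calminer .`:

```bash
# Default invocation: ENTRYPOINT and CMD combine into the original command line.
docker run --rm -p 8003:8003 calminer

# Override only the arguments, e.g. a single worker on a laptop; uvicorn itself
# still comes from the ENTRYPOINT.
docker run --rm -p 8003:8003 calminer main:app --host 0.0.0.0 --port 8003 --workers 1
```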
@@ -8,4 +8,6 @@ The system is designed to help mining companies make informed decisions by simul
## Documentation & quickstart

This repository contains only code. See detailed developer and architecture documentation in the [Docs](https://git.allucanget.biz/allucanget/calminer-docs) repository.
- Detailed developer, architecture, and operations guides live in the companion [calminer-docs](../calminer-docs/) repository. Please see the [README](../calminer-docs/README.md) there for instructions.
- For a local run, create a `.env` (see `.env.example`), install requirements, then execute `python -m scripts.init_db` followed by `uvicorn main:app --reload`. The initializer is safe to rerun and seeds demo data automatically.
- To wipe and recreate the schema in development, run `CALMINER_ENV=development python -m scripts.reset_db` before invoking the initializer again.
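Strung together, the README quickstart bullets above amount to a short shell session; the `.env.example` copy step assumes that file exists at the repository root, as the bullet implies:

```bash
# First-time local run, following the README quickstart.
cp .env.example .env                 # adjust values as needed
pip install -r requirements.txt
python -m scripts.init_db            # idempotent: creates schema and seeds demo data
uvicorn main:app --reload

# Development-only schema reset, then re-seed.
CALMINER_ENV=development python -m scripts.reset_db
python -m scripts.init_db
```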
35  alembic.ini
@@ -1,35 +0,0 @@
[alembic]
script_location = alembic
sqlalchemy.url = %(DATABASE_URL)s

[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
alembic/env.py
@@ -1,63 +0,0 @@
from __future__ import annotations

from logging.config import fileConfig
from typing import Iterable

from alembic import context
from sqlalchemy import engine_from_config, pool

from config.database import Base, DATABASE_URL
from models import *  # noqa: F401,F403 - ensure models are imported for metadata registration

# this is the Alembic Config object, which provides access to the values within the .ini file.
config = context.config

if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Interpret the config file for Python logging.
# This line sets up loggers basically.
config.set_main_option("sqlalchemy.url", DATABASE_URL)

target_metadata = Base.metadata


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode."""

    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode."""

    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)

        with context.begin_transaction():
            context.run_migrations()


def run_migrations() -> None:
    if context.is_offline_mode():
        run_migrations_offline()
    else:
        run_migrations_online()


run_migrations()
alembic/script.py.mako
@@ -1,17 +0,0 @@
"""${message}"""

revision = ${repr(revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}

from alembic import op
import sqlalchemy as sa


def upgrade() -> None:
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    ${downgrades if downgrades else "pass"}
@@ -1,220 +0,0 @@
"""Initial domain schema"""

from __future__ import annotations

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "20251109_01"
down_revision = None
branch_labels = None
depends_on = None


mining_operation_type = sa.Enum(
    "open_pit",
    "underground",
    "in_situ_leach",
    "placer",
    "quarry",
    "mountaintop_removal",
    "other",
    name="miningoperationtype",
)

scenario_status = sa.Enum(
    "draft",
    "active",
    "archived",
    name="scenariostatus",
)

financial_category = sa.Enum(
    "capex",
    "opex",
    "revenue",
    "contingency",
    "other",
    name="financialcategory",
)

cost_bucket = sa.Enum(
    "capital_initial",
    "capital_sustaining",
    "operating_fixed",
    "operating_variable",
    "maintenance",
    "reclamation",
    "royalties",
    "general_admin",
    name="costbucket",
)

distribution_type = sa.Enum(
    "normal",
    "triangular",
    "uniform",
    "lognormal",
    "custom",
    name="distributiontype",
)

stochastic_variable = sa.Enum(
    "ore_grade",
    "recovery_rate",
    "metal_price",
    "operating_cost",
    "capital_cost",
    "discount_rate",
    "throughput",
    name="stochasticvariable",
)

resource_type = sa.Enum(
    "diesel",
    "electricity",
    "water",
    "explosives",
    "reagents",
    "labor",
    "equipment_hours",
    "tailings_capacity",
    name="resourcetype",
)


def upgrade() -> None:
    bind = op.get_bind()
    mining_operation_type.create(bind, checkfirst=True)
    scenario_status.create(bind, checkfirst=True)
    financial_category.create(bind, checkfirst=True)
    cost_bucket.create(bind, checkfirst=True)
    distribution_type.create(bind, checkfirst=True)
    stochastic_variable.create(bind, checkfirst=True)
    resource_type.create(bind, checkfirst=True)

    op.create_table(
        "projects",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("location", sa.String(length=255), nullable=True),
        sa.Column("operation_type", mining_operation_type, nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name"),
    )
    op.create_index(op.f("ix_projects_id"), "projects", ["id"], unique=False)

    op.create_table(
        "scenarios",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("project_id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column("status", scenario_status, nullable=False),
        sa.Column("start_date", sa.Date(), nullable=True),
        sa.Column("end_date", sa.Date(), nullable=True),
        sa.Column("discount_rate", sa.Numeric(precision=5, scale=2), nullable=True),
        sa.Column("currency", sa.String(length=3), nullable=True),
        sa.Column("primary_resource", resource_type, nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.ForeignKeyConstraint(["project_id"], ["projects.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_scenarios_id"), "scenarios", ["id"], unique=False)
    op.create_index(op.f("ix_scenarios_project_id"), "scenarios", ["project_id"], unique=False)

    op.create_table(
        "financial_inputs",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("scenario_id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("category", financial_category, nullable=False),
        sa.Column("cost_bucket", cost_bucket, nullable=True),
        sa.Column("amount", sa.Numeric(precision=18, scale=2), nullable=False),
        sa.Column("currency", sa.String(length=3), nullable=True),
        sa.Column("effective_date", sa.Date(), nullable=True),
        sa.Column("notes", sa.Text(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.ForeignKeyConstraint(["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_financial_inputs_id"), "financial_inputs", ["id"], unique=False)
    op.create_index(op.f("ix_financial_inputs_scenario_id"), "financial_inputs", ["scenario_id"], unique=False)

    op.create_table(
        "simulation_parameters",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("scenario_id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("distribution", distribution_type, nullable=False),
        sa.Column("variable", stochastic_variable, nullable=True),
        sa.Column("resource_type", resource_type, nullable=True),
        sa.Column("mean_value", sa.Numeric(precision=18, scale=4), nullable=True),
        sa.Column("standard_deviation", sa.Numeric(precision=18, scale=4), nullable=True),
        sa.Column("minimum_value", sa.Numeric(precision=18, scale=4), nullable=True),
        sa.Column("maximum_value", sa.Numeric(precision=18, scale=4), nullable=True),
        sa.Column("unit", sa.String(length=32), nullable=True),
        sa.Column("configuration", sa.JSON(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
        sa.ForeignKeyConstraint(["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_simulation_parameters_id"), "simulation_parameters", ["id"], unique=False)
    op.create_index(op.f("ix_simulation_parameters_scenario_id"), "simulation_parameters", ["scenario_id"], unique=False)


def downgrade() -> None:
    op.drop_index(op.f("ix_simulation_parameters_scenario_id"), table_name="simulation_parameters")
    op.drop_index(op.f("ix_simulation_parameters_id"), table_name="simulation_parameters")
    op.drop_table("simulation_parameters")

    op.drop_index(op.f("ix_financial_inputs_scenario_id"), table_name="financial_inputs")
    op.drop_index(op.f("ix_financial_inputs_id"), table_name="financial_inputs")
    op.drop_table("financial_inputs")

    op.drop_index(op.f("ix_scenarios_project_id"), table_name="scenarios")
    op.drop_index(op.f("ix_scenarios_id"), table_name="scenarios")
    op.drop_table("scenarios")

    op.drop_index(op.f("ix_projects_id"), table_name="projects")
    op.drop_table("projects")

    resource_type.drop(op.get_bind(), checkfirst=True)
    stochastic_variable.drop(op.get_bind(), checkfirst=True)
    distribution_type.drop(op.get_bind(), checkfirst=True)
    cost_bucket.drop(op.get_bind(), checkfirst=True)
    financial_category.drop(op.get_bind(), checkfirst=True)
    scenario_status.drop(op.get_bind(), checkfirst=True)
    mining_operation_type.drop(op.get_bind(), checkfirst=True)
@@ -1,210 +0,0 @@
"""Add authentication and RBAC tables"""

from __future__ import annotations

from alembic import op
import sqlalchemy as sa
from passlib.context import CryptContext
from sqlalchemy.sql import column, table

# revision identifiers, used by Alembic.
revision = "20251109_02"
down_revision = "20251109_01"
branch_labels = None
depends_on = None

password_context = CryptContext(schemes=["argon2"], deprecated="auto")


def upgrade() -> None:
    op.create_table(
        "users",
        sa.Column("id", sa.Integer(), primary_key=True),
        sa.Column("email", sa.String(length=255), nullable=False),
        sa.Column("username", sa.String(length=128), nullable=False),
        sa.Column("password_hash", sa.String(length=255), nullable=False),
        sa.Column("is_active", sa.Boolean(), nullable=False, server_default=sa.true()),
        sa.Column("is_superuser", sa.Boolean(), nullable=False, server_default=sa.false()),
        sa.Column("last_login_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.UniqueConstraint("email", name="uq_users_email"),
        sa.UniqueConstraint("username", name="uq_users_username"),
    )
    op.create_index(
        "ix_users_active_superuser",
        "users",
        ["is_active", "is_superuser"],
        unique=False,
    )

    op.create_table(
        "roles",
        sa.Column("id", sa.Integer(), primary_key=True),
        sa.Column("name", sa.String(length=64), nullable=False),
        sa.Column("display_name", sa.String(length=128), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.UniqueConstraint("name", name="uq_roles_name"),
    )

    op.create_table(
        "user_roles",
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.Column("role_id", sa.Integer(), nullable=False),
        sa.Column("granted_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()),
        sa.Column("granted_by", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["role_id"], ["roles.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["granted_by"], ["users.id"], ondelete="SET NULL"),
        sa.PrimaryKeyConstraint("user_id", "role_id"),
        sa.UniqueConstraint("user_id", "role_id", name="uq_user_roles_user_role"),
    )
    op.create_index(
        "ix_user_roles_role_id",
        "user_roles",
        ["role_id"],
        unique=False,
    )

    # Seed default roles
    roles_table = table(
        "roles",
        column("id", sa.Integer()),
        column("name", sa.String()),
        column("display_name", sa.String()),
        column("description", sa.Text()),
    )

    op.bulk_insert(
        roles_table,
        [
            {
                "id": 1,
                "name": "admin",
                "display_name": "Administrator",
                "description": "Full platform access with user management rights.",
            },
            {
                "id": 2,
                "name": "project_manager",
                "display_name": "Project Manager",
                "description": "Manage projects, scenarios, and associated data.",
            },
            {
                "id": 3,
                "name": "analyst",
                "display_name": "Analyst",
                "description": "Review dashboards and scenario outputs.",
            },
            {
                "id": 4,
                "name": "viewer",
                "display_name": "Viewer",
                "description": "Read-only access to assigned projects and reports.",
            },
        ],
    )

    admin_password_hash = password_context.hash("ChangeMe123!")

    users_table = table(
        "users",
        column("id", sa.Integer()),
        column("email", sa.String()),
        column("username", sa.String()),
        column("password_hash", sa.String()),
        column("is_active", sa.Boolean()),
        column("is_superuser", sa.Boolean()),
    )

    op.bulk_insert(
        users_table,
        [
            {
                "id": 1,
                "email": "admin@calminer.local",
                "username": "admin",
                "password_hash": admin_password_hash,
                "is_active": True,
                "is_superuser": True,
            }
        ],
    )

    user_roles_table = table(
        "user_roles",
        column("user_id", sa.Integer()),
        column("role_id", sa.Integer()),
        column("granted_by", sa.Integer()),
    )

    op.bulk_insert(
        user_roles_table,
        [
            {
                "user_id": 1,
                "role_id": 1,
                "granted_by": 1,
            }
        ],
    )


def downgrade() -> None:
    op.drop_index("ix_user_roles_role_id", table_name="user_roles")
    op.drop_table("user_roles")

    op.drop_table("roles")

    op.drop_index("ix_users_active_superuser", table_name="users")
    op.drop_table("users")
BIN  alembic_test.db  (Binary file not shown)
101
changelog.md
101
changelog.md
@@ -1,5 +1,94 @@
|
||||
# Changelog
|
||||
|
||||
## 2025-11-13
|
||||
|
||||
- Completed the UI alignment initiative by consolidating shared form and button styles into `static/css/forms.css` and `static/css/main.css`, introducing the semantic palette in `static/css/theme-default.css`, and spot-checking key pages plus contrast reports.
|
||||
- Refactored the architecture data model docs by turning `calminer-docs/architecture/08_concepts/02_data_model.md` into a concise overview that links to new detail pages covering SQLAlchemy models, navigation metadata, enumerations, Pydantic schemas, and monitoring tables.
|
||||
- Nested the calculator navigation under Projects by updating `scripts/init_db.py` seeds, teaching `services/navigation.py` to resolve scenario-scoped hrefs for profitability/opex/capex, and extending sidebar coverage through `tests/integration/test_navigation_sidebar_calculations.py` plus `tests/services/test_navigation_service.py` to validate admin/viewer visibility and contextual URL generation.
|
||||
- Added navigation sidebar integration coverage by extending `tests/conftest.py` with role-switching headers, seeding admin/viewer test users, and adding `tests/integration/test_navigation_sidebar.py` to assert ordered link rendering for admins, viewer filtering of admin-only entries, and anonymous rejection of the endpoint.
|
||||
- Finalised the financial data import/export templates by inventorying required fields, defining CSV column specs with validation rules, drafting Excel workbook layouts, documenting end-user workflows in `calminer-docs/userguide/data_import_export.md`, and recording stakeholder review steps alongside updated TODO/DONE tracking.
|
||||
- Scoped profitability calculator UI under the scenario hierarchy by adding `/calculations/projects/{project_id}/scenarios/{scenario_id}/profitability` GET/POST handlers, updating scenario templates and sidebar navigation to link to the new route, and extending `tests/test_project_scenario_routes.py` with coverage for the scenario path plus legacy redirect behaviour (module run: 14 passed).
|
||||
- Extended scenario frontend regression coverage by updating `tests/test_project_scenario_routes.py` to assert project/scenario breadcrumbs and calculator navigation, normalising escaped URLs, and re-running the module tests (13 passing).
|
||||
- Cleared FastAPI and Pydantic deprecation warnings by migrating `scripts/init_db.py` to `@field_validator`, replacing the `main.py` startup hook with a lifespan handler, auditing template response call signatures, confirming HTTP 422 constant usage, and re-running the full pytest suite to ensure a clean warning slate.
|
||||
- Delivered the capex planner end-to-end: added scaffolded UI in `templates/scenarios/capex.html`, wired GET/POST handlers through `routes/calculations.py`, implemented calculation logic plus snapshot persistence in `services/calculations.py` and `models/capex_snapshot.py`, updated navigation links, and introduced unit tests in `tests/services/test_calculations_capex.py`.
|
||||
- Updated UI navigation to surface the opex planner by adding the sidebar link in `templates/partials/sidebar_nav.html`, wiring a scenario detail action in `templates/scenarios/detail.html`.
|
||||
- Completed manual validation of the Capex Planner UI flows (sidebar entry, scenario deep link, validation errors, successful calculation) with results captured in `manual_tests/capex.md`, documented snapshot verification steps, and noted the optional JSON client check for future follow-up.
|
||||
- Added opex calculation unit tests in `tests/services/test_calculations_opex.py` covering success metrics, currency validation, frequency enforcement, and evaluation horizon extension.
|
||||
- Documented the Opex Planner workflow in `calminer-docs/userguide/opex_planner.md`, linked it from the user guide index, extended `calminer-docs/architecture/08_concepts/02_data_model.md` with snapshot coverage, and captured the completion in `.github/instructions/DONE.md`.
|
||||
- Implemented opex integration coverage in `tests/integration/test_opex_calculations.py`, exercising HTML and JSON flows, verifying snapshot persistence, and asserting currency mismatch handling for form and API submissions.
|
||||
- Executed the full pytest suite with coverage (211 tests) to confirm no regressions or warnings after the opex documentation updates.
|
||||
- Completed the navigation sidebar API migration by finalising the database-backed service, refactoring `templates/partials/sidebar_nav.html` to consume the endpoint, hydrating via `static/js/navigation_sidebar.js`, and updating HTML route dependencies (`routes/projects.py`, `routes/scenarios.py`, `routes/reports.py`, `routes/imports.py`, `routes/calculations.py`) to use redirect-aware guards so anonymous visitors receive login redirects instead of JSON errors (manual verification via curl across projects, scenarios, reports, and calculations pages).
|
||||
|
||||
## 2025-11-12
|
||||
|
||||
- Fixed critical 500 error in reporting dashboard by correcting route reference in reporting.html template - changed 'reports.project_list_page' to 'projects.project_list_page' to resolve NoMatchFound error when accessing /ui/reporting.
|
||||
- Completed navigation validation by inventorying all sidebar navigation links, identifying missing routes for simulations, reporting, settings, themes, and currencies, created new UI routes in routes/ui.py with proper authentication guards, built corresponding templates (simulations.html, reporting.html, settings.html, theme_settings.html, currencies.html), registered the UI router in main.py, updated sidebar navigation to use route names instead of hardcoded URLs, and enhanced navigation.js to use dynamic URL resolution for proper route handling.
|
||||
- Fixed critical template rendering error in sidebar_nav.html where URL objects from `request.url_for()` were being used with string methods, causing TypeError. Added `|string` filters to convert URL objects to strings for proper template rendering.
|
||||
- Integrated Plotly charting for interactive visualizations in reporting templates, added chart generation methods to ReportingService (`generate_npv_comparison_chart`, `generate_distribution_histogram`), updated project summary and scenario distribution contexts to include chart JSON data, enhanced templates with chart containers and JavaScript rendering, added chart-container CSS styling, and validated all reporting tests pass.
|
||||
|
||||
- Completed local run verification: started application with `uvicorn main:app --reload` without errors, verified authenticated routes (/login, /, /projects/ui, /projects) load correctly with seeded data, and summarized findings for deployment pipeline readiness.
|
||||
- Fixed docker-compose.override.yml command array to remove duplicate "uvicorn" entry, enabling successful container startup with uvicorn reload in development mode.
|
||||
- Completed deployment pipeline verification: built Docker image without errors, validated docker-compose configuration, deployed locally with docker-compose (app and postgres containers started successfully), and confirmed application startup logs showing database bootstrap and seeded data initialization.
|
||||
- Completed documentation of current data models: updated `calminer-docs/architecture/08_concepts/02_data_model.md` with comprehensive SQLAlchemy model schemas, enumerations, Pydantic API schemas, and analysis of discrepancies between models and schemas.
|
||||
- Switched `models/performance_metric.py` to reuse the shared declarative base from `config.database`, clearing the SQLAlchemy 2.0 `declarative_base` deprecation warning and verifying repository tests still pass.
|
||||
- Replaced the Alembic migration workflow with the idempotent Pydantic-backed initializer (`scripts/init_db.py`), added a guarded reset utility (`scripts/reset_db.py`), removed migration artifacts/tooling (Alembic directory, config, Docker entrypoint), refreshed the container entrypoint to invoke `uvicorn` directly, and updated installation/architecture docs plus the README to direct developers to the new seeding/reset flow.
|
||||
- Eliminated Bandit hardcoded-secret findings by replacing literal JWT tokens and passwords across auth/security tests with randomized helpers drawn from `tests/utils/security.py`, ensuring fixtures still assert expected behaviours.
|
||||
- Centralized Bandit configuration in `pyproject.toml`, reran `bandit -c pyproject.toml -r calminer tests`, and verified the scan now reports zero issues.
|
||||
- Diagnosed admin bootstrap failure caused by legacy `roles` schema, added Alembic migration `20251112_00_add_roles_metadata_columns.py` to backfill `display_name`, `description`, `created_at`, and `updated_at`, and verified the migration via full pytest run in the activated `.venv`.
|
||||
- Resolved Ruff E402 warnings by moving module docstrings ahead of `from __future__ import annotations` across currency and pricing service modules, dropped the unused `HTTPException` import in `monitoring/__init__.py`, and confirmed a clean `ruff check .` run.
|
||||
- Enhanced the deploy job in `.gitea/workflows/cicache.yml` to capture Kubernetes pod, deployment, and container logs into `/logs/deployment/` for staging/production rollouts and publish them via a `deployment-logs` artifact, updating CI/CD documentation with retrieval instructions.
|
||||
- Fixed CI dashboard template lookup failures by renaming `templates/Dashboard.html` to `templates/dashboard.html` and verifying `tests/test_dashboard_route.py` locally to ensure TemplateNotFound no longer occurs on case-sensitive filesystems.
|
||||
- Implemented SQLite support as primary local database with environment-driven backend switching (`CALMINER_USE_SQLITE=true`), updated `scripts/init_db.py` for database-agnostic DDL generation (PostgreSQL enums vs SQLite CHECK constraints), tested compatibility with both backends, and verified application startup and seeded data initialization work seamlessly across SQLite and PostgreSQL.
## 2025-11-11
- Collapsed legacy Alembic revisions into `alembic/versions/00_initial.py`, removed superseded migration files, and verified the consolidated schema via SQLite upgrade and Postgres version stamping.
- Implemented base URL routing to redirect unauthenticated users to login and authenticated users to the dashboard.
- Added comprehensive end-to-end tests for the login flow, including redirects, session handling, and error messaging for invalid/inactive accounts.
- Updated header and footer templates to consistently use the `logo_big.png` image instead of the text logo, with appropriate CSS styling for sizing.
- Centralised ISO-4217 currency validation across scenarios, imports, and export filters (`models/scenario.py`, `routes/scenarios.py`, `schemas/scenario.py`, `schemas/imports.py`, `services/export_query.py`) so malformed codes are rejected consistently at every entry point.
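  A minimal sketch of what such a shared helper can look like; the real implementation lives in `services/currency.py` (imported by `models/financial_input.py` further down), and the lookup table here is an illustrative subset rather than the project's actual list:

  ```python
  _KNOWN_CODES = {"USD", "EUR", "GBP", "CAD", "AUD"}  # illustrative subset only

  def normalise_currency(value: str) -> str:
      """Upper-case and validate a 3-letter ISO-4217 currency code."""
      code = value.strip().upper()
      if len(code) != 3 or not code.isalpha():
          raise ValueError("Currency code must be a 3-letter ISO 4217 value")
      if code not in _KNOWN_CODES:
          raise ValueError(f"Unsupported currency code: {code}")
      return code
  ```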
- Updated scenario services and UI flows to surface friendly validation errors and added regression coverage for imports, exports, API creation, and lifecycle flows, ensuring currencies are normalised end-to-end.
- Linked projects to their pricing settings by updating SQLAlchemy models, repositories, seeding utilities, and migrations, and added regression tests to cover the new association and default backfill.
- Bootstrapped database-stored pricing settings at application startup, aligned initial data seeding with the database-first metadata flow, and added tests covering pricing bootstrap creation, project assignment, and idempotency.
- Extended pricing configuration support to prefer persisted metadata via `dependencies.get_pricing_metadata`, added retrieval tests for project/default fallbacks, and refreshed docs (`calminer-docs/specifications/price_calculation.md`, `pricing_settings_data_model.md`) to describe the database-backed workflow and bootstrap behaviour.
- Added `services/financial.py` NPV, IRR, and payback helpers with robust cash-flow normalisation, convergence safeguards, and fractional period support, plus comprehensive pytest coverage exercising representative project scenarios and failure modes.
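  The exported helper signatures are not visible in this diff, so the following is a hedged sketch of the underlying maths only (`npv` and `payback_period` are hypothetical names):

  ```python
  def npv(rate: float, cash_flows: list[float]) -> float:
      """Discounted sum of per-period cash flows, period 0 first."""
      return sum(cf / (1.0 + rate) ** t for t, cf in enumerate(cash_flows))

  def payback_period(cash_flows: list[float]) -> float | None:
      """Fractional period at which cumulative cash flow first reaches zero."""
      cumulative = 0.0
      for t, cf in enumerate(cash_flows):
          previous = cumulative
          cumulative += cf
          if cumulative >= 0:
              if t == 0 or cf <= 0:
                  return float(t)
              return (t - 1) + (-previous / cf)  # interpolate within the period
      return None  # investment is never recovered

  # npv(0.08, [-1000.0, 300.0, 400.0, 500.0]) ≈ 17.6; payback ≈ 2.6 periods
  ```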
- Authored `calminer-docs/specifications/financial_metrics.md` capturing DCF assumptions, solver behaviours, and worked examples, and cross-linked the architecture concepts to the new reference for consistent navigation.
- Implemented `services/simulation.py` Monte Carlo engine with configurable distributions, summary aggregation, and reproducible RNG seeding, introduced regression tests in `tests/test_simulation.py`, and documented configuration/usage in `calminer-docs/specifications/monte_carlo_simulation.md` with architecture cross-links.
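  As a hedged illustration of the reproducibility guarantee (names and distribution choices below are made up; the real configuration lives in `services/simulation.py`):

  ```python
  import numpy as np

  def simulate_npv(base_flows: list[float], *, sigma: float = 0.15,
                   rate: float = 0.08, iterations: int = 10_000, seed: int = 42) -> dict:
      rng = np.random.default_rng(seed)  # identical seeds reproduce identical runs
      base = np.asarray(base_flows)
      factors = rng.normal(1.0, sigma, size=(iterations, 1))
      flows = np.where(base > 0, base * factors, base)  # perturb revenues only
      discount = (1.0 + rate) ** np.arange(base.size)
      npvs = (flows / discount).sum(axis=1)
      return {"mean": float(npvs.mean()),
              "p10": float(np.percentile(npvs, 10)),
              "p90": float(np.percentile(npvs, 90))}
  ```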
- Polished reporting HTML contexts by cleaning stray fragments in `routes/reports.py`, adding download action metadata for project and scenario pages, and generating scenario comparison download URLs with correctly serialised repeated `scenario_ids` parameters.
- Consolidated Alembic history into a single initial migration (`20251111_00_initial_schema.py`), removed superseded revision files, and ensured Alembic metadata still references the project metadata for clean bootstrap.
- Added `scripts/run_migrations.py` and a Docker entrypoint wrapper to run Alembic migrations before `uvicorn` starts, removed the fallback `Base.metadata.create_all` call, and updated `calminer-docs/admin/installation.md` so developers know how to apply migrations locally or via Docker.
- Configured pytest defaults to collect coverage (`--cov`) with an 80% fail-under gate, excluded entrypoint/reporting scaffolds from the calculation, updated contributor docs with the standard `pytest` command, and verified the suite now reports 83% coverage.
- Standardized color scheme and typography by moving alert styles to `main.css`, adding typography rules with CSS variables, updating auth templates for consistent button classes, and ensuring all templates use centralized color and spacing variables.
- Improved navigation flow by adding two large chevron buttons at the top of the navigation sidebar that let users step to the previous or next page in the navigation list, including the JavaScript logic for determining the current page and handling the transition.
- Established pytest-based unit and integration test suites with coverage thresholds, achieving 83% coverage across 181 tests, with configuration in pyproject.toml and documentation in CONTRIBUTING.md.
- Configured CI pipelines to run tests, linting, and security checks on each change, adding Bandit security scanning to the workflow and verifying execution on pushes and PRs to main/develop branches.
- Added deployment automation with Docker Compose for local development and Kubernetes manifests for production, ensuring environment parity and documenting processes in calminer-docs/admin/installation.md.
- Completed monitoring instrumentation by adding business metrics observation to project and scenario repository operations, and simulation performance tracking to the Monte Carlo service with success/error status and duration metrics.
- Updated the TODO list to reflect completed monitoring implementation tasks and validated changes with passing simulation tests.
- Implemented comprehensive performance monitoring for scalability (FR-006) with Prometheus metrics collection for HTTP requests, import/export operations, and general application metrics.
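  Only the `observe_request(method=..., endpoint=..., status=..., seconds=...)` call is visible in the middleware shown further down; a plausible `prometheus_client`-backed shape of that helper (metric names are assumptions) is:

  ```python
  from prometheus_client import Counter, Histogram

  REQUEST_COUNT = Counter(
      "calminer_http_requests_total", "Total HTTP requests",
      ["method", "endpoint", "status"],
  )
  REQUEST_LATENCY = Histogram(
      "calminer_http_request_duration_seconds", "HTTP request latency",
      ["method", "endpoint"],
  )

  def observe_request(*, method: str, endpoint: str, status: int, seconds: float) -> None:
      # Count the request and record its latency under the same label set.
      REQUEST_COUNT.labels(method=method, endpoint=endpoint, status=str(status)).inc()
      REQUEST_LATENCY.labels(method=method, endpoint=endpoint).observe(seconds)
  ```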
- Added a database model for persistent metric storage, with aggregation endpoints for KPIs like request latency, error rates, and throughput.
- Created FastAPI middleware for automatic request metric collection and background persistence to the database.
- Extended the monitoring router with performance metrics API endpoints and detailed health checks.
- Added an Alembic migration for the performance_metrics table and updated model imports.
- Completed concurrent interaction testing implementation, validating database transaction isolation under threading and establishing async testing framework for future concurrency enhancements.
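  A sketch of the threading pattern described (fixture and assertion names are illustrative, not the project's actual test code):

  ```python
  import threading

  from models import Project  # exported by models/__init__.py below

  def test_concurrent_renames_are_isolated(session_factory) -> None:
      errors: list[Exception] = []

      def rename(project_id: int, name: str) -> None:
          session = session_factory()
          try:
              session.get(Project, project_id).name = name
              session.commit()
          except Exception as exc:  # surface failures to the main thread
              errors.append(exc)
          finally:
              session.close()

      threads = [threading.Thread(target=rename, args=(1, f"name-{i}")) for i in range(8)]
      for thread in threads:
          thread.start()
      for thread in threads:
          thread.join()
      assert not errors  # each commit must succeed in its own transaction
  ```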
- Implemented comprehensive deployment automation with Docker Compose configurations for development, staging, and production environments ensuring environment parity.
- Set up Kubernetes manifests with resource limits, health checks, and secrets management for production deployment.
- Configured CI/CD workflows for automated Docker image building, registry pushing, and Kubernetes deployment to staging/production environments.
- Documented deployment processes, environment configurations, and CI/CD workflows in project documentation.
- Validated the deployment automation through Docker Compose configuration testing and a review of the CI/CD pipeline structure.
## 2025-11-10
- Added dedicated pytest coverage for guard dependencies, exercising success plus failure paths (missing session, inactive user, missing roles, project/scenario access errors) via `tests/test_dependencies_guards.py`.
- Added integration tests in `tests/test_authorization_integration.py` verifying anonymous 401 responses, role-based 403s, and authorized project manager flows across API and UI endpoints.
- Implemented environment-driven admin bootstrap settings, wired the `bootstrap_admin` helper into FastAPI startup, added pytest coverage for creation/idempotency/reset logic, and documented operational guidance in the RBAC plan and security concept.
- Retired the legacy authentication RBAC implementation plan document after migrating its guidance into live documentation and synchronized the contributor instructions to reflect the removal.
- Extended authorization helper layer with project/scenario ownership lookups, integrated them into FastAPI dependencies, refreshed pytest fixtures to keep the suite authenticated, and documented the new patterns across RBAC plan and security guides.
- Completed the Authentication & RBAC checklist by shipping the new models, migrations, repositories, guard dependencies, and integration tests.
- Documented the project/scenario import/export field mapping and file format guidelines in `calminer-docs/requirements/FR-008.md`, and introduced `schemas/imports.py` with Pydantic models that normalise incoming CSV/Excel rows for projects and scenarios.
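  An illustrative shape of such a row model, assuming Pydantic v2 (field names are examples; the authoritative definitions live in `schemas/imports.py`):

  ```python
  from pydantic import BaseModel, field_validator

  class ProjectImportRow(BaseModel):
      name: str
      operation_type: str
      currency: str | None = None

      @field_validator("name", "operation_type", mode="before")
      @classmethod
      def _strip(cls, value: str) -> str:
          # Normalise whitespace coming from CSV/Excel cells before validation.
          return value.strip()
  ```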
- Added `services/importers.py` to load CSV/XLSX files into the new import schemas, pulled in `openpyxl` for Excel support, and covered the parsing behaviour with `tests/test_import_parsing.py`.
- Expanded the import ingestion workflow with staging previews, transactional persistence commits, FastAPI preview/commit endpoints under `/imports`, and new API tests (`tests/test_import_ingestion.py`, `tests/test_import_api.py`) ensuring end-to-end coverage.
- Added persistent audit logging via `ImportExportLog`, structured log emission, Prometheus metrics instrumentation, `/metrics` endpoint exposure, and updated operator/deployment documentation to guide monitoring setup.

## 2025-11-09
- Captured current implementation status, requirements coverage, missing features, and prioritized roadmap in `calminer-docs/implementation_status.md` to guide future development.
- Implemented cookie-based authentication session middleware with automatic access token refresh, logout handling, navigation adjustments, and documentation/test updates capturing the new behaviour.
- Delivered idempotent seeding utilities with `scripts/initial_data.py`, entry-point runner `scripts/00_initial_data.py`, documentation updates, and pytest coverage to verify role/admin provisioning.
- Secured project and scenario routers with RBAC guard dependencies, enforced repository access checks via helper utilities, and aligned template routes with FastAPI dependency injection patterns.
@@ -11,12 +11,21 @@ def _build_database_url() -> str:
    """Construct the SQLAlchemy database URL from granular environment vars.

    Falls back to `DATABASE_URL` for backward compatibility.
    Supports SQLite when CALMINER_USE_SQLITE is set.
    """

    legacy_url = os.environ.get("DATABASE_URL", "")
    if legacy_url and legacy_url.strip() != "":
        return legacy_url

    use_sqlite = os.environ.get("CALMINER_USE_SQLITE", "").lower() in ("true", "1", "yes")
    if use_sqlite:
        # Use SQLite database
        db_path = os.environ.get("DATABASE_PATH", "./data/calminer.db")
        # Ensure the directory exists
        os.makedirs(os.path.dirname(db_path), exist_ok=True)
        return f"sqlite:///{db_path}"

    driver = os.environ.get("DATABASE_DRIVER", "postgresql")
    host = os.environ.get("DATABASE_HOST")
    port = os.environ.get("DATABASE_PORT", "5432")
@@ -54,7 +63,15 @@ def _build_database_url() -> str:
DATABASE_URL = _build_database_url()

engine = create_engine(DATABASE_URL, echo=True, future=True)
-SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Avoid expiring ORM objects on commit so that objects returned from UnitOfWork
# remain usable for the duration of the request cycle without causing
# DetachedInstanceError when accessed after the session commits.
SessionLocal = sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine,
    expire_on_commit=False,
)
Base = declarative_base()
@@ -7,6 +7,8 @@ from functools import lru_cache

from typing import Optional

from services.pricing import PricingMetadata
from services.security import JWTSettings

@@ -56,6 +58,10 @@ class Settings:
    admin_password: str = "ChangeMe123!"
    admin_roles: tuple[str, ...] = ("admin",)
    admin_force_reset: bool = False
    pricing_default_payable_pct: float = 100.0
    pricing_default_currency: str | None = "USD"
    pricing_moisture_threshold_pct: float = 8.0
    pricing_moisture_penalty_per_pct: float = 0.0

    @classmethod
    def from_environment(cls) -> "Settings":
@@ -105,6 +111,18 @@ class Settings:
            admin_force_reset=cls._bool_from_env(
                "CALMINER_SEED_FORCE", False
            ),
            pricing_default_payable_pct=cls._float_from_env(
                "CALMINER_PRICING_DEFAULT_PAYABLE_PCT", 100.0
            ),
            pricing_default_currency=cls._optional_str(
                "CALMINER_PRICING_DEFAULT_CURRENCY", "USD"
            ),
            pricing_moisture_threshold_pct=cls._float_from_env(
                "CALMINER_PRICING_MOISTURE_THRESHOLD_PCT", 8.0
            ),
            pricing_moisture_penalty_per_pct=cls._float_from_env(
                "CALMINER_PRICING_MOISTURE_PENALTY_PER_PCT", 0.0
            ),
        )

    @staticmethod
@@ -145,6 +163,23 @@ class Settings:
            seen.add(role_name)
        return tuple(ordered)

    @staticmethod
    def _float_from_env(name: str, default: float) -> float:
        raw_value = os.getenv(name)
        if raw_value is None:
            return default
        try:
            return float(raw_value)
        except ValueError:
            return default

    @staticmethod
    def _optional_str(name: str, default: str | None = None) -> str | None:
        raw_value = os.getenv(name)
        if raw_value is None or raw_value.strip() == "":
            return default
        return raw_value.strip()

    def jwt_settings(self) -> JWTSettings:
        """Build runtime JWT settings compatible with token helpers."""

@@ -180,6 +215,16 @@ class Settings:
            force_reset=self.admin_force_reset,
        )

    def pricing_metadata(self) -> PricingMetadata:
        """Build pricing metadata defaults."""

        return PricingMetadata(
            default_payable_pct=self.pricing_default_payable_pct,
            default_currency=self.pricing_default_currency,
            moisture_threshold_pct=self.pricing_moisture_threshold_pct,
            moisture_penalty_per_pct=self.pricing_moisture_penalty_per_pct,
        )


@lru_cache(maxsize=1)
def get_settings() -> Settings:
157 dependencies.py
@@ -22,6 +22,10 @@ from services.session import (
)
from services.unit_of_work import UnitOfWork
from services.importers import ImportIngestionService
from services.pricing import PricingMetadata
from services.navigation import NavigationService
from services.scenario_evaluation import ScenarioPricingConfig, ScenarioPricingEvaluator
from services.repositories import pricing_settings_to_metadata


def get_unit_of_work() -> Generator[UnitOfWork, None, None]:
@@ -46,6 +50,37 @@ def get_application_settings() -> Settings:
    return get_settings()


def get_pricing_metadata(
    settings: Settings = Depends(get_application_settings),
    uow: UnitOfWork = Depends(get_unit_of_work),
) -> PricingMetadata:
    """Return pricing metadata defaults sourced from persisted pricing settings."""

    stored = uow.get_pricing_metadata()
    if stored is not None:
        return stored

    fallback = settings.pricing_metadata()
    seed_result = uow.ensure_default_pricing_settings(metadata=fallback)
    return pricing_settings_to_metadata(seed_result.settings)


def get_navigation_service(
    uow: UnitOfWork = Depends(get_unit_of_work),
) -> NavigationService:
    if not uow.navigation:
        raise RuntimeError("Navigation repository is not initialised")
    return NavigationService(uow.navigation)


def get_pricing_evaluator(
    metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> ScenarioPricingEvaluator:
    """Provide a configured scenario pricing evaluator."""

    return ScenarioPricingEvaluator(ScenarioPricingConfig(metadata=metadata))


def get_jwt_settings() -> JWTSettings:
    """Provide JWT runtime configuration derived from settings."""

@@ -127,6 +162,28 @@ def require_authenticated_user(
    return user


def require_authenticated_user_html(
    request: Request,
    session: AuthSession = Depends(get_auth_session),
) -> User:
    """HTML-aware authenticated dependency that redirects anonymous sessions."""

    user = session.user
    if user is None or session.tokens.is_empty:
        login_url = str(request.url_for("auth.login_form"))
        raise HTTPException(
            status_code=status.HTTP_303_SEE_OTHER,
            headers={"Location": login_url},
        )

    if not user.is_active:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="User account is disabled.",
        )
    return user


def _user_role_names(user: User) -> set[str]:
    roles: Iterable[Role] = getattr(user, "roles", []) or []
    return {role.name for role in roles}
@@ -160,12 +217,55 @@ def require_any_role(*roles: str) -> Callable[[User], User]:
    return require_roles(*roles)


-def require_project_resource(*, require_manage: bool = False) -> Callable[[int], Project]:
def require_roles_html(*roles: str) -> Callable[[Request], User]:
    """Ensure user is authenticated for HTML responses; redirect anonymous to login."""

    required = tuple(role.strip() for role in roles if role.strip())
    if not required:
        raise ValueError("require_roles_html requires at least one role name")

    def _dependency(
        request: Request,
        session: AuthSession = Depends(get_auth_session),
    ) -> User:
        user = session.user
        if user is None:
            login_url = str(request.url_for("auth.login_form"))
            raise HTTPException(
                status_code=status.HTTP_303_SEE_OTHER,
                headers={"Location": login_url},
            )

        if user.is_superuser:
            return user

        role_names = _user_role_names(user)
        if not any(role in role_names for role in required):
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Insufficient permissions for this action.",
            )
        return user

    return _dependency


def require_any_role_html(*roles: str) -> Callable[[Request], User]:
    """Alias of require_roles_html for readability."""

    return require_roles_html(*roles)


def require_project_resource(
    *,
    require_manage: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int], Project]:
    """Dependency factory that resolves a project with authorization checks."""

    def _dependency(
        project_id: int,
-        user: User = Depends(require_authenticated_user),
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Project:
        try:
@@ -190,13 +290,16 @@ def require_project_resource(*, require_manage: bool = False) -> Callable[[int],


def require_scenario_resource(
-    *, require_manage: bool = False, with_children: bool = False
    *,
    require_manage: bool = False,
    with_children: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int], Scenario]:
    """Dependency factory that resolves a scenario with authorization checks."""

    def _dependency(
        scenario_id: int,
-        user: User = Depends(require_authenticated_user),
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Scenario:
        try:
@@ -222,14 +325,17 @@ def require_scenario_resource(


def require_project_scenario_resource(
-    *, require_manage: bool = False, with_children: bool = False
    *,
    require_manage: bool = False,
    with_children: bool = False,
    user_dependency: Callable[..., User] = require_authenticated_user,
) -> Callable[[int, int], Scenario]:
    """Dependency factory ensuring a scenario belongs to the given project and is accessible."""

    def _dependency(
        project_id: int,
        scenario_id: int,
-        user: User = Depends(require_authenticated_user),
        user: User = Depends(user_dependency),
        uow: UnitOfWork = Depends(get_unit_of_work),
    ) -> Scenario:
        try:
@@ -253,3 +359,42 @@ def require_project_scenario_resource(
            ) from exc

    return _dependency


def require_project_resource_html(
    *, require_manage: bool = False
) -> Callable[[int], Project]:
    """HTML-aware project loader that redirects anonymous sessions."""

    return require_project_resource(
        require_manage=require_manage,
        user_dependency=require_authenticated_user_html,
    )


def require_scenario_resource_html(
    *,
    require_manage: bool = False,
    with_children: bool = False,
) -> Callable[[int], Scenario]:
    """HTML-aware scenario loader that redirects anonymous sessions."""

    return require_scenario_resource(
        require_manage=require_manage,
        with_children=with_children,
        user_dependency=require_authenticated_user_html,
    )


def require_project_scenario_resource_html(
    *,
    require_manage: bool = False,
    with_children: bool = False,
) -> Callable[[int, int], Scenario]:
    """HTML-aware project-scenario loader redirecting anonymous sessions."""

    return require_project_scenario_resource(
        require_manage=require_manage,
        with_children=with_children,
        user_dependency=require_authenticated_user_html,
    )
59 docker-compose.override.yml Normal file
@@ -0,0 +1,59 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=development
      - DEBUG=true
      - LOG_LEVEL=DEBUG
      # Override database to use local postgres service
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=calminer
      - DATABASE_PASSWORD=calminer_password
      - DATABASE_NAME=calminer_db
      - DATABASE_DRIVER=postgresql
      # Development-specific settings
      - CALMINER_EXPORT_MAX_ROWS=1000
      - CALMINER_IMPORT_MAX_ROWS=10000
    volumes:
      # Mount source code for live reloading (if using --reload)
      - .:/app:ro
      # Override logs volume to local for easier access
      - ./logs:/app/logs
    ports:
      - "8003:8003"
    # Override command for development with reload
    command:
      [
        "main:app",
        "--host",
        "0.0.0.0",
        "--port",
        "8003",
        "--reload",
        "--workers",
        "1",
      ]
    depends_on:
      - postgres
    restart: unless-stopped

  postgres:
    environment:
      - POSTGRES_USER=calminer
      - POSTGRES_PASSWORD=calminer_password
      - POSTGRES_DB=calminer_db
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped

volumes:
  postgres_data:
73 docker-compose.prod.yml Normal file
@@ -0,0 +1,73 @@
version: "3.8"

services:
  app:
    image: git.allucanget.biz/allucanget/calminer:latest
    environment:
      - ENVIRONMENT=production
      - DEBUG=false
      - LOG_LEVEL=WARNING
      # Database configuration - must be provided externally
      - DATABASE_HOST=${DATABASE_HOST}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME}
      - DATABASE_DRIVER=postgresql
      # Production-specific settings
      - CALMINER_EXPORT_MAX_ROWS=100000
      - CALMINER_IMPORT_MAX_ROWS=100000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=3600
    ports:
      - "8003:8003"
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    # Production health checks
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for production
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 1G
        reservations:
          cpus: "0.5"
          memory: 512M

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Production postgres health check
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DATABASE_USER} -d ${DATABASE_NAME}"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for postgres
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 2G
        reservations:
          cpus: "0.5"
          memory: 1G

volumes:
  postgres_data:
62 docker-compose.staging.yml Normal file
@@ -0,0 +1,62 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=staging
      - DEBUG=false
      - LOG_LEVEL=INFO
      # Database configuration - can be overridden by external env
      - DATABASE_HOST=${DATABASE_HOST:-postgres}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER:-calminer}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME:-calminer_db}
      - DATABASE_DRIVER=postgresql
      # Staging-specific settings
      - CALMINER_EXPORT_MAX_ROWS=50000
      - CALMINER_IMPORT_MAX_ROWS=50000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=600
    ports:
      - "8003:8003"
    depends_on:
      - postgres
    restart: unless-stopped
    # Health check for staging
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER:-calminer}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME:-calminer_db}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Health check for postgres
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "pg_isready -U ${DATABASE_USER:-calminer} -d ${DATABASE_NAME:-calminer_db}",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  postgres_data:
@@ -8,11 +8,13 @@ services:
    ports:
      - "8003:8003"
    environment:
-      - DATABASE_HOST=postgres
-      - DATABASE_PORT=5432
-      - DATABASE_USER=calminer
-      - DATABASE_PASSWORD=calminer_password
-      - DATABASE_NAME=calminer_db
      # Environment-specific variables should be set in override files
      - ENVIRONMENT=${ENVIRONMENT:-production}
      - DATABASE_HOST=${DATABASE_HOST:-postgres}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME}
      - DATABASE_DRIVER=postgresql
    depends_on:
      - postgres
@@ -23,9 +25,9 @@ services:
  postgres:
    image: postgres:17
    environment:
-      - POSTGRES_USER=calminer
-      - POSTGRES_PASSWORD=calminer_password
-      - POSTGRES_DB=calminer_db
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME}
    ports:
      - "5432:5432"
    volumes:
14 k8s/configmap.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: calminer-config
data:
  DATABASE_HOST: "calminer-db"
  DATABASE_PORT: "5432"
  DATABASE_USER: "calminer"
  DATABASE_NAME: "calminer_db"
  DATABASE_DRIVER: "postgresql"
  CALMINER_EXPORT_MAX_ROWS: "10000"
  CALMINER_EXPORT_METADATA: "true"
  CALMINER_IMPORT_STAGING_TTL: "300"
  CALMINER_IMPORT_MAX_ROWS: "50000"
54 k8s/deployment.yaml Normal file
@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calminer-app
  labels:
    app: calminer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: calminer
  template:
    metadata:
      labels:
        app: calminer
    spec:
      containers:
        - name: calminer
          image: registry.example.com/calminer:latest
          ports:
            - containerPort: 8003
          envFrom:
            - configMapRef:
                name: calminer-config
            - secretRef:
                name: calminer-secrets
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 5
            periodSeconds: 5
      initContainers:
        - name: wait-for-db
          image: postgres:17
          command:
            [
              "sh",
              "-c",
              "until pg_isready -h calminer-db -p 5432; do echo waiting for database; sleep 2; done;",
            ]
18 k8s/ingress.yaml Normal file
@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: calminer-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - host: calminer.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: calminer-service
                port:
                  number: 80
13 k8s/postgres-service.yaml Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-db
  labels:
    app: calminer-db
spec:
  selector:
    app: calminer-db
  ports:
    - port: 5432
      targetPort: 5432
  clusterIP: None # Headless service for StatefulSet
48 k8s/postgres.yaml Normal file
@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: calminer-db
spec:
  serviceName: calminer-db
  replicas: 1
  selector:
    matchLabels:
      app: calminer-db
  template:
    metadata:
      labels:
        app: calminer-db
    spec:
      containers:
        - name: postgres
          image: postgres:17
          ports:
            - containerPort: 5432
          env:
            - name: POSTGRES_USER
              value: "calminer"
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: calminer-secrets
                  key: DATABASE_PASSWORD
            - name: POSTGRES_DB
              value: "calminer_db"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          volumeMounts:
            - name: postgres-storage
              mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
    - metadata:
        name: postgres-storage
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi
8 k8s/secret.yaml Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: calminer-secrets
type: Opaque
data:
  DATABASE_PASSWORD: Y2FsbWluZXJfcGFzc3dvcmQ= # base64 encoded 'calminer_password'
  CALMINER_SEED_ADMIN_PASSWORD: Q2hhbmdlTWUxMjMh # base64 encoded 'ChangeMe123!'
14 k8s/service.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-service
  labels:
    app: calminer
spec:
  selector:
    app: calminer
  ports:
    - port: 80
      targetPort: 8003
      protocol: TCP
  type: ClusterIP
110 main.py
@@ -1,36 +1,90 @@
import logging
from contextlib import asynccontextmanager
from typing import Awaitable, Callable

from fastapi import FastAPI, Request, Response
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse

from config.database import Base, engine
from config.settings import get_settings
from middleware.auth_session import AuthSessionMiddleware
from middleware.metrics import MetricsMiddleware
from middleware.validation import validate_json
from models import (
    FinancialInput,
    Project,
    Scenario,
    SimulationParameter,
)
from routes.auth import router as auth_router
from routes.dashboard import router as dashboard_router
from routes.calculations import router as calculations_router
from routes.imports import router as imports_router
from routes.exports import router as exports_router
from routes.projects import router as projects_router
from routes.reports import router as reports_router
from routes.scenarios import router as scenarios_router
-from services.bootstrap import bootstrap_admin

-# Initialize database schema (imports above ensure models are registered)
-Base.metadata.create_all(bind=engine)

-app = FastAPI()

-app.add_middleware(AuthSessionMiddleware)
from routes.ui import router as ui_router
from routes.navigation import router as navigation_router
from monitoring import router as monitoring_router
from services.bootstrap import bootstrap_admin, bootstrap_pricing_settings
from scripts.init_db import init_db as init_db_script

logger = logging.getLogger(__name__)


async def _bootstrap_startup() -> None:
    settings = get_settings()
    admin_settings = settings.admin_bootstrap_settings()
    pricing_metadata = settings.pricing_metadata()
    try:
        try:
            init_db_script()
        except Exception:
            logger.exception(
                "DB initializer failed; continuing to bootstrap (non-fatal)")

        role_result, admin_result = bootstrap_admin(settings=admin_settings)
        pricing_result = bootstrap_pricing_settings(metadata=pricing_metadata)
        logger.info(
            "Admin bootstrap completed: roles=%s created=%s updated=%s rotated=%s assigned=%s",
            role_result.ensured,
            admin_result.created_user,
            admin_result.updated_user,
            admin_result.password_rotated,
            admin_result.roles_granted,
        )
        try:
            seed = pricing_result.seed
            slug = getattr(seed.settings, "slug", None) if seed and getattr(
                seed, "settings", None) else None
            created = getattr(seed, "created", None)
            updated_fields = getattr(seed, "updated_fields", None)
            impurity_upserts = getattr(seed, "impurity_upserts", None)
            logger.info(
                "Pricing settings bootstrap completed: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
                slug,
                created,
                updated_fields,
                impurity_upserts,
                pricing_result.projects_assigned,
            )
        except Exception:
            logger.info(
                "Pricing settings bootstrap completed (partial): projects_assigned=%s",
                pricing_result.projects_assigned,
            )
    except Exception:  # pragma: no cover - defensive logging
        logger.exception(
            "Failed to bootstrap administrator or pricing settings")


@asynccontextmanager
async def app_lifespan(_: FastAPI):
    await _bootstrap_startup()
    yield


app = FastAPI(lifespan=app_lifespan)

app.add_middleware(AuthSessionMiddleware)
app.add_middleware(MetricsMiddleware)


@app.middleware("http")
async def json_validation(
    request: Request, call_next: Callable[[Request], Awaitable[Response]]
@@ -43,27 +97,23 @@ async def health() -> dict[str, str]:
    return {"status": "ok"}


-@app.on_event("startup")
-async def ensure_admin_bootstrap() -> None:
-    settings = get_settings().admin_bootstrap_settings()
-    try:
-        role_result, admin_result = bootstrap_admin(settings=settings)
-        logger.info(
-            "Admin bootstrap completed: roles=%s created=%s updated=%s rotated=%s assigned=%s",
-            role_result.ensured,
-            admin_result.created_user,
-            admin_result.updated_user,
-            admin_result.password_rotated,
-            admin_result.roles_granted,
-        )
-    except Exception:  # pragma: no cover - defensive logging
-        logger.exception("Failed to bootstrap administrator account")
@app.get("/favicon.ico", include_in_schema=False)
async def favicon() -> Response:
    static_directory = "static"
    favicon_img = "favicon.ico"
    return FileResponse(f"{static_directory}/{favicon_img}")


app.include_router(dashboard_router)
app.include_router(calculations_router)
app.include_router(auth_router)
app.include_router(imports_router)
app.include_router(exports_router)
app.include_router(projects_router)
app.include_router(scenarios_router)
app.include_router(reports_router)
app.include_router(ui_router)
app.include_router(monitoring_router)
app.include_router(navigation_router)

app.mount("/static", StaticFiles(directory="static"), name="static")
@@ -8,7 +8,9 @@ from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoin
from starlette.types import ASGIApp

from config.settings import Settings, get_settings
from sqlalchemy.orm.exc import DetachedInstanceError
from models import User
from monitoring.metrics import ACTIVE_CONNECTIONS
from services.exceptions import EntityNotFoundError
from services.security import (
    JWTSettings,
@@ -45,6 +47,8 @@ class _ResolutionResult:
class AuthSessionMiddleware(BaseHTTPMiddleware):
    """Resolve authenticated users from session cookies and refresh tokens."""

    _active_sessions: int = 0

    def __init__(
        self,
        app: ASGIApp,
@@ -61,9 +65,44 @@ class AuthSessionMiddleware(BaseHTTPMiddleware):

    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
        resolved = self._resolve_session(request)

        # Track active sessions for authenticated users
        try:
            user_active = bool(resolved.session.user and getattr(
                resolved.session.user, "is_active", False))
        except DetachedInstanceError:
            user_active = False

        if user_active:
            AuthSessionMiddleware._active_sessions += 1
            ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

        response: Response | None = None
        try:
            response = await call_next(request)
-            self._apply_session(response, resolved)
            return response
        finally:
            # Always decrement the active sessions counter if we incremented it.
            if user_active:
                AuthSessionMiddleware._active_sessions = max(
                    0, AuthSessionMiddleware._active_sessions - 1)
                ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

            # Only apply session cookies if a response was produced by downstream
            # application. If an exception occurred before a response was created
            # we avoid raising another error here.
            import logging
            if response is not None:
                try:
                    self._apply_session(response, resolved)
                except Exception:
                    logging.getLogger(__name__).exception(
                        "Failed to apply session cookies to response"
                    )
            else:
                logging.getLogger(__name__).debug(
                    "AuthSessionMiddleware: no response produced by downstream app (response is None)"
                )

    def _resolve_session(self, request: Request) -> _ResolutionResult:
        settings = self._settings_provider()
@@ -106,6 +145,7 @@ class AuthSessionMiddleware(BaseHTTPMiddleware):

        session.user = user
        session.scopes = tuple(payload.scopes)
        session.set_role_slugs(role.name for role in getattr(user, "roles", []) if role)
        return True

    def _try_refresh_token(
@@ -127,6 +167,7 @@ class AuthSessionMiddleware(BaseHTTPMiddleware):

        session.user = user
        session.scopes = tuple(payload.scopes)
        session.set_role_slugs(role.name for role in getattr(user, "roles", []) if role)

        access_token = create_access_token(
            str(user.id),
58 middleware/metrics.py Normal file
@@ -0,0 +1,58 @@
from __future__ import annotations

import time
from typing import Callable

from fastapi import Request, Response
from starlette.middleware.base import BaseHTTPMiddleware

from monitoring.metrics import observe_request
from services.metrics import get_metrics_service


class MetricsMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: Callable[[Request], Response]) -> Response:
        start_time = time.time()
        response = await call_next(request)
        process_time = time.time() - start_time

        observe_request(
            method=request.method,
            endpoint=request.url.path,
            status=response.status_code,
            seconds=process_time,
        )

        # Store in database asynchronously
        background_tasks = getattr(request.state, "background_tasks", None)
        if background_tasks:
            background_tasks.add_task(
                store_request_metric,
                method=request.method,
                endpoint=request.url.path,
                status_code=response.status_code,
                duration_seconds=process_time,
            )

        return response


async def store_request_metric(
    method: str, endpoint: str, status_code: int, duration_seconds: float
) -> None:
    """Store request metric in database."""
    try:
        service = get_metrics_service()
        service.store_metric(
            metric_name="http_request",
            value=duration_seconds,
            labels={"method": method, "endpoint": endpoint,
                    "status": status_code},
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
    except Exception:
        # Swallow persistence errors so metric storage never fails the request
        pass
@@ -10,10 +10,14 @@ async def validate_json(
) -> Response:
    # Only validate JSON for requests with a body
    if request.method in ("POST", "PUT", "PATCH"):
        # Only attempt JSON parsing when the client indicates a JSON content type.
        content_type = (request.headers.get("content-type") or "").lower()
        if "json" in content_type:
            try:
                # attempt to parse json body
                await request.json()
            except Exception:
-                raise HTTPException(status_code=400, detail="Invalid JSON payload")
                raise HTTPException(
                    status_code=400, detail="Invalid JSON payload")
    response = await call_next(request)
    return response
@@ -1,27 +1,56 @@
"""Database models and shared metadata for the CalMiner domain."""

-from .financial_input import FinancialCategory, FinancialInput
from .financial_input import FinancialInput
from .metadata import (
    COST_BUCKET_METADATA,
    RESOURCE_METADATA,
    STOCHASTIC_VARIABLE_METADATA,
    CostBucket,
    ResourceDescriptor,
    ResourceType,
    StochasticVariable,
    StochasticVariableDescriptor,
)
-from .project import MiningOperationType, Project
-from .scenario import Scenario, ScenarioStatus
-from .simulation_parameter import DistributionType, SimulationParameter
from .performance_metric import PerformanceMetric
from .pricing_settings import (
    PricingImpuritySettings,
    PricingMetalSettings,
    PricingSettings,
)
from .enums import (
    CostBucket,
    DistributionType,
    FinancialCategory,
    MiningOperationType,
    ResourceType,
    ScenarioStatus,
    StochasticVariable,
)
from .project import Project
from .scenario import Scenario
from .simulation_parameter import SimulationParameter
from .user import Role, User, UserRole, password_context
from .navigation import NavigationGroup, NavigationLink

from .profitability_snapshot import ProjectProfitability, ScenarioProfitability
from .capex_snapshot import ProjectCapexSnapshot, ScenarioCapexSnapshot
from .opex_snapshot import (
    ProjectOpexSnapshot,
    ScenarioOpexSnapshot,
)

__all__ = [
    "FinancialCategory",
    "FinancialInput",
    "MiningOperationType",
    "Project",
    "ProjectProfitability",
    "ProjectCapexSnapshot",
    "ProjectOpexSnapshot",
    "PricingSettings",
    "PricingMetalSettings",
    "PricingImpuritySettings",
    "Scenario",
    "ScenarioProfitability",
    "ScenarioCapexSnapshot",
    "ScenarioOpexSnapshot",
    "ScenarioStatus",
    "DistributionType",
    "SimulationParameter",
@@ -37,4 +66,7 @@ __all__ = [
    "Role",
    "UserRole",
    "password_context",
    "PerformanceMetric",
    "NavigationGroup",
    "NavigationLink",
]
111 models/capex_snapshot.py Normal file
@@ -0,0 +1,111 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectCapexSnapshot(Base):
    """Snapshot of aggregated capex metrics at the project level."""

    __tablename__ = "project_capex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    total_capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    contingency_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    contingency_amount: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    total_with_contingency: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="capex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectCapexSnapshot(id={id!r}, project_id={project_id!r}, total_capex={total_capex!r})".format(
                id=self.id, project_id=self.project_id, total_capex=self.total_capex
            )
        )


class ScenarioCapexSnapshot(Base):
    """Snapshot of capex metrics for an individual scenario."""

    __tablename__ = "scenario_capex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    total_capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    contingency_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    contingency_amount: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    total_with_contingency: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="capex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioCapexSnapshot(id={id!r}, scenario_id={scenario_id!r}, total_capex={total_capex!r})".format(
                id=self.id, scenario_id=self.scenario_id, total_capex=self.total_capex
            )
        )
96
models/enums.py
Normal file
96
models/enums.py
Normal file
@@ -0,0 +1,96 @@
from __future__ import annotations

from enum import Enum
from typing import Type

from sqlalchemy import Enum as SQLEnum


def sql_enum(enum_cls: Type[Enum], *, name: str) -> SQLEnum:
    """Build a SQLAlchemy Enum that maps using the enum member values."""

    return SQLEnum(
        enum_cls,
        name=name,
        create_type=False,
        validate_strings=True,
        values_callable=lambda enum_cls: [member.value for member in enum_cls],
    )


class MiningOperationType(str, Enum):
    """Supported mining operation categories."""

    OPEN_PIT = "open_pit"
    UNDERGROUND = "underground"
    IN_SITU_LEACH = "in_situ_leach"
    PLACER = "placer"
    QUARRY = "quarry"
    MOUNTAINTOP_REMOVAL = "mountaintop_removal"
    OTHER = "other"


class ScenarioStatus(str, Enum):
    """Lifecycle states for project scenarios."""

    DRAFT = "draft"
    ACTIVE = "active"
    ARCHIVED = "archived"


class FinancialCategory(str, Enum):
    """Enumeration of cost and revenue classifications."""

    CAPITAL_EXPENDITURE = "capex"
    OPERATING_EXPENDITURE = "opex"
    REVENUE = "revenue"
    CONTINGENCY = "contingency"
    OTHER = "other"


class DistributionType(str, Enum):
    """Supported stochastic distribution families for simulations."""

    NORMAL = "normal"
    TRIANGULAR = "triangular"
    UNIFORM = "uniform"
    LOGNORMAL = "lognormal"
    CUSTOM = "custom"


class ResourceType(str, Enum):
    """Primary consumables and resources used in mining operations."""

    DIESEL = "diesel"
    ELECTRICITY = "electricity"
    WATER = "water"
    EXPLOSIVES = "explosives"
    REAGENTS = "reagents"
    LABOR = "labor"
    EQUIPMENT_HOURS = "equipment_hours"
    TAILINGS_CAPACITY = "tailings_capacity"


class CostBucket(str, Enum):
    """Granular cost buckets aligned with project accounting."""

    CAPITAL_INITIAL = "capital_initial"
    CAPITAL_SUSTAINING = "capital_sustaining"
    OPERATING_FIXED = "operating_fixed"
    OPERATING_VARIABLE = "operating_variable"
    MAINTENANCE = "maintenance"
    RECLAMATION = "reclamation"
    ROYALTIES = "royalties"
    GENERAL_ADMIN = "general_admin"


class StochasticVariable(str, Enum):
    """Domain variables that typically require probabilistic modelling."""

    ORE_GRADE = "ore_grade"
    RECOVERY_RATE = "recovery_rate"
    METAL_PRICE = "metal_price"
    OPERATING_COST = "operating_cost"
    CAPITAL_COST = "capital_cost"
    DISCOUNT_RATE = "discount_rate"
    THROUGHPUT = "throughput"
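
The `sql_enum` helper above is what ties every enum column in the models below to its member values ("draft", "open_pit", ...) rather than member names. A minimal usage sketch, not part of this commit (`Example` and its table are hypothetical):

from sqlalchemy import Integer
from sqlalchemy.orm import Mapped, mapped_column

from config.database import Base
from models.enums import ScenarioStatus, sql_enum


class Example(Base):  # hypothetical model, for illustration only
    __tablename__ = "examples"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # Stored as "draft"/"active"/"archived" thanks to values_callable;
    # create_type=False leaves type creation to the migration tooling.
    status: Mapped[ScenarioStatus] = mapped_column(
        sql_enum(ScenarioStatus, name="scenariostatus"),
        nullable=False,
        default=ScenarioStatus.DRAFT,
    )
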
@@ -1,13 +1,11 @@
from __future__ import annotations

from datetime import date, datetime
from enum import Enum
from typing import TYPE_CHECKING

from sqlalchemy import (
    Date,
    DateTime,
    Enum as SQLEnum,
    ForeignKey,
    Integer,
    Numeric,
@@ -16,36 +14,16 @@ from sqlalchemy import (
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates

from sqlalchemy import (
    Date,
    DateTime,
    Enum as SQLEnum,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base
from .metadata import CostBucket
from .enums import CostBucket, FinancialCategory, sql_enum
from services.currency import normalise_currency

if TYPE_CHECKING:  # pragma: no cover
    from .scenario import Scenario


class FinancialCategory(str, Enum):
    """Enumeration of cost and revenue classifications."""

    CAPITAL_EXPENDITURE = "capex"
    OPERATING_EXPENDITURE = "opex"
    REVENUE = "revenue"
    CONTINGENCY = "contingency"
    OTHER = "other"


class FinancialInput(Base):
    """Line-item financial assumption attached to a scenario."""

@@ -57,10 +35,10 @@ class FinancialInput(Base):
    )
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    category: Mapped[FinancialCategory] = mapped_column(
        SQLEnum(FinancialCategory), nullable=False
        sql_enum(FinancialCategory, name="financialcategory"), nullable=False
    )
    cost_bucket: Mapped[CostBucket | None] = mapped_column(
        SQLEnum(CostBucket), nullable=True
        sql_enum(CostBucket, name="costbucket"), nullable=True
    )
    amount: Mapped[float] = mapped_column(Numeric(18, 2), nullable=False)
    currency: Mapped[str | None] = mapped_column(String(3), nullable=True)
@@ -73,16 +51,12 @@ class FinancialInput(Base):
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped["Scenario"] = relationship("Scenario", back_populates="financial_inputs")
    scenario: Mapped["Scenario"] = relationship(
        "Scenario", back_populates="financial_inputs")

    @validates("currency")
    def _validate_currency(self, key: str, value: str | None) -> str | None:
        if value is None:
            return value
        value = value.upper()
        if len(value) != 3:
            raise ValueError("Currency code must be a 3-letter ISO 4217 value")
        return value
        return normalise_currency(value)

    def __repr__(self) -> str:  # pragma: no cover
        return f"FinancialInput(id={self.id!r}, scenario_id={self.scenario_id!r}, name={self.name!r})"
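
The inline uppercase-and-length check is replaced by the shared `services.currency.normalise_currency` helper, which `PricingSettings` and `Scenario` reuse further down. Its implementation is not shown in this diff; a sketch consistent with the removed inline validator might look like:

# Assumed shape of services/currency.py, inferred from the inline validator
# it replaces; the real helper may differ in details.
def normalise_currency(value: str | None) -> str | None:
    if value is None:
        return None
    value = value.strip().upper()
    if len(value) != 3 or not value.isalpha():
        raise ValueError("Currency code must be a 3-letter ISO 4217 value")
    return value
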
31 models/import_export_log.py Normal file
@@ -0,0 +1,31 @@
from __future__ import annotations


from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.sql import func

from config.database import Base


class ImportExportLog(Base):
    """Audit log for import and export operations."""

    __tablename__ = "import_export_logs"

    id = Column(Integer, primary_key=True, index=True)
    action = Column(String(32), nullable=False)  # preview, commit, export
    dataset = Column(String(32), nullable=False)  # projects, scenarios, etc.
    status = Column(String(16), nullable=False)  # success, failure
    filename = Column(String(255), nullable=True)
    row_count = Column(Integer, nullable=True)
    detail = Column(Text, nullable=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=True)
    created_at = Column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    def __repr__(self) -> str:  # pragma: no cover
        return (
            f"ImportExportLog(id={self.id}, action={self.action}, "
            f"dataset={self.dataset}, status={self.status})"
        )
@@ -1,45 +1,7 @@
from __future__ import annotations

from dataclasses import dataclass
from enum import Enum


class ResourceType(str, Enum):
    """Primary consumables and resources used in mining operations."""

    DIESEL = "diesel"
    ELECTRICITY = "electricity"
    WATER = "water"
    EXPLOSIVES = "explosives"
    REAGENTS = "reagents"
    LABOR = "labor"
    EQUIPMENT_HOURS = "equipment_hours"
    TAILINGS_CAPACITY = "tailings_capacity"


class CostBucket(str, Enum):
    """Granular cost buckets aligned with project accounting."""

    CAPITAL_INITIAL = "capital_initial"
    CAPITAL_SUSTAINING = "capital_sustaining"
    OPERATING_FIXED = "operating_fixed"
    OPERATING_VARIABLE = "operating_variable"
    MAINTENANCE = "maintenance"
    RECLAMATION = "reclamation"
    ROYALTIES = "royalties"
    GENERAL_ADMIN = "general_admin"


class StochasticVariable(str, Enum):
    """Domain variables that typically require probabilistic modelling."""

    ORE_GRADE = "ore_grade"
    RECOVERY_RATE = "recovery_rate"
    METAL_PRICE = "metal_price"
    OPERATING_COST = "operating_cost"
    CAPITAL_COST = "capital_cost"
    DISCOUNT_RATE = "discount_rate"
    THROUGHPUT = "throughput"
from .enums import ResourceType, CostBucket, StochasticVariable


@dataclass(frozen=True)
125 models/navigation.py Normal file
@@ -0,0 +1,125 @@
from __future__ import annotations

from datetime import datetime
from typing import List, Optional

from sqlalchemy import (
    Boolean,
    CheckConstraint,
    DateTime,
    ForeignKey,
    Index,
    Integer,
    String,
    UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
from sqlalchemy.ext.mutable import MutableList
from sqlalchemy import JSON

from config.database import Base


class NavigationGroup(Base):
    __tablename__ = "navigation_groups"
    __table_args__ = (
        UniqueConstraint("slug", name="uq_navigation_groups_slug"),
        Index("ix_navigation_groups_sort_order", "sort_order"),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    slug: Mapped[str] = mapped_column(String(64), nullable=False)
    label: Mapped[str] = mapped_column(String(128), nullable=False)
    sort_order: Mapped[int] = mapped_column(
        Integer, nullable=False, default=100)
    icon: Mapped[Optional[str]] = mapped_column(String(64))
    tooltip: Mapped[Optional[str]] = mapped_column(String(255))
    is_enabled: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    links: Mapped[List["NavigationLink"]] = relationship(
        "NavigationLink",
        back_populates="group",
        cascade="all, delete-orphan",
        order_by="NavigationLink.sort_order",
    )

    def __repr__(self) -> str:  # pragma: no cover
        return f"NavigationGroup(id={self.id!r}, slug={self.slug!r})"


class NavigationLink(Base):
    __tablename__ = "navigation_links"
    __table_args__ = (
        UniqueConstraint("group_id", "slug",
                         name="uq_navigation_links_group_slug"),
        Index("ix_navigation_links_group_sort", "group_id", "sort_order"),
        Index("ix_navigation_links_parent_sort",
              "parent_link_id", "sort_order"),
        CheckConstraint(
            "(route_name IS NOT NULL) OR (href_override IS NOT NULL)",
            name="ck_navigation_links_route_or_href",
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    group_id: Mapped[int] = mapped_column(
        ForeignKey("navigation_groups.id", ondelete="CASCADE"), nullable=False
    )
    parent_link_id: Mapped[Optional[int]] = mapped_column(
        ForeignKey("navigation_links.id", ondelete="CASCADE")
    )
    slug: Mapped[str] = mapped_column(String(64), nullable=False)
    label: Mapped[str] = mapped_column(String(128), nullable=False)
    route_name: Mapped[Optional[str]] = mapped_column(String(128))
    href_override: Mapped[Optional[str]] = mapped_column(String(512))
    match_prefix: Mapped[Optional[str]] = mapped_column(String(512))
    sort_order: Mapped[int] = mapped_column(
        Integer, nullable=False, default=100)
    icon: Mapped[Optional[str]] = mapped_column(String(64))
    tooltip: Mapped[Optional[str]] = mapped_column(String(255))
    required_roles: Mapped[list[str]] = mapped_column(
        MutableList.as_mutable(JSON), nullable=False, default=list
    )
    is_enabled: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    is_external: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    group: Mapped[NavigationGroup] = relationship(
        NavigationGroup,
        back_populates="links",
    )
    parent: Mapped[Optional["NavigationLink"]] = relationship(
        "NavigationLink",
        remote_side="NavigationLink.id",
        back_populates="children",
    )
    children: Mapped[List["NavigationLink"]] = relationship(
        "NavigationLink",
        back_populates="parent",
        cascade="all, delete-orphan",
        order_by="NavigationLink.sort_order",
    )

    def is_visible_for_roles(self, roles: list[str]) -> bool:
        if not self.required_roles:
            return True
        role_set = set(roles)
        return any(role in role_set for role in self.required_roles)

    def __repr__(self) -> str:  # pragma: no cover
        return f"NavigationLink(id={self.id!r}, slug={self.slug!r})"
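
`is_visible_for_roles` treats an empty `required_roles` list as publicly visible and otherwise requires at least one overlapping role. An illustrative check (the instances and role names are hypothetical):

admin_link = NavigationLink(
    slug="settings", label="Settings",
    route_name="settings.index", required_roles=["admin"],
)
public_link = NavigationLink(
    slug="home", label="Home",
    route_name="dashboard.home", required_roles=[],
)

assert admin_link.is_visible_for_roles(["admin", "analyst"])  # one role overlaps
assert not admin_link.is_visible_for_roles(["viewer"])        # no overlap
assert public_link.is_visible_for_roles([])                   # empty list => visible to all
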
123 models/opex_snapshot.py Normal file
@@ -0,0 +1,123 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, Boolean, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectOpexSnapshot(Base):
    """Snapshot of recurring opex metrics at the project level."""

    __tablename__ = "project_opex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    overall_annual: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    escalated_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    annual_average: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    evaluation_horizon_years: Mapped[int | None] = mapped_column(
        Integer, nullable=True)
    escalation_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    apply_escalation: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="opex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectOpexSnapshot(id={id!r}, project_id={project_id!r}, overall_annual={overall_annual!r})".format(
                id=self.id,
                project_id=self.project_id,
                overall_annual=self.overall_annual,
            )
        )


class ScenarioOpexSnapshot(Base):
    """Snapshot of opex metrics for an individual scenario."""

    __tablename__ = "scenario_opex_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    overall_annual: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    escalated_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    annual_average: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    evaluation_horizon_years: Mapped[int | None] = mapped_column(
        Integer, nullable=True)
    escalation_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    apply_escalation: Mapped[bool] = mapped_column(
        Boolean, nullable=False, default=True)
    component_count: Mapped[int | None] = mapped_column(Integer, nullable=True)
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="opex_snapshots"
    )
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioOpexSnapshot(id={id!r}, scenario_id={scenario_id!r}, overall_annual={overall_annual!r})".format(
                id=self.id,
                scenario_id=self.scenario_id,
                overall_annual=self.overall_annual,
            )
        )
24 models/performance_metric.py Normal file
@@ -0,0 +1,24 @@
from __future__ import annotations

from datetime import datetime

from sqlalchemy import Column, DateTime, Float, Integer, String

from config.database import Base


class PerformanceMetric(Base):
    __tablename__ = "performance_metrics"

    id = Column(Integer, primary_key=True, index=True)
    timestamp = Column(DateTime, default=datetime.utcnow, index=True)
    metric_name = Column(String, index=True)
    value = Column(Float)
    labels = Column(String)  # JSON string of labels
    endpoint = Column(String, index=True, nullable=True)
    method = Column(String, nullable=True)
    status_code = Column(Integer, nullable=True)
    duration_seconds = Column(Float, nullable=True)

    def __repr__(self) -> str:
        return f"<PerformanceMetric(id={self.id}, name={self.metric_name}, value={self.value})>"
176 models/pricing_settings.py Normal file
@@ -0,0 +1,176 @@
"""Database models for persisted pricing configuration settings."""

from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import (
    JSON,
    DateTime,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
    UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func

from config.database import Base
from services.currency import normalise_currency

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project


class PricingSettings(Base):
    """Persisted pricing defaults applied to scenario evaluations."""

    __tablename__ = "pricing_settings"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(128), nullable=False, unique=True)
    slug: Mapped[str] = mapped_column(String(64), nullable=False, unique=True)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    default_currency: Mapped[str | None] = mapped_column(
        String(3), nullable=True)
    default_payable_pct: Mapped[float] = mapped_column(
        Numeric(5, 2), nullable=False, default=100.0
    )
    moisture_threshold_pct: Mapped[float] = mapped_column(
        Numeric(5, 2), nullable=False, default=8.0
    )
    moisture_penalty_per_pct: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0
    )
    metadata_payload: Mapped[dict | None] = mapped_column(
        "metadata", JSON, nullable=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    metal_overrides: Mapped[list["PricingMetalSettings"]] = relationship(
        "PricingMetalSettings",
        back_populates="pricing_settings",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    impurity_overrides: Mapped[list["PricingImpuritySettings"]] = relationship(
        "PricingImpuritySettings",
        back_populates="pricing_settings",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    projects: Mapped[list["Project"]] = relationship(
        "Project",
        back_populates="pricing_settings",
        cascade="all",
    )

    @validates("slug")
    def _normalise_slug(self, key: str, value: str) -> str:
        return value.strip().lower()

    @validates("default_currency")
    def _validate_currency(self, key: str, value: str | None) -> str | None:
        return normalise_currency(value)

    def __repr__(self) -> str:  # pragma: no cover
        return f"PricingSettings(id={self.id!r}, slug={self.slug!r})"


class PricingMetalSettings(Base):
    """Contract-specific overrides for a particular metal."""

    __tablename__ = "pricing_metal_settings"
    __table_args__ = (
        UniqueConstraint(
            "pricing_settings_id", "metal_code", name="uq_pricing_metal_settings_code"
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    pricing_settings_id: Mapped[int] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
    )
    metal_code: Mapped[str] = mapped_column(String(32), nullable=False)
    payable_pct: Mapped[float | None] = mapped_column(
        Numeric(5, 2), nullable=True)
    moisture_threshold_pct: Mapped[float | None] = mapped_column(
        Numeric(5, 2), nullable=True)
    moisture_penalty_per_pct: Mapped[float | None] = mapped_column(
        Numeric(14, 4), nullable=True
    )
    data: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    pricing_settings: Mapped["PricingSettings"] = relationship(
        "PricingSettings", back_populates="metal_overrides"
    )

    @validates("metal_code")
    def _normalise_metal_code(self, key: str, value: str) -> str:
        return value.strip().lower()

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "PricingMetalSettings("  # noqa: ISC001
            f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
            f"metal_code={self.metal_code!r})"
        )


class PricingImpuritySettings(Base):
    """Impurity penalty thresholds associated with pricing settings."""

    __tablename__ = "pricing_impurity_settings"
    __table_args__ = (
        UniqueConstraint(
            "pricing_settings_id",
            "impurity_code",
            name="uq_pricing_impurity_settings_code",
        ),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    pricing_settings_id: Mapped[int] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
    )
    impurity_code: Mapped[str] = mapped_column(String(32), nullable=False)
    threshold_ppm: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0)
    penalty_per_ppm: Mapped[float] = mapped_column(
        Numeric(14, 4), nullable=False, default=0.0)
    notes: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    pricing_settings: Mapped["PricingSettings"] = relationship(
        "PricingSettings", back_populates="impurity_overrides"
    )

    @validates("impurity_code")
    def _normalise_impurity_code(self, key: str, value: str) -> str:
        return value.strip().upper()

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "PricingImpuritySettings("  # noqa: ISC001
            f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
            f"impurity_code={self.impurity_code!r})"
        )
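
The `@validates` hooks normalise identifiers at assignment time, so lookups can rely on canonical casing: slugs and metal codes are lower-cased, impurity codes upper-cased. For example (the values are hypothetical):

settings = PricingSettings(name="Spot 2025", slug="  Spot-2025 ")
metal = PricingMetalSettings(metal_code=" CU ")
impurity = PricingImpuritySettings(
    impurity_code="as", threshold_ppm=500.0, penalty_per_ppm=1.25,
)

assert settings.slug == "spot-2025"        # stripped and lower-cased
assert metal.metal_code == "cu"
assert impurity.impurity_code == "AS"      # impurity codes go upper-case
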
133 models/profitability_snapshot.py Normal file
@@ -0,0 +1,133 @@
from __future__ import annotations

from datetime import datetime
from typing import TYPE_CHECKING

from sqlalchemy import JSON, DateTime, ForeignKey, Integer, Numeric, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .project import Project
    from .scenario import Scenario
    from .user import User


class ProjectProfitability(Base):
    """Snapshot of aggregated profitability metrics at the project level."""

    __tablename__ = "project_profitability_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    project_id: Mapped[int] = mapped_column(
        ForeignKey("projects.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    npv: Mapped[float | None] = mapped_column(Numeric(18, 2), nullable=True)
    irr_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    payback_period_years: Mapped[float | None] = mapped_column(
        Numeric(12, 4), nullable=True
    )
    margin_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    revenue_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    opex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    sustaining_capex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    net_cash_flow_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped[Project] = relationship(
        "Project", back_populates="profitability_snapshots")
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ProjectProfitability(id={id!r}, project_id={project_id!r}, npv={npv!r})".format(
                id=self.id, project_id=self.project_id, npv=self.npv
            )
        )


class ScenarioProfitability(Base):
    """Snapshot of profitability metrics for an individual scenario."""

    __tablename__ = "scenario_profitability_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    scenario_id: Mapped[int] = mapped_column(
        ForeignKey("scenarios.id", ondelete="CASCADE"), nullable=False, index=True
    )
    created_by_id: Mapped[int | None] = mapped_column(
        ForeignKey("users.id", ondelete="SET NULL"), nullable=True, index=True
    )
    calculation_source: Mapped[str | None] = mapped_column(
        String(64), nullable=True)
    calculated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    currency_code: Mapped[str | None] = mapped_column(String(3), nullable=True)
    npv: Mapped[float | None] = mapped_column(Numeric(18, 2), nullable=True)
    irr_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    payback_period_years: Mapped[float | None] = mapped_column(
        Numeric(12, 4), nullable=True
    )
    margin_pct: Mapped[float | None] = mapped_column(
        Numeric(12, 6), nullable=True)
    revenue_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    opex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    sustaining_capex_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    capex: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True)
    net_cash_flow_total: Mapped[float | None] = mapped_column(
        Numeric(18, 2), nullable=True
    )
    payload: Mapped[dict | None] = mapped_column(JSON, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    scenario: Mapped[Scenario] = relationship(
        "Scenario", back_populates="profitability_snapshots")
    created_by: Mapped[User | None] = relationship("User")

    def __repr__(self) -> str:  # pragma: no cover
        return (
            "ScenarioProfitability(id={id!r}, scenario_id={scenario_id!r}, npv={npv!r})".format(
                id=self.id, scenario_id=self.scenario_id, npv=self.npv
            )
        )
@@ -1,10 +1,14 @@
from __future__ import annotations

from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING, List

from sqlalchemy import DateTime, Enum as SQLEnum, Integer, String, Text
from .enums import MiningOperationType, sql_enum
from .profitability_snapshot import ProjectProfitability
from .capex_snapshot import ProjectCapexSnapshot
from .opex_snapshot import ProjectOpexSnapshot

from sqlalchemy import DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

@@ -12,18 +16,7 @@ from config.database import Base

if TYPE_CHECKING:  # pragma: no cover
    from .scenario import Scenario


class MiningOperationType(str, Enum):
    """Supported mining operation categories."""

    OPEN_PIT = "open_pit"
    UNDERGROUND = "underground"
    IN_SITU_LEACH = "in_situ_leach"
    PLACER = "placer"
    QUARRY = "quarry"
    MOUNTAINTOP_REMOVAL = "mountaintop_removal"
    OTHER = "other"
from .pricing_settings import PricingSettings


class Project(Base):
@@ -35,9 +28,15 @@ class Project(Base):
    name: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
    location: Mapped[str | None] = mapped_column(String(255), nullable=True)
    operation_type: Mapped[MiningOperationType] = mapped_column(
        SQLEnum(MiningOperationType), nullable=False, default=MiningOperationType.OTHER
        sql_enum(MiningOperationType, name="miningoperationtype"),
        nullable=False,
        default=MiningOperationType.OTHER,
    )
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    pricing_settings_id: Mapped[int | None] = mapped_column(
        ForeignKey("pricing_settings.id", ondelete="SET NULL"),
        nullable=True,
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
@@ -51,6 +50,55 @@ class Project(Base):
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    pricing_settings: Mapped["PricingSettings | None"] = relationship(
        "PricingSettings",
        back_populates="projects",
    )
    profitability_snapshots: Mapped[List["ProjectProfitability"]] = relationship(
        "ProjectProfitability",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectProfitability.calculated_at.desc(),
        passive_deletes=True,
    )
    capex_snapshots: Mapped[List["ProjectCapexSnapshot"]] = relationship(
        "ProjectCapexSnapshot",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectCapexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )
    opex_snapshots: Mapped[List["ProjectOpexSnapshot"]] = relationship(
        "ProjectOpexSnapshot",
        back_populates="project",
        cascade="all, delete-orphan",
        order_by=lambda: ProjectOpexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )

    @property
    def latest_profitability(self) -> "ProjectProfitability | None":
        """Return the most recent profitability snapshot, if any."""

        if not self.profitability_snapshots:
            return None
        return self.profitability_snapshots[0]

    @property
    def latest_capex(self) -> "ProjectCapexSnapshot | None":
        """Return the most recent capex snapshot, if any."""

        if not self.capex_snapshots:
            return None
        return self.capex_snapshots[0]

    @property
    def latest_opex(self) -> "ProjectOpexSnapshot | None":
        """Return the most recent opex snapshot, if any."""

        if not self.opex_snapshots:
            return None
        return self.opex_snapshots[0]

    def __repr__(self) -> str:  # pragma: no cover - helpful for debugging
        return f"Project(id={self.id!r}, name={self.name!r})"
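
Because each snapshot relationship above is ordered by `calculated_at` descending, the `latest_*` properties can simply return element zero. A usage sketch (`session` and the project id are placeholders, not from this commit):

project = session.get(Project, 42)  # hypothetical session and id

latest = project.latest_profitability  # newest snapshot, or None
if latest is not None:
    print(latest.npv, latest.calculated_at)
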
@@ -1,24 +1,27 @@
from __future__ import annotations

from datetime import date, datetime
from enum import Enum
from typing import TYPE_CHECKING, List

from sqlalchemy import (
    Date,
    DateTime,
    Enum as SQLEnum,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
    UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func

from config.database import Base
from .metadata import ResourceType
from services.currency import normalise_currency
from .enums import ResourceType, ScenarioStatus, sql_enum
from .profitability_snapshot import ScenarioProfitability
from .capex_snapshot import ScenarioCapexSnapshot
from .opex_snapshot import ScenarioOpexSnapshot

if TYPE_CHECKING:  # pragma: no cover
    from .financial_input import FinancialInput
@@ -26,18 +29,14 @@ if TYPE_CHECKING:  # pragma: no cover
    from .simulation_parameter import SimulationParameter


class ScenarioStatus(str, Enum):
    """Lifecycle states for project scenarios."""

    DRAFT = "draft"
    ACTIVE = "active"
    ARCHIVED = "archived"


class Scenario(Base):
    """A specific configuration of assumptions for a project."""

    __tablename__ = "scenarios"
    __table_args__ = (
        UniqueConstraint("project_id", "name",
                         name="uq_scenarios_project_name"),
    )

    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
    project_id: Mapped[int] = mapped_column(
@@ -46,14 +45,17 @@ class Scenario(Base):
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    status: Mapped[ScenarioStatus] = mapped_column(
        SQLEnum(ScenarioStatus), nullable=False, default=ScenarioStatus.DRAFT
        sql_enum(ScenarioStatus, name="scenariostatus"),
        nullable=False,
        default=ScenarioStatus.DRAFT,
    )
    start_date: Mapped[date | None] = mapped_column(Date, nullable=True)
    end_date: Mapped[date | None] = mapped_column(Date, nullable=True)
    discount_rate: Mapped[float | None] = mapped_column(Numeric(5, 2), nullable=True)
    discount_rate: Mapped[float | None] = mapped_column(
        Numeric(5, 2), nullable=True)
    currency: Mapped[str | None] = mapped_column(String(3), nullable=True)
    primary_resource: Mapped[ResourceType | None] = mapped_column(
        SQLEnum(ResourceType), nullable=True
        sql_enum(ResourceType, name="resourcetype"), nullable=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
@@ -62,7 +64,8 @@ class Scenario(Base):
        DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
    )

    project: Mapped["Project"] = relationship("Project", back_populates="scenarios")
    project: Mapped["Project"] = relationship(
        "Project", back_populates="scenarios")
    financial_inputs: Mapped[List["FinancialInput"]] = relationship(
        "FinancialInput",
        back_populates="scenario",
@@ -75,6 +78,56 @@ class Scenario(Base):
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
    profitability_snapshots: Mapped[List["ScenarioProfitability"]] = relationship(
        "ScenarioProfitability",
        back_populates="scenario",
        cascade="all, delete-orphan",
        order_by=lambda: ScenarioProfitability.calculated_at.desc(),
        passive_deletes=True,
    )
    capex_snapshots: Mapped[List["ScenarioCapexSnapshot"]] = relationship(
        "ScenarioCapexSnapshot",
        back_populates="scenario",
        cascade="all, delete-orphan",
        order_by=lambda: ScenarioCapexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )
    opex_snapshots: Mapped[List["ScenarioOpexSnapshot"]] = relationship(
        "ScenarioOpexSnapshot",
        back_populates="scenario",
        cascade="all, delete-orphan",
        order_by=lambda: ScenarioOpexSnapshot.calculated_at.desc(),
        passive_deletes=True,
    )

    @validates("currency")
    def _normalise_currency(self, key: str, value: str | None) -> str | None:
        # Normalise to uppercase ISO-4217; raises when the code is malformed.
        return normalise_currency(value)

    def __repr__(self) -> str:  # pragma: no cover
        return f"Scenario(id={self.id!r}, name={self.name!r}, project_id={self.project_id!r})"

    @property
    def latest_profitability(self) -> "ScenarioProfitability | None":
        """Return the most recent profitability snapshot for this scenario."""

        if not self.profitability_snapshots:
            return None
        return self.profitability_snapshots[0]

    @property
    def latest_capex(self) -> "ScenarioCapexSnapshot | None":
        """Return the most recent capex snapshot for this scenario."""

        if not self.capex_snapshots:
            return None
        return self.capex_snapshots[0]

    @property
    def latest_opex(self) -> "ScenarioOpexSnapshot | None":
        """Return the most recent opex snapshot for this scenario."""

        if not self.opex_snapshots:
            return None
        return self.opex_snapshots[0]
@@ -1,13 +1,13 @@
from __future__ import annotations

from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING

from .enums import DistributionType, ResourceType, StochasticVariable, sql_enum

from sqlalchemy import (
    JSON,
    DateTime,
    Enum as SQLEnum,
    ForeignKey,
    Integer,
    Numeric,
@@ -17,22 +17,11 @@ from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base
from .metadata import ResourceType, StochasticVariable

if TYPE_CHECKING:  # pragma: no cover
    from .scenario import Scenario


class DistributionType(str, Enum):
    """Supported stochastic distribution families for simulations."""

    NORMAL = "normal"
    TRIANGULAR = "triangular"
    UNIFORM = "uniform"
    LOGNORMAL = "lognormal"
    CUSTOM = "custom"


class SimulationParameter(Base):
    """Probability distribution settings for scenario simulations."""

@@ -44,13 +33,13 @@ class SimulationParameter(Base):
    )
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    distribution: Mapped[DistributionType] = mapped_column(
        SQLEnum(DistributionType), nullable=False
        sql_enum(DistributionType, name="distributiontype"), nullable=False
    )
    variable: Mapped[StochasticVariable | None] = mapped_column(
        SQLEnum(StochasticVariable), nullable=True
        sql_enum(StochasticVariable, name="stochasticvariable"), nullable=True
    )
    resource_type: Mapped[ResourceType | None] = mapped_column(
        SQLEnum(ResourceType), nullable=True
        sql_enum(ResourceType, name="resourcetype"), nullable=True
    )
    mean_value: Mapped[float | None] = mapped_column(
        Numeric(18, 4), nullable=True)
117 monitoring/__init__.py Normal file
@@ -0,0 +1,117 @@
from __future__ import annotations

from datetime import datetime, timedelta
from typing import Optional

from fastapi import APIRouter, Depends, Query, Response
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from sqlalchemy.orm import Session

from config.database import get_db
from services.metrics import MetricsService


router = APIRouter(prefix="/metrics", tags=["monitoring"])


@router.get("", summary="Prometheus metrics endpoint", include_in_schema=False)
async def metrics_endpoint() -> Response:
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@router.get("/performance", summary="Get performance metrics")
async def get_performance_metrics(
    metric_name: Optional[str] = Query(
        None, description="Filter by metric name"),
    hours: int = Query(24, description="Hours back to look"),
    db: Session = Depends(get_db),
) -> dict:
    """Get aggregated performance metrics."""
    service = MetricsService(db)
    start_time = datetime.utcnow() - timedelta(hours=hours)

    if metric_name:
        metrics = service.get_metrics(
            metric_name=metric_name, start_time=start_time)
        aggregated = service.get_aggregated_metrics(
            metric_name, start_time=start_time)
        return {
            "metric_name": metric_name,
            "period_hours": hours,
            "aggregated": aggregated,
            "recent_samples": [
                {
                    "timestamp": m.timestamp.isoformat(),
                    "value": m.value,
                    "labels": m.labels,
                    "endpoint": m.endpoint,
                    "method": m.method,
                    "status_code": m.status_code,
                    "duration_seconds": m.duration_seconds,
                }
                for m in metrics[:50]  # Last 50 samples
            ],
        }

    # Return summary for all metrics
    all_metrics = service.get_metrics(start_time=start_time, limit=1000)
    metric_types = {}
    for m in all_metrics:
        if m.metric_name not in metric_types:
            metric_types[m.metric_name] = []
        metric_types[m.metric_name].append(m.value)

    summary = {}
    for name, values in metric_types.items():
        summary[name] = {
            "count": len(values),
            "avg": sum(values) / len(values) if values else 0,
            "min": min(values) if values else 0,
            "max": max(values) if values else 0,
        }

    return {
        "period_hours": hours,
        "summary": summary,
    }


@router.get("/health", summary="Detailed health check with metrics")
async def detailed_health(db: Session = Depends(get_db)) -> dict:
    """Get detailed health status with recent metrics."""
    service = MetricsService(db)
    last_hour = datetime.utcnow() - timedelta(hours=1)

    # Get request metrics from last hour
    request_metrics = service.get_metrics(
        metric_name="http_request", start_time=last_hour
    )

    if request_metrics:
        durations = []
        error_count = 0
        for m in request_metrics:
            if m.duration_seconds is not None:
                durations.append(m.duration_seconds)
            if m.status_code is not None:
                if m.status_code >= 400:
                    error_count += 1
        total_requests = len(request_metrics)

        avg_duration = sum(durations) / len(durations) if durations else 0
        error_rate = error_count / total_requests if total_requests > 0 else 0
    else:
        avg_duration = 0
        error_rate = 0
        total_requests = 0

    return {
        "status": "ok",
        "timestamp": datetime.utcnow().isoformat(),
        "metrics": {
            "requests_last_hour": total_requests,
            "avg_response_time_seconds": avg_duration,
            "error_rate": error_rate,
        },
    }
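
A quick way to exercise this router is FastAPI's TestClient; the sketch below assumes an app that mounts the router and, for the database-backed endpoints, a working `get_db` dependency (the wiring is illustrative, not from this commit):

from fastapi import FastAPI
from fastapi.testclient import TestClient

import monitoring

app = FastAPI()
app.include_router(monitoring.router)  # /metrics, /metrics/performance, /metrics/health

client = TestClient(app)
print(client.get("/metrics").text[:200])  # Prometheus exposition format
# /metrics/performance and /metrics/health additionally require the
# get_db dependency to resolve against a configured database session.
print(client.get("/metrics/performance", params={"hours": 1}).json())
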
108 monitoring/metrics.py Normal file
@@ -0,0 +1,108 @@
from __future__ import annotations


from prometheus_client import Counter, Histogram, Gauge

IMPORT_DURATION = Histogram(
    "calminer_import_duration_seconds",
    "Duration of import preview and commit operations",
    labelnames=("dataset", "action", "status"),
)

IMPORT_TOTAL = Counter(
    "calminer_import_total",
    "Count of import operations",
    labelnames=("dataset", "action", "status"),
)

EXPORT_DURATION = Histogram(
    "calminer_export_duration_seconds",
    "Duration of export operations",
    labelnames=("dataset", "status", "format"),
)

EXPORT_TOTAL = Counter(
    "calminer_export_total",
    "Count of export operations",
    labelnames=("dataset", "status", "format"),
)

# General performance metrics
REQUEST_DURATION = Histogram(
    "calminer_request_duration_seconds",
    "Duration of HTTP requests",
    labelnames=("method", "endpoint", "status"),
)

REQUEST_TOTAL = Counter(
    "calminer_request_total",
    "Count of HTTP requests",
    labelnames=("method", "endpoint", "status"),
)

ACTIVE_CONNECTIONS = Gauge(
    "calminer_active_connections",
    "Number of active connections",
)

DB_CONNECTIONS = Gauge(
    "calminer_db_connections",
    "Number of database connections",
)

# Business metrics
PROJECT_OPERATIONS = Counter(
    "calminer_project_operations_total",
    "Count of project operations",
    labelnames=("operation", "status"),
)

SCENARIO_OPERATIONS = Counter(
    "calminer_scenario_operations_total",
    "Count of scenario operations",
    labelnames=("operation", "status"),
)

SIMULATION_RUNS = Counter(
    "calminer_simulation_runs_total",
    "Count of Monte Carlo simulation runs",
    labelnames=("status",),
)

SIMULATION_DURATION = Histogram(
    "calminer_simulation_duration_seconds",
    "Duration of Monte Carlo simulations",
    labelnames=("status",),
)


def observe_import(action: str, dataset: str, status: str, seconds: float) -> None:
    IMPORT_TOTAL.labels(dataset=dataset, action=action, status=status).inc()
    IMPORT_DURATION.labels(dataset=dataset, action=action,
                           status=status).observe(seconds)


def observe_export(dataset: str, status: str, export_format: str, seconds: float) -> None:
    EXPORT_TOTAL.labels(dataset=dataset, status=status,
                        format=export_format).inc()
    EXPORT_DURATION.labels(dataset=dataset, status=status,
                           format=export_format).observe(seconds)


def observe_request(method: str, endpoint: str, status: int, seconds: float) -> None:
    REQUEST_TOTAL.labels(method=method, endpoint=endpoint, status=status).inc()
    REQUEST_DURATION.labels(method=method, endpoint=endpoint,
                            status=status).observe(seconds)


def observe_project_operation(operation: str, status: str = "success") -> None:
    PROJECT_OPERATIONS.labels(operation=operation, status=status).inc()


def observe_scenario_operation(operation: str, status: str = "success") -> None:
    SCENARIO_OPERATIONS.labels(operation=operation, status=status).inc()


def observe_simulation(status: str, duration_seconds: float) -> None:
    SIMULATION_RUNS.labels(status=status).inc()
    SIMULATION_DURATION.labels(status=status).observe(duration_seconds)
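
The `observe_*` helpers are meant to be called from request-handling code. One plausible wiring, sketched here rather than taken from this commit, is an HTTP middleware that times every request and feeds `observe_request`:

import time

from fastapi import FastAPI, Request

from monitoring.metrics import observe_request

app = FastAPI()


@app.middleware("http")
async def record_request_metrics(request: Request, call_next):
    start = time.perf_counter()
    response = await call_next(request)
    # Increments calminer_request_total and observes the duration
    # histogram with method/endpoint/status labels.
    observe_request(
        method=request.method,
        endpoint=request.url.path,
        status=response.status_code,
        seconds=time.perf_counter() - start,
    )
    return response
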
@@ -16,4 +16,31 @@ exclude = '''

[tool.pytest.ini_options]
pythonpath = ["."]
testpaths = ["tests"]
addopts = "-ra --strict-config --strict-markers --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80"
markers = [
    "asyncio: marks tests as async (using pytest-asyncio)",
]

[tool.coverage.run]
branch = true
source = ["."]
omit = [
    "tests/*",
    "scripts/*",
    "main.py",
    "routes/reports.py",
    "routes/calculations.py",
    "services/calculations.py",
    "services/importers.py",
    "services/reporting.py",
]

[tool.coverage.report]
skip_empty = true
show_missing = true

[tool.bandit]
exclude_dirs = ["scripts"]
skips = ["B101", "B601"]  # B101: assert_used, B601: shell_injection (may be false positives)
@@ -1,2 +1 @@
-r requirements.txt
alembic
@@ -1,7 +1,9 @@
pytest
pytest-asyncio
pytest-cov
pytest-httpx
python-jose
ruff
black
mypy
bandit
@@ -13,3 +13,5 @@ argon2-cffi
python-jose
python-multipart
openpyxl
prometheus-client
plotly
@@ -5,7 +5,6 @@ from typing import Any, Iterable

from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile, status
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from pydantic import ValidationError
from starlette.datastructures import FormData

@@ -43,9 +42,10 @@ from services.session import (
)
from services.repositories import RoleRepository, UserRepository
from services.unit_of_work import UnitOfWork
from routes.template_filters import create_templates

router = APIRouter(tags=["Authentication"])
templates = Jinja2Templates(directory="templates")
templates = create_templates()

_PASSWORD_RESET_SCOPE = "password-reset"
_AUTH_SCOPE = "auth"
2119 routes/calculations.py Normal file
File diff suppressed because it is too large
@@ -3,16 +3,15 @@ from __future__ import annotations
from datetime import datetime

from fastapi import APIRouter, Depends, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse, RedirectResponse
from routes.template_filters import create_templates

from dependencies import get_unit_of_work, require_authenticated_user
from models import User
from models import ScenarioStatus
from dependencies import get_current_user, get_unit_of_work
from models import ScenarioStatus, User
from services.unit_of_work import UnitOfWork

router = APIRouter(tags=["Dashboard"])
templates = Jinja2Templates(directory="templates")
templates = create_templates()


def _format_timestamp(moment: datetime | None) -> str | None:
@@ -109,16 +108,23 @@ def _load_scenario_alerts(
    return alerts


@router.get("/", response_class=HTMLResponse, include_in_schema=False, name="dashboard.home")
@router.get("/", include_in_schema=False, name="dashboard.home", response_model=None)
def dashboard_home(
    request: Request,
    _: User = Depends(require_authenticated_user),
    user: User | None = Depends(get_current_user),
    uow: UnitOfWork = Depends(get_unit_of_work),
) -> HTMLResponse:
) -> HTMLResponse | RedirectResponse:
    if user is None:
        return RedirectResponse(request.url_for("auth.login_form"), status_code=303)

    context = {
        "metrics": _load_metrics(uow),
        "recent_projects": _load_recent_projects(uow),
        "simulation_updates": _load_simulation_updates(uow),
        "scenario_alerts": _load_scenario_alerts(request, uow),
        "export_modals": {
            "projects": request.url_for("exports.modal", dataset="projects"),
            "scenarios": request.url_for("exports.modal", dataset="scenarios"),
        },
    }
    return templates.TemplateResponse(request, "dashboard.html", context)
363 routes/exports.py Normal file
@@ -0,0 +1,363 @@
|
||||
from __future__ import annotations

import logging
import time
from datetime import datetime, timezone
from typing import Annotated

from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi.responses import HTMLResponse, StreamingResponse

from dependencies import get_unit_of_work, require_any_role
from schemas.exports import (
    ExportFormat,
    ProjectExportRequest,
    ScenarioExportRequest,
)
from services.export_serializers import (
    export_projects_to_excel,
    export_scenarios_to_excel,
    stream_projects_to_csv,
    stream_scenarios_to_csv,
)
from services.unit_of_work import UnitOfWork
from models.import_export_log import ImportExportLog
from monitoring.metrics import observe_export
from routes.template_filters import create_templates

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/exports", tags=["exports"])
templates = create_templates()


@router.get(
    "/modal/{dataset}",
    response_model=None,
    response_class=HTMLResponse,
    include_in_schema=False,
    name="exports.modal",
)
async def export_modal(
    dataset: str,
    request: Request,
) -> HTMLResponse:
    dataset = dataset.lower()
    if dataset not in {"projects", "scenarios"}:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Unknown dataset")

    submit_url = request.url_for(
        "export_projects" if dataset == "projects" else "export_scenarios"
    )
    return templates.TemplateResponse(
        request,
        "exports/modal.html",
        {
            "dataset": dataset,
            "submit_url": submit_url,
        },
    )


def _timestamp_suffix() -> str:
    return datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")


def _ensure_repository(repo, name: str):
    if repo is None:
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                            detail=f"{name} repository unavailable")
    return repo


def _record_export_audit(
    *,
    uow: UnitOfWork,
    dataset: str,
    status: str,
    export_format: ExportFormat,
    row_count: int,
    filename: str | None,
) -> None:
    try:
        if uow.session is None:
            return
        log = ImportExportLog(
            action="export",
            dataset=dataset,
            status=status,
            filename=filename,
            row_count=row_count,
            detail=f"format={export_format.value}",
        )
        uow.session.add(log)
        uow.commit()
    except Exception:
        # best-effort auditing, do not break exports
        if uow.session is not None:
            uow.session.rollback()
        logger.exception(
            "export.audit.failed",
            extra={
                "event": "export.audit",
                "dataset": dataset,
                "status": status,
                "format": export_format.value,
            },
        )


@router.post(
    "/projects",
    status_code=status.HTTP_200_OK,
    response_class=StreamingResponse,
    dependencies=[Depends(require_any_role(
        "admin", "project_manager", "analyst"))],
)
async def export_projects(
    request: ProjectExportRequest,
    uow: Annotated[UnitOfWork, Depends(get_unit_of_work)],
) -> Response:
    project_repo = _ensure_repository(
        getattr(uow, "projects", None), "Project")
    start = time.perf_counter()
    try:
        projects = project_repo.filtered_for_export(request.filters)
    except ValueError as exc:
        _record_export_audit(
            uow=uow,
            dataset="projects",
            status="failure",
            export_format=request.format,
            row_count=0,
            filename=None,
        )
        logger.warning(
            "export.validation_failed",
            extra={
                "event": "export",
                "dataset": "projects",
                "status": "validation_failed",
                "format": request.format.value,
                "error": str(exc),
            },
        )
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc
    except Exception as exc:
        _record_export_audit(
            uow=uow,
            dataset="projects",
            status="failure",
            export_format=request.format,
            row_count=0,
            filename=None,
        )
        logger.exception(
            "export.failed",
            extra={
                "event": "export",
                "dataset": "projects",
                "status": "failure",
                "format": request.format.value,
            },
        )
        raise exc

    filename = f"projects-{_timestamp_suffix()}"

    if request.format == ExportFormat.CSV:
        stream = stream_projects_to_csv(projects)
        response = StreamingResponse(stream, media_type="text/csv")
        response.headers["Content-Disposition"] = f"attachment; filename={filename}.csv"
        _record_export_audit(
            uow=uow,
            dataset="projects",
            status="success",
            export_format=request.format,
            row_count=len(projects),
            filename=f"{filename}.csv",
        )
        logger.info(
            "export",
            extra={
                "event": "export",
                "dataset": "projects",
                "status": "success",
                "format": request.format.value,
                "row_count": len(projects),
                "filename": f"{filename}.csv",
            },
        )
        observe_export(
            dataset="projects",
            status="success",
            export_format=request.format.value,
            seconds=time.perf_counter() - start,
        )
        return response

    data = export_projects_to_excel(projects)
    _record_export_audit(
        uow=uow,
        dataset="projects",
        status="success",
        export_format=request.format,
        row_count=len(projects),
        filename=f"{filename}.xlsx",
    )
    logger.info(
        "export",
        extra={
            "event": "export",
            "dataset": "projects",
            "status": "success",
            "format": request.format.value,
            "row_count": len(projects),
            "filename": f"{filename}.xlsx",
        },
    )
    observe_export(
        dataset="projects",
        status="success",
        export_format=request.format.value,
        seconds=time.perf_counter() - start,
    )
    return StreamingResponse(
        iter([data]),
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers={
            "Content-Disposition": f"attachment; filename={filename}.xlsx",
        },
    )


@router.post(
    "/scenarios",
    status_code=status.HTTP_200_OK,
    response_class=StreamingResponse,
    dependencies=[Depends(require_any_role(
        "admin", "project_manager", "analyst"))],
)
async def export_scenarios(
    request: ScenarioExportRequest,
    uow: Annotated[UnitOfWork, Depends(get_unit_of_work)],
) -> Response:
    scenario_repo = _ensure_repository(
        getattr(uow, "scenarios", None), "Scenario")
    start = time.perf_counter()
    try:
        scenarios = scenario_repo.filtered_for_export(
            request.filters, include_project=True)
    except ValueError as exc:
        _record_export_audit(
            uow=uow,
            dataset="scenarios",
            status="failure",
            export_format=request.format,
            row_count=0,
            filename=None,
        )
        logger.warning(
            "export.validation_failed",
            extra={
                "event": "export",
                "dataset": "scenarios",
                "status": "validation_failed",
                "format": request.format.value,
                "error": str(exc),
            },
        )
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc
    except Exception as exc:
        _record_export_audit(
            uow=uow,
            dataset="scenarios",
            status="failure",
            export_format=request.format,
            row_count=0,
            filename=None,
        )
        logger.exception(
            "export.failed",
            extra={
                "event": "export",
                "dataset": "scenarios",
                "status": "failure",
                "format": request.format.value,
            },
        )
        raise exc

    filename = f"scenarios-{_timestamp_suffix()}"

    if request.format == ExportFormat.CSV:
        stream = stream_scenarios_to_csv(scenarios)
        response = StreamingResponse(stream, media_type="text/csv")
        response.headers["Content-Disposition"] = f"attachment; filename={filename}.csv"
        _record_export_audit(
            uow=uow,
            dataset="scenarios",
            status="success",
            export_format=request.format,
            row_count=len(scenarios),
            filename=f"{filename}.csv",
        )
        logger.info(
            "export",
            extra={
                "event": "export",
                "dataset": "scenarios",
                "status": "success",
                "format": request.format.value,
                "row_count": len(scenarios),
                "filename": f"{filename}.csv",
            },
        )
        observe_export(
            dataset="scenarios",
            status="success",
            export_format=request.format.value,
            seconds=time.perf_counter() - start,
        )
        return response

    data = export_scenarios_to_excel(scenarios)
    _record_export_audit(
        uow=uow,
        dataset="scenarios",
        status="success",
        export_format=request.format,
        row_count=len(scenarios),
        filename=f"{filename}.xlsx",
    )
    logger.info(
        "export",
        extra={
            "event": "export",
            "dataset": "scenarios",
            "status": "success",
            "format": request.format.value,
            "row_count": len(scenarios),
            "filename": f"{filename}.xlsx",
        },
    )
    observe_export(
        dataset="scenarios",
        status="success",
        export_format=request.format.value,
        seconds=time.perf_counter() - start,
    )
    return StreamingResponse(
        iter([data]),
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers={
            "Content-Disposition": f"attachment; filename={filename}.xlsx",
        },
    )
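For orientation, a hedged sketch of driving the new CSV export endpoint from a test. The payload shape is inferred from this diff (a `format` field plus a `filters` object on `ProjectExportRequest`), not verified against `schemas/exports.py`, and authentication is omitted even though the route requires an admin, project_manager, or analyst role:

```python
# Sketch only: payload field names and the import path are assumptions.
from fastapi.testclient import TestClient

from main import app  # hypothetical import path

client = TestClient(app)

payload = {"format": "csv", "filters": {}}  # assumed field names
response = client.post("/exports/projects", json=payload)

assert response.status_code == 200
assert response.headers["content-type"].startswith("text/csv")
# The filename carries the UTC timestamp suffix, e.g. projects-20240101-120000.csv
assert response.headers["content-disposition"].startswith(
    "attachment; filename=projects-"
)
```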
@@ -3,8 +3,14 @@ from __future__ import annotations
 from io import BytesIO
 
 from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status
+from fastapi import Request
+from fastapi.responses import HTMLResponse
 
-from dependencies import get_import_ingestion_service, require_roles
+from dependencies import (
+    get_import_ingestion_service,
+    require_roles,
+    require_roles_html,
+)
 from models import User
 from schemas.imports import (
     ImportCommitRequest,
@@ -14,12 +20,33 @@ from schemas.imports import (
     ScenarioImportPreviewResponse,
 )
 from services.importers import ImportIngestionService, UnsupportedImportFormat
+from routes.template_filters import create_templates
 
 router = APIRouter(prefix="/imports", tags=["Imports"])
+templates = create_templates()
 
 MANAGE_ROLES = ("project_manager", "admin")
 
 
+@router.get(
+    "/ui",
+    response_class=HTMLResponse,
+    include_in_schema=False,
+    name="imports.ui",
+)
+def import_dashboard(
+    request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
+) -> HTMLResponse:
+    return templates.TemplateResponse(
+        request,
+        "imports/ui.html",
+        {
+            "title": "Imports",
+        },
+    )
+
+
 async def _read_upload_file(upload: UploadFile) -> BytesIO:
     content = await upload.read()
     if not content:
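The new `/imports/ui` page is gated through `require_roles_html`, which presumably redirects browsers toward the login flow rather than returning a bare 401 (the `_html` dependency variants are not shown in this diff). A minimal smoke test might look like:

```python
# Sketch only: the exact unauthenticated behaviour of require_roles_html
# is an assumption; this diff does not include its implementation.
from fastapi.testclient import TestClient

from main import app  # hypothetical import path

client = TestClient(app)
response = client.get("/imports/ui", follow_redirects=False)

# Expect a redirect to login or an auth error rather than the dashboard.
assert response.status_code in {303, 401, 403}
```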
63 routes/navigation.py Normal file
@@ -0,0 +1,63 @@
from __future__ import annotations

from datetime import datetime, timezone

from fastapi import APIRouter, Depends, Request

from dependencies import (
    get_auth_session,
    get_navigation_service,
    require_authenticated_user,
)
from models import User
from schemas.navigation import (
    NavigationGroupSchema,
    NavigationLinkSchema,
    NavigationSidebarResponse,
)
from services.navigation import NavigationGroupDTO, NavigationLinkDTO, NavigationService
from services.session import AuthSession

router = APIRouter(prefix="/navigation", tags=["Navigation"])


def _to_link_schema(dto: NavigationLinkDTO) -> NavigationLinkSchema:
    return NavigationLinkSchema(
        id=dto.id,
        label=dto.label,
        href=dto.href,
        match_prefix=dto.match_prefix,
        icon=dto.icon,
        tooltip=dto.tooltip,
        is_external=dto.is_external,
        children=[_to_link_schema(child) for child in dto.children],
    )


def _to_group_schema(dto: NavigationGroupDTO) -> NavigationGroupSchema:
    return NavigationGroupSchema(
        id=dto.id,
        label=dto.label,
        icon=dto.icon,
        tooltip=dto.tooltip,
        links=[_to_link_schema(link) for link in dto.links],
    )


@router.get(
    "/sidebar",
    response_model=NavigationSidebarResponse,
    name="navigation.sidebar",
)
async def get_sidebar_navigation(
    request: Request,
    _: User = Depends(require_authenticated_user),
    session: AuthSession = Depends(get_auth_session),
    service: NavigationService = Depends(get_navigation_service),
) -> NavigationSidebarResponse:
    dto = service.build_sidebar(session=session, request=request)
    return NavigationSidebarResponse(
        groups=[_to_group_schema(group) for group in dto.groups],
        roles=list(dto.roles),
        generated_at=datetime.now(tz=timezone.utc),
    )
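Based on the schema conversion above, a `/navigation/sidebar` response plausibly has the following shape. The values are invented for illustration; the real groups and links come from `NavigationService.build_sidebar`:

```python
# Illustrative response shape only — field names follow the schemas above,
# values are made up.
example_sidebar_response = {
    "groups": [
        {
            "id": "projects",
            "label": "Projects",
            "icon": "folder",
            "tooltip": None,
            "links": [
                {
                    "id": "projects.list",
                    "label": "All projects",
                    "href": "/projects/ui",
                    "match_prefix": "/projects",
                    "icon": None,
                    "tooltip": None,
                    "is_external": False,
                    "children": [],
                }
            ],
        }
    ],
    "roles": ["analyst"],
    "generated_at": "2024-01-01T12:00:00+00:00",
}
```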
@@ -4,21 +4,26 @@ from typing import List
 from fastapi import APIRouter, Depends, Form, HTTPException, Request, status
 from fastapi.responses import HTMLResponse, RedirectResponse
 from fastapi.templating import Jinja2Templates
 
 from dependencies import (
     get_pricing_metadata,
     get_unit_of_work,
     require_any_role,
+    require_any_role_html,
     require_project_resource,
+    require_project_resource_html,
     require_roles,
+    require_roles_html,
 )
 from models import MiningOperationType, Project, ScenarioStatus, User
 from schemas.project import ProjectCreate, ProjectRead, ProjectUpdate
-from services.exceptions import EntityConflictError, EntityNotFoundError
+from services.exceptions import EntityConflictError
 from services.pricing import PricingMetadata
 from services.unit_of_work import UnitOfWork
+from routes.template_filters import create_templates
 
 router = APIRouter(prefix="/projects", tags=["Projects"])
-templates = Jinja2Templates(directory="templates")
+templates = create_templates()
 
 READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
 MANAGE_ROLES = ("project_manager", "admin")
@@ -54,6 +59,7 @@ def create_project(
     payload: ProjectCreate,
     _: User = Depends(require_roles(*MANAGE_ROLES)),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ) -> ProjectRead:
     project = Project(**payload.model_dump())
     try:
@@ -62,6 +68,9 @@ def create_project(
         raise HTTPException(
             status_code=status.HTTP_409_CONFLICT, detail=str(exc)
         ) from exc
+    default_settings = uow.ensure_default_pricing_settings(
+        metadata=metadata).settings
+    uow.set_project_pricing_settings(created, default_settings)
     return _to_read_model(created)
 
 
@@ -73,7 +82,7 @@ def create_project(
 )
 def project_list_page(
     request: Request,
-    _: User = Depends(require_any_role(*READ_ROLES)),
+    _: User = Depends(require_any_role_html(*READ_ROLES)),
     uow: UnitOfWork = Depends(get_unit_of_work),
 ) -> HTMLResponse:
     projects = _require_project_repo(uow).list(with_children=True)
@@ -95,7 +104,8 @@ def project_list_page(
     name="projects.create_project_form",
 )
 def create_project_form(
-    request: Request, _: User = Depends(require_roles(*MANAGE_ROLES))
+    request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
 ) -> HTMLResponse:
     return templates.TemplateResponse(
         request,
@@ -116,12 +126,13 @@ def create_project_form(
 )
 def create_project_submit(
     request: Request,
-    _: User = Depends(require_roles(*MANAGE_ROLES)),
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     name: str = Form(...),
     location: str | None = Form(None),
     operation_type: str = Form(...),
     description: str | None = Form(None),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ):
     def _normalise(value: str | None) -> str | None:
         if value is None:
@@ -131,7 +142,7 @@ def create_project_submit(
 
     try:
         op_type = MiningOperationType(operation_type)
-    except ValueError as exc:
+    except ValueError:
         return templates.TemplateResponse(
             request,
             "projects/form.html",
@@ -152,8 +163,8 @@ def create_project_submit(
         description=_normalise(description),
     )
     try:
-        _require_project_repo(uow).create(project)
-    except EntityConflictError as exc:
+        created = _require_project_repo(uow).create(project)
+    except EntityConflictError:
         return templates.TemplateResponse(
             request,
             "projects/form.html",
@@ -167,6 +178,10 @@ def create_project_submit(
             status_code=status.HTTP_409_CONFLICT,
         )
 
+    default_settings = uow.ensure_default_pricing_settings(
+        metadata=metadata).settings
+    uow.set_project_pricing_settings(created, default_settings)
+
     return RedirectResponse(
         request.url_for("projects.project_list_page"),
         status_code=status.HTTP_303_SEE_OTHER,
@@ -210,7 +225,8 @@ def delete_project(
 )
 def view_project(
     request: Request,
-    project: Project = Depends(require_project_resource()),
+    _: User = Depends(require_any_role_html(*READ_ROLES)),
+    project: Project = Depends(require_project_resource_html()),
     uow: UnitOfWork = Depends(get_unit_of_work),
 ) -> HTMLResponse:
     project = _require_project_repo(uow).get(project.id, with_children=True)
@@ -245,8 +261,9 @@ def view_project(
 )
 def edit_project_form(
     request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     project: Project = Depends(
-        require_project_resource(require_manage=True)
+        require_project_resource_html(require_manage=True)
     ),
 ) -> HTMLResponse:
     return templates.TemplateResponse(
@@ -272,8 +289,9 @@ def edit_project_form(
 )
 def edit_project_submit(
     request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     project: Project = Depends(
-        require_project_resource(require_manage=True)
+        require_project_resource_html(require_manage=True)
     ),
     name: str = Form(...),
     location: str | None = Form(None),
@@ -292,7 +310,7 @@ def edit_project_submit(
     if operation_type:
         try:
             project.operation_type = MiningOperationType(operation_type)
-        except ValueError as exc:
+        except ValueError:
             return templates.TemplateResponse(
                 request,
                 "projects/form.html",
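Both create paths (JSON and HTML form) now attach default pricing settings to the project they just created. A hedged sketch of exercising the JSON path, assuming `ProjectCreate` accepts `name` and `operation_type` and that `open_pit` is a valid `MiningOperationType` value (authentication is omitted; the route requires a project_manager or admin role):

```python
# Sketch only: field names and the enum value are inferred, not confirmed.
from fastapi.testclient import TestClient

from main import app  # hypothetical import path

client = TestClient(app)
response = client.post(
    "/projects",
    json={"name": "Demo pit", "operation_type": "open_pit"},
)
# On success the server also seeds the project's pricing settings via
# uow.ensure_default_pricing_settings(...) before returning the read model.
assert response.status_code in {200, 201}
```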
434 routes/reports.py Normal file
@@ -0,0 +1,434 @@
from __future__ import annotations

from datetime import date

from fastapi import APIRouter, Depends, HTTPException, Query, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import HTMLResponse

from dependencies import (
    get_unit_of_work,
    require_any_role,
    require_any_role_html,
    require_project_resource,
    require_scenario_resource,
    require_project_resource_html,
    require_scenario_resource_html,
)
from models import Project, Scenario, User
from services.exceptions import EntityNotFoundError, ScenarioValidationError
from services.reporting import (
    DEFAULT_ITERATIONS,
    IncludeOptions,
    ReportFilters,
    ReportingService,
    parse_include_tokens,
    validate_percentiles,
)
from services.unit_of_work import UnitOfWork
from routes.template_filters import create_templates

router = APIRouter(prefix="/reports", tags=["Reports"])
templates = create_templates()

READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
MANAGE_ROLES = ("project_manager", "admin")


@router.get("/projects/{project_id}", name="reports.project_summary")
def project_summary_report(
    project: Project = Depends(require_project_resource()),
    _: User = Depends(require_any_role(*READ_ROLES)),
    uow: UnitOfWork = Depends(get_unit_of_work),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (distribution,samples,all).",
    ),
    scenario_ids: list[int] | None = Query(
        None,
        alias="scenario_ids",
        description="Repeatable scenario identifier filter.",
    ),
    start_date: date | None = Query(
        None,
        description="Filter scenarios starting on or after this date.",
    ),
    end_date: date | None = Query(
        None,
        description="Filter scenarios ending on or before this date.",
    ),
    fmt: str = Query(
        "json",
        alias="format",
        description="Response format (json only for this endpoint).",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count when distribution is included.",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries when included.",
    ),
) -> dict[str, object]:
    if fmt.lower() != "json":
        raise HTTPException(
            status_code=status.HTTP_406_NOT_ACCEPTABLE,
            detail="Only JSON responses are supported; use the HTML endpoint for templates.",
        )

    include_options = parse_include_tokens(include)
    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    scenario_filter = ReportFilters(
        scenario_ids=set(scenario_ids) if scenario_ids else None,
        start_date=start_date,
        end_date=end_date,
    )

    service = ReportingService(uow)
    report = service.project_summary(
        project,
        filters=scenario_filter,
        include=include_options,
        iterations=iterations or DEFAULT_ITERATIONS,
        percentiles=percentile_values,
    )
    return jsonable_encoder(report)


@router.get(
    "/projects/{project_id}/scenarios/compare",
    name="reports.project_scenario_comparison",
)
def project_scenario_comparison_report(
    project: Project = Depends(require_project_resource()),
    _: User = Depends(require_any_role(*READ_ROLES)),
    uow: UnitOfWork = Depends(get_unit_of_work),
    scenario_ids: list[int] = Query(
        ..., alias="scenario_ids", description="Repeatable scenario identifier."),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (distribution,samples,all).",
    ),
    fmt: str = Query(
        "json",
        alias="format",
        description="Response format (json only for this endpoint).",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count when distribution is included.",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries when included.",
    ),
) -> dict[str, object]:
    unique_ids = list(dict.fromkeys(scenario_ids))
    if len(unique_ids) < 2:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail="At least two unique scenario_ids must be provided for comparison.",
        )
    if fmt.lower() != "json":
        raise HTTPException(
            status_code=status.HTTP_406_NOT_ACCEPTABLE,
            detail="Only JSON responses are supported; use the HTML endpoint for templates.",
        )

    include_options = parse_include_tokens(include)
    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    try:
        scenarios = uow.validate_scenarios_for_comparison(unique_ids)
    except ScenarioValidationError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail={
                "code": exc.code,
                "message": exc.message,
                "scenario_ids": list(exc.scenario_ids or []),
            },
        ) from exc
    except EntityNotFoundError as exc:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=str(exc),
        ) from exc

    if any(scenario.project_id != project.id for scenario in scenarios):
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="One or more scenarios are not associated with this project.",
        )

    service = ReportingService(uow)
    report = service.scenario_comparison(
        project,
        scenarios,
        include=include_options,
        iterations=iterations or DEFAULT_ITERATIONS,
        percentiles=percentile_values,
    )
    return jsonable_encoder(report)


@router.get(
    "/scenarios/{scenario_id}/distribution",
    name="reports.scenario_distribution",
)
def scenario_distribution_report(
    scenario: Scenario = Depends(require_scenario_resource()),
    _: User = Depends(require_any_role(*READ_ROLES)),
    uow: UnitOfWork = Depends(get_unit_of_work),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (samples,all).",
    ),
    fmt: str = Query(
        "json",
        alias="format",
        description="Response format (json only for this endpoint).",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count (default applies otherwise).",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries.",
    ),
) -> dict[str, object]:
    if fmt.lower() != "json":
        raise HTTPException(
            status_code=status.HTTP_406_NOT_ACCEPTABLE,
            detail="Only JSON responses are supported; use the HTML endpoint for templates.",
        )

    requested = parse_include_tokens(include)
    include_options = IncludeOptions(
        distribution=True, samples=requested.samples)

    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    service = ReportingService(uow)
    report = service.scenario_distribution(
        scenario,
        include=include_options,
        iterations=iterations or DEFAULT_ITERATIONS,
        percentiles=percentile_values,
    )
    return jsonable_encoder(report)


@router.get(
    "/projects/{project_id}/ui",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="reports.project_summary_page",
)
def project_summary_page(
    request: Request,
    project: Project = Depends(require_project_resource_html()),
    _: User = Depends(require_any_role_html(*READ_ROLES)),
    uow: UnitOfWork = Depends(get_unit_of_work),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (distribution,samples,all).",
    ),
    scenario_ids: list[int] | None = Query(
        None,
        alias="scenario_ids",
        description="Repeatable scenario identifier filter.",
    ),
    start_date: date | None = Query(
        None,
        description="Filter scenarios starting on or after this date.",
    ),
    end_date: date | None = Query(
        None,
        description="Filter scenarios ending on or before this date.",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count when distribution is included.",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries when included.",
    ),
) -> HTMLResponse:
    include_options = parse_include_tokens(include)
    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    scenario_filter = ReportFilters(
        scenario_ids=set(scenario_ids) if scenario_ids else None,
        start_date=start_date,
        end_date=end_date,
    )

    service = ReportingService(uow)
    context = service.build_project_summary_context(
        project, scenario_filter, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
    )
    return templates.TemplateResponse(
        request,
        "reports/project_summary.html",
        context,
    )


@router.get(
    "/projects/{project_id}/scenarios/compare/ui",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="reports.project_scenario_comparison_page",
)
def project_scenario_comparison_page(
    request: Request,
    project: Project = Depends(require_project_resource_html()),
    _: User = Depends(require_any_role_html(*READ_ROLES)),
    uow: UnitOfWork = Depends(get_unit_of_work),
    scenario_ids: list[int] = Query(
        ..., alias="scenario_ids", description="Repeatable scenario identifier."),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (distribution,samples,all).",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count when distribution is included.",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries when included.",
    ),
) -> HTMLResponse:
    unique_ids = list(dict.fromkeys(scenario_ids))
    if len(unique_ids) < 2:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail="At least two unique scenario_ids must be provided for comparison.",
        )

    include_options = parse_include_tokens(include)
    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    try:
        scenarios = uow.validate_scenarios_for_comparison(unique_ids)
    except ScenarioValidationError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail={
                "code": exc.code,
                "message": exc.message,
                "scenario_ids": list(exc.scenario_ids or []),
            },
        ) from exc
    except EntityNotFoundError as exc:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=str(exc),
        ) from exc

    if any(scenario.project_id != project.id for scenario in scenarios):
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="One or more scenarios are not associated with this project.",
        )

    service = ReportingService(uow)
    context = service.build_scenario_comparison_context(
        project, scenarios, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
    )
    return templates.TemplateResponse(
        request,
        "reports/scenario_comparison.html",
        context,
    )


@router.get(
    "/scenarios/{scenario_id}/distribution/ui",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="reports.scenario_distribution_page",
)
def scenario_distribution_page(
    request: Request,
    _: User = Depends(require_any_role_html(*READ_ROLES)),
    scenario: Scenario = Depends(
        require_scenario_resource_html()
    ),
    uow: UnitOfWork = Depends(get_unit_of_work),
    include: str | None = Query(
        None,
        description="Comma-separated include tokens (samples,all).",
    ),
    iterations: int | None = Query(
        None,
        gt=0,
        description="Override Monte Carlo iteration count (default applies otherwise).",
    ),
    percentiles: list[float] | None = Query(
        None,
        description="Percentiles (0-100) for Monte Carlo summaries.",
    ),
) -> HTMLResponse:
    requested = parse_include_tokens(include)
    include_options = IncludeOptions(
        distribution=True, samples=requested.samples)

    try:
        percentile_values = validate_percentiles(percentiles)
    except ValueError as exc:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
            detail=str(exc),
        ) from exc

    service = ReportingService(uow)
    context = service.build_scenario_distribution_context(
        scenario, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
    )
    return templates.TemplateResponse(
        request,
        "reports/scenario_distribution.html",
        context,
    )
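As a usage sketch, the comparison endpoint expects at least two distinct `scenario_ids` passed as a repeatable query parameter; include tokens and percentiles ride along the same query string (authentication omitted for brevity):

```python
# Sketch of a comparison request against the JSON endpoint above.
from fastapi.testclient import TestClient

from main import app  # hypothetical import path

client = TestClient(app)
response = client.get(
    "/reports/projects/1/scenarios/compare",
    params={
        "scenario_ids": [3, 7],             # repeatable; two unique ids required
        "include": "distribution",          # comma-separated include tokens
        "percentiles": [10.0, 50.0, 90.0],  # checked by validate_percentiles
    },
)
# A single scenario id (or duplicates only) yields a 422 with a descriptive detail.
assert response.status_code in {200, 422}
```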
@@ -1,17 +1,21 @@
 from __future__ import annotations
 
 from datetime import date
+from types import SimpleNamespace
 from typing import List
 
 from fastapi import APIRouter, Depends, Form, HTTPException, Request, status
 from fastapi.responses import HTMLResponse, RedirectResponse
 from fastapi.templating import Jinja2Templates
 
 from dependencies import (
     get_pricing_metadata,
     get_unit_of_work,
     require_any_role,
+    require_any_role_html,
     require_roles,
+    require_roles_html,
     require_scenario_resource,
+    require_scenario_resource_html,
 )
 from models import ResourceType, Scenario, ScenarioStatus, User
 from schemas.scenario import (
@@ -21,15 +25,18 @@ from schemas.scenario import (
     ScenarioRead,
     ScenarioUpdate,
 )
+from services.currency import CurrencyValidationError, normalise_currency
 from services.exceptions import (
     EntityConflictError,
     EntityNotFoundError,
     ScenarioValidationError,
 )
 from services.pricing import PricingMetadata
 from services.unit_of_work import UnitOfWork
+from routes.template_filters import create_templates
 
 router = APIRouter(tags=["Scenarios"])
-templates = Jinja2Templates(directory="templates")
+templates = create_templates()
 
 READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
 MANAGE_ROLES = ("project_manager", "admin")
@@ -143,6 +150,7 @@ def create_scenario_for_project(
     payload: ScenarioCreate,
     _: User = Depends(require_roles(*MANAGE_ROLES)),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ) -> ScenarioRead:
     project_repo = _require_project_repo(uow)
     scenario_repo = _require_scenario_repo(uow)
@@ -152,7 +160,10 @@ def create_scenario_for_project(
         raise HTTPException(
             status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
 
-    scenario = Scenario(project_id=project_id, **payload.model_dump())
+    scenario_data = payload.model_dump()
+    if not scenario_data.get("currency") and metadata.default_currency:
+        scenario_data["currency"] = metadata.default_currency
+    scenario = Scenario(project_id=project_id, **scenario_data)
 
     try:
         created = scenario_repo.create(scenario)
@@ -162,6 +173,63 @@ def create_scenario_for_project(
     return _to_read_model(created)
 
 
+@router.get(
+    "/projects/{project_id}/scenarios/ui",
+    response_class=HTMLResponse,
+    include_in_schema=False,
+    name="scenarios.project_scenario_list",
+)
+def project_scenario_list_page(
+    project_id: int,
+    request: Request,
+    _: User = Depends(require_any_role_html(*READ_ROLES)),
+    uow: UnitOfWork = Depends(get_unit_of_work),
+) -> HTMLResponse:
+    try:
+        project = _require_project_repo(uow).get(
+            project_id, with_children=True)
+    except EntityNotFoundError as exc:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)
+        ) from exc
+
+    scenarios = sorted(
+        project.scenarios,
+        key=lambda scenario: scenario.updated_at or scenario.created_at,
+        reverse=True,
+    )
+    scenario_totals = {
+        "total": len(scenarios),
+        "active": sum(
+            1 for scenario in scenarios if scenario.status == ScenarioStatus.ACTIVE
+        ),
+        "draft": sum(
+            1 for scenario in scenarios if scenario.status == ScenarioStatus.DRAFT
+        ),
+        "archived": sum(
+            1 for scenario in scenarios if scenario.status == ScenarioStatus.ARCHIVED
+        ),
+        "latest_update": max(
+            (
+                scenario.updated_at or scenario.created_at
+                for scenario in scenarios
+                if scenario.updated_at or scenario.created_at
+            ),
+            default=None,
+        ),
+    }
+
+    return templates.TemplateResponse(
+        request,
+        "scenarios/list.html",
+        {
+            "project": project,
+            "scenarios": scenarios,
+            "scenario_totals": scenario_totals,
+        },
+    )
+
+
 @router.get("/scenarios/{scenario_id}", response_model=ScenarioRead)
 def get_scenario(
     scenario: Scenario = Depends(require_scenario_resource()),
@@ -219,6 +287,33 @@ def _parse_discount_rate(value: str | None) -> float | None:
     return None
 
 
+def _scenario_form_state(
+    *,
+    project_id: int,
+    name: str,
+    description: str | None,
+    status: ScenarioStatus,
+    start_date: date | None,
+    end_date: date | None,
+    discount_rate: float | None,
+    currency: str | None,
+    primary_resource: ResourceType | None,
+    scenario_id: int | None = None,
+) -> SimpleNamespace:
+    return SimpleNamespace(
+        id=scenario_id,
+        project_id=project_id,
+        name=name,
+        description=description,
+        status=status,
+        start_date=start_date,
+        end_date=end_date,
+        discount_rate=discount_rate,
+        currency=currency,
+        primary_resource=primary_resource,
+    )
+
+
 @router.get(
     "/projects/{project_id}/scenarios/new",
     response_class=HTMLResponse,
@@ -228,8 +323,9 @@ def _parse_discount_rate(value: str | None) -> float | None:
 def create_scenario_form(
     project_id: int,
     request: Request,
-    _: User = Depends(require_roles(*MANAGE_ROLES)),
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ) -> HTMLResponse:
     try:
         project = _require_project_repo(uow).get(project_id)
@@ -252,6 +348,7 @@ def create_scenario_form(
             "cancel_url": request.url_for(
                 "projects.view_project", project_id=project_id
             ),
+            "default_currency": metadata.default_currency,
         },
     )
 
@@ -264,7 +361,7 @@ def create_scenario_form(
 def create_scenario_submit(
     project_id: int,
     request: Request,
-    _: User = Depends(require_roles(*MANAGE_ROLES)),
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     name: str = Form(...),
     description: str | None = Form(None),
     status_value: str = Form(ScenarioStatus.DRAFT.value),
@@ -274,6 +371,7 @@ def create_scenario_submit(
     currency: str | None = Form(None),
     primary_resource: str | None = Form(None),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ):
     project_repo = _require_project_repo(uow)
     scenario_repo = _require_scenario_repo(uow)
@@ -296,24 +394,67 @@ def create_scenario_submit(
     except ValueError:
         resource_enum = None
 
-    currency_value = _normalise(currency)
-    currency_value = currency_value.upper() if currency_value else None
+    name_value = name.strip()
+    description_value = _normalise(description)
+    start_date_value = _parse_date(start_date)
+    end_date_value = _parse_date(end_date)
+    discount_rate_value = _parse_discount_rate(discount_rate)
+    currency_input = _normalise(currency)
+    effective_currency = currency_input or metadata.default_currency
+
+    try:
+        currency_value = (
+            normalise_currency(effective_currency)
+            if effective_currency else None
+        )
+    except CurrencyValidationError as exc:
+        form_state = _scenario_form_state(
+            project_id=project_id,
+            name=name_value,
+            description=description_value,
+            status=status_enum,
+            start_date=start_date_value,
+            end_date=end_date_value,
+            discount_rate=discount_rate_value,
+            currency=currency_input or metadata.default_currency,
+            primary_resource=resource_enum,
+        )
+        return templates.TemplateResponse(
+            request,
+            "scenarios/form.html",
+            {
+                "project": project,
+                "scenario": form_state,
+                "scenario_statuses": _scenario_status_choices(),
+                "resource_types": _resource_type_choices(),
+                "form_action": request.url_for(
+                    "scenarios.create_scenario_submit", project_id=project_id
+                ),
+                "cancel_url": request.url_for(
+                    "projects.view_project", project_id=project_id
+                ),
+                "error": str(exc),
+                "error_field": "currency",
+                "default_currency": metadata.default_currency,
+            },
+            status_code=status.HTTP_400_BAD_REQUEST,
+        )
 
     scenario = Scenario(
         project_id=project_id,
-        name=name.strip(),
-        description=_normalise(description),
+        name=name_value,
+        description=description_value,
         status=status_enum,
-        start_date=_parse_date(start_date),
-        end_date=_parse_date(end_date),
-        discount_rate=_parse_discount_rate(discount_rate),
+        start_date=start_date_value,
+        end_date=end_date_value,
+        discount_rate=discount_rate_value,
         currency=currency_value,
         primary_resource=resource_enum,
     )
 
     try:
         scenario_repo.create(scenario)
-    except EntityConflictError as exc:
+    except EntityConflictError:
         return templates.TemplateResponse(
             request,
             "scenarios/form.html",
@@ -328,7 +469,9 @@ def create_scenario_submit(
             "cancel_url": request.url_for(
                 "projects.view_project", project_id=project_id
             ),
-            "error": "Scenario could not be created.",
+            "error": "Scenario with this name already exists for this project.",
+            "error_field": "name",
+            "default_currency": metadata.default_currency,
             },
             status_code=status.HTTP_409_CONFLICT,
         )
@@ -347,8 +490,9 @@ def create_scenario_submit(
 )
 def view_scenario(
     request: Request,
+    _: User = Depends(require_any_role_html(*READ_ROLES)),
     scenario: Scenario = Depends(
-        require_scenario_resource(with_children=True)
+        require_scenario_resource_html(with_children=True)
     ),
     uow: UnitOfWork = Depends(get_unit_of_work),
 ) -> HTMLResponse:
@@ -388,10 +532,12 @@ def view_scenario(
 )
 def edit_scenario_form(
     request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     scenario: Scenario = Depends(
-        require_scenario_resource(require_manage=True)
+        require_scenario_resource_html(require_manage=True)
     ),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ) -> HTMLResponse:
     project = _require_project_repo(uow).get(scenario.project_id)
 
@@ -409,6 +555,7 @@ def edit_scenario_form(
             "cancel_url": request.url_for(
                 "scenarios.view_scenario", scenario_id=scenario.id
             ),
+            "default_currency": metadata.default_currency,
         },
     )
 
@@ -420,8 +567,9 @@ def edit_scenario_form(
 )
 def edit_scenario_submit(
     request: Request,
+    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
     scenario: Scenario = Depends(
-        require_scenario_resource(require_manage=True)
+        require_scenario_resource_html(require_manage=True)
     ),
     name: str = Form(...),
     description: str | None = Form(None),
@@ -432,22 +580,17 @@ def edit_scenario_submit(
     currency: str | None = Form(None),
     primary_resource: str | None = Form(None),
     uow: UnitOfWork = Depends(get_unit_of_work),
+    metadata: PricingMetadata = Depends(get_pricing_metadata),
 ):
     project = _require_project_repo(uow).get(scenario.project_id)
 
-    scenario.name = name.strip()
-    scenario.description = _normalise(description)
+    name_value = name.strip()
+    description_value = _normalise(description)
     try:
         scenario.status = ScenarioStatus(status_value)
     except ValueError:
         scenario.status = ScenarioStatus.DRAFT
-    scenario.start_date = _parse_date(start_date)
-    scenario.end_date = _parse_date(end_date)
-
-    scenario.discount_rate = _parse_discount_rate(discount_rate)
-
-    currency_value = _normalise(currency)
-    scenario.currency = currency_value.upper() if currency_value else None
+    status_enum = scenario.status
 
     resource_enum = None
     if primary_resource:
@@ -455,6 +598,54 @@ def edit_scenario_submit(
             resource_enum = ResourceType(primary_resource)
         except ValueError:
             resource_enum = None
 
+    start_date_value = _parse_date(start_date)
+    end_date_value = _parse_date(end_date)
+    discount_rate_value = _parse_discount_rate(discount_rate)
+    currency_input = _normalise(currency)
+
+    try:
+        currency_value = normalise_currency(currency_input)
+    except CurrencyValidationError as exc:
+        form_state = _scenario_form_state(
+            scenario_id=scenario.id,
+            project_id=scenario.project_id,
+            name=name_value,
+            description=description_value,
+            status=status_enum,
+            start_date=start_date_value,
+            end_date=end_date_value,
+            discount_rate=discount_rate_value,
+            currency=currency_input,
+            primary_resource=resource_enum,
+        )
+        return templates.TemplateResponse(
+            request,
+            "scenarios/form.html",
+            {
+                "project": project,
+                "scenario": form_state,
+                "scenario_statuses": _scenario_status_choices(),
+                "resource_types": _resource_type_choices(),
+                "form_action": request.url_for(
+                    "scenarios.edit_scenario_submit", scenario_id=scenario.id
+                ),
+                "cancel_url": request.url_for(
+                    "scenarios.view_scenario", scenario_id=scenario.id
+                ),
+                "error": str(exc),
+                "error_field": "currency",
+                "default_currency": metadata.default_currency,
+            },
+            status_code=status.HTTP_400_BAD_REQUEST,
+        )
+
+    scenario.name = name_value
+    scenario.description = description_value
+    scenario.start_date = start_date_value
+    scenario.end_date = end_date_value
+    scenario.discount_rate = discount_rate_value
+    scenario.currency = currency_value
+    scenario.primary_resource = resource_enum
+
     uow.flush()
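The form handlers now funnel currency input through `normalise_currency`, replacing the old ad-hoc `.upper()` call. Its contract is not shown in this diff; assuming it trims, uppercases, and validates against known ISO 4217 codes, the behaviour would be roughly:

```python
# Assumed behaviour of services.currency.normalise_currency — a sketch,
# not the actual implementation, which this diff does not include.
from services.currency import CurrencyValidationError, normalise_currency

assert normalise_currency(" usd ") == "USD"  # trims and uppercases (assumed)

try:
    normalise_currency("not-a-currency")
except CurrencyValidationError as exc:
    # The routes render scenarios/form.html with error_field="currency"
    # and HTTP 400 when this is raised.
    print(exc)
```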
147 routes/template_filters.py Normal file
@@ -0,0 +1,147 @@
from __future__ import annotations

import logging
from datetime import datetime, timezone
from typing import Any

from fastapi import Request
from fastapi.templating import Jinja2Templates

from services.navigation import NavigationService
from services.session import AuthSession
from services.unit_of_work import UnitOfWork


logger = logging.getLogger(__name__)


def format_datetime(value: Any) -> str:
    """Render datetime values consistently for templates."""
    if not isinstance(value, datetime):
        return ""
    if value.tzinfo is None:
        value = value.replace(tzinfo=timezone.utc)
    return value.strftime("%Y-%m-%d %H:%M UTC")


def currency_display(value: Any, currency_code: str | None) -> str:
    """Format numeric values with currency context."""
    if value is None:
        return "—"
    if isinstance(value, (int, float)):
        formatted_value = f"{value:,.2f}"
    else:
        formatted_value = str(value)
    if currency_code:
        return f"{currency_code} {formatted_value}"
    return formatted_value


def format_metric(value: Any, metric_name: str, currency_code: str | None = None) -> str:
    """Format metrics according to their semantic type."""
    if value is None:
        return "—"

    currency_metrics = {
        "npv",
        "inflows",
        "outflows",
        "net",
        "total_inflows",
        "total_outflows",
        "total_net",
    }
    if metric_name in currency_metrics and currency_code:
        return currency_display(value, currency_code)

    percentage_metrics = {"irr", "payback_period"}
    if metric_name in percentage_metrics:
        if isinstance(value, (int, float)):
            return f"{value:.2f}%"
        return f"{value}%"

    if isinstance(value, (int, float)):
        return f"{value:,.2f}"

    return str(value)


def percentage_display(value: Any) -> str:
    """Format numeric values as percentages."""
    if value is None:
        return "—"
    if isinstance(value, (int, float)):
        return f"{value:.2f}%"
    return f"{value}%"


def period_display(value: Any) -> str:
    """Format period values in years."""
    if value is None:
        return "—"
    if isinstance(value, (int, float)):
        if value == int(value):
            return f"{int(value)} years"
        return f"{value:.1f} years"
    return str(value)


def register_common_filters(templates: Jinja2Templates) -> None:
    templates.env.filters["format_datetime"] = format_datetime
    templates.env.filters["currency_display"] = currency_display
    templates.env.filters["format_metric"] = format_metric
    templates.env.filters["percentage_display"] = percentage_display
    templates.env.filters["period_display"] = period_display


def _sidebar_navigation_for_request(request: Request | None):
    if request is None:
        return None

    cached = getattr(request.state, "_navigation_sidebar_dto", None)
    if cached is not None:
        return cached

    session_context = getattr(request.state, "auth_session", None)
    if isinstance(session_context, AuthSession):
        session = session_context
    else:
        session = AuthSession.anonymous()

    try:
        with UnitOfWork() as uow:
            if not uow.navigation:
                logger.debug("Navigation repository unavailable for sidebar rendering")
                sidebar_dto = None
            else:
                service = NavigationService(uow.navigation)
                sidebar_dto = service.build_sidebar(session=session, request=request)
    except Exception:  # pragma: no cover - defensive fallback for templates
        logger.exception("Failed to build sidebar navigation during template render")
        sidebar_dto = None

    setattr(request.state, "_navigation_sidebar_dto", sidebar_dto)
    return sidebar_dto


def register_navigation_globals(templates: Jinja2Templates) -> None:
    templates.env.globals["get_sidebar_navigation"] = _sidebar_navigation_for_request


def create_templates() -> Jinja2Templates:
    templates = Jinja2Templates(directory="templates")
    register_common_filters(templates)
    register_navigation_globals(templates)
    return templates


__all__ = [
    "format_datetime",
    "currency_display",
    "format_metric",
    "percentage_display",
    "period_display",
    "register_common_filters",
    "register_navigation_globals",
    "create_templates",
]
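The filters are plain functions, so their formatting rules can be checked directly; in templates they are applied as `{{ value|format_metric("npv", currency) }}` and friends:

```python
from routes.template_filters import (
    currency_display,
    format_metric,
    percentage_display,
    period_display,
)

assert currency_display(1234567.891, "USD") == "USD 1,234,567.89"
assert format_metric(0.0, "npv", "EUR") == "EUR 0.00"   # currency metric
assert format_metric(12.5, "irr") == "12.50%"           # percentage metric
assert percentage_display(None) == "—"                  # em-dash placeholder
assert period_display(3.0) == "3 years"                 # whole years collapse
assert period_display(3.5) == "3.5 years"
```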
109 routes/ui.py Normal file
@@ -0,0 +1,109 @@
from __future__ import annotations

from fastapi import APIRouter, Depends, Request
from fastapi.responses import HTMLResponse

from dependencies import require_any_role_html, require_roles_html
from models import User
from routes.template_filters import create_templates

router = APIRouter(tags=["UI"])
templates = create_templates()

READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
MANAGE_ROLES = ("project_manager", "admin")


@router.get(
    "/ui/simulations",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="ui.simulations",
)
def simulations_dashboard(
    request: Request,
    _: User = Depends(require_any_role_html(*READ_ROLES)),
) -> HTMLResponse:
    return templates.TemplateResponse(
        request,
        "simulations.html",
        {
            "title": "Simulations",
        },
    )


@router.get(
    "/ui/reporting",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="ui.reporting",
)
def reporting_dashboard(
    request: Request,
    _: User = Depends(require_any_role_html(*READ_ROLES)),
) -> HTMLResponse:
    return templates.TemplateResponse(
        request,
        "reporting.html",
        {
            "title": "Reporting",
        },
    )


@router.get(
    "/ui/settings",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="ui.settings",
)
def settings_page(
    request: Request,
    _: User = Depends(require_any_role_html(*READ_ROLES)),
) -> HTMLResponse:
    return templates.TemplateResponse(
        request,
        "settings.html",
        {
            "title": "Settings",
        },
    )


@router.get(
    "/theme-settings",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="ui.theme_settings",
)
def theme_settings_page(
    request: Request,
    _: User = Depends(require_any_role_html(*READ_ROLES)),
) -> HTMLResponse:
    return templates.TemplateResponse(
        request,
        "theme_settings.html",
        {
            "title": "Theme Settings",
        },
    )


@router.get(
    "/ui/currencies",
    response_class=HTMLResponse,
    include_in_schema=False,
    name="ui.currencies",
)
def currencies_page(
    request: Request,
    _: User = Depends(require_roles_html(*MANAGE_ROLES)),
) -> HTMLResponse:
    return templates.TemplateResponse(
        request,
        "currencies.html",
        {
            "title": "Currency Management",
        },
    )
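These page routes only render templates; the routers are presumably wired together where the application is assembled, which this diff does not show. A sketch under that assumption:

```python
# Hypothetical application wiring — the actual main module is not part
# of this diff.
from fastapi import FastAPI

from routes import exports, imports, navigation, projects, reports, scenarios, ui

app = FastAPI()
for module in (exports, imports, navigation, projects, reports, scenarios, ui):
    app.include_router(module.router)
```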
346 schemas/calculations.py Normal file
@@ -0,0 +1,346 @@
"""Pydantic schemas for calculation workflows."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pydantic import BaseModel, Field, PositiveFloat, ValidationError, field_validator
|
||||
|
||||
from services.pricing import PricingResult
|
||||
|
||||
|
||||
class ImpurityInput(BaseModel):
|
||||
"""Impurity configuration row supplied by the client."""
|
||||
|
||||
name: str = Field(..., min_length=1)
|
||||
value: float | None = Field(None, ge=0)
|
||||
threshold: float | None = Field(None, ge=0)
|
||||
penalty: float | None = Field(None)
|
||||
|
||||
@field_validator("name")
|
||||
@classmethod
|
||||
def _normalise_name(cls, value: str) -> str:
|
||||
return value.strip()
|
||||
|
||||
|
||||
class ProfitabilityCalculationRequest(BaseModel):
|
||||
"""Request payload for profitability calculations."""
|
||||
|
||||
metal: str = Field(..., min_length=1)
|
||||
ore_tonnage: PositiveFloat
|
||||
head_grade_pct: float = Field(..., gt=0, le=100)
|
||||
recovery_pct: float = Field(..., gt=0, le=100)
|
||||
payable_pct: float | None = Field(None, gt=0, le=100)
|
||||
reference_price: PositiveFloat
|
||||
treatment_charge: float = Field(0, ge=0)
|
||||
smelting_charge: float = Field(0, ge=0)
|
||||
moisture_pct: float = Field(0, ge=0, le=100)
|
||||
moisture_threshold_pct: float | None = Field(None, ge=0, le=100)
|
||||
moisture_penalty_per_pct: float | None = None
|
||||
premiums: float = Field(0)
|
||||
fx_rate: PositiveFloat = Field(1)
|
||||
currency_code: str | None = Field(None, min_length=3, max_length=3)
|
||||
opex: float = Field(0, ge=0)
|
||||
sustaining_capex: float = Field(0, ge=0)
|
||||
capex: float = Field(0, ge=0)
|
||||
discount_rate: float | None = Field(None, ge=0, le=100)
|
||||
periods: int = Field(10, ge=1, le=120)
|
||||
impurities: List[ImpurityInput] = Field(default_factory=list)
|
||||
|
||||
@field_validator("currency_code")
|
||||
@classmethod
|
||||
def _uppercase_currency(cls, value: str | None) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
return value.strip().upper()
|
||||
|
||||
@field_validator("metal")
|
||||
@classmethod
|
||||
def _normalise_metal(cls, value: str) -> str:
|
||||
return value.strip().lower()
|
||||
|
||||
|
||||
class ProfitabilityCosts(BaseModel):
|
||||
"""Aggregated cost components for profitability output."""
|
||||
|
||||
opex_total: float
|
||||
sustaining_capex_total: float
|
||||
capex: float
|
||||
|
||||
|
||||
class ProfitabilityMetrics(BaseModel):
|
||||
"""Financial KPIs yielded by the profitability calculation."""
|
||||
|
||||
npv: float | None
|
||||
irr: float | None
|
||||
payback_period: float | None
|
||||
margin: float | None
|
||||
|
||||
|
||||
class CashFlowEntry(BaseModel):
|
||||
"""Normalized cash flow row for reporting and charting."""
|
||||
|
||||
period: int
|
||||
revenue: float
|
||||
opex: float
|
||||
sustaining_capex: float
|
||||
net: float
|
||||
|
||||
|
||||
class ProfitabilityCalculationResult(BaseModel):
|
||||
"""Response body summarizing profitability calculation outputs."""
|
||||
|
||||
pricing: PricingResult
|
||||
costs: ProfitabilityCosts
|
||||
metrics: ProfitabilityMetrics
|
||||
cash_flows: list[CashFlowEntry]
|
||||
currency: str | None
|
||||
|
||||
|
||||
class CapexComponentInput(BaseModel):
|
||||
"""Capex component entry supplied by the UI."""
|
||||
|
||||
id: int | None = Field(default=None, ge=1)
|
||||
name: str = Field(..., min_length=1)
|
||||
category: str = Field(..., min_length=1)
|
||||
amount: float = Field(..., ge=0)
|
||||
currency: str | None = Field(None, min_length=3, max_length=3)
|
||||
spend_year: int | None = Field(None, ge=0, le=120)
|
||||
notes: str | None = Field(None, max_length=500)
|
||||
|
||||
@field_validator("currency")
|
||||
@classmethod
|
||||
def _uppercase_currency(cls, value: str | None) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
return value.strip().upper()
|
||||
|
||||
@field_validator("category")
|
||||
@classmethod
|
||||
def _normalise_category(cls, value: str) -> str:
|
||||
return value.strip().lower()
|
||||
|
||||
@field_validator("name")
|
||||
@classmethod
|
||||
def _trim_name(cls, value: str) -> str:
|
||||
return value.strip()
|
||||
|
||||
|
||||
class CapexParameters(BaseModel):
|
||||
"""Global parameters applied to capex calculations."""
|
||||
|
||||
currency_code: str | None = Field(None, min_length=3, max_length=3)
|
||||
contingency_pct: float | None = Field(0, ge=0, le=100)
|
||||
discount_rate_pct: float | None = Field(None, ge=0, le=100)
|
||||
evaluation_horizon_years: int | None = Field(10, ge=1, le=100)
|
||||
|
||||
@field_validator("currency_code")
|
||||
@classmethod
|
||||
def _uppercase_currency(cls, value: str | None) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
return value.strip().upper()
|
||||
|
||||
|
||||
class CapexCalculationOptions(BaseModel):
|
||||
"""Optional behaviour flags for capex calculations."""
|
||||
|
||||
persist: bool = False
|
||||
|
||||
|
||||
class CapexCalculationRequest(BaseModel):
|
||||
"""Request payload for capex aggregation."""
|
||||
|
||||
components: List[CapexComponentInput] = Field(default_factory=list)
|
||||
parameters: CapexParameters = Field(
|
||||
default_factory=CapexParameters, # type: ignore[arg-type]
|
||||
)
|
||||
options: CapexCalculationOptions = Field(
|
||||
default_factory=CapexCalculationOptions, # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
|
||||
class CapexCategoryBreakdown(BaseModel):
|
||||
"""Breakdown entry describing category totals."""
|
||||
|
||||
category: str
|
||||
amount: float = Field(..., ge=0)
|
||||
share: float | None = Field(None, ge=0, le=100)
|
||||
|
||||
|
||||
class CapexTotals(BaseModel):
|
||||
"""Aggregated totals for capex workflows."""
|
||||
|
||||
overall: float = Field(..., ge=0)
|
||||
contingency_pct: float = Field(0, ge=0, le=100)
|
||||
contingency_amount: float = Field(..., ge=0)
|
||||
with_contingency: float = Field(..., ge=0)
|
||||
by_category: List[CapexCategoryBreakdown] = Field(default_factory=list)
|
||||
|
||||
|
||||
class CapexTimelineEntry(BaseModel):
|
||||
"""Spend profile entry grouped by year."""
|
||||
|
||||
year: int
|
||||
spend: float = Field(..., ge=0)
|
||||
cumulative: float = Field(..., ge=0)
|
||||
|
||||
|
||||
class CapexCalculationResult(BaseModel):
|
||||
"""Response body for capex calculations."""
|
||||
|
||||
totals: CapexTotals
|
||||
timeline: List[CapexTimelineEntry] = Field(default_factory=list)
|
||||
components: List[CapexComponentInput] = Field(default_factory=list)
|
||||
parameters: CapexParameters
|
||||
options: CapexCalculationOptions
|
||||
currency: str | None
|
||||
|
||||
|
||||
class OpexComponentInput(BaseModel):
|
||||
"""opex component entry supplied by the UI."""
|
||||
|
||||
    id: int | None = Field(default=None, ge=1)
    name: str = Field(..., min_length=1)
    category: str = Field(..., min_length=1)
    unit_cost: float = Field(..., ge=0)
    quantity: float = Field(..., ge=0)
    frequency: str = Field(..., min_length=1)
    currency: str | None = Field(None, min_length=3, max_length=3)
    period_start: int | None = Field(None, ge=0, le=240)
    period_end: int | None = Field(None, ge=0, le=240)
    notes: str | None = Field(None, max_length=500)

    @field_validator("currency")
    @classmethod
    def _uppercase_currency(cls, value: str | None) -> str | None:
        if value is None:
            return None
        return value.strip().upper()

    @field_validator("category")
    @classmethod
    def _normalise_category(cls, value: str) -> str:
        return value.strip().lower()

    @field_validator("frequency")
    @classmethod
    def _normalise_frequency(cls, value: str) -> str:
        return value.strip().lower()

    @field_validator("name")
    @classmethod
    def _trim_name(cls, value: str) -> str:
        return value.strip()


class OpexParameters(BaseModel):
    """Global parameters applied to opex calculations."""

    currency_code: str | None = Field(None, min_length=3, max_length=3)
    escalation_pct: float | None = Field(None, ge=0, le=100)
    discount_rate_pct: float | None = Field(None, ge=0, le=100)
    evaluation_horizon_years: int | None = Field(10, ge=1, le=100)
    apply_escalation: bool = True

    @field_validator("currency_code")
    @classmethod
    def _uppercase_currency(cls, value: str | None) -> str | None:
        if value is None:
            return None
        return value.strip().upper()


class OpexOptions(BaseModel):
    """Optional behaviour flags for opex calculations."""

    persist: bool = False
    snapshot_notes: str | None = Field(None, max_length=500)


class OpexCalculationRequest(BaseModel):
    """Request payload for opex aggregation."""

    components: List[OpexComponentInput] = Field(default_factory=list)
    parameters: OpexParameters = Field(
        default_factory=OpexParameters,  # type: ignore[arg-type]
    )
    options: OpexOptions = Field(
        default_factory=OpexOptions,  # type: ignore[arg-type]
    )


class OpexCategoryBreakdown(BaseModel):
    """Category breakdown for opex totals."""

    category: str
    annual_cost: float = Field(..., ge=0)
    share: float | None = Field(None, ge=0, le=100)


class OpexTimelineEntry(BaseModel):
    """Timeline entry representing cost over evaluation periods."""

    period: int
    base_cost: float = Field(..., ge=0)
    escalated_cost: float | None = Field(None, ge=0)


class OpexMetrics(BaseModel):
    """Derived KPIs for opex outputs."""

    annual_average: float | None
    cost_per_ton: float | None


class OpexTotals(BaseModel):
    """Aggregated totals for opex."""

    overall_annual: float = Field(..., ge=0)
    escalated_total: float | None = Field(None, ge=0)
    escalation_pct: float | None = Field(None, ge=0, le=100)
    by_category: List[OpexCategoryBreakdown] = Field(default_factory=list)


class OpexCalculationResult(BaseModel):
    """Response body summarising opex calculations."""

    totals: OpexTotals
    timeline: List[OpexTimelineEntry] = Field(default_factory=list)
    metrics: OpexMetrics
    components: List[OpexComponentInput] = Field(default_factory=list)
    parameters: OpexParameters
    options: OpexOptions
    currency: str | None


__all__ = [
    "ImpurityInput",
    "ProfitabilityCalculationRequest",
    "ProfitabilityCosts",
    "ProfitabilityMetrics",
    "CashFlowEntry",
    "ProfitabilityCalculationResult",
    "CapexComponentInput",
    "CapexParameters",
    "CapexCalculationOptions",
    "CapexCalculationRequest",
    "CapexCategoryBreakdown",
    "CapexTotals",
    "CapexTimelineEntry",
    "CapexCalculationResult",
    "OpexComponentInput",
    "OpexParameters",
    "OpexOptions",
    "OpexCalculationRequest",
    "OpexCategoryBreakdown",
    "OpexTimelineEntry",
    "OpexMetrics",
    "OpexTotals",
    "OpexCalculationResult",
    "ValidationError",
]
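The request models above normalise free-form client input through their `field_validator` hooks before any calculation runs. A minimal validation sketch (field values are illustrative, not from the source):

    from schemas.calculations import ProfitabilityCalculationRequest

    request = ProfitabilityCalculationRequest.model_validate(
        {
            "metal": "  Copper",      # validator trims and lowercases -> "copper"
            "ore_tonnage": 1_000_000,
            "head_grade_pct": 1.2,
            "recovery_pct": 88.0,
            "reference_price": 9_500.0,
            "currency_code": "usd",   # validator uppercases -> "USD"
        }
    )
    print(request.metal, request.currency_code, request.periods)  # copper USD 10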
69
schemas/exports.py
Normal file
@@ -0,0 +1,69 @@
from __future__ import annotations

from enum import Enum
from typing import Literal

from pydantic import BaseModel, ConfigDict, field_validator

from services.export_query import ProjectExportFilters, ScenarioExportFilters


class ExportFormat(str, Enum):
    CSV = "csv"
    XLSX = "xlsx"


class BaseExportRequest(BaseModel):
    format: ExportFormat = ExportFormat.CSV
    include_metadata: bool = False

    model_config = ConfigDict(extra="forbid")


class ProjectExportRequest(BaseExportRequest):
    filters: ProjectExportFilters | None = None

    @field_validator("filters", mode="before")
    @classmethod
    def validate_filters(cls, value: ProjectExportFilters | None) -> ProjectExportFilters | None:
        if value is None:
            return None
        if isinstance(value, ProjectExportFilters):
            return value
        return ProjectExportFilters(**value)


class ScenarioExportRequest(BaseExportRequest):
    filters: ScenarioExportFilters | None = None

    @field_validator("filters", mode="before")
    @classmethod
    def validate_filters(cls, value: ScenarioExportFilters | None) -> ScenarioExportFilters | None:
        if value is None:
            return None
        if isinstance(value, ScenarioExportFilters):
            return value
        return ScenarioExportFilters(**value)


class ExportTicket(BaseModel):
    token: str
    format: ExportFormat
    resource: Literal["projects", "scenarios"]

    model_config = ConfigDict(extra="forbid")


class ExportResponse(BaseModel):
    ticket: ExportTicket

    model_config = ConfigDict(extra="forbid")


__all__ = [
    "ExportFormat",
    "ProjectExportRequest",
    "ScenarioExportRequest",
    "ExportTicket",
    "ExportResponse",
]
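Because both filter validators run with mode="before", a raw JSON dictionary is coerced into the corresponding filters dataclass before model validation; a sketch with made-up values:

    from schemas.exports import ExportFormat, ProjectExportRequest

    request = ProjectExportRequest.model_validate(
        {
            "format": "xlsx",
            "include_metadata": True,
            "filters": {"name_contains": "mine"},
        }
    )
    assert request.format is ExportFormat.XLSX
    assert request.filters.name_search_pattern() == "%mine%"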
@@ -7,6 +7,7 @@ from typing import Literal
 from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from models import MiningOperationType, ResourceType, ScenarioStatus
+from services.currency import CurrencyValidationError, normalise_currency

 PreviewStateLiteral = Literal["new", "update", "skip", "error"]

@@ -142,14 +143,13 @@ class ScenarioImportRow(BaseModel):
     @field_validator("currency", mode="before")
     @classmethod
     def normalise_currency(cls, value: Any | None) -> str | None:
-        if value is None:
-            return None
-        text = _normalise_string(value).upper()
-        if not text:
-            return None
-        if len(text) != 3:
-            raise ValueError("Currency code must be a 3-letter ISO value")
-        return text
+        text = _strip_or_none(value)
+        if text is None:
+            return None
+        try:
+            return normalise_currency(text)
+        except CurrencyValidationError as exc:
+            raise ValueError(str(exc)) from exc

     @field_validator("discount_rate", mode="before")
     @classmethod
36
schemas/navigation.py
Normal file
@@ -0,0 +1,36 @@
from __future__ import annotations

from datetime import datetime
from typing import List

from pydantic import BaseModel, Field


class NavigationLinkSchema(BaseModel):
    id: int
    label: str
    href: str
    match_prefix: str | None = Field(default=None)
    icon: str | None = Field(default=None)
    tooltip: str | None = Field(default=None)
    is_external: bool = Field(default=False)
    children: List["NavigationLinkSchema"] = Field(default_factory=list)


class NavigationGroupSchema(BaseModel):
    id: int
    label: str
    icon: str | None = Field(default=None)
    tooltip: str | None = Field(default=None)
    links: List[NavigationLinkSchema] = Field(default_factory=list)


class NavigationSidebarResponse(BaseModel):
    groups: List[NavigationGroupSchema]
    roles: List[str] = Field(default_factory=list)
    generated_at: datetime


NavigationLinkSchema.model_rebuild()
NavigationGroupSchema.model_rebuild()
NavigationSidebarResponse.model_rebuild()
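`children` is a self-referential field, so menus of arbitrary depth validate once the trailing `model_rebuild()` calls resolve the forward reference; a small sketch with invented labels:

    from datetime import datetime, timezone
    from schemas.navigation import (
        NavigationGroupSchema,
        NavigationLinkSchema,
        NavigationSidebarResponse,
    )

    sidebar = NavigationSidebarResponse(
        groups=[
            NavigationGroupSchema(
                id=1,
                label="Planning",
                links=[
                    NavigationLinkSchema(
                        id=10,
                        label="Scenarios",
                        href="/ui/scenarios",
                        children=[
                            NavigationLinkSchema(
                                id=11, label="Compare", href="/ui/scenarios/compare"
                            ),
                        ],
                    ),
                ],
            ),
        ],
        roles=["admin"],
        generated_at=datetime.now(timezone.utc),
    )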
@@ -5,6 +5,7 @@ from datetime import date, datetime
 from pydantic import BaseModel, ConfigDict, field_validator, model_validator

 from models import ResourceType, ScenarioStatus
+from services.currency import CurrencyValidationError, normalise_currency


 class ScenarioBase(BaseModel):
@@ -23,11 +24,15 @@ class ScenarioBase(BaseModel):
     @classmethod
     def normalise_currency(cls, value: str | None) -> str | None:
         if value is None:
-            return value
-        value = value.upper()
-        if len(value) != 3:
-            raise ValueError("Currency code must be a 3-letter ISO value")
-        return value
+            return None
+        candidate = value if isinstance(value, str) else str(value)
+        candidate = candidate.strip()
+        if not candidate:
+            return None
+        try:
+            return normalise_currency(candidate)
+        except CurrencyValidationError as exc:
+            raise ValueError(str(exc)) from exc


 class ScenarioCreate(ScenarioBase):
@@ -50,11 +55,15 @@ class ScenarioUpdate(BaseModel):
     @classmethod
     def normalise_currency(cls, value: str | None) -> str | None:
         if value is None:
-            return value
-        value = value.upper()
-        if len(value) != 3:
-            raise ValueError("Currency code must be a 3-letter ISO value")
-        return value
+            return None
+        candidate = value if isinstance(value, str) else str(value)
+        candidate = candidate.strip()
+        if not candidate:
+            return None
+        try:
+            return normalise_currency(candidate)
+        except CurrencyValidationError as exc:
+            raise ValueError(str(exc)) from exc


 class ScenarioRead(ScenarioBase):
@@ -75,7 +84,8 @@ class ScenarioComparisonRequest(BaseModel):
     def ensure_minimum_ids(self) -> "ScenarioComparisonRequest":
         unique_ids: list[int] = list(dict.fromkeys(self.scenario_ids))
         if len(unique_ids) < 2:
-            raise ValueError("At least two unique scenario identifiers are required for comparison.")
+            raise ValueError(
+                "At least two unique scenario identifiers are required for comparison.")
         self.scenario_ids = unique_ids
         return self

112
scripts/_route_verification.py
Normal file
@@ -0,0 +1,112 @@
"""Utility script to verify key authenticated routes respond without errors."""
from __future__ import annotations

import json
import os
import sys
import urllib.parse
from http.client import HTTPConnection
from http.cookies import SimpleCookie
from typing import Dict, List, Tuple

HOST = "127.0.0.1"
PORT = 8000

cookies: Dict[str, str] = {}


def _update_cookies(headers: List[Tuple[str, str]]) -> None:
    for name, value in headers:
        if name.lower() != "set-cookie":
            continue
        cookie = SimpleCookie()
        cookie.load(value)
        for key, morsel in cookie.items():
            cookies[key] = morsel.value


def _cookie_header() -> str | None:
    if not cookies:
        return None
    return "; ".join(f"{key}={value}" for key, value in cookies.items())


def request(method: str, path: str, *, body: bytes | None = None, headers: Dict[str, str] | None = None) -> Tuple[int, Dict[str, str], bytes]:
    conn = HTTPConnection(HOST, PORT, timeout=10)
    prepared_headers = {"User-Agent": "route-checker"}
    if headers:
        prepared_headers.update(headers)
    cookie_header = _cookie_header()
    if cookie_header:
        prepared_headers["Cookie"] = cookie_header

    conn.request(method, path, body=body, headers=prepared_headers)
    resp = conn.getresponse()
    payload = resp.read()
    status = resp.status
    reason = resp.reason
    response_headers = {name: value for name, value in resp.getheaders()}
    _update_cookies(list(resp.getheaders()))
    conn.close()
    print(f"{method} {path} -> {status} {reason}")
    return status, response_headers, payload


def main() -> int:
    status, _, _ = request("GET", "/login")
    if status != 200:
        print("Unexpected status for GET /login", file=sys.stderr)
        return 1

    admin_username = os.getenv("CALMINER_SEED_ADMIN_USERNAME", "admin")
    admin_password = os.getenv("CALMINER_SEED_ADMIN_PASSWORD", "M11ffpgm.")
    login_payload = urllib.parse.urlencode(
        {"username": admin_username, "password": admin_password}
    ).encode()
    status, headers, _ = request(
        "POST",
        "/login",
        body=login_payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    if status not in {200, 303}:
        print("Login failed", file=sys.stderr)
        return 1

    location = headers.get("Location", "/")
    redirect_path = urllib.parse.urlsplit(location).path or "/"
    request("GET", redirect_path)

    request("GET", "/")
    request("GET", "/projects/ui")

    status, headers, body = request(
        "GET",
        "/projects",
        headers={"Accept": "application/json"},
    )
    projects: List[dict] = []
    if headers.get("Content-Type", "").startswith("application/json"):
        projects = json.loads(body.decode())

    if projects:
        project_id = projects[0]["id"]
        request("GET", f"/projects/{project_id}/view")
        status, headers, body = request(
            "GET",
            f"/projects/{project_id}/scenarios",
            headers={"Accept": "application/json"},
        )
        scenarios: List[dict] = []
        if headers.get("Content-Type", "").startswith("application/json"):
            scenarios = json.loads(body.decode())
        if scenarios:
            scenario_id = scenarios[0]["id"]
            request("GET", f"/scenarios/{scenario_id}/view")

    print("Cookies:", cookies)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
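A sketch of driving the checker against a development server already listening on 127.0.0.1:8000 (the HOST/PORT constants above); the credentials shown are the development seeding defaults from this repo, not real secrets:

    import os

    os.environ.setdefault("CALMINER_SEED_ADMIN_USERNAME", "admin")
    os.environ.setdefault("CALMINER_SEED_ADMIN_PASSWORD", "ChangeMe123!")

    # Reuse the script's entry point directly; it logs in, follows the
    # redirect, and walks the project/scenario pages it can reach.
    from scripts._route_verification import main

    raise SystemExit(main())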
15
scripts/apply_users_sequence_fix.py
Normal file
@@ -0,0 +1,15 @@
from sqlalchemy import create_engine, text
from config.database import DATABASE_URL

engine = create_engine(DATABASE_URL, future=True)
sqls = [
    "CREATE SEQUENCE IF NOT EXISTS users_id_seq;",
    "ALTER TABLE users ALTER COLUMN id SET DEFAULT nextval('users_id_seq');",
    "SELECT setval('users_id_seq', COALESCE((SELECT MAX(id) FROM users), 1));",
    "ALTER SEQUENCE users_id_seq OWNED BY users.id;",
]
with engine.begin() as conn:
    for s in sqls:
        print('EXECUTING:', s)
        conn.execute(text(s))
print('SEQUENCE fix applied')
1468
scripts/init_db.py
Normal file
File diff suppressed because it is too large
@@ -7,8 +7,15 @@ from typing import Callable, Iterable

 from dotenv import load_dotenv

+from config.settings import Settings
 from models import Role, User
-from services.repositories import DEFAULT_ROLE_DEFINITIONS, RoleRepository, UserRepository
+from services.repositories import (
+    DEFAULT_ROLE_DEFINITIONS,
+    PricingSettingsSeedResult,
+    RoleRepository,
+    UserRepository,
+    ensure_default_pricing_settings,
+)
 from services.unit_of_work import UnitOfWork

@@ -45,7 +52,8 @@ def parse_bool(value: str | None) -> bool:
 def normalise_role_list(raw_value: str | None) -> tuple[str, ...]:
     if not raw_value:
         return ("admin",)
-    parts = [segment.strip() for segment in raw_value.split(",") if segment.strip()]
+    parts = [segment.strip()
+             for segment in raw_value.split(",") if segment.strip()]
     if "admin" not in parts:
         parts.insert(0, "admin")
     seen: set[str] = set()
@@ -59,7 +67,8 @@ def normalise_role_list(raw_value: str | None) -> tuple[str, ...]:

 def load_config() -> SeedConfig:
     load_dotenv()
-    admin_email = os.getenv("CALMINER_SEED_ADMIN_EMAIL", "admin@calminer.local")
+    admin_email = os.getenv("CALMINER_SEED_ADMIN_EMAIL",
+                            "admin@calminer.local")
     admin_username = os.getenv("CALMINER_SEED_ADMIN_USERNAME", "admin")
     admin_password = os.getenv("CALMINER_SEED_ADMIN_PASSWORD", "ChangeMe123!")
     admin_roles = normalise_role_list(os.getenv("CALMINER_SEED_ADMIN_ROLES"))
@@ -140,12 +149,15 @@ def ensure_admin_user(
     for role_name in config.admin_roles:
         role = role_repo.get_by_name(role_name)
         if role is None:
-            logging.warning("Role '%s' is not defined and will be skipped", role_name)
+            logging.warning(
+                "Role '%s' is not defined and will be skipped", role_name)
             continue
-        already_assigned = any(assignment.role_id == role.id for assignment in user.role_assignments)
+        already_assigned = any(assignment.role_id ==
+                               role.id for assignment in user.role_assignments)
         if already_assigned:
             continue
-        user_repo.assign_role(user_id=user.id, role_id=role.id, granted_by=user.id)
+        user_repo.assign_role(
+            user_id=user.id, role_id=role.id, granted_by=user.id)
         roles_granted += 1

     return AdminSeedResult(
@@ -164,9 +176,33 @@ def seed_initial_data(
     logging.info("Starting initial data seeding")
     factory = unit_of_work_factory or UnitOfWork
     with factory() as uow:
-        assert uow.roles is not None and uow.users is not None
+        assert (
+            uow.roles is not None
+            and uow.users is not None
+            and uow.pricing_settings is not None
+            and uow.projects is not None
+        )
         role_result = ensure_default_roles(uow.roles)
         admin_result = ensure_admin_user(uow.users, uow.roles, config)
+        pricing_metadata = uow.get_pricing_metadata()
+        metadata_source = "database"
+        if pricing_metadata is None:
+            pricing_metadata = Settings.from_environment().pricing_metadata()
+            metadata_source = "environment"
+        pricing_result: PricingSettingsSeedResult = ensure_default_pricing_settings(
+            uow.pricing_settings,
+            metadata=pricing_metadata,
+        )
+
+        projects_without_pricing = [
+            project
+            for project in uow.projects.list(with_pricing=True)
+            if project.pricing_settings is None
+        ]
+        assigned_projects = 0
+        for project in projects_without_pricing:
+            uow.set_project_pricing_settings(project, pricing_result.settings)
+            assigned_projects += 1
     logging.info(
         "Roles processed: %s total, %s created, %s updated",
         role_result.total,
@@ -180,4 +216,16 @@ def seed_initial_data(
         admin_result.password_rotated,
         admin_result.roles_granted,
     )
+    logging.info(
+        "Pricing settings ensured (source=%s): slug=%s created=%s updated_fields=%s impurity_upserts=%s",
+        metadata_source,
+        pricing_result.settings.slug,
+        pricing_result.created,
+        pricing_result.updated_fields,
+        pricing_result.impurity_upserts,
+    )
+    logging.info(
+        "Projects updated with default pricing settings: %s",
+        assigned_projects,
+    )
     logging.info("Initial data seeding completed successfully")
91
scripts/reset_db.py
Normal file
@@ -0,0 +1,91 @@
"""Utility to reset development Postgres schema artifacts.

This script drops managed tables and enum types created by `scripts.init_db`.
It is intended for local development only; it refuses to run if CALMINER_ENV
indicates production or staging. The operation is idempotent: missing objects
are ignored. Use with caution.
"""
from __future__ import annotations

import logging
import os
from dataclasses import dataclass
from typing import Iterable

from sqlalchemy import text
from sqlalchemy.engine import Engine

from config.database import DATABASE_URL
from scripts.init_db import ENUM_DEFINITIONS, _create_engine

logger = logging.getLogger(__name__)


@dataclass(slots=True)
class ResetOptions:
    drop_tables: bool = True
    drop_enums: bool = True


MANAGED_TABLES: tuple[str, ...] = (
    "simulation_parameters",
    "financial_inputs",
    "scenarios",
    "projects",
    "pricing_impurity_settings",
    "pricing_metal_settings",
    "pricing_settings",
    "user_roles",
    "users",
    "roles",
)


FORBIDDEN_ENVIRONMENTS: set[str] = {"production", "staging", "prod", "stage"}


def _ensure_safe_environment() -> None:
    env = os.getenv("CALMINER_ENV", "development").lower()
    if env in FORBIDDEN_ENVIRONMENTS:
        raise RuntimeError(
            f"Refusing to reset database in environment '{env}'. "
            "Set CALMINER_ENV to 'development' to proceed."
        )


def _drop_tables(engine: Engine, tables: Iterable[str]) -> None:
    if not tables:
        return
    with engine.begin() as conn:
        for table in tables:
            logger.info("Dropping table if exists: %s", table)
            conn.execute(text(f"DROP TABLE IF EXISTS {table} CASCADE"))


def _drop_enums(engine: Engine, enum_names: Iterable[str]) -> None:
    if not enum_names:
        return
    with engine.begin() as conn:
        for enum_name in enum_names:
            logger.info("Dropping enum type if exists: %s", enum_name)
            conn.execute(text(f"DROP TYPE IF EXISTS {enum_name} CASCADE"))


def reset_database(*, options: ResetOptions | None = None, database_url: str | None = None) -> None:
    """Drop managed tables and enums for a clean slate."""
    _ensure_safe_environment()
    opts = options or ResetOptions()
    engine = _create_engine(database_url or DATABASE_URL)

    if opts.drop_tables:
        _drop_tables(engine, MANAGED_TABLES)

    if opts.drop_enums:
        _drop_enums(engine, ENUM_DEFINITIONS.keys())

    logger.info("Database reset complete")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    reset_database()
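A guarded invocation sketch, assuming a local development database; `CALMINER_ENV` must not name a forbidden environment or `reset_database` raises before touching anything:

    import logging
    import os

    os.environ.setdefault("CALMINER_ENV", "development")
    logging.basicConfig(level=logging.INFO)

    from scripts.reset_db import ResetOptions, reset_database

    # Drop the managed tables but keep the enum types in place.
    reset_database(options=ResetOptions(drop_tables=True, drop_enums=False))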
86
scripts/verify_db.py
Normal file
@@ -0,0 +1,86 @@
"""Verify DB initialization results: enums, roles, admin user, pricing_settings."""
from __future__ import annotations

import logging

from sqlalchemy import create_engine, text

from config.database import DATABASE_URL

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

ENUMS = [
    'miningoperationtype',
    'scenariostatus',
    'financialcategory',
    'costbucket',
    'distributiontype',
    'stochasticvariable',
    'resourcetype',
]

SQL_CHECK_ENUM = "SELECT typname FROM pg_type WHERE typname = ANY(:names)"
SQL_ROLES = "SELECT id, name, display_name FROM roles ORDER BY id"
SQL_ADMIN = "SELECT id, email, username, is_active, is_superuser FROM users WHERE id = 1"
SQL_USER_ROLES = "SELECT user_id, role_id, granted_by FROM user_roles WHERE user_id = 1"
SQL_PRICING = "SELECT id, slug, name, default_currency FROM pricing_settings WHERE slug = 'default'"


def run():
    engine = create_engine(DATABASE_URL, future=True)
    with engine.connect() as conn:
        print('Using DATABASE_URL:', DATABASE_URL)
        # enums
        res = conn.execute(text(SQL_CHECK_ENUM), dict(names=ENUMS)).fetchall()
        found = [r[0] for r in res]
        print('\nEnums found:')
        for name in ENUMS:
            print(f'  {name}:', 'YES' if name in found else 'NO')

        # roles
        try:
            roles = conn.execute(text(SQL_ROLES)).fetchall()
            print('\nRoles:')
            if roles:
                for r in roles:
                    print(f'  id={r.id} name={r.name} display_name={r.display_name}')
            else:
                print('  (no roles found)')
        except Exception as e:
            print('\nRoles query failed:', e)

        # admin user
        try:
            admin = conn.execute(text(SQL_ADMIN)).fetchone()
            print('\nAdmin user:')
            if admin:
                print(f'  id={admin.id} email={admin.email} username={admin.username} is_active={admin.is_active} is_superuser={admin.is_superuser}')
            else:
                print('  (admin user not found)')
        except Exception as e:
            print('\nAdmin query failed:', e)

        # user_roles
        try:
            ur = conn.execute(text(SQL_USER_ROLES)).fetchall()
            print('\nUser roles for user_id=1:')
            if ur:
                for row in ur:
                    print(f'  user_id={row.user_id} role_id={row.role_id} granted_by={row.granted_by}')
            else:
                print('  (no user_roles rows for user_id=1)')
        except Exception as e:
            print('\nUser_roles query failed:', e)

        # pricing settings
        try:
            p = conn.execute(text(SQL_PRICING)).fetchone()
            print('\nPricing settings (slug=default):')
            if p:
                print(f'  id={p.id} slug={p.slug} name={p.name} default_currency={p.default_currency}')
            else:
                print('  (default pricing settings not found)')
        except Exception as e:
            print('\nPricing query failed:', e)


if __name__ == '__main__':
    run()
@@ -1 +1,12 @@
 """Service layer utilities."""
+
+from .pricing import calculate_pricing, PricingInput, PricingMetadata, PricingResult
+from .calculations import calculate_profitability
+
+__all__ = [
+    "calculate_pricing",
+    "PricingInput",
+    "PricingMetadata",
+    "PricingResult",
+    "calculate_profitability",
+]
@@ -6,7 +6,11 @@ from typing import Callable

 from config.settings import AdminBootstrapSettings
 from models import User
-from services.repositories import ensure_default_roles
+from services.pricing import PricingMetadata
+from services.repositories import (
+    PricingSettingsSeedResult,
+    ensure_default_roles,
+)
 from services.unit_of_work import UnitOfWork

@@ -27,6 +31,12 @@ class AdminBootstrapResult:
     roles_granted: int


+@dataclass(slots=True)
+class PricingBootstrapResult:
+    seed: PricingSettingsSeedResult
+    projects_assigned: int
+
+
 def bootstrap_admin(
     *,
     settings: AdminBootstrapSettings,
@@ -127,3 +137,46 @@ def _bootstrap_admin_user(
         password_rotated=password_rotated,
         roles_granted=roles_granted,
     )
+
+
+def bootstrap_pricing_settings(
+    *,
+    metadata: PricingMetadata,
+    unit_of_work_factory: Callable[[], UnitOfWork] = UnitOfWork,
+    default_slug: str = "default",
+) -> PricingBootstrapResult:
+    """Ensure baseline pricing settings exist and projects reference them."""
+
+    with unit_of_work_factory() as uow:
+        seed_result = uow.ensure_default_pricing_settings(
+            metadata=metadata,
+            slug=default_slug,
+        )
+
+        assigned = 0
+        if uow.projects:
+            default_settings = seed_result.settings
+            projects = uow.projects.list(with_pricing=True)
+            for project in projects:
+                if project.pricing_settings is None:
+                    uow.set_project_pricing_settings(project, default_settings)
+                    assigned += 1
+
+        # Capture logging-safe primitives while the UnitOfWork (and session)
+        # are still active to avoid DetachedInstanceError when accessing ORM
+        # instances outside the session scope.
+        seed_slug = seed_result.settings.slug if seed_result and seed_result.settings else None
+        seed_created = getattr(seed_result, "created", None)
+        seed_updated_fields = getattr(seed_result, "updated_fields", None)
+        seed_impurity_upserts = getattr(seed_result, "impurity_upserts", None)
+
+    logger.info(
+        "Pricing bootstrap result: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
+        seed_slug,
+        seed_created,
+        seed_updated_fields,
+        seed_impurity_upserts,
+        assigned,
+    )
+
+    return PricingBootstrapResult(seed=seed_result, projects_assigned=assigned)
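A sketch of calling the pricing bootstrap from startup code; the `services.bootstrap` module path is an assumption (the hunk above does not show the file name), and the metadata sourcing mirrors the seeding script:

    from config.settings import Settings
    from services.bootstrap import bootstrap_pricing_settings  # module path assumed

    metadata = Settings.from_environment().pricing_metadata()
    result = bootstrap_pricing_settings(metadata=metadata)
    print(result.seed.created, result.projects_assigned)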
535
services/calculations.py
Normal file
@@ -0,0 +1,535 @@
"""Service functions for financial calculations."""

from __future__ import annotations

from collections import defaultdict
from statistics import fmean

from services.currency import CurrencyValidationError, normalise_currency
from services.exceptions import (
    CapexValidationError,
    OpexValidationError,
    ProfitabilityValidationError,
)
from services.financial import (
    CashFlow,
    ConvergenceError,
    PaybackNotReachedError,
    internal_rate_of_return,
    net_present_value,
    payback_period,
)
from services.pricing import PricingInput, PricingMetadata, PricingResult, calculate_pricing
from schemas.calculations import (
    CapexCalculationRequest,
    CapexCalculationResult,
    CapexCategoryBreakdown,
    CapexComponentInput,
    CapexTotals,
    CapexTimelineEntry,
    CashFlowEntry,
    OpexCalculationRequest,
    OpexCalculationResult,
    OpexCategoryBreakdown,
    OpexComponentInput,
    OpexMetrics,
    OpexParameters,
    OpexTotals,
    OpexTimelineEntry,
    ProfitabilityCalculationRequest,
    ProfitabilityCalculationResult,
    ProfitabilityCosts,
    ProfitabilityMetrics,
)


_FREQUENCY_MULTIPLIER = {
    "daily": 365,
    "weekly": 52,
    "monthly": 12,
    "quarterly": 4,
    "annually": 1,
}


def _build_pricing_input(
    request: ProfitabilityCalculationRequest,
) -> PricingInput:
    """Construct a pricing input instance including impurity overrides."""

    impurity_values: dict[str, float] = {}
    impurity_thresholds: dict[str, float] = {}
    impurity_penalties: dict[str, float] = {}

    for impurity in request.impurities:
        code = impurity.name.strip()
        if not code:
            continue
        code = code.upper()
        if impurity.value is not None:
            impurity_values[code] = float(impurity.value)
        if impurity.threshold is not None:
            impurity_thresholds[code] = float(impurity.threshold)
        if impurity.penalty is not None:
            impurity_penalties[code] = float(impurity.penalty)

    pricing_input = PricingInput(
        metal=request.metal,
        ore_tonnage=request.ore_tonnage,
        head_grade_pct=request.head_grade_pct,
        recovery_pct=request.recovery_pct,
        payable_pct=request.payable_pct,
        reference_price=request.reference_price,
        treatment_charge=request.treatment_charge,
        smelting_charge=request.smelting_charge,
        moisture_pct=request.moisture_pct,
        moisture_threshold_pct=request.moisture_threshold_pct,
        moisture_penalty_per_pct=request.moisture_penalty_per_pct,
        impurity_ppm=impurity_values,
        impurity_thresholds=impurity_thresholds,
        impurity_penalty_per_ppm=impurity_penalties,
        premiums=request.premiums,
        fx_rate=request.fx_rate,
        currency_code=request.currency_code,
    )

    return pricing_input


def _generate_cash_flows(
    *,
    periods: int,
    net_per_period: float,
    capex: float,
) -> tuple[list[CashFlow], list[CashFlowEntry]]:
    """Create cash flow structures for financial metric calculations."""

    cash_flow_models: list[CashFlow] = [
        CashFlow(amount=-capex, period_index=0)
    ]
    cash_flow_entries: list[CashFlowEntry] = [
        CashFlowEntry(
            period=0,
            revenue=0.0,
            opex=0.0,
            sustaining_capex=0.0,
            net=-capex,
        )
    ]

    for period in range(1, periods + 1):
        cash_flow_models.append(
            CashFlow(amount=net_per_period, period_index=period))
        cash_flow_entries.append(
            CashFlowEntry(
                period=period,
                revenue=0.0,
                opex=0.0,
                sustaining_capex=0.0,
                net=net_per_period,
            )
        )

    return cash_flow_models, cash_flow_entries


def calculate_profitability(
    request: ProfitabilityCalculationRequest,
    *,
    metadata: PricingMetadata,
) -> ProfitabilityCalculationResult:
    """Calculate profitability metrics using pricing inputs and cost data."""

    if request.periods <= 0:
        raise ProfitabilityValidationError(
            "Evaluation periods must be at least 1.", ["periods"]
        )

    pricing_input = _build_pricing_input(request)
    try:
        pricing_result: PricingResult = calculate_pricing(
            pricing_input, metadata=metadata
        )
    except CurrencyValidationError as exc:
        raise ProfitabilityValidationError(
            str(exc), ["currency_code"]) from exc

    periods = request.periods
    revenue_total = float(pricing_result.net_revenue)
    revenue_per_period = revenue_total / periods

    processing_total = float(request.opex) * periods
    sustaining_total = float(request.sustaining_capex) * periods
    capex = float(request.capex)

    net_per_period = (
        revenue_per_period
        - float(request.opex)
        - float(request.sustaining_capex)
    )

    cash_flow_models, cash_flow_entries = _generate_cash_flows(
        periods=periods,
        net_per_period=net_per_period,
        capex=capex,
    )

    # Update per-period entries to include explicit costs for presentation
    for entry in cash_flow_entries[1:]:
        entry.revenue = revenue_per_period
        entry.opex = float(request.opex)
        entry.sustaining_capex = float(request.sustaining_capex)
        entry.net = net_per_period

    discount_rate = (request.discount_rate or 0.0) / 100.0

    npv_value = net_present_value(discount_rate, cash_flow_models)

    try:
        irr_value = internal_rate_of_return(cash_flow_models) * 100.0
    except (ValueError, ZeroDivisionError, ConvergenceError):
        irr_value = None

    try:
        payback_value = payback_period(cash_flow_models)
    except (ValueError, PaybackNotReachedError):
        payback_value = None

    total_costs = processing_total + sustaining_total + capex
    total_net = revenue_total - total_costs

    if revenue_total == 0:
        margin_value = None
    else:
        margin_value = (total_net / revenue_total) * 100.0

    currency = request.currency_code or pricing_result.currency
    try:
        currency = normalise_currency(currency)
    except CurrencyValidationError as exc:
        raise ProfitabilityValidationError(
            str(exc), ["currency_code"]) from exc

    costs = ProfitabilityCosts(
        opex_total=processing_total,
        sustaining_capex_total=sustaining_total,
        capex=capex,
    )

    metrics = ProfitabilityMetrics(
        npv=npv_value,
        irr=irr_value,
        payback_period=payback_value,
        margin=margin_value,
    )

    return ProfitabilityCalculationResult(
        pricing=pricing_result,
        costs=costs,
        metrics=metrics,
        cash_flows=cash_flow_entries,
        currency=currency,
    )


def calculate_initial_capex(
    request: CapexCalculationRequest,
) -> CapexCalculationResult:
    """Aggregate capex components into totals and timelines."""

    if not request.components:
        raise CapexValidationError(
            "At least one capex component is required for calculation.",
            ["components"],
        )

    parameters = request.parameters

    base_currency = parameters.currency_code
    if base_currency:
        try:
            base_currency = normalise_currency(base_currency)
        except CurrencyValidationError as exc:
            raise CapexValidationError(
                str(exc), ["parameters.currency_code"]
            ) from exc

    overall = 0.0
    category_totals: dict[str, float] = defaultdict(float)
    timeline_totals: dict[int, float] = defaultdict(float)
    normalised_components: list[CapexComponentInput] = []

    for index, component in enumerate(request.components):
        amount = float(component.amount)
        overall += amount

        category_totals[component.category] += amount

        spend_year = component.spend_year or 0
        timeline_totals[spend_year] += amount

        component_currency = component.currency
        if component_currency:
            try:
                component_currency = normalise_currency(component_currency)
            except CurrencyValidationError as exc:
                raise CapexValidationError(
                    str(exc), [f"components[{index}].currency"]
                ) from exc

        if base_currency is None and component_currency:
            base_currency = component_currency
        elif (
            base_currency is not None
            and component_currency is not None
            and component_currency != base_currency
        ):
            raise CapexValidationError(
                (
                    "Component currency does not match the global currency. "
                    f"Expected {base_currency}, got {component_currency}."
                ),
                [f"components[{index}].currency"],
            )

        normalised_components.append(
            CapexComponentInput(
                id=component.id,
                name=component.name,
                category=component.category,
                amount=amount,
                currency=component_currency,
                spend_year=component.spend_year,
                notes=component.notes,
            )
        )

    contingency_pct = float(parameters.contingency_pct or 0.0)
    contingency_amount = overall * (contingency_pct / 100.0)
    grand_total = overall + contingency_amount

    category_breakdowns: list[CapexCategoryBreakdown] = []
    if category_totals:
        for category, total in sorted(category_totals.items()):
            share = (total / overall * 100.0) if overall else None
            category_breakdowns.append(
                CapexCategoryBreakdown(
                    category=category,
                    amount=total,
                    share=share,
                )
            )

    cumulative = 0.0
    timeline_entries: list[CapexTimelineEntry] = []
    for year, spend in sorted(timeline_totals.items()):
        cumulative += spend
        timeline_entries.append(
            CapexTimelineEntry(year=year, spend=spend, cumulative=cumulative)
        )

    try:
        currency = normalise_currency(base_currency) if base_currency else None
    except CurrencyValidationError as exc:
        raise CapexValidationError(
            str(exc), ["parameters.currency_code"]
        ) from exc

    totals = CapexTotals(
        overall=overall,
        contingency_pct=contingency_pct,
        contingency_amount=contingency_amount,
        with_contingency=grand_total,
        by_category=category_breakdowns,
    )

    return CapexCalculationResult(
        totals=totals,
        timeline=timeline_entries,
        components=normalised_components,
        parameters=parameters,
        options=request.options,
        currency=currency,
    )


def calculate_opex(
    request: OpexCalculationRequest,
) -> OpexCalculationResult:
    """Aggregate opex components into annual totals and timeline."""

    if not request.components:
        raise OpexValidationError(
            "At least one opex component is required for calculation.",
            ["components"],
        )

    parameters: OpexParameters = request.parameters
    base_currency = parameters.currency_code
    if base_currency:
        try:
            base_currency = normalise_currency(base_currency)
        except CurrencyValidationError as exc:
            raise OpexValidationError(
                str(exc), ["parameters.currency_code"]
            ) from exc

    evaluation_horizon = parameters.evaluation_horizon_years or 1
    if evaluation_horizon <= 0:
        raise OpexValidationError(
            "Evaluation horizon must be at least 1 year.",
            ["parameters.evaluation_horizon_years"],
        )

    escalation_pct = float(parameters.escalation_pct or 0.0)
    apply_escalation = bool(parameters.apply_escalation)

    category_totals: dict[str, float] = defaultdict(float)
    timeline_totals: dict[int, float] = defaultdict(float)
    timeline_escalated: dict[int, float] = defaultdict(float)
    normalised_components: list[OpexComponentInput] = []

    max_period_end = evaluation_horizon

    for index, component in enumerate(request.components):
        frequency = component.frequency.lower()
        multiplier = _FREQUENCY_MULTIPLIER.get(frequency)
        if multiplier is None:
            raise OpexValidationError(
                f"Unsupported frequency '{component.frequency}'.",
                [f"components[{index}].frequency"],
            )

        unit_cost = float(component.unit_cost)
        quantity = float(component.quantity)
        annual_cost = unit_cost * quantity * multiplier

        period_start = component.period_start or 1
        period_end = component.period_end or evaluation_horizon
        if period_end < period_start:
            raise OpexValidationError(
                (
                    "Component period_end must be greater than or equal to "
                    "period_start."
                ),
                [f"components[{index}].period_end"],
            )

        max_period_end = max(max_period_end, period_end)

        component_currency = component.currency
        if component_currency:
            try:
                component_currency = normalise_currency(component_currency)
            except CurrencyValidationError as exc:
                raise OpexValidationError(
                    str(exc), [f"components[{index}].currency"]
                ) from exc

        if base_currency is None and component_currency:
            base_currency = component_currency
        elif (
            base_currency is not None
            and component_currency is not None
            and component_currency != base_currency
        ):
            raise OpexValidationError(
                (
                    "Component currency does not match the global currency. "
                    f"Expected {base_currency}, got {component_currency}."
                ),
                [f"components[{index}].currency"],
            )

        category_totals[component.category] += annual_cost

        for period in range(period_start, period_end + 1):
            timeline_totals[period] += annual_cost

        normalised_components.append(
            OpexComponentInput(
                id=component.id,
                name=component.name,
                category=component.category,
                unit_cost=unit_cost,
                quantity=quantity,
                frequency=frequency,
                currency=component_currency,
                period_start=period_start,
                period_end=period_end,
                notes=component.notes,
            )
        )

    evaluation_horizon = max(evaluation_horizon, max_period_end)

    try:
        currency = normalise_currency(base_currency) if base_currency else None
    except CurrencyValidationError as exc:
        raise OpexValidationError(
            str(exc), ["parameters.currency_code"]
        ) from exc

    timeline_entries: list[OpexTimelineEntry] = []
    escalated_values: list[float] = []
    overall_annual = timeline_totals.get(1, 0.0)
    escalated_total = 0.0

    for period in range(1, evaluation_horizon + 1):
        base_cost = timeline_totals.get(period, 0.0)
        if apply_escalation:
            factor = (1 + escalation_pct / 100.0) ** (period - 1)
        else:
            factor = 1.0
        escalated_cost = base_cost * factor
        timeline_escalated[period] = escalated_cost
        escalated_total += escalated_cost
        timeline_entries.append(
            OpexTimelineEntry(
                period=period,
                base_cost=base_cost,
                escalated_cost=escalated_cost if apply_escalation else None,
            )
        )
        escalated_values.append(escalated_cost)

    category_breakdowns: list[OpexCategoryBreakdown] = []
    total_base = sum(category_totals.values())
    for category, total in sorted(category_totals.items()):
        share = (total / total_base * 100.0) if total_base else None
        category_breakdowns.append(
            OpexCategoryBreakdown(
                category=category,
                annual_cost=total,
                share=share,
            )
        )

    metrics = OpexMetrics(
        annual_average=fmean(escalated_values) if escalated_values else None,
        cost_per_ton=None,
    )

    totals = OpexTotals(
        overall_annual=overall_annual,
        escalated_total=escalated_total if apply_escalation else None,
        escalation_pct=escalation_pct if apply_escalation else None,
        by_category=category_breakdowns,
    )

    return OpexCalculationResult(
        totals=totals,
        timeline=timeline_entries,
        metrics=metrics,
        components=normalised_components,
        parameters=parameters,
        options=request.options,
        currency=currency,
    )


__all__ = [
    "calculate_profitability",
    "calculate_initial_capex",
    "calculate_opex",
]
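A worked opex example connecting `_FREQUENCY_MULTIPLIER` to the totals (numbers invented for illustration): a monthly component contributes unit_cost × quantity × 12 per year, escalated by (1 + escalation_pct / 100) ** (period - 1) in later periods:

    from schemas.calculations import (
        OpexCalculationRequest,
        OpexComponentInput,
        OpexParameters,
    )
    from services.calculations import calculate_opex

    request = OpexCalculationRequest(
        components=[
            OpexComponentInput(
                name="Grid power",
                category="energy",
                unit_cost=120.0,      # per billing cycle
                quantity=1.0,
                frequency="monthly",  # x12 -> 1440.0 per year
            ),
        ],
        parameters=OpexParameters(evaluation_horizon_years=3, escalation_pct=5.0),
    )

    result = calculate_opex(request)
    print(result.totals.overall_annual)                         # 1440.0
    print([entry.escalated_cost for entry in result.timeline])  # ~[1440.0, 1512.0, 1587.6]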
43
services/currency.py
Normal file
@@ -0,0 +1,43 @@
"""Utilities for currency normalization within pricing and financial workflows."""

from __future__ import annotations

import re
from dataclasses import dataclass

VALID_CURRENCY_PATTERN = re.compile(r"^[A-Z]{3}$")


@dataclass(frozen=True)
class CurrencyValidationError(ValueError):
    """Raised when a currency code fails validation."""

    code: str

    def __str__(self) -> str:  # pragma: no cover - dataclass repr not required in tests
        return f"Invalid currency code: {self.code!r}"


def normalise_currency(code: str | None) -> str | None:
    """Normalise currency codes to uppercase ISO-4217 values."""

    if code is None:
        return None
    candidate = code.strip().upper()
    if not VALID_CURRENCY_PATTERN.match(candidate):
        raise CurrencyValidationError(candidate)
    return candidate


def require_currency(code: str | None, default: str | None = None) -> str:
    """Return normalised currency code, falling back to default when missing."""

    normalised = normalise_currency(code)
    if normalised is not None:
        return normalised
    if default is None:
        raise CurrencyValidationError("<missing currency>")
    fallback = normalise_currency(default)
    if fallback is None:
        raise CurrencyValidationError("<invalid default currency>")
    return fallback
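Usage sketch for the helpers above:

    from services.currency import (
        CurrencyValidationError,
        normalise_currency,
        require_currency,
    )

    assert normalise_currency("  usd ") == "USD"
    assert require_currency(None, default="eur") == "EUR"

    try:
        normalise_currency("us")  # too short for ISO-4217
    except CurrencyValidationError as exc:
        print(exc)  # Invalid currency code: 'US'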
@@ -26,3 +26,36 @@ class ScenarioValidationError(Exception):

     def __str__(self) -> str:  # pragma: no cover - mirrors message for logging
         return self.message
+
+
+@dataclass(eq=False)
+class ProfitabilityValidationError(Exception):
+    """Raised when profitability calculation inputs fail domain validation."""
+
+    message: str
+    field_errors: Sequence[str] | None = None
+
+    def __str__(self) -> str:  # pragma: no cover - mirrors message for logging
+        return self.message
+
+
+@dataclass(eq=False)
+class CapexValidationError(Exception):
+    """Raised when capex calculation inputs fail domain validation."""
+
+    message: str
+    field_errors: Sequence[str] | None = None
+
+    def __str__(self) -> str:  # pragma: no cover - mirrors message for logging
+        return self.message
+
+
+@dataclass(eq=False)
+class OpexValidationError(Exception):
+    """Raised when opex calculation inputs fail domain validation."""
+
+    message: str
+    field_errors: Sequence[str] | None = None
+
+    def __str__(self) -> str:  # pragma: no cover - mirrors message for logging
+        return self.message
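Each validation error pairs a human-readable message with machine-readable field paths, which route handlers can map back onto form fields; a handling sketch:

    from services.exceptions import OpexValidationError

    try:
        raise OpexValidationError(
            "Unsupported frequency 'hourly'.", ["components[0].frequency"]
        )
    except OpexValidationError as exc:
        print(str(exc), list(exc.field_errors or []))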
121
services/export_query.py
Normal file
@@ -0,0 +1,121 @@
from __future__ import annotations

from dataclasses import dataclass
from datetime import date, datetime
from typing import Iterable

from models import MiningOperationType, ResourceType, ScenarioStatus
from services.currency import CurrencyValidationError, normalise_currency


def _normalise_lower_strings(values: Iterable[str]) -> tuple[str, ...]:
    unique: set[str] = set()
    for value in values:
        if not value:
            continue
        trimmed = value.strip().lower()
        if not trimmed:
            continue
        unique.add(trimmed)
    return tuple(sorted(unique))


def _normalise_upper_strings(values: Iterable[str | None]) -> tuple[str, ...]:
    unique: set[str] = set()
    for value in values:
        if value is None:
            continue
        candidate = value if isinstance(value, str) else str(value)
        candidate = candidate.strip()
        if not candidate:
            continue
        try:
            normalised = normalise_currency(candidate)
        except CurrencyValidationError as exc:
            raise ValueError(str(exc)) from exc
        if normalised is None:
            continue
        unique.add(normalised)
    return tuple(sorted(unique))


@dataclass(slots=True, frozen=True)
class ProjectExportFilters:
    """Filter parameters for project export queries."""

    ids: tuple[int, ...] = ()
    names: tuple[str, ...] = ()
    name_contains: str | None = None
    locations: tuple[str, ...] = ()
    operation_types: tuple[MiningOperationType, ...] = ()
    created_from: datetime | None = None
    created_to: datetime | None = None
    updated_from: datetime | None = None
    updated_to: datetime | None = None

    def normalised_ids(self) -> tuple[int, ...]:
        unique = {identifier for identifier in self.ids if identifier > 0}
        return tuple(sorted(unique))

    def normalised_names(self) -> tuple[str, ...]:
        return _normalise_lower_strings(self.names)

    def normalised_locations(self) -> tuple[str, ...]:
        return _normalise_lower_strings(self.locations)

    def name_search_pattern(self) -> str | None:
        if not self.name_contains:
            return None
        pattern = self.name_contains.strip()
        if not pattern:
            return None
        return f"%{pattern}%"


@dataclass(slots=True, frozen=True)
class ScenarioExportFilters:
    """Filter parameters for scenario export queries."""

    ids: tuple[int, ...] = ()
    project_ids: tuple[int, ...] = ()
    project_names: tuple[str, ...] = ()
    name_contains: str | None = None
    statuses: tuple[ScenarioStatus, ...] = ()
    start_date_from: date | None = None
    start_date_to: date | None = None
    end_date_from: date | None = None
    end_date_to: date | None = None
    created_from: datetime | None = None
    created_to: datetime | None = None
    updated_from: datetime | None = None
    updated_to: datetime | None = None
    currencies: tuple[str, ...] = ()
    primary_resources: tuple[ResourceType, ...] = ()

    def normalised_ids(self) -> tuple[int, ...]:
        unique = {identifier for identifier in self.ids if identifier > 0}
        return tuple(sorted(unique))

    def normalised_project_ids(self) -> tuple[int, ...]:
        unique = {identifier for identifier in self.project_ids if identifier > 0}
        return tuple(sorted(unique))

    def normalised_project_names(self) -> tuple[str, ...]:
        return _normalise_lower_strings(self.project_names)

    def name_search_pattern(self) -> str | None:
        if not self.name_contains:
            return None
        pattern = self.name_contains.strip()
        if not pattern:
            return None
        return f"%{pattern}%"

    def normalised_currencies(self) -> tuple[str, ...]:
        return _normalise_upper_strings(self.currencies)


__all__ = (
    "ProjectExportFilters",
    "ScenarioExportFilters",
)
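A quick sketch of how these filter objects behave, with invented values; it assumes the application package (services.export_query) is importable:

from services.export_query import ScenarioExportFilters

filters = ScenarioExportFilters(
    ids=(3, 1, 1, -7),  # duplicates and non-positive ids are dropped
    project_names=("  Mine A ", "mine a", ""),
    name_contains="  pit ",
    currencies=("usd", " eur"),
)

print(filters.normalised_ids())            # (1, 3)
print(filters.normalised_project_names())  # ('mine a',)
print(filters.name_search_pattern())       # '%pit%'
# ('EUR', 'USD'), assuming normalise_currency upper-cases ISO codes:
print(filters.normalised_currencies())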
351
services/export_serializers.py
Normal file
@@ -0,0 +1,351 @@
from __future__ import annotations

import csv
from dataclasses import dataclass, field
from datetime import date, datetime, timezone
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
from enum import Enum
from io import BytesIO, StringIO
from typing import Any, Callable, Iterable, Iterator, Mapping, Sequence

from openpyxl import Workbook

CSVValueFormatter = Callable[[Any], str]
Accessor = Callable[[Any], Any]

__all__ = [
    "CSVExportColumn",
    "CSVExporter",
    "default_project_columns",
    "default_scenario_columns",
    "stream_projects_to_csv",
    "stream_scenarios_to_csv",
    "ExcelExporter",
    "export_projects_to_excel",
    "export_scenarios_to_excel",
    "default_formatter",
    "format_datetime_utc",
    "format_date_iso",
    "format_decimal",
]


@dataclass(slots=True)
class CSVExportColumn:
    """Declarative description of a CSV export column."""

    header: str
    accessor: Accessor | str
    formatter: CSVValueFormatter | None = None
    required: bool = False

    _accessor: Accessor = field(init=False, repr=False)

    def __post_init__(self) -> None:
        object.__setattr__(self, "_accessor", _coerce_accessor(self.accessor))

    def value_for(self, entity: Any) -> Any:
        accessor = object.__getattribute__(self, "_accessor")
        try:
            return accessor(entity)
        except Exception:  # pragma: no cover - defensive safeguard
            return None


class CSVExporter:
    """Stream Python objects as UTF-8 encoded CSV rows."""

    def __init__(
        self,
        columns: Sequence[CSVExportColumn],
        *,
        include_header: bool = True,
        line_terminator: str = "\n",
    ) -> None:
        if not columns:
            raise ValueError("At least one column is required for CSV export.")
        self._columns: tuple[CSVExportColumn, ...] = tuple(columns)
        self._include_header = include_header
        self._line_terminator = line_terminator

    @property
    def columns(self) -> tuple[CSVExportColumn, ...]:
        return self._columns

    def headers(self) -> tuple[str, ...]:
        return tuple(column.header for column in self._columns)

    def iter_bytes(self, records: Iterable[Any]) -> Iterator[bytes]:
        buffer = StringIO()
        writer = csv.writer(buffer, lineterminator=self._line_terminator)

        if self._include_header:
            writer.writerow(self.headers())
            yield _drain_buffer(buffer)

        for record in records:
            writer.writerow(self._format_row(record))
            yield _drain_buffer(buffer)

    def _format_row(self, record: Any) -> list[str]:
        formatted: list[str] = []
        for column in self._columns:
            raw_value = column.value_for(record)
            formatter = column.formatter or default_formatter
            formatted.append(formatter(raw_value))
        return formatted
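A minimal, self-contained sketch of the column/exporter pairing, using a throwaway record type rather than the real ORM models:

from dataclasses import dataclass

from services.export_serializers import CSVExportColumn, CSVExporter


@dataclass
class Row:  # stand-in for a Project ORM instance
    name: str
    location: str | None


columns = (
    CSVExportColumn("name", "name", required=True),
    # Accessors may also be callables, e.g. for derived values:
    CSVExportColumn("label", lambda r: f"{r.name} ({r.location or 'n/a'})"),
)

body = b"".join(CSVExporter(columns).iter_bytes([Row("Pit A", None)]))
print(body.decode("utf-8"))
# name,label
# Pit A,Pit A (n/a)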
def default_project_columns(
    *,
    include_description: bool = True,
    include_timestamps: bool = True,
) -> tuple[CSVExportColumn, ...]:
    columns: list[CSVExportColumn] = [
        CSVExportColumn("name", "name", required=True),
        CSVExportColumn("location", "location"),
        CSVExportColumn("operation_type", "operation_type"),
    ]
    if include_description:
        columns.append(CSVExportColumn("description", "description"))
    if include_timestamps:
        columns.extend(
            (
                CSVExportColumn("created_at", "created_at",
                                formatter=format_datetime_utc),
                CSVExportColumn("updated_at", "updated_at",
                                formatter=format_datetime_utc),
            )
        )
    return tuple(columns)


def default_scenario_columns(
    *,
    include_description: bool = True,
    include_timestamps: bool = True,
) -> tuple[CSVExportColumn, ...]:
    columns: list[CSVExportColumn] = [
        CSVExportColumn(
            "project_name",
            lambda scenario: getattr(
                getattr(scenario, "project", None), "name", None),
            required=True,
        ),
        CSVExportColumn("name", "name", required=True),
        CSVExportColumn("status", "status"),
        CSVExportColumn("start_date", "start_date", formatter=format_date_iso),
        CSVExportColumn("end_date", "end_date", formatter=format_date_iso),
        CSVExportColumn("discount_rate", "discount_rate",
                        formatter=format_decimal),
        CSVExportColumn("currency", "currency"),
        CSVExportColumn("primary_resource", "primary_resource"),
    ]
    if include_description:
        columns.append(CSVExportColumn("description", "description"))
    if include_timestamps:
        columns.extend(
            (
                CSVExportColumn("created_at", "created_at",
                                formatter=format_datetime_utc),
                CSVExportColumn("updated_at", "updated_at",
                                formatter=format_datetime_utc),
            )
        )
    return tuple(columns)


def stream_projects_to_csv(
    projects: Iterable[Any],
    *,
    columns: Sequence[CSVExportColumn] | None = None,
) -> Iterator[bytes]:
    resolved_columns = tuple(columns or default_project_columns())
    exporter = CSVExporter(resolved_columns)
    yield from exporter.iter_bytes(projects)


def stream_scenarios_to_csv(
    scenarios: Iterable[Any],
    *,
    columns: Sequence[CSVExportColumn] | None = None,
) -> Iterator[bytes]:
    resolved_columns = tuple(columns or default_scenario_columns())
    exporter = CSVExporter(resolved_columns)
    yield from exporter.iter_bytes(scenarios)


def default_formatter(value: Any) -> str:
    if value is None:
        return ""
    if isinstance(value, Enum):
        return str(value.value)
    if isinstance(value, Decimal):
        return format_decimal(value)
    if isinstance(value, datetime):
        return format_datetime_utc(value)
    if isinstance(value, date):
        return format_date_iso(value)
    if isinstance(value, bool):
        return "true" if value else "false"
    return str(value)


def format_datetime_utc(value: Any) -> str:
    if not isinstance(value, datetime):
        return ""
    if value.tzinfo is None:
        value = value.replace(tzinfo=timezone.utc)
    value = value.astimezone(timezone.utc)
    return value.isoformat().replace("+00:00", "Z")


def format_date_iso(value: Any) -> str:
    if not isinstance(value, date):
        return ""
    return value.isoformat()


def format_decimal(value: Any) -> str:
    if value is None:
        return ""
    if isinstance(value, Decimal):
        try:
            quantised = value.quantize(Decimal("0.01"), rounding=ROUND_HALF_UP)
        except InvalidOperation:  # pragma: no cover - unexpected precision issues
            quantised = value
        return format(quantised, "f")
    if isinstance(value, (int, float)):
        return f"{value:.2f}"
    return default_formatter(value)
class ExcelExporter:
    """Produce Excel workbooks via write-only streaming."""

    def __init__(
        self,
        columns: Sequence[CSVExportColumn],
        *,
        sheet_name: str = "Export",
        workbook_title: str | None = None,
        include_header: bool = True,
        metadata: Mapping[str, Any] | None = None,
        metadata_sheet_name: str = "Metadata",
    ) -> None:
        if not columns:
            raise ValueError(
                "At least one column is required for Excel export.")
        self._columns: tuple[CSVExportColumn, ...] = tuple(columns)
        self._sheet_name = sheet_name or "Export"
        self._include_header = include_header
        self._metadata = dict(metadata) if metadata else None
        self._metadata_sheet_name = metadata_sheet_name or "Metadata"
        self._workbook = Workbook(write_only=True)
        if workbook_title:
            self._workbook.properties.title = workbook_title

    def export(self, records: Iterable[Any]) -> bytes:
        sheet = self._workbook.create_sheet(title=self._sheet_name)
        if self._include_header:
            sheet.append([column.header for column in self._columns])

        for record in records:
            sheet.append(self._format_row(record))

        self._append_metadata_sheet()
        return self._finalize()

    def _format_row(self, record: Any) -> list[Any]:
        row: list[Any] = []
        for column in self._columns:
            raw_value = column.value_for(record)
            formatter = column.formatter or default_formatter
            row.append(formatter(raw_value))
        return row

    def _append_metadata_sheet(self) -> None:
        if not self._metadata:
            return

        sheet_name = self._metadata_sheet_name
        existing = set(self._workbook.sheetnames)
        if sheet_name in existing:
            index = 1
            while True:
                candidate = f"{sheet_name}_{index}"
                if candidate not in existing:
                    sheet_name = candidate
                    break
                index += 1

        meta_ws = self._workbook.create_sheet(title=sheet_name)
        meta_ws.append(["Key", "Value"])
        for key, value in self._metadata.items():
            meta_ws.append([
                str(key),
                "" if value is None else str(value),
            ])

    def _finalize(self) -> bytes:
        buffer = BytesIO()
        self._workbook.save(buffer)
        buffer.seek(0)
        return buffer.getvalue()


def export_projects_to_excel(
    projects: Iterable[Any],
    *,
    columns: Sequence[CSVExportColumn] | None = None,
    sheet_name: str = "Projects",
    workbook_title: str | None = None,
    metadata: Mapping[str, Any] | None = None,
) -> bytes:
    exporter = ExcelExporter(
        columns or default_project_columns(),
        sheet_name=sheet_name,
        workbook_title=workbook_title,
        metadata=metadata,
    )
    return exporter.export(projects)


def export_scenarios_to_excel(
    scenarios: Iterable[Any],
    *,
    columns: Sequence[CSVExportColumn] | None = None,
    sheet_name: str = "Scenarios",
    workbook_title: str | None = None,
    metadata: Mapping[str, Any] | None = None,
) -> bytes:
    exporter = ExcelExporter(
        columns or default_scenario_columns(),
        sheet_name=sheet_name,
        workbook_title=workbook_title,
        metadata=metadata,
    )
    return exporter.export(scenarios)


def _coerce_accessor(accessor: Accessor | str) -> Accessor:
    if callable(accessor):
        return accessor

    path = [segment for segment in accessor.split(".") if segment]

    def _resolve(entity: Any) -> Any:
        current: Any = entity
        for segment in path:
            if current is None:
                return None
            current = getattr(current, segment, None)
        return current

    return _resolve


def _drain_buffer(buffer: StringIO) -> bytes:
    data = buffer.getvalue()
    buffer.seek(0)
    buffer.truncate(0)
    return data.encode("utf-8")
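A short sketch of the Excel path using a stand-in record type; the metadata mapping is invented for illustration and lands on a separate "Metadata" sheet:

from dataclasses import dataclass
from datetime import datetime, timezone

from services.export_serializers import export_projects_to_excel


@dataclass
class Project:  # stand-in with the attributes the default columns expect
    name: str
    location: str | None = None
    operation_type: str | None = None
    description: str | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None


payload = export_projects_to_excel(
    [Project("Pit A", created_at=datetime.now(timezone.utc))],
    workbook_title="CalMiner export",
    metadata={"generated_by": "demo"},
)
with open("projects.xlsx", "wb") as fh:
    fh.write(payload)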
252
services/financial.py
Normal file
@@ -0,0 +1,252 @@
"""Financial calculation helpers for project evaluation metrics."""

from __future__ import annotations

from dataclasses import dataclass
from datetime import date, datetime
from math import isclose, isfinite
from typing import Iterable, List, Sequence, Tuple

Number = float


@dataclass(frozen=True, slots=True)
class CashFlow:
    """Represents a dated cash flow in scenario currency."""

    amount: Number
    period_index: int | None = None
    date: date | datetime | None = None


class ConvergenceError(RuntimeError):
    """Raised when an iterative solver fails to converge."""


class PaybackNotReachedError(RuntimeError):
    """Raised when cumulative cash flows never reach a non-negative total."""


def _coerce_date(value: date | datetime) -> date:
    if isinstance(value, datetime):
        return value.date()
    return value


def normalize_cash_flows(
    cash_flows: Iterable[CashFlow],
    *,
    compounds_per_year: int = 1,
) -> List[Tuple[Number, float]]:
    """Normalise cash flows to ``(amount, periods)`` tuples.

    When explicit ``period_index`` values are provided they take precedence. If
    only dates are supplied, the first dated cash flow anchors the timeline and
    subsequent cash flows convert their day offsets into fractional periods
    based on ``compounds_per_year``. When neither a period index nor a date is
    present, cash flows are treated as sequential periods in input order.
    """

    flows: Sequence[CashFlow] = list(cash_flows)
    if not flows:
        return []

    if compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be a positive integer")

    base_date: date | None = None
    for flow in flows:
        if flow.date is not None:
            base_date = _coerce_date(flow.date)
            break

    normalised: List[Tuple[Number, float]] = []
    for idx, flow in enumerate(flows):
        amount = float(flow.amount)
        if flow.period_index is not None:
            periods = float(flow.period_index)
        elif flow.date is not None and base_date is not None:
            current_date = _coerce_date(flow.date)
            delta_days = (current_date - base_date).days
            period_length_days = 365.0 / float(compounds_per_year)
            periods = delta_days / period_length_days
        else:
            periods = float(idx)
        normalised.append((amount, periods))

    return normalised


def discount_factor(rate: Number, periods: float, *, compounds_per_year: int = 1) -> float:
    """Return the factor used to discount a value ``periods`` steps in the future."""

    if compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be a positive integer")

    periodic_rate = rate / float(compounds_per_year)
    return (1.0 + periodic_rate) ** (-periods)


def net_present_value(
    rate: Number,
    cash_flows: Iterable[CashFlow],
    *,
    residual_value: Number | None = None,
    residual_periods: float | None = None,
    compounds_per_year: int = 1,
) -> float:
    """Calculate Net Present Value for ``cash_flows``.

    ``rate`` is a decimal (``0.1`` for 10%). Cash flows are discounted using the
    given compounding frequency. When ``residual_value`` is provided it is
    discounted at ``residual_periods`` periods; by default the value occurs one
    period after the final cash flow.
    """

    normalised = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )

    if not normalised and residual_value is None:
        return 0.0

    total = 0.0
    for amount, periods in normalised:
        factor = discount_factor(
            rate, periods, compounds_per_year=compounds_per_year)
        total += amount * factor

    if residual_value is not None:
        if residual_periods is None:
            last_period = normalised[-1][1] if normalised else 0.0
            residual_periods = last_period + 1.0
        factor = discount_factor(
            rate, residual_periods, compounds_per_year=compounds_per_year)
        total += float(residual_value) * factor

    return total
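A worked check of the discounting math with invented figures. For flows of -1000 at period 0 and +600 at periods 1 and 2, at a 10% rate, NPV = -1000 + 600/1.1 + 600/1.1^2, roughly 41.32:

from services.financial import CashFlow, net_present_value

flows = [CashFlow(-1000.0, 0), CashFlow(600.0, 1), CashFlow(600.0, 2)]
print(round(net_present_value(0.10, flows), 2))  # 41.32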
def internal_rate_of_return(
    cash_flows: Iterable[CashFlow],
    *,
    guess: Number = 0.1,
    max_iterations: int = 100,
    tolerance: float = 1e-6,
    compounds_per_year: int = 1,
) -> float:
    """Return the internal rate of return for ``cash_flows``.

    Uses Newton-Raphson iteration with a bracketed fallback when the derivative
    becomes unstable. Raises :class:`ConvergenceError` if no root is found.
    """

    flows = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )
    if not flows:
        raise ValueError("cash_flows must contain at least one item")

    amounts = [amount for amount, _ in flows]
    if not any(amount < 0 for amount in amounts) or not any(amount > 0 for amount in amounts):
        raise ValueError(
            "cash_flows must include both negative and positive values")

    def _npv_with_flows(rate: float) -> float:
        periodic_rate = rate / float(compounds_per_year)
        if periodic_rate <= -1.0:
            return float("inf")
        total = 0.0
        for amount, periods in flows:
            factor = (1.0 + periodic_rate) ** (-periods)
            total += amount * factor
        return total

    def _derivative(rate: float) -> float:
        periodic_rate = rate / float(compounds_per_year)
        if periodic_rate <= -1.0:
            return float("inf")
        derivative = 0.0
        for amount, periods in flows:
            factor = (1.0 + periodic_rate) ** (-periods - 1.0)
            derivative += -amount * periods * factor / float(compounds_per_year)
        return derivative

    rate = float(guess)
    for _ in range(max_iterations):
        value = _npv_with_flows(rate)
        if isclose(value, 0.0, abs_tol=tolerance):
            return rate
        derivative = _derivative(rate)
        if derivative == 0.0 or not isfinite(derivative):
            break
        next_rate = rate - value / derivative
        if abs(next_rate - rate) < tolerance:
            return next_rate
        rate = next_rate

    # Fallback to bracketed bisection between sensible bounds.
    lower_bound = -0.99 * float(compounds_per_year)
    upper_bound = 10.0
    lower_value = _npv_with_flows(lower_bound)
    upper_value = _npv_with_flows(upper_bound)

    attempts = 0
    while lower_value * upper_value > 0 and attempts < 12:
        upper_bound *= 2.0
        upper_value = _npv_with_flows(upper_bound)
        attempts += 1

    if lower_value * upper_value > 0:
        raise ConvergenceError(
            "IRR could not be bracketed within default bounds")

    for _ in range(max_iterations * 2):
        midpoint = (lower_bound + upper_bound) / 2.0
        mid_value = _npv_with_flows(midpoint)
        if isclose(mid_value, 0.0, abs_tol=tolerance):
            return midpoint
        if lower_value * mid_value < 0:
            upper_bound = midpoint
            upper_value = mid_value
        else:
            lower_bound = midpoint
            lower_value = mid_value
    raise ConvergenceError("IRR solver failed to converge")


def payback_period(
    cash_flows: Iterable[CashFlow],
    *,
    allow_fractional: bool = True,
    compounds_per_year: int = 1,
) -> float:
    """Return the period index where cumulative cash flow becomes non-negative."""

    flows = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )
    if not flows:
        raise ValueError("cash_flows must contain at least one item")

    flows = sorted(flows, key=lambda item: item[1])
    cumulative = 0.0
    previous_period = flows[0][1]

    for index, (amount, periods) in enumerate(flows):
        next_cumulative = cumulative + amount
        if next_cumulative >= 0.0:
            if not allow_fractional or isclose(amount, 0.0):
                return periods
            prev_period = previous_period if index > 0 else periods
            fraction = -cumulative / amount
            return prev_period + fraction * (periods - prev_period)
        cumulative = next_cumulative
        previous_period = periods

    raise PaybackNotReachedError(
        "Cumulative cash flow never becomes non-negative")
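And a quick sanity check of the two solvers on the same invented flows. IRR solves NPV(r) = 0, which for these flows gives r of roughly 13.07%; the cumulative total crosses zero two thirds of the way through the second period, so the fractional payback is about 1.67:

from services.financial import CashFlow, internal_rate_of_return, payback_period

flows = [CashFlow(-1000.0, 0), CashFlow(600.0, 1), CashFlow(600.0, 2)]
print(round(internal_rate_of_return(flows), 4))  # 0.1307
print(round(payback_period(flows), 2))           # 1.67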
@@ -1,9 +1,11 @@
from __future__ import annotations

import logging
import time
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, BinaryIO, Callable, Generic, Iterable, Mapping, Optional, TypeVar, cast
from uuid import uuid4
from types import MappingProxyType

@@ -14,6 +16,10 @@ from pydantic import BaseModel, ValidationError
from models import Project, Scenario
from schemas.imports import ProjectImportRow, ScenarioImportRow
from services.unit_of_work import UnitOfWork
from models.import_export_log import ImportExportLog
from monitoring.metrics import observe_import

logger = logging.getLogger(__name__)

TImportRow = TypeVar("TImportRow", bound=BaseModel)

@@ -164,7 +170,34 @@ class ImportIngestionService:
        stream: BinaryIO,
        filename: str,
    ) -> ImportPreview[ProjectImportRow]:
        start = time.perf_counter()
        result = load_project_imports(stream, filename)
        status = "success" if not result.errors else "partial"
        self._record_audit_log(
            action="preview",
            dataset="projects",
            status=status,
            filename=filename,
            row_count=len(result.rows),
            detail=f"accepted={len(result.rows)} parser_errors={len(result.errors)}",
        )
        observe_import(
            action="preview",
            dataset="projects",
            status=status,
            seconds=time.perf_counter() - start,
        )
        logger.info(
            "import.preview",
            extra={
                "event": "import.preview",
                "dataset": "projects",
                "status": status,
                "filename": filename,
                "row_count": len(result.rows),
                "error_count": len(result.errors),
            },
        )
        parser_errors = result.errors

        preview_rows: list[ImportPreviewRow[ProjectImportRow]] = []
@@ -258,7 +291,34 @@ class ImportIngestionService:
        stream: BinaryIO,
        filename: str,
    ) -> ImportPreview[ScenarioImportRow]:
        start = time.perf_counter()
        result = load_scenario_imports(stream, filename)
        status = "success" if not result.errors else "partial"
        self._record_audit_log(
            action="preview",
            dataset="scenarios",
            status=status,
            filename=filename,
            row_count=len(result.rows),
            detail=f"accepted={len(result.rows)} parser_errors={len(result.errors)}",
        )
        observe_import(
            action="preview",
            dataset="scenarios",
            status=status,
            seconds=time.perf_counter() - start,
        )
        logger.info(
            "import.preview",
            extra={
                "event": "import.preview",
                "dataset": "scenarios",
                "status": status,
                "filename": filename,
                "row_count": len(result.rows),
                "error_count": len(result.errors),
            },
        )
        parser_errors = result.errors

        preview_rows: list[ImportPreviewRow[ScenarioImportRow]] = []
@@ -423,6 +483,8 @@ class ImportIngestionService:
        staged_view = _build_staged_view(staged)
        created = updated = 0

        start = time.perf_counter()
        try:
            with self._uow_factory() as uow:
                if not uow.projects:
                    raise RuntimeError("Project repository is unavailable")
@@ -463,6 +525,59 @@ class ImportIngestionService:
                else:
                    raise ValueError(
                        f"Unsupported staged project mode: {mode!r}")
        except Exception as exc:
            self._record_audit_log(
                action="commit",
                dataset="projects",
                status="failure",
                filename=None,
                row_count=len(staged.rows),
                detail=f"error={type(exc).__name__}: {exc}",
            )
            observe_import(
                action="commit",
                dataset="projects",
                status="failure",
                seconds=time.perf_counter() - start,
            )
            logger.exception(
                "import.commit.failed",
                extra={
                    "event": "import.commit",
                    "dataset": "projects",
                    "status": "failure",
                    "row_count": len(staged.rows),
                    "token": token,
                },
            )
            raise
        else:
            self._record_audit_log(
                action="commit",
                dataset="projects",
                status="success",
                filename=None,
                row_count=len(staged.rows),
                detail=f"created={created} updated={updated}",
            )
            observe_import(
                action="commit",
                dataset="projects",
                status="success",
                seconds=time.perf_counter() - start,
            )
            logger.info(
                "import.commit",
                extra={
                    "event": "import.commit",
                    "dataset": "projects",
                    "status": "success",
                    "row_count": len(staged.rows),
                    "created": created,
                    "updated": updated,
                    "token": token,
                },
            )

        self._project_stage.pop(token, None)
        return ImportCommitResult(
@@ -479,6 +594,8 @@ class ImportIngestionService:
        staged_view = _build_staged_view(staged)
        created = updated = 0

        start = time.perf_counter()
        try:
            with self._uow_factory() as uow:
                if not uow.scenarios or not uow.projects:
                    raise RuntimeError("Scenario repositories are unavailable")
@@ -537,6 +654,59 @@ class ImportIngestionService:
                else:
                    raise ValueError(
                        f"Unsupported staged scenario mode: {mode!r}")
        except Exception as exc:
            self._record_audit_log(
                action="commit",
                dataset="scenarios",
                status="failure",
                filename=None,
                row_count=len(staged.rows),
                detail=f"error={type(exc).__name__}: {exc}",
            )
            observe_import(
                action="commit",
                dataset="scenarios",
                status="failure",
                seconds=time.perf_counter() - start,
            )
            logger.exception(
                "import.commit.failed",
                extra={
                    "event": "import.commit",
                    "dataset": "scenarios",
                    "status": "failure",
                    "row_count": len(staged.rows),
                    "token": token,
                },
            )
            raise
        else:
            self._record_audit_log(
                action="commit",
                dataset="scenarios",
                status="success",
                filename=None,
                row_count=len(staged.rows),
                detail=f"created={created} updated={updated}",
            )
            observe_import(
                action="commit",
                dataset="scenarios",
                status="success",
                seconds=time.perf_counter() - start,
            )
            logger.info(
                "import.commit",
                extra={
                    "event": "import.commit",
                    "dataset": "scenarios",
                    "status": "success",
                    "row_count": len(staged.rows),
                    "created": created,
                    "updated": updated,
                    "token": token,
                },
            )

        self._scenario_stage.pop(token, None)
        return ImportCommitResult(
@@ -545,6 +715,34 @@ class ImportIngestionService:
            summary=ImportCommitSummary(created=created, updated=updated),
        )

    def _record_audit_log(
        self,
        *,
        action: str,
        dataset: str,
        status: str,
        row_count: int,
        detail: Optional[str],
        filename: Optional[str],
    ) -> None:
        try:
            with self._uow_factory() as uow:
                if uow.session is None:
                    return
                log = ImportExportLog(
                    action=action,
                    dataset=dataset,
                    status=status,
                    filename=filename,
                    row_count=row_count,
                    detail=detail,
                )
                uow.session.add(log)
                uow.commit()
        except Exception:
            # Audit logging must not break core workflows
            pass

    def _store_project_stage(
        self, rows: list[StagedRow[ProjectImportRow]]
    ) -> str:
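The preview and commit paths all follow the same shape: time the work, then record one audit entry and one metrics observation per outcome, and never let the bookkeeping mask the original failure. A stripped-down sketch of that try/except/else pattern, with a stand-in observe function in place of monitoring.metrics.observe_import:

import time


def observe(action: str, status: str, seconds: float) -> None:
    # Stand-in for the real metrics hook.
    print(f"{action} {status} in {seconds:.3f}s")


def commit_with_audit(work) -> None:
    start = time.perf_counter()
    try:
        work()
    except Exception:
        observe("commit", "failure", time.perf_counter() - start)
        raise  # re-raise so callers still see the original error
    else:
        observe("commit", "success", time.perf_counter() - start)


commit_with_audit(lambda: None)  # prints: commit success in 0.000s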
95
services/metrics.py
Normal file
@@ -0,0 +1,95 @@
from __future__ import annotations

import json
from datetime import datetime
from typing import Any, Dict, Optional

from sqlalchemy.orm import Session

from models.performance_metric import PerformanceMetric


class MetricsService:
    def __init__(self, db: Session):
        self.db = db

    def store_metric(
        self,
        metric_name: str,
        value: float,
        labels: Optional[Dict[str, Any]] = None,
        endpoint: Optional[str] = None,
        method: Optional[str] = None,
        status_code: Optional[int] = None,
        duration_seconds: Optional[float] = None,
    ) -> PerformanceMetric:
        """Store a performance metric in the database."""
        metric = PerformanceMetric(
            timestamp=datetime.utcnow(),
            metric_name=metric_name,
            value=value,
            labels=json.dumps(labels) if labels else None,
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
        self.db.add(metric)
        self.db.commit()
        self.db.refresh(metric)
        return metric

    def get_metrics(
        self,
        metric_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[PerformanceMetric]:
        """Retrieve stored metrics with optional filtering."""
        query = self.db.query(PerformanceMetric)

        if metric_name:
            query = query.filter(PerformanceMetric.metric_name == metric_name)

        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)

        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)

        return query.order_by(PerformanceMetric.timestamp.desc()).limit(limit).all()

    def get_aggregated_metrics(
        self,
        metric_name: str,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> Dict[str, Any]:
        """Get aggregated statistics for a metric."""
        query = self.db.query(PerformanceMetric).filter(
            PerformanceMetric.metric_name == metric_name
        )

        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)

        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)

        metrics = query.all()

        if not metrics:
            return {"count": 0, "avg": 0, "min": 0, "max": 0}

        values = [m.value for m in metrics]
        return {
            "count": len(values),
            "avg": sum(values) / len(values),
            "min": min(values),
            "max": max(values),
        }


def get_metrics_service(db: Session) -> MetricsService:
    return MetricsService(db)
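Typical use from a request handler, as a sketch; the SessionLocal factory name is an assumption standing in for wherever the app builds its SQLAlchemy sessions:

from database import SessionLocal  # hypothetical session factory
from services.metrics import get_metrics_service

with SessionLocal() as db:
    service = get_metrics_service(db)
    service.store_metric(
        "http_request_duration",
        value=0.042,
        endpoint="/api/projects",
        method="GET",
        status_code=200,
        duration_seconds=0.042,
    )
    print(service.get_aggregated_metrics("http_request_duration"))
    # e.g. {'count': 1, 'avg': 0.042, 'min': 0.042, 'max': 0.042}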
203
services/navigation.py
Normal file
@@ -0,0 +1,203 @@
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Iterable, List, Sequence

from fastapi import Request

from models.navigation import NavigationLink
from services.repositories import NavigationRepository
from services.session import AuthSession


@dataclass(slots=True)
class NavigationLinkDTO:
    id: int
    label: str
    href: str
    match_prefix: str | None
    icon: str | None
    tooltip: str | None
    is_external: bool
    children: List["NavigationLinkDTO"] = field(default_factory=list)


@dataclass(slots=True)
class NavigationGroupDTO:
    id: int
    label: str
    icon: str | None
    tooltip: str | None
    links: List[NavigationLinkDTO] = field(default_factory=list)


@dataclass(slots=True)
class NavigationSidebarDTO:
    groups: List[NavigationGroupDTO]
    roles: tuple[str, ...]


class NavigationService:
    """Build navigation payloads filtered for the current session."""

    def __init__(self, repository: NavigationRepository) -> None:
        self._repository = repository

    def build_sidebar(
        self,
        *,
        session: AuthSession,
        request: Request | None = None,
        include_disabled: bool = False,
    ) -> NavigationSidebarDTO:
        roles = self._collect_roles(session)
        groups = self._repository.list_groups_with_links(
            include_disabled=include_disabled
        )
        context = self._derive_context(request)

        mapped_groups: List[NavigationGroupDTO] = []
        for group in groups:
            if not include_disabled and not group.is_enabled:
                continue
            mapped_links = self._map_links(
                group.links,
                roles,
                request=request,
                include_disabled=include_disabled,
                context=context,
            )
            if not mapped_links and not include_disabled:
                continue
            mapped_groups.append(
                NavigationGroupDTO(
                    id=group.id,
                    label=group.label,
                    icon=group.icon,
                    tooltip=group.tooltip,
                    links=mapped_links,
                )
            )
        return NavigationSidebarDTO(groups=mapped_groups, roles=roles)

    def _map_links(
        self,
        links: Sequence[NavigationLink],
        roles: Iterable[str],
        *,
        request: Request | None,
        include_disabled: bool,
        context: dict[str, str | None],
        include_children: bool = False,
    ) -> List[NavigationLinkDTO]:
        resolved_roles = tuple(roles)
        mapped: List[NavigationLinkDTO] = []
        for link in sorted(links, key=lambda x: (x.sort_order, x.id)):
            if not include_children and link.parent_link_id is not None:
                continue
            if not include_disabled and (not link.is_enabled):
                continue
            if not self._link_visible(link, resolved_roles, include_disabled):
                continue
            href = self._resolve_href(link, request=request, context=context)
            if not href:
                continue
            children = self._map_links(
                link.children,
                resolved_roles,
                request=request,
                include_disabled=include_disabled,
                context=context,
                include_children=True,
            )
            match_prefix = link.match_prefix or href
            mapped.append(
                NavigationLinkDTO(
                    id=link.id,
                    label=link.label,
                    href=href,
                    match_prefix=match_prefix,
                    icon=link.icon,
                    tooltip=link.tooltip,
                    is_external=link.is_external,
                    children=children,
                )
            )
        return mapped

    @staticmethod
    def _collect_roles(session: AuthSession) -> tuple[str, ...]:
        roles = tuple((session.role_slugs or ()) if session else ())
        if session and session.is_authenticated:
            return roles
        if "anonymous" in roles:
            return roles
        return roles + ("anonymous",)

    @staticmethod
    def _derive_context(request: Request | None) -> dict[str, str | None]:
        if request is None:
            return {"project_id": None, "scenario_id": None}
        project_id = request.path_params.get(
            "project_id") if hasattr(request, "path_params") else None
        scenario_id = request.path_params.get(
            "scenario_id") if hasattr(request, "path_params") else None
        if not project_id:
            project_id = request.query_params.get("project_id")
        if not scenario_id:
            scenario_id = request.query_params.get("scenario_id")
        return {"project_id": project_id, "scenario_id": scenario_id}

    def _resolve_href(
        self,
        link: NavigationLink,
        *,
        request: Request | None,
        context: dict[str, str | None],
    ) -> str | None:
        if link.route_name:
            if request is None:
                fallback = link.href_override
                if fallback:
                    return fallback
                # Fallback to route name when no request is available
                return f"/{link.route_name.replace('.', '/')}"
            requires_context = link.slug in {
                "profitability",
                "profitability-calculator",
                "opex",
                "capex",
            }
            if requires_context:
                project_id = context.get("project_id")
                scenario_id = context.get("scenario_id")
                if project_id and scenario_id:
                    try:
                        return str(
                            request.url_for(
                                link.route_name,
                                project_id=project_id,
                                scenario_id=scenario_id,
                            )
                        )
                    except Exception:  # pragma: no cover - defensive
                        pass
            try:
                return str(request.url_for(link.route_name))
            except Exception:  # pragma: no cover - defensive
                return link.href_override
        return link.href_override

    @staticmethod
    def _link_visible(
        link: NavigationLink,
        roles: Iterable[str],
        include_disabled: bool,
    ) -> bool:
        role_tuple = tuple(roles)
        if not include_disabled and not link.is_enabled:
            return False
        if not link.required_roles:
            return True
        role_set = set(role_tuple)
        return any(role in role_set for role in link.required_roles)
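The role fallback is the subtle part: unauthenticated sessions always gain an implicit "anonymous" role, while authenticated ones keep exactly what they carry. A tiny sketch with a stand-in session object, assuming the application package and its dependencies are importable:

from dataclasses import dataclass

from services.navigation import NavigationService


@dataclass
class FakeSession:  # stand-in for services.session.AuthSession
    role_slugs: tuple[str, ...]
    is_authenticated: bool


print(NavigationService._collect_roles(FakeSession(("viewer",), False)))
# ('viewer', 'anonymous')
print(NavigationService._collect_roles(FakeSession(("admin",), True)))
# ('admin',)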
176
services/pricing.py
Normal file
@@ -0,0 +1,176 @@
"""Pricing service implementing commodity revenue calculations.

This module exposes data models and helpers for computing product pricing
according to the formulas outlined in
``calminer-docs/specifications/price_calculation.md``. It focuses on the core
calculation steps (payable metal, penalties, net revenue) and is intended to be
composed within broader scenario evaluation workflows.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Mapping

from pydantic import BaseModel, Field, PositiveFloat, field_validator

from services.currency import require_currency


class PricingInput(BaseModel):
    """Normalized inputs for pricing calculations."""

    metal: str = Field(..., min_length=1)
    ore_tonnage: PositiveFloat = Field(
        ..., description="Total ore mass processed (metric tonnes)")
    head_grade_pct: PositiveFloat = Field(
        ..., gt=0, le=100, description="Head grade as percent")
    recovery_pct: PositiveFloat = Field(
        ..., gt=0, le=100, description="Recovery rate percent")
    payable_pct: float | None = Field(
        None, gt=0, le=100, description="Contractual payable percentage")
    reference_price: PositiveFloat = Field(
        ..., description="Reference price in base currency per unit")
    treatment_charge: float = Field(0, ge=0)
    smelting_charge: float = Field(0, ge=0)
    moisture_pct: float = Field(0, ge=0, le=100)
    moisture_threshold_pct: float | None = Field(None, ge=0, le=100)
    moisture_penalty_per_pct: float | None = Field(None)
    impurity_ppm: Mapping[str, float] = Field(default_factory=dict)
    impurity_thresholds: Mapping[str, float] = Field(default_factory=dict)
    impurity_penalty_per_ppm: Mapping[str, float] = Field(default_factory=dict)
    premiums: float = Field(0)
    fx_rate: PositiveFloat = Field(
        1, description="Multiplier to convert to scenario currency")
    currency_code: str | None = Field(
        None, description="Optional explicit currency override")

    @field_validator("impurity_ppm", mode="before")
    @classmethod
    def _validate_impurity_mapping(cls, value):
        if isinstance(value, Mapping):
            return {k: float(v) for k, v in value.items()}
        return value


class PricingResult(BaseModel):
    """Structured output summarising pricing computation results."""

    metal: str
    ore_tonnage: float
    head_grade_pct: float
    recovery_pct: float
    payable_metal_tonnes: float
    reference_price: float
    gross_revenue: float
    moisture_penalty: float
    impurity_penalty: float
    treatment_smelt_charges: float
    premiums: float
    net_revenue: float
    currency: str | None


@dataclass(frozen=True)
class PricingMetadata:
    """Metadata defaults applied when explicit inputs are omitted."""

    default_payable_pct: float = 100.0
    default_currency: str | None = "USD"
    moisture_threshold_pct: float = 8.0
    moisture_penalty_per_pct: float = 0.0
    impurity_thresholds: Mapping[str, float] = field(default_factory=dict)
    impurity_penalty_per_ppm: Mapping[str, float] = field(default_factory=dict)


def calculate_pricing(
    pricing_input: PricingInput,
    *,
    metadata: PricingMetadata | None = None,
    currency: str | None = None,
) -> PricingResult:
    """Calculate pricing metrics for the provided commodity input.

    Parameters
    ----------
    pricing_input:
        Normalised input data including ore tonnage, grades, charges, and
        optional penalties.
    metadata:
        Optional default metadata applied when specific values are omitted from
        ``pricing_input``.
    currency:
        Optional override for the output currency label. Falls back to
        ``metadata.default_currency`` when not provided.
    """

    applied_metadata = metadata or PricingMetadata()

    payable_pct = (
        pricing_input.payable_pct
        if pricing_input.payable_pct is not None
        else applied_metadata.default_payable_pct
    )
    moisture_threshold = (
        pricing_input.moisture_threshold_pct
        if pricing_input.moisture_threshold_pct is not None
        else applied_metadata.moisture_threshold_pct
    )
    moisture_penalty_factor = (
        pricing_input.moisture_penalty_per_pct
        if pricing_input.moisture_penalty_per_pct is not None
        else applied_metadata.moisture_penalty_per_pct
    )

    impurity_thresholds = {
        **applied_metadata.impurity_thresholds,
        **pricing_input.impurity_thresholds,
    }
    impurity_penalty_factors = {
        **applied_metadata.impurity_penalty_per_ppm,
        **pricing_input.impurity_penalty_per_ppm,
    }

    q_metal = pricing_input.ore_tonnage * (pricing_input.head_grade_pct / 100.0) * (
        pricing_input.recovery_pct / 100.0
    )
    payable_metal = q_metal * (payable_pct / 100.0)

    gross_revenue_ref = payable_metal * pricing_input.reference_price
    charges = pricing_input.treatment_charge + pricing_input.smelting_charge

    moisture_excess = max(0.0, pricing_input.moisture_pct - moisture_threshold)
    moisture_penalty = moisture_excess * moisture_penalty_factor

    impurity_penalty_total = 0.0
    for impurity, value in pricing_input.impurity_ppm.items():
        threshold = impurity_thresholds.get(impurity, 0.0)
        penalty_factor = impurity_penalty_factors.get(impurity, 0.0)
        impurity_penalty_total += max(0.0, value - threshold) * penalty_factor

    net_revenue_ref = (
        gross_revenue_ref - charges - moisture_penalty - impurity_penalty_total
    )
    net_revenue_ref += pricing_input.premiums

    net_revenue = net_revenue_ref * pricing_input.fx_rate

    currency_code = require_currency(
        currency or pricing_input.currency_code,
        default=applied_metadata.default_currency,
    )

    return PricingResult(
        metal=pricing_input.metal,
        ore_tonnage=pricing_input.ore_tonnage,
        head_grade_pct=pricing_input.head_grade_pct,
        recovery_pct=pricing_input.recovery_pct,
        payable_metal_tonnes=payable_metal,
        reference_price=pricing_input.reference_price,
        gross_revenue=gross_revenue_ref,
        moisture_penalty=moisture_penalty,
        impurity_penalty=impurity_penalty_total,
        treatment_smelt_charges=charges,
        premiums=pricing_input.premiums,
        net_revenue=net_revenue,
        currency=currency_code,
    )
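A worked check of the revenue chain with invented figures: 10,000 t of ore at 2% head grade contains 200 t of metal; 90% recovery leaves 180 t; 95% payable leaves 171 t; at a 2,500/t reference price, gross revenue is 171 x 2,500 = 427,500 before charges:

from services.pricing import PricingInput, calculate_pricing

result = calculate_pricing(
    PricingInput(
        metal="copper",
        ore_tonnage=10_000,
        head_grade_pct=2.0,       # 200 t contained metal
        recovery_pct=90.0,        # 180 t recovered
        payable_pct=95.0,         # 171 t payable
        reference_price=2_500.0,  # per tonne, base currency
        treatment_charge=5_000.0,
    )
)
print(result.payable_metal_tonnes)  # 171.0
print(result.gross_revenue)         # 427500.0
print(result.net_revenue)           # 422500.0 (gross minus treatment charge)
print(result.currency)              # 'USD' via the metadata default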
875
services/reporting.py
Normal file
@@ -0,0 +1,875 @@
"""Reporting service layer aggregating deterministic and simulation metrics."""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import date
import math
from typing import Mapping, Sequence
from urllib.parse import urlencode

import plotly.graph_objects as go
import plotly.io as pio

from fastapi import Request

from models import FinancialCategory, Project, Scenario
from services.financial import (
    CashFlow,
    ConvergenceError,
    PaybackNotReachedError,
    internal_rate_of_return,
    net_present_value,
    payback_period,
)
from services.simulation import (
    CashFlowSpec,
    SimulationConfig,
    SimulationMetric,
    SimulationResult,
    run_monte_carlo,
)
from services.unit_of_work import UnitOfWork

DEFAULT_DISCOUNT_RATE = 0.1
DEFAULT_ITERATIONS = 500
DEFAULT_PERCENTILES: tuple[float, float, float] = (5.0, 50.0, 95.0)

_COST_CATEGORY_SIGNS: Mapping[FinancialCategory, float] = {
    FinancialCategory.REVENUE: 1.0,
    FinancialCategory.CAPITAL_EXPENDITURE: -1.0,
    FinancialCategory.OPERATING_EXPENDITURE: -1.0,
    FinancialCategory.CONTINGENCY: -1.0,
    FinancialCategory.OTHER: -1.0,
}


@dataclass(frozen=True)
class IncludeOptions:
    """Flags controlling optional sections in report payloads."""

    distribution: bool = False
    samples: bool = False


@dataclass(slots=True)
class ReportFilters:
    """Filter parameters applied when selecting scenarios for a report."""

    scenario_ids: set[int] | None = None
    start_date: date | None = None
    end_date: date | None = None

    def matches(self, scenario: Scenario) -> bool:
        if self.scenario_ids is not None and scenario.id not in self.scenario_ids:
            return False
        if self.start_date and scenario.start_date and scenario.start_date < self.start_date:
            return False
        if self.end_date and scenario.end_date and scenario.end_date > self.end_date:
            return False
        return True

    def to_dict(self) -> dict[str, object]:
        payload: dict[str, object] = {}
        if self.scenario_ids is not None:
            payload["scenario_ids"] = sorted(self.scenario_ids)
        if self.start_date is not None:
            payload["start_date"] = self.start_date
        if self.end_date is not None:
            payload["end_date"] = self.end_date
        return payload


@dataclass(slots=True)
class ScenarioFinancialTotals:
    currency: str | None
    inflows: float
    outflows: float
    net: float
    by_category: dict[str, float]

    def to_dict(self) -> dict[str, object]:
        return {
            "currency": self.currency,
            "inflows": _round_optional(self.inflows),
            "outflows": _round_optional(self.outflows),
            "net": _round_optional(self.net),
            "by_category": {
                key: _round_optional(value)
                for key, value in sorted(self.by_category.items())
            },
        }


@dataclass(slots=True)
class ScenarioDeterministicMetrics:
    currency: str | None
    discount_rate: float
    compounds_per_year: int
    npv: float | None
    irr: float | None
    payback_period: float | None
    notes: list[str] = field(default_factory=list)

    def to_dict(self) -> dict[str, object]:
        return {
            "currency": self.currency,
            "discount_rate": _round_optional(self.discount_rate, digits=4),
            "compounds_per_year": self.compounds_per_year,
            "npv": _round_optional(self.npv),
            "irr": _round_optional(self.irr, digits=6),
            "payback_period": _round_optional(self.payback_period, digits=4),
            "notes": self.notes,
        }


@dataclass(slots=True)
class ScenarioMonteCarloResult:
    available: bool
    notes: list[str] = field(default_factory=list)
    result: SimulationResult | None = None
    include_samples: bool = False

    def to_dict(self) -> dict[str, object]:
        if not self.available or self.result is None:
            return {
                "available": False,
                "notes": self.notes,
            }

        metrics: dict[str, dict[str, object]] = {}
        for metric, summary in self.result.summaries.items():
            metrics[metric.value] = {
                "mean": _round_optional(summary.mean),
                "std_dev": _round_optional(summary.std_dev),
                "minimum": _round_optional(summary.minimum),
                "maximum": _round_optional(summary.maximum),
                "percentiles": {
                    f"{percentile:g}": _round_optional(value)
                    for percentile, value in sorted(summary.percentiles.items())
                },
                "sample_size": summary.sample_size,
                "failed_runs": summary.failed_runs,
            }

        samples_payload: dict[str, list[float | None]] | None = None
        if self.include_samples and self.result.samples:
            samples_payload = {}
            for metric, samples in self.result.samples.items():
                samples_payload[metric.value] = [
                    _sanitize_float(sample) for sample in samples.tolist()
                ]

        payload: dict[str, object] = {
            "available": True,
            "iterations": self.result.iterations,
            "metrics": metrics,
            "notes": self.notes,
        }
        if samples_payload:
            payload["samples"] = samples_payload
        return payload


@dataclass(slots=True)
class ScenarioReport:
    scenario: Scenario
    totals: ScenarioFinancialTotals
    deterministic: ScenarioDeterministicMetrics
    monte_carlo: ScenarioMonteCarloResult | None

    def to_dict(self) -> dict[str, object]:
        scenario_info = {
            "id": self.scenario.id,
            "project_id": self.scenario.project_id,
            "name": self.scenario.name,
            "description": self.scenario.description,
            "status": self.scenario.status.value
            if hasattr(self.scenario.status, "value")
            else self.scenario.status,
            "start_date": self.scenario.start_date,
            "end_date": self.scenario.end_date,
            "currency": self.scenario.currency,
            "primary_resource": self.scenario.primary_resource.value
            if self.scenario.primary_resource and hasattr(self.scenario.primary_resource, "value")
            else self.scenario.primary_resource,
            "discount_rate": _round_optional(self.deterministic.discount_rate, digits=4),
            "created_at": self.scenario.created_at,
            "updated_at": self.scenario.updated_at,
            "simulation_parameter_count": len(self.scenario.simulation_parameters or []),
        }
        payload: dict[str, object] = {
            "scenario": scenario_info,
            "financials": self.totals.to_dict(),
            "metrics": self.deterministic.to_dict(),
        }
        if self.monte_carlo is not None:
            payload["monte_carlo"] = self.monte_carlo.to_dict()
        return payload


@dataclass(slots=True)
class AggregatedMetric:
    average: float | None
    minimum: float | None
    maximum: float | None

    def to_dict(self) -> dict[str, object]:
        return {
            "average": _round_optional(self.average),
            "minimum": _round_optional(self.minimum),
            "maximum": _round_optional(self.maximum),
        }


@dataclass(slots=True)
class ProjectAggregates:
    total_inflows: float
    total_outflows: float
    total_net: float
    deterministic_metrics: dict[str, AggregatedMetric]

    def to_dict(self) -> dict[str, object]:
        return {
            "financials": {
                "total_inflows": _round_optional(self.total_inflows),
                "total_outflows": _round_optional(self.total_outflows),
                "total_net": _round_optional(self.total_net),
            },
            "deterministic_metrics": {
                metric: data.to_dict()
                for metric, data in sorted(self.deterministic_metrics.items())
            },
        }


@dataclass(slots=True)
class MetricComparison:
    metric: str
    direction: str
    best: tuple[int, str, float] | None
    worst: tuple[int, str, float] | None
    average: float | None

    def to_dict(self) -> dict[str, object]:
        return {
            "metric": self.metric,
            "direction": self.direction,
            "best": _comparison_entry(self.best),
            "worst": _comparison_entry(self.worst),
            "average": _round_optional(self.average),
        }


def parse_include_tokens(raw: str | None) -> IncludeOptions:
    tokens: set[str] = set()
    if raw:
        for part in raw.split(","):
            token = part.strip().lower()
            if token:
                tokens.add(token)
    if "all" in tokens:
        return IncludeOptions(distribution=True, samples=True)
    return IncludeOptions(
        distribution=bool({"distribution", "monte_carlo", "mc"} & tokens),
        samples="samples" in tokens,
    )


def validate_percentiles(values: Sequence[float] | None) -> tuple[float, ...]:
    if not values:
        return DEFAULT_PERCENTILES
    seen: set[float] = set()
    cleaned: list[float] = []
    for value in values:
        percentile = float(value)
        if percentile < 0.0 or percentile > 100.0:
            raise ValueError("Percentiles must be between 0 and 100.")
        if percentile not in seen:
            seen.add(percentile)
            cleaned.append(percentile)
    if not cleaned:
        return DEFAULT_PERCENTILES
    return tuple(cleaned)
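These two parsers back the report query parameters; a quick sketch of their behaviour, assuming services.reporting is importable:

from services.reporting import parse_include_tokens, validate_percentiles

opts = parse_include_tokens("distribution, samples")
print(opts.distribution, opts.samples)  # True True
print(parse_include_tokens("all"))      # IncludeOptions(distribution=True, samples=True)

# Duplicates dropped, first-seen order kept; None falls back to defaults.
print(validate_percentiles([50, 5, 50, 95]))  # (50.0, 5.0, 95.0)
print(validate_percentiles(None))             # (5.0, 50.0, 95.0)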
class ReportingService:
|
||||
"""Coordinates project and scenario reporting aggregation."""
|
||||
|
||||
def __init__(self, uow: UnitOfWork) -> None:
|
||||
self._uow = uow
|
||||
|
||||
def project_summary(
|
||||
self,
|
||||
project: Project,
|
||||
*,
|
||||
filters: ReportFilters,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
scenarios = self._load_scenarios(project.id, filters)
|
||||
reports = [
|
||||
self._build_scenario_report(
|
||||
scenario,
|
||||
include_distribution=include.distribution,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
for scenario in scenarios
|
||||
]
|
||||
aggregates = self._aggregate_project(reports)
|
||||
return {
|
||||
"project": _project_payload(project),
|
||||
"scenario_count": len(reports),
|
||||
"filters": filters.to_dict(),
|
||||
"aggregates": aggregates.to_dict(),
|
||||
"scenarios": [report.to_dict() for report in reports],
|
||||
}
|
||||
|
||||
def scenario_comparison(
|
||||
self,
|
||||
project: Project,
|
||||
scenarios: Sequence[Scenario],
|
||||
*,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
reports = [
|
||||
self._build_scenario_report(
|
||||
self._reload_scenario(scenario.id),
|
||||
include_distribution=include.distribution,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
for scenario in scenarios
|
||||
]
|
||||
comparison = {
|
||||
metric: data.to_dict()
|
||||
for metric, data in self._build_comparisons(reports).items()
|
||||
}
|
||||
return {
|
||||
"project": _project_payload(project),
|
||||
"scenarios": [report.to_dict() for report in reports],
|
||||
"comparison": comparison,
|
||||
}
|
||||
|
||||
def scenario_distribution(
|
||||
self,
|
||||
scenario: Scenario,
|
||||
*,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
report = self._build_scenario_report(
|
||||
self._reload_scenario(scenario.id),
|
||||
include_distribution=True,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
return {
|
||||
"scenario": report.to_dict()["scenario"],
|
||||
"summary": report.totals.to_dict(),
|
||||
"metrics": report.deterministic.to_dict(),
|
||||
"monte_carlo": (
|
||||
report.monte_carlo.to_dict() if report.monte_carlo else {
|
||||
"available": False}
|
||||
),
|
||||
}
|
||||
|
||||
def _load_scenarios(self, project_id: int, filters: ReportFilters) -> list[Scenario]:
|
||||
scenarios = self._uow.scenarios.list_for_project(
|
||||
project_id, with_children=True)
|
||||
return [scenario for scenario in scenarios if filters.matches(scenario)]

    def _reload_scenario(self, scenario_id: int) -> Scenario:
        return self._uow.scenarios.get(scenario_id, with_children=True)

    def _build_scenario_report(
        self,
        scenario: Scenario,
        *,
        include_distribution: bool,
        include_samples: bool,
        iterations: int,
        percentiles: tuple[float, ...],
    ) -> ScenarioReport:
        cash_flows, totals = _build_cash_flows(scenario)
        deterministic = _calculate_deterministic_metrics(
            scenario, cash_flows, totals)
        monte_carlo: ScenarioMonteCarloResult | None = None
        if include_distribution:
            monte_carlo = _run_monte_carlo(
                scenario,
                cash_flows,
                include_samples=include_samples,
                iterations=iterations,
                percentiles=percentiles,
            )
        return ScenarioReport(
            scenario=scenario,
            totals=totals,
            deterministic=deterministic,
            monte_carlo=monte_carlo,
        )

    def _aggregate_project(self, reports: Sequence[ScenarioReport]) -> ProjectAggregates:
        total_inflows = sum(report.totals.inflows for report in reports)
        total_outflows = sum(report.totals.outflows for report in reports)
        total_net = sum(report.totals.net for report in reports)

        metrics: dict[str, AggregatedMetric] = {}
        for metric_name in ("npv", "irr", "payback_period"):
            values = [
                getattr(report.deterministic, metric_name)
                for report in reports
                if getattr(report.deterministic, metric_name) is not None
            ]
            if values:
                metrics[metric_name] = AggregatedMetric(
                    average=sum(values) / len(values),
                    minimum=min(values),
                    maximum=max(values),
                )
        return ProjectAggregates(
            total_inflows=total_inflows,
            total_outflows=total_outflows,
            total_net=total_net,
            deterministic_metrics=metrics,
        )

    def _build_comparisons(
        self, reports: Sequence[ScenarioReport]
    ) -> Mapping[str, MetricComparison]:
        comparisons: dict[str, MetricComparison] = {}
        for metric_name, direction in (
            ("npv", "higher_is_better"),
            ("irr", "higher_is_better"),
            ("payback_period", "lower_is_better"),
        ):
            entries: list[tuple[int, str, float]] = []
            for report in reports:
                value = getattr(report.deterministic, metric_name)
                if value is None:
                    continue
                entries.append(
                    (report.scenario.id, report.scenario.name, value))
            if not entries:
                continue
            if direction == "higher_is_better":
                best = max(entries, key=lambda item: item[2])
                worst = min(entries, key=lambda item: item[2])
            else:
                best = min(entries, key=lambda item: item[2])
                worst = max(entries, key=lambda item: item[2])
            average = sum(item[2] for item in entries) / len(entries)
            comparisons[metric_name] = MetricComparison(
                metric=metric_name,
                direction=direction,
                best=best,
                worst=worst,
                average=average,
            )
        return comparisons

    def build_project_summary_context(
        self,
        project: Project,
        filters: ReportFilters,
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for project summary page."""
        scenarios = self._load_scenarios(project.id, filters)
        reports = [
            self._build_scenario_report(
                scenario,
                include_distribution=include.distribution,
                include_samples=include.samples,
                iterations=iterations,
                percentiles=percentiles,
            )
            for scenario in scenarios
        ]
        aggregates = self._aggregate_project(reports)

        return {
            "request": request,
            "project": _project_payload(project),
            "scenario_count": len(reports),
            "aggregates": aggregates.to_dict(),
            "scenarios": [report.to_dict() for report in reports],
            "filters": filters.to_dict(),
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Project Summary · {project.name}",
            "subtitle": "Aggregated financial and simulation insights across scenarios.",
            "actions": [
                {
                    "href": request.url_for(
                        "reports.project_summary",
                        project_id=project.id,
                    ),
                    "label": "Download JSON",
                }
            ],
            "chart_data": self._generate_npv_comparison_chart(reports),
        }

    def build_scenario_comparison_context(
        self,
        project: Project,
        scenarios: Sequence[Scenario],
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for scenario comparison page."""
        reports = [
            self._build_scenario_report(
                self._reload_scenario(scenario.id),
                include_distribution=include.distribution,
                include_samples=include.samples,
                iterations=iterations,
                percentiles=percentiles,
            )
            for scenario in scenarios
        ]
        comparison = {
            metric: data.to_dict()
            for metric, data in self._build_comparisons(reports).items()
        }

        comparison_json_url = request.url_for(
            "reports.project_scenario_comparison",
            project_id=project.id,
        )
        scenario_ids = [str(s.id) for s in scenarios]
        comparison_query = urlencode(
            [("scenario_ids", str(identifier)) for identifier in scenario_ids]
        )
        if comparison_query:
            comparison_json_url = f"{comparison_json_url}?{comparison_query}"

        return {
            "request": request,
            "project": _project_payload(project),
            "scenarios": [report.to_dict() for report in reports],
            "comparison": comparison,
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Scenario Comparison · {project.name}",
            "subtitle": "Evaluate deterministic metrics and Monte Carlo trends side by side.",
            "actions": [
                {
                    "href": comparison_json_url,
                    "label": "Download JSON",
                }
            ],
        }

    def build_scenario_distribution_context(
        self,
        scenario: Scenario,
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for scenario distribution page."""
        report = self._build_scenario_report(
            self._reload_scenario(scenario.id),
            include_distribution=True,
            include_samples=include.samples,
            iterations=iterations,
            percentiles=percentiles,
        )

        return {
            "request": request,
            "scenario": report.to_dict()["scenario"],
            "summary": report.totals.to_dict(),
            "metrics": report.deterministic.to_dict(),
            "monte_carlo": (
                report.monte_carlo.to_dict()
                if report.monte_carlo
                else {"available": False}
            ),
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Scenario Distribution · {scenario.name}",
            "subtitle": "Deterministic and simulated distributions for a single scenario.",
            "actions": [
                {
                    "href": request.url_for(
                        "reports.scenario_distribution",
                        scenario_id=scenario.id,
                    ),
                    "label": "Download JSON",
                }
            ],
            "chart_data": (
                self._generate_distribution_histogram(report.monte_carlo)
                if report.monte_carlo
                else "{}"
            ),
        }

    def _generate_npv_comparison_chart(self, reports: Sequence[ScenarioReport]) -> str:
        """Generate Plotly chart JSON for NPV comparison across scenarios."""
        scenario_names = []
        npv_values = []

        for report in reports:
            scenario_names.append(report.scenario.name)
            npv_values.append(report.deterministic.npv or 0)

        fig = go.Figure(data=[
            go.Bar(
                x=scenario_names,
                y=npv_values,
                name='NPV',
                marker_color='lightblue'
            )
        ])

        fig.update_layout(
            title="NPV Comparison Across Scenarios",
            xaxis_title="Scenario",
            yaxis_title="NPV",
            showlegend=False
        )

        return pio.to_json(fig) or "{}"

    def _generate_distribution_histogram(self, monte_carlo: ScenarioMonteCarloResult) -> str:
        """Generate Plotly histogram for Monte Carlo distribution."""
        if not monte_carlo.available or not monte_carlo.result or not monte_carlo.result.samples:
            return "{}"

        # Get NPV samples
        npv_samples = monte_carlo.result.samples.get(SimulationMetric.NPV, [])
        if len(npv_samples) == 0:
            return "{}"

        fig = go.Figure(data=[
            go.Histogram(
                x=npv_samples,
                nbinsx=50,
                name='NPV Distribution',
                marker_color='lightgreen'
            )
        ])

        fig.update_layout(
            title="Monte Carlo NPV Distribution",
            xaxis_title="NPV",
            yaxis_title="Frequency",
            showlegend=False
        )

        return pio.to_json(fig) or "{}"
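
# Consumption sketch (illustrative, not part of the diff): the JSON string that
# pio.to_json returns can be parsed back into a Figure for inspection or tests.
# `service` and `reports` are assumed to exist in the caller.
#
#     import plotly.io as pio
#     fig = pio.from_json(service._generate_npv_comparison_chart(reports))
#     assert fig.data[0].type == "bar"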


def _build_cash_flows(scenario: Scenario) -> tuple[list[CashFlow], ScenarioFinancialTotals]:
    cash_flows: list[CashFlow] = []
    by_category: dict[str, float] = {}
    inflows = 0.0
    outflows = 0.0
    net = 0.0
    period_index = 0

    for financial_input in scenario.financial_inputs or []:
        sign = _COST_CATEGORY_SIGNS.get(financial_input.category, -1.0)
        amount = float(financial_input.amount) * sign
        net += amount
        if amount >= 0:
            inflows += amount
        else:
            outflows += -amount
        by_category.setdefault(financial_input.category.value, 0.0)
        by_category[financial_input.category.value] += amount

        if financial_input.effective_date is not None:
            cash_flows.append(
                CashFlow(amount=amount, date=financial_input.effective_date)
            )
        else:
            cash_flows.append(
                CashFlow(amount=amount, period_index=period_index))
            period_index += 1

    currency = scenario.currency
    if currency is None and scenario.financial_inputs:
        currency = scenario.financial_inputs[0].currency

    totals = ScenarioFinancialTotals(
        currency=currency,
        inflows=inflows,
        outflows=outflows,
        net=net,
        by_category=by_category,
    )
    return cash_flows, totals
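
# Worked example (illustrative): assuming _COST_CATEGORY_SIGNS maps
# revenue-like categories to +1.0 and everything else defaults to -1.0, two
# undated inputs of 100.0 (revenue) and 40.0 (opex) would yield
# inflows=100.0, outflows=40.0, net=60.0, and two CashFlow entries signed
# +100.0 and -40.0 at period indices 0 and 1.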


def _calculate_deterministic_metrics(
    scenario: Scenario,
    cash_flows: Sequence[CashFlow],
    totals: ScenarioFinancialTotals,
) -> ScenarioDeterministicMetrics:
    notes: list[str] = []
    discount_rate = _normalise_discount_rate(scenario.discount_rate)
    if scenario.discount_rate is None:
        notes.append(
            f"Discount rate not set; defaulted to {discount_rate:.2%}."
        )

    if not cash_flows:
        notes.append(
            "No financial inputs available for deterministic metrics.")
        return ScenarioDeterministicMetrics(
            currency=totals.currency,
            discount_rate=discount_rate,
            compounds_per_year=1,
            npv=None,
            irr=None,
            payback_period=None,
            notes=notes,
        )

    npv_value: float | None
    try:
        npv_value = net_present_value(
            discount_rate,
            cash_flows,
            compounds_per_year=1,
        )
    except ValueError as exc:
        npv_value = None
        notes.append(f"NPV unavailable: {exc}.")

    irr_value: float | None
    try:
        irr_value = internal_rate_of_return(
            cash_flows,
            compounds_per_year=1,
        )
    except (ValueError, ConvergenceError) as exc:
        irr_value = None
        notes.append(f"IRR unavailable: {exc}.")

    payback_value: float | None
    try:
        payback_value = payback_period(
            cash_flows,
            compounds_per_year=1,
        )
    except (ValueError, PaybackNotReachedError) as exc:
        payback_value = None
        notes.append(f"Payback period unavailable: {exc}.")

    return ScenarioDeterministicMetrics(
        currency=totals.currency,
        discount_rate=discount_rate,
        compounds_per_year=1,
        npv=npv_value,
        irr=irr_value,
        payback_period=payback_value,
        notes=notes,
    )
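
# Arithmetic sketch (illustrative): with annual compounding and a 10% discount
# rate, cash flows of -100 at period 0 and +60 in each of periods 1 and 2 give
#
#     NPV = -100 + 60 / 1.1 + 60 / 1.1**2 ≈ 4.13
#
# which is the quantity net_present_value is expected to return here.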


def _run_monte_carlo(
    scenario: Scenario,
    cash_flows: Sequence[CashFlow],
    *,
    include_samples: bool,
    iterations: int,
    percentiles: tuple[float, ...],
) -> ScenarioMonteCarloResult:
    if not cash_flows:
        return ScenarioMonteCarloResult(
            available=False,
            notes=["No financial inputs available for Monte Carlo simulation."],
        )

    discount_rate = _normalise_discount_rate(scenario.discount_rate)
    specs = [CashFlowSpec(cash_flow=flow) for flow in cash_flows]
    notes: list[str] = []
    if not scenario.simulation_parameters:
        notes.append(
            "Scenario has no stochastic parameters; simulation mirrors deterministic cash flows."
        )
    config = SimulationConfig(
        iterations=iterations,
        discount_rate=discount_rate,
        metrics=(
            SimulationMetric.NPV,
            SimulationMetric.IRR,
            SimulationMetric.PAYBACK,
        ),
        percentiles=percentiles,
        return_samples=include_samples,
    )
    try:
        result = run_monte_carlo(specs, config)
    except Exception as exc:  # pragma: no cover - safeguard for unexpected failures
        notes.append(f"Simulation failed: {exc}.")
        return ScenarioMonteCarloResult(available=False, notes=notes)
    return ScenarioMonteCarloResult(
        available=True,
        notes=notes,
        result=result,
        include_samples=include_samples,
    )


def _normalise_discount_rate(value: float | None) -> float:
    if value is None:
        return DEFAULT_DISCOUNT_RATE
    rate = float(value)
    if rate > 1.0:
        return rate / 100.0
    return rate
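
# Behaviour sketch (illustrative): values above 1.0 are treated as percentages.
#
#     _normalise_discount_rate(7.5)   # -> 0.075
#     _normalise_discount_rate(0.08)  # -> 0.08
#     _normalise_discount_rate(None)  # -> DEFAULT_DISCOUNT_RATE
#
# Note that exactly 1.0 is kept as a 100% rate rather than divided by 100.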


def _sanitize_float(value: float | None) -> float | None:
    if value is None:
        return None
    if math.isnan(value) or math.isinf(value):
        return None
    return float(value)


def _round_optional(value: float | None, *, digits: int = 2) -> float | None:
    clean = _sanitize_float(value)
    if clean is None:
        return None
    return round(clean, digits)


def _comparison_entry(entry: tuple[int, str, float] | None) -> dict[str, object] | None:
    if entry is None:
        return None
    scenario_id, name, value = entry
    return {
        "scenario_id": scenario_id,
        "name": name,
        "value": _round_optional(value),
    }


def _project_payload(project: Project) -> dict[str, object]:
    return {
        "id": project.id,
        "name": project.name,
        "location": project.location,
        "operation_type": project.operation_type.value,
        "description": project.description,
        "created_at": project.created_at,
        "updated_at": project.updated_at,
    }

@@ -1,6 +1,7 @@
from __future__ import annotations

from collections.abc import Iterable
from dataclasses import dataclass
from datetime import datetime
from typing import Mapping, Sequence

@@ -11,26 +12,99 @@ from sqlalchemy.orm import Session, joinedload, selectinload
from models import (
    FinancialInput,
    Project,
    PricingImpuritySettings,
    PricingMetalSettings,
    PricingSettings,
    ProjectCapexSnapshot,
    ProjectProfitability,
    ProjectOpexSnapshot,
    NavigationGroup,
    NavigationLink,
    Role,
    Scenario,
    ScenarioCapexSnapshot,
    ScenarioProfitability,
    ScenarioOpexSnapshot,
    ScenarioStatus,
    SimulationParameter,
    User,
    UserRole,
)
from services.exceptions import EntityConflictError, EntityNotFoundError
from services.export_query import ProjectExportFilters, ScenarioExportFilters
from services.pricing import PricingMetadata


def _enum_value(e):
    """Return the underlying value for Enum members, otherwise return as-is."""
    return getattr(e, "value", e)


class NavigationRepository:
    """Persistence operations for navigation metadata."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def list_groups_with_links(
        self,
        *,
        include_disabled: bool = False,
    ) -> Sequence[NavigationGroup]:
        stmt = (
            select(NavigationGroup)
            .options(
                selectinload(NavigationGroup.links)
                .selectinload(NavigationLink.children)
            )
            .order_by(NavigationGroup.sort_order, NavigationGroup.id)
        )
        if not include_disabled:
            stmt = stmt.where(NavigationGroup.is_enabled.is_(True))
        return self.session.execute(stmt).scalars().all()

    def get_group_by_slug(self, slug: str) -> NavigationGroup | None:
        stmt = select(NavigationGroup).where(NavigationGroup.slug == slug)
        return self.session.execute(stmt).scalar_one_or_none()

    def get_link_by_slug(
        self,
        slug: str,
        *,
        group_id: int | None = None,
    ) -> NavigationLink | None:
        stmt = select(NavigationLink).where(NavigationLink.slug == slug)
        if group_id is not None:
            stmt = stmt.where(NavigationLink.group_id == group_id)
        return self.session.execute(stmt).scalar_one_or_none()

    def add_group(self, group: NavigationGroup) -> NavigationGroup:
        self.session.add(group)
        self.session.flush()
        return group

    def add_link(self, link: NavigationLink) -> NavigationLink:
        self.session.add(link)
        self.session.flush()
        return link


class ProjectRepository:
    """Persistence operations for Project entities."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def list(self, *, with_children: bool = False) -> Sequence[Project]:
    def list(
        self,
        *,
        with_children: bool = False,
        with_pricing: bool = False,
    ) -> Sequence[Project]:
        stmt = select(Project).order_by(Project.created_at)
        if with_children:
            stmt = stmt.options(selectinload(Project.scenarios))
        if with_pricing:
            stmt = stmt.options(selectinload(Project.pricing_settings))
        return self.session.execute(stmt).scalars().all()

    def count(self) -> int:
@@ -45,10 +119,18 @@ class ProjectRepository:
        )
        return self.session.execute(stmt).scalars().all()

    def get(self, project_id: int, *, with_children: bool = False) -> Project:
    def get(
        self,
        project_id: int,
        *,
        with_children: bool = False,
        with_pricing: bool = False,
    ) -> Project:
        stmt = select(Project).where(Project.id == project_id)
        if with_children:
            stmt = stmt.options(joinedload(Project.scenarios))
        if with_pricing:
            stmt = stmt.options(joinedload(Project.pricing_settings))
        result = self.session.execute(stmt)
        if with_children:
            result = result.unique()
@@ -66,8 +148,12 @@ class ProjectRepository:
        try:
            self.session.flush()
        except IntegrityError as exc:  # pragma: no cover - reliance on DB constraints
            from monitoring.metrics import observe_project_operation
            observe_project_operation("create", "error")
            raise EntityConflictError(
                "Project violates uniqueness constraints") from exc
        from monitoring.metrics import observe_project_operation
        observe_project_operation("create", "success")
        return project

    def find_by_names(self, names: Iterable[str]) -> Mapping[str, Project]:
@@ -79,10 +165,71 @@ class ProjectRepository:
        records = self.session.execute(stmt).scalars().all()
        return {project.name.lower(): project for project in records}

    def filtered_for_export(
        self,
        filters: ProjectExportFilters | None = None,
        *,
        include_scenarios: bool = False,
        include_pricing: bool = False,
    ) -> Sequence[Project]:
        stmt = select(Project)
        if include_scenarios:
            stmt = stmt.options(selectinload(Project.scenarios))
        if include_pricing:
            stmt = stmt.options(selectinload(Project.pricing_settings))

        if filters:
            ids = filters.normalised_ids()
            if ids:
                stmt = stmt.where(Project.id.in_(ids))

            name_matches = filters.normalised_names()
            if name_matches:
                stmt = stmt.where(func.lower(Project.name).in_(name_matches))

            name_pattern = filters.name_search_pattern()
            if name_pattern:
                stmt = stmt.where(Project.name.ilike(name_pattern))

            locations = filters.normalised_locations()
            if locations:
                stmt = stmt.where(func.lower(Project.location).in_(locations))

            if filters.operation_types:
                stmt = stmt.where(Project.operation_type.in_(filters.operation_types))

            if filters.created_from:
                stmt = stmt.where(Project.created_at >= filters.created_from)

            if filters.created_to:
                stmt = stmt.where(Project.created_at <= filters.created_to)

            if filters.updated_from:
                stmt = stmt.where(Project.updated_at >= filters.updated_from)

            if filters.updated_to:
                stmt = stmt.where(Project.updated_at <= filters.updated_to)

        stmt = stmt.order_by(Project.name, Project.id)
        return self.session.execute(stmt).scalars().all()

    def delete(self, project_id: int) -> None:
        project = self.get(project_id)
        self.session.delete(project)

    def set_pricing_settings(
        self,
        project: Project,
        pricing_settings: PricingSettings | None,
    ) -> Project:
        project.pricing_settings = pricing_settings
        project.pricing_settings_id = (
            pricing_settings.id if pricing_settings is not None else None
        )
        self.session.flush()
        return project


class ScenarioRepository:
    """Persistence operations for Scenario entities."""
@@ -90,20 +237,35 @@ class ScenarioRepository:
    def __init__(self, session: Session) -> None:
        self.session = session

    def list_for_project(self, project_id: int) -> Sequence[Scenario]:
    def list_for_project(
        self,
        project_id: int,
        *,
        with_children: bool = False,
    ) -> Sequence[Scenario]:
        stmt = (
            select(Scenario)
            .where(Scenario.project_id == project_id)
            .order_by(Scenario.created_at)
        )
        return self.session.execute(stmt).scalars().all()
        if with_children:
            stmt = stmt.options(
                selectinload(Scenario.financial_inputs),
                selectinload(Scenario.simulation_parameters),
            )
        result = self.session.execute(stmt)
        if with_children:
            result = result.unique()
        return result.scalars().all()

    def count(self) -> int:
        stmt = select(func.count(Scenario.id))
        return self.session.execute(stmt).scalar_one()

    def count_by_status(self, status: ScenarioStatus) -> int:
        stmt = select(func.count(Scenario.id)).where(Scenario.status == status)
        status_val = _enum_value(status)
        stmt = select(func.count(Scenario.id)).where(
            Scenario.status == status_val)
        return self.session.execute(stmt).scalar_one()

    def recent(self, limit: int = 5, *, with_project: bool = False) -> Sequence[Scenario]:
@@ -120,9 +282,10 @@ class ScenarioRepository:
        limit: int | None = None,
        with_project: bool = False,
    ) -> Sequence[Scenario]:
        status_val = _enum_value(status)
        stmt = (
            select(Scenario)
            .where(Scenario.status == status)
            .where(Scenario.status == status_val)
            .order_by(Scenario.updated_at.desc())
        )
        if with_project:
@@ -155,7 +318,11 @@ class ScenarioRepository:
        try:
            self.session.flush()
        except IntegrityError as exc:  # pragma: no cover
            from monitoring.metrics import observe_scenario_operation
            observe_scenario_operation("create", "error")
            raise EntityConflictError("Scenario violates constraints") from exc
        from monitoring.metrics import observe_scenario_operation
        observe_scenario_operation("create", "success")
        return scenario

    def find_by_project_and_names(
@@ -177,11 +344,389 @@ class ScenarioRepository:
        records = self.session.execute(stmt).scalars().all()
        return {scenario.name.lower(): scenario for scenario in records}

    def filtered_for_export(
        self,
        filters: ScenarioExportFilters | None = None,
        *,
        include_project: bool = True,
    ) -> Sequence[Scenario]:
        stmt = select(Scenario)
        if include_project:
            stmt = stmt.options(joinedload(Scenario.project))

        if filters:
            scenario_ids = filters.normalised_ids()
            if scenario_ids:
                stmt = stmt.where(Scenario.id.in_(scenario_ids))

            project_ids = filters.normalised_project_ids()
            if project_ids:
                stmt = stmt.where(Scenario.project_id.in_(project_ids))

            project_names = filters.normalised_project_names()
            if project_names:
                project_id_select = select(Project.id).where(
                    func.lower(Project.name).in_(project_names)
                )
                stmt = stmt.where(Scenario.project_id.in_(project_id_select))

            name_pattern = filters.name_search_pattern()
            if name_pattern:
                stmt = stmt.where(Scenario.name.ilike(name_pattern))

            if filters.statuses:
                # Accept Enum members or raw values in filters.statuses
                status_values = [
                    _enum_value(s) for s in (filters.statuses or [])
                ]
                stmt = stmt.where(Scenario.status.in_(status_values))

            if filters.start_date_from:
                stmt = stmt.where(Scenario.start_date >= filters.start_date_from)

            if filters.start_date_to:
                stmt = stmt.where(Scenario.start_date <= filters.start_date_to)

            if filters.end_date_from:
                stmt = stmt.where(Scenario.end_date >= filters.end_date_from)

            if filters.end_date_to:
                stmt = stmt.where(Scenario.end_date <= filters.end_date_to)

            if filters.created_from:
                stmt = stmt.where(Scenario.created_at >= filters.created_from)

            if filters.created_to:
                stmt = stmt.where(Scenario.created_at <= filters.created_to)

            if filters.updated_from:
                stmt = stmt.where(Scenario.updated_at >= filters.updated_from)

            if filters.updated_to:
                stmt = stmt.where(Scenario.updated_at <= filters.updated_to)

            currencies = filters.normalised_currencies()
            if currencies:
                stmt = stmt.where(func.upper(Scenario.currency).in_(currencies))

            if filters.primary_resources:
                stmt = stmt.where(Scenario.primary_resource.in_(filters.primary_resources))

        stmt = stmt.order_by(Scenario.name, Scenario.id)
        return self.session.execute(stmt).scalars().all()

    def delete(self, scenario_id: int) -> None:
        scenario = self.get(scenario_id)
        self.session.delete(scenario)


class ProjectProfitabilityRepository:
    """Persistence operations for project-level profitability snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(self, snapshot: ProjectProfitability) -> ProjectProfitability:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_project(
        self,
        project_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ProjectProfitability]:
        stmt = (
            select(ProjectProfitability)
            .where(ProjectProfitability.project_id == project_id)
            .order_by(ProjectProfitability.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_project(
        self,
        project_id: int,
    ) -> ProjectProfitability | None:
        stmt = (
            select(ProjectProfitability)
            .where(ProjectProfitability.project_id == project_id)
            .order_by(ProjectProfitability.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ProjectProfitability).where(
            ProjectProfitability.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Project profitability snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)


class ScenarioProfitabilityRepository:
    """Persistence operations for scenario-level profitability snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(self, snapshot: ScenarioProfitability) -> ScenarioProfitability:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_scenario(
        self,
        scenario_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ScenarioProfitability]:
        stmt = (
            select(ScenarioProfitability)
            .where(ScenarioProfitability.scenario_id == scenario_id)
            .order_by(ScenarioProfitability.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_scenario(
        self,
        scenario_id: int,
    ) -> ScenarioProfitability | None:
        stmt = (
            select(ScenarioProfitability)
            .where(ScenarioProfitability.scenario_id == scenario_id)
            .order_by(ScenarioProfitability.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ScenarioProfitability).where(
            ScenarioProfitability.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Scenario profitability snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)


class ProjectCapexRepository:
    """Persistence operations for project-level capex snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(self, snapshot: ProjectCapexSnapshot) -> ProjectCapexSnapshot:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_project(
        self,
        project_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ProjectCapexSnapshot]:
        stmt = (
            select(ProjectCapexSnapshot)
            .where(ProjectCapexSnapshot.project_id == project_id)
            .order_by(ProjectCapexSnapshot.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_project(
        self,
        project_id: int,
    ) -> ProjectCapexSnapshot | None:
        stmt = (
            select(ProjectCapexSnapshot)
            .where(ProjectCapexSnapshot.project_id == project_id)
            .order_by(ProjectCapexSnapshot.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ProjectCapexSnapshot).where(
            ProjectCapexSnapshot.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Project capex snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)


class ScenarioCapexRepository:
    """Persistence operations for scenario-level capex snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(self, snapshot: ScenarioCapexSnapshot) -> ScenarioCapexSnapshot:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_scenario(
        self,
        scenario_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ScenarioCapexSnapshot]:
        stmt = (
            select(ScenarioCapexSnapshot)
            .where(ScenarioCapexSnapshot.scenario_id == scenario_id)
            .order_by(ScenarioCapexSnapshot.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_scenario(
        self,
        scenario_id: int,
    ) -> ScenarioCapexSnapshot | None:
        stmt = (
            select(ScenarioCapexSnapshot)
            .where(ScenarioCapexSnapshot.scenario_id == scenario_id)
            .order_by(ScenarioCapexSnapshot.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ScenarioCapexSnapshot).where(
            ScenarioCapexSnapshot.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Scenario capex snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)


class ProjectOpexRepository:
    """Persistence operations for project-level opex snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(
        self, snapshot: ProjectOpexSnapshot
    ) -> ProjectOpexSnapshot:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_project(
        self,
        project_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ProjectOpexSnapshot]:
        stmt = (
            select(ProjectOpexSnapshot)
            .where(ProjectOpexSnapshot.project_id == project_id)
            .order_by(ProjectOpexSnapshot.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_project(
        self,
        project_id: int,
    ) -> ProjectOpexSnapshot | None:
        stmt = (
            select(ProjectOpexSnapshot)
            .where(ProjectOpexSnapshot.project_id == project_id)
            .order_by(ProjectOpexSnapshot.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ProjectOpexSnapshot).where(
            ProjectOpexSnapshot.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Project opex snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)


class ScenarioOpexRepository:
    """Persistence operations for scenario-level opex snapshots."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def create(
        self, snapshot: ScenarioOpexSnapshot
    ) -> ScenarioOpexSnapshot:
        self.session.add(snapshot)
        self.session.flush()
        return snapshot

    def list_for_scenario(
        self,
        scenario_id: int,
        *,
        limit: int | None = None,
    ) -> Sequence[ScenarioOpexSnapshot]:
        stmt = (
            select(ScenarioOpexSnapshot)
            .where(ScenarioOpexSnapshot.scenario_id == scenario_id)
            .order_by(ScenarioOpexSnapshot.calculated_at.desc())
        )
        if limit is not None:
            stmt = stmt.limit(limit)
        return self.session.execute(stmt).scalars().all()

    def latest_for_scenario(
        self,
        scenario_id: int,
    ) -> ScenarioOpexSnapshot | None:
        stmt = (
            select(ScenarioOpexSnapshot)
            .where(ScenarioOpexSnapshot.scenario_id == scenario_id)
            .order_by(ScenarioOpexSnapshot.calculated_at.desc())
            .limit(1)
        )
        return self.session.execute(stmt).scalar_one_or_none()

    def delete(self, snapshot_id: int) -> None:
        stmt = select(ScenarioOpexSnapshot).where(
            ScenarioOpexSnapshot.id == snapshot_id
        )
        entity = self.session.execute(stmt).scalar_one_or_none()
        if entity is None:
            raise EntityNotFoundError(
                f"Scenario opex snapshot {snapshot_id} not found"
            )
        self.session.delete(entity)
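
# Design note (commentary, not part of the diff): the six snapshot repositories
# above are structurally identical. A hypothetical generic base could remove
# the duplication; `SnapshotT` and `owner_column` are illustrative names only.
#
#     from typing import Any, Generic, TypeVar
#
#     SnapshotT = TypeVar("SnapshotT")
#
#     class _SnapshotRepository(Generic[SnapshotT]):
#         model: type[SnapshotT]
#         owner_column: Any  # e.g. ProjectCapexSnapshot.project_id
#
#         def latest_for_owner(self, owner_id: int) -> SnapshotT | None:
#             stmt = (
#                 select(self.model)
#                 .where(self.owner_column == owner_id)
#                 .order_by(self.model.calculated_at.desc())
#                 .limit(1)
#             )
#             return self.session.execute(stmt).scalar_one_or_none()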


class FinancialInputRepository:
    """Persistence operations for FinancialInput entities."""

@@ -258,6 +803,101 @@ class SimulationParameterRepository:
        self.session.delete(entity)


class PricingSettingsRepository:
    """Persistence operations for pricing configuration entities."""

    def __init__(self, session: Session) -> None:
        self.session = session

    def list(self, *, include_children: bool = False) -> Sequence[PricingSettings]:
        stmt = select(PricingSettings).order_by(PricingSettings.created_at)
        if include_children:
            stmt = stmt.options(
                selectinload(PricingSettings.metal_overrides),
                selectinload(PricingSettings.impurity_overrides),
            )
        result = self.session.execute(stmt)
        if include_children:
            result = result.unique()
        return result.scalars().all()

    def get(self, settings_id: int, *, include_children: bool = False) -> PricingSettings:
        stmt = select(PricingSettings).where(PricingSettings.id == settings_id)
        if include_children:
            stmt = stmt.options(
                selectinload(PricingSettings.metal_overrides),
                selectinload(PricingSettings.impurity_overrides),
            )
        result = self.session.execute(stmt)
        if include_children:
            result = result.unique()
        settings = result.scalar_one_or_none()
        if settings is None:
            raise EntityNotFoundError(
                f"Pricing settings {settings_id} not found")
        return settings

    def find_by_slug(
        self,
        slug: str,
        *,
        include_children: bool = False,
    ) -> PricingSettings | None:
        normalised = slug.strip().lower()
        stmt = select(PricingSettings).where(
            PricingSettings.slug == normalised)
        if include_children:
            stmt = stmt.options(
                selectinload(PricingSettings.metal_overrides),
                selectinload(PricingSettings.impurity_overrides),
            )
        result = self.session.execute(stmt)
        if include_children:
            result = result.unique()
        return result.scalar_one_or_none()

    def get_by_slug(self, slug: str, *, include_children: bool = False) -> PricingSettings:
        settings = self.find_by_slug(slug, include_children=include_children)
        if settings is None:
            raise EntityNotFoundError(
                f"Pricing settings slug '{slug}' not found"
            )
        return settings

    def create(self, settings: PricingSettings) -> PricingSettings:
        self.session.add(settings)
        try:
            self.session.flush()
        except IntegrityError as exc:  # pragma: no cover - relies on DB constraints
            raise EntityConflictError(
                "Pricing settings violates constraints") from exc
        return settings

    def delete(self, settings_id: int) -> None:
        settings = self.get(settings_id, include_children=True)
        self.session.delete(settings)

    def attach_metal_override(
        self,
        settings: PricingSettings,
        override: PricingMetalSettings,
    ) -> PricingMetalSettings:
        settings.metal_overrides.append(override)
        self.session.add(override)
        self.session.flush()
        return override

    def attach_impurity_override(
        self,
        settings: PricingSettings,
        override: PricingImpuritySettings,
    ) -> PricingImpuritySettings:
        settings.impurity_overrides.append(override)
        self.session.add(override)
        self.session.flush()
        return override


class RoleRepository:
    """Persistence operations for Role entities."""

@@ -389,6 +1029,159 @@ class UserRepository:
        self.session.flush()


DEFAULT_PRICING_SETTINGS_NAME = "Default Pricing Settings"
DEFAULT_PRICING_SETTINGS_DESCRIPTION = (
    "Default pricing configuration generated from environment metadata."
)


@dataclass(slots=True)
class PricingSettingsSeedResult:
    settings: PricingSettings
    created: bool
    updated_fields: int
    impurity_upserts: int


def ensure_default_pricing_settings(
    repo: PricingSettingsRepository,
    *,
    metadata: PricingMetadata,
    slug: str = "default",
    name: str | None = None,
    description: str | None = None,
) -> PricingSettingsSeedResult:
    """Ensure a baseline pricing settings record exists and matches metadata defaults."""

    normalised_slug = (slug or "default").strip().lower() or "default"
    target_name = name or DEFAULT_PRICING_SETTINGS_NAME
    target_description = description or DEFAULT_PRICING_SETTINGS_DESCRIPTION

    updated_fields = 0
    impurity_upserts = 0

    try:
        settings = repo.get_by_slug(normalised_slug, include_children=True)
        created = False
    except EntityNotFoundError:
        settings = PricingSettings(
            name=target_name,
            slug=normalised_slug,
            description=target_description,
            default_currency=metadata.default_currency,
            default_payable_pct=metadata.default_payable_pct,
            moisture_threshold_pct=metadata.moisture_threshold_pct,
            moisture_penalty_per_pct=metadata.moisture_penalty_per_pct,
        )
        settings.metadata_payload = None
        settings = repo.create(settings)
        created = True
    else:
        if settings.name != target_name:
            settings.name = target_name
            updated_fields += 1
        if target_description and settings.description != target_description:
            settings.description = target_description
            updated_fields += 1
        if settings.default_currency != metadata.default_currency:
            settings.default_currency = metadata.default_currency
            updated_fields += 1
        if float(settings.default_payable_pct) != float(metadata.default_payable_pct):
            settings.default_payable_pct = metadata.default_payable_pct
            updated_fields += 1
        if float(settings.moisture_threshold_pct) != float(metadata.moisture_threshold_pct):
            settings.moisture_threshold_pct = metadata.moisture_threshold_pct
            updated_fields += 1
        if float(settings.moisture_penalty_per_pct) != float(metadata.moisture_penalty_per_pct):
            settings.moisture_penalty_per_pct = metadata.moisture_penalty_per_pct
            updated_fields += 1

    impurity_thresholds = {
        code.strip().upper(): float(value)
        for code, value in (metadata.impurity_thresholds or {}).items()
        if code.strip()
    }
    impurity_penalties = {
        code.strip().upper(): float(value)
        for code, value in (metadata.impurity_penalty_per_ppm or {}).items()
        if code.strip()
    }

    if impurity_thresholds or impurity_penalties:
        existing_map = {
            override.impurity_code: override
            for override in settings.impurity_overrides
        }
        target_codes = set(impurity_thresholds) | set(impurity_penalties)
        for code in sorted(target_codes):
            threshold_value = impurity_thresholds.get(code, 0.0)
            penalty_value = impurity_penalties.get(code, 0.0)
            existing = existing_map.get(code)
            if existing is None:
                repo.attach_impurity_override(
                    settings,
                    PricingImpuritySettings(
                        impurity_code=code,
                        threshold_ppm=threshold_value,
                        penalty_per_ppm=penalty_value,
                    ),
                )
                impurity_upserts += 1
                continue
            changed = False
            if float(existing.threshold_ppm) != float(threshold_value):
                existing.threshold_ppm = threshold_value
                changed = True
            if float(existing.penalty_per_ppm) != float(penalty_value):
                existing.penalty_per_ppm = penalty_value
                changed = True
            if changed:
                updated_fields += 1

    if updated_fields > 0 or impurity_upserts > 0:
        repo.session.flush()

    return PricingSettingsSeedResult(
        settings=settings,
        created=created,
        updated_fields=updated_fields,
        impurity_upserts=impurity_upserts,
    )
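
# Usage sketch (illustrative): seeding at application startup. `repo`,
# `pricing_metadata`, and `log` are assumed to be provided by the caller.
#
#     result = ensure_default_pricing_settings(repo, metadata=pricing_metadata)
#     if result.created:
#         log.info("Seeded default pricing settings '%s'", result.settings.slug)
#     else:
#         log.info("Updated %d pricing field(s)", result.updated_fields)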


def pricing_settings_to_metadata(settings: PricingSettings) -> PricingMetadata:
    """Convert a persisted pricing settings record into metadata defaults."""

    payload = settings.metadata_payload or {}
    payload_thresholds = payload.get("impurity_thresholds") or {}
    payload_penalties = payload.get("impurity_penalty_per_ppm") or {}

    thresholds: dict[str, float] = {
        code.strip().upper(): float(value)
        for code, value in payload_thresholds.items()
        if isinstance(code, str) and code.strip()
    }
    penalties: dict[str, float] = {
        code.strip().upper(): float(value)
        for code, value in payload_penalties.items()
        if isinstance(code, str) and code.strip()
    }

    for override in settings.impurity_overrides:
        code = override.impurity_code.strip().upper()
        thresholds[code] = float(override.threshold_ppm)
        penalties[code] = float(override.penalty_per_ppm)

    return PricingMetadata(
        default_payable_pct=float(settings.default_payable_pct),
        default_currency=settings.default_currency,
        moisture_threshold_pct=float(settings.moisture_threshold_pct),
        moisture_penalty_per_pct=float(settings.moisture_penalty_per_pct),
        impurity_thresholds=thresholds,
        impurity_penalty_per_ppm=penalties,
    )


DEFAULT_ROLE_DEFINITIONS: tuple[dict[str, str], ...] = (
    {
        "name": "admin",

54
services/scenario_evaluation.py
Normal file
@@ -0,0 +1,54 @@
"""Scenario evaluation services including pricing integration."""

from __future__ import annotations

from dataclasses import dataclass
from typing import Iterable

from models.scenario import Scenario
from services.pricing import (
    PricingInput,
    PricingMetadata,
    PricingResult,
    calculate_pricing,
)


@dataclass(slots=True)
class ScenarioPricingConfig:
    """Configuration for pricing evaluation within a scenario."""

    metadata: PricingMetadata | None = None


@dataclass(slots=True)
class ScenarioPricingSnapshot:
    """Captured pricing results for a scenario."""

    scenario_id: int
    results: list[PricingResult]


class ScenarioPricingEvaluator:
    """Evaluate scenario profitability inputs using pricing services."""

    def __init__(self, config: ScenarioPricingConfig | None = None) -> None:
        self._config = config or ScenarioPricingConfig()

    def evaluate(
        self,
        scenario: Scenario,
        *,
        inputs: Iterable[PricingInput],
        metadata_override: PricingMetadata | None = None,
    ) -> ScenarioPricingSnapshot:
        metadata = metadata_override or self._config.metadata
        results: list[PricingResult] = []
        for pricing_input in inputs:
            result = calculate_pricing(
                pricing_input,
                metadata=metadata,
                currency=scenario.currency,
            )
            results.append(result)
        return ScenarioPricingSnapshot(scenario_id=scenario.id, results=results)
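
# Usage sketch (illustrative): evaluating a scenario against prepared inputs.
# `scenario`, `pricing_inputs`, and `metadata` are assumed to be constructed by
# the caller.
#
#     evaluator = ScenarioPricingEvaluator(ScenarioPricingConfig(metadata=metadata))
#     snapshot = evaluator.evaluate(scenario, inputs=pricing_inputs)
#     for result in snapshot.results:
#         ...  # persist or aggregate each PricingResult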
@@ -2,6 +2,7 @@ from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from hmac import compare_digest
from typing import Any, Dict, Iterable, Literal, Type

from jose import ExpiredSignatureError, JWTError, jwt
@@ -176,6 +177,14 @@ def _decode_token(
    except JWTError as exc:  # pragma: no cover - jose error bubble
        raise TokenDecodeError("Unable to decode token") from exc

    expected_token = jwt.encode(
        decoded,
        settings.secret_key,
        algorithm=settings.algorithm,
    )
    if not compare_digest(token, expected_token):
        raise TokenDecodeError("Token contents have been altered.")

    try:
        payload = _model_validate(TokenPayload, decoded)
    except ValidationError as exc:

@@ -1,7 +1,7 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Literal, Optional, TYPE_CHECKING
from typing import Iterable, Literal, Optional, TYPE_CHECKING

from fastapi import Request, Response

@@ -67,6 +67,7 @@ class AuthSession:
    tokens: SessionTokens
    user: Optional["User"] = None
    scopes: tuple[str, ...] = ()
    role_slugs: tuple[str, ...] = ()
    issued_access_token: Optional[str] = None
    issued_refresh_token: Optional[str] = None
    clear_cookies: bool = False
@@ -77,7 +78,10 @@ class AuthSession:

    @classmethod
    def anonymous(cls) -> "AuthSession":
        return cls(tokens=SessionTokens(access_token=None, refresh_token=None))
        return cls(
            tokens=SessionTokens(access_token=None, refresh_token=None),
            role_slugs=(),
        )

    def issue_tokens(
        self,
@@ -100,6 +104,10 @@ class AuthSession:
        self.tokens = SessionTokens(access_token=None, refresh_token=None)
        self.user = None
        self.scopes = ()
        self.role_slugs = ()

    def set_role_slugs(self, roles: Iterable[str]) -> None:
        self.role_slugs = tuple(
            dict.fromkeys(role.strip().lower() for role in roles if role)
        )
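
# Behaviour sketch (illustrative): dict.fromkeys deduplicates while keeping
# first-seen order, so mixed-case duplicates collapse predictably.
#
#     session.set_role_slugs(["Admin", "viewer", "admin "])
#     session.role_slugs  # -> ("admin", "viewer")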


def extract_session_tokens(request: Request, strategy: SessionStrategy) -> SessionTokens:

373
services/simulation.py
Normal file
@@ -0,0 +1,373 @@
from __future__ import annotations

from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Mapping, Sequence
import time

import numpy as np
from numpy.random import Generator, default_rng

from .financial import (
    CashFlow,
    ConvergenceError,
    PaybackNotReachedError,
    internal_rate_of_return,
    net_present_value,
    payback_period,
)
from monitoring.metrics import observe_simulation


class DistributionConfigError(ValueError):
    """Raised when a distribution specification is invalid."""


class SimulationMetric(Enum):
    """Supported Monte Carlo summary metrics."""

    NPV = "npv"
    IRR = "irr"
    PAYBACK = "payback"


class DistributionType(Enum):
    """Supported probability distribution families."""

    NORMAL = "normal"
    LOGNORMAL = "lognormal"
    TRIANGULAR = "triangular"
    DISCRETE = "discrete"


class DistributionSource(Enum):
    """Origins for parameter values when sourcing dynamically."""

    STATIC = "static"
    SCENARIO_FIELD = "scenario_field"
    METADATA_KEY = "metadata_key"


@dataclass(frozen=True, slots=True)
class DistributionSpec:
    """Defines the stochastic behaviour for a single cash flow."""

    type: DistributionType
    parameters: Mapping[str, Any]
    source: DistributionSource = DistributionSource.STATIC
    source_key: str | None = None


@dataclass(frozen=True, slots=True)
class CashFlowSpec:
    """Pairs a baseline cash flow with an optional distribution."""

    cash_flow: CashFlow
    distribution: DistributionSpec | None = None


@dataclass(frozen=True, slots=True)
class SimulationConfig:
    """Controls Monte Carlo simulation behaviour."""

    iterations: int
    discount_rate: float
    seed: int | None = None
    metrics: Sequence[SimulationMetric] = (
        SimulationMetric.NPV,
        SimulationMetric.IRR,
        SimulationMetric.PAYBACK,
    )
    percentiles: Sequence[float] = (5.0, 50.0, 95.0)
    compounds_per_year: int = 1
    return_samples: bool = False
    residual_value: float | None = None
    residual_periods: float | None = None


@dataclass(frozen=True, slots=True)
class MetricSummary:
    """Aggregated statistics for a simulated metric."""

    mean: float
    std_dev: float
    minimum: float
    maximum: float
    percentiles: Mapping[float, float]
    sample_size: int
    failed_runs: int


@dataclass(frozen=True, slots=True)
class SimulationResult:
    """Monte Carlo output including per-metric summaries."""

    iterations: int
    summaries: Mapping[SimulationMetric, MetricSummary]
    samples: Mapping[SimulationMetric, np.ndarray] | None = None
|
||||
|
||||
|
||||
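To illustrate how these dataclasses compose, here is a hypothetical sketch. It assumes the module is importable as `services.simulation`, that `CashFlow` accepts `amount` and `period_index` as used in `_realise_cash_flow` below, and that its `date` field is optional; the amounts and parameters are invented for illustration.

```python
# Hypothetical wiring of the spec dataclasses; amounts and distribution
# parameters are illustrative, not taken from the repository.
specs = [
    CashFlowSpec(cash_flow=CashFlow(amount=-1_000_000.0, period_index=0)),
    CashFlowSpec(
        cash_flow=CashFlow(amount=250_000.0, period_index=1),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"std_dev": 40_000.0},  # "mean" defaults to the base amount
        ),
    ),
]
config = SimulationConfig(iterations=10_000, discount_rate=0.08, seed=42)
```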
def run_monte_carlo(
    cash_flows: Sequence[CashFlowSpec],
    config: SimulationConfig,
    *,
    scenario_context: Mapping[str, Any] | None = None,
    metadata: Mapping[str, Any] | None = None,
    rng: Generator | None = None,
) -> SimulationResult:
    """Execute Monte Carlo simulation for the provided cash flows."""

    if config.iterations <= 0:
        raise ValueError("iterations must be greater than zero")
    if config.compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be greater than zero")
    for pct in config.percentiles:
        if pct < 0.0 or pct > 100.0:
            raise ValueError("percentiles must be within [0, 100]")

    start_time = time.time()
    try:
        generator = rng or default_rng(config.seed)

        metric_arrays: Dict[SimulationMetric, np.ndarray] = {
            metric: np.empty(config.iterations, dtype=float)
            for metric in config.metrics
        }

        for idx in range(config.iterations):
            iteration_flows = [
                _realise_cash_flow(
                    spec,
                    generator,
                    scenario_context=scenario_context,
                    metadata=metadata,
                )
                for spec in cash_flows
            ]

            if SimulationMetric.NPV in metric_arrays:
                metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
                    config.discount_rate,
                    iteration_flows,
                    residual_value=config.residual_value,
                    residual_periods=config.residual_periods,
                    compounds_per_year=config.compounds_per_year,
                )
            if SimulationMetric.IRR in metric_arrays:
                try:
                    metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
                        iteration_flows,
                        compounds_per_year=config.compounds_per_year,
                    )
                except (ValueError, ConvergenceError):
                    metric_arrays[SimulationMetric.IRR][idx] = np.nan
            if SimulationMetric.PAYBACK in metric_arrays:
                try:
                    metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
                        iteration_flows,
                        compounds_per_year=config.compounds_per_year,
                    )
                except (ValueError, PaybackNotReachedError):
                    metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan

        summaries = {
            metric: _summarise(metric_arrays[metric], config.percentiles)
            for metric in metric_arrays
        }

        samples = metric_arrays if config.return_samples else None
        result = SimulationResult(
            iterations=config.iterations,
            summaries=summaries,
            samples=samples,
        )

        # Record successful simulation
        duration = time.time() - start_time
        observe_simulation(
            status="success",
            duration_seconds=duration,
        )
        return result

    except Exception:
        # Record failed simulation
        duration = time.time() - start_time
        observe_simulation(
            status="error",
            duration_seconds=duration,
        )
        raise
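A hedged usage sketch, reusing the hypothetical `specs` and `config` from the previous example, that reads the NPV summary and the IRR failure count out of the result:

```python
# Illustrative call; assumes the hypothetical `specs` and `config` above.
result = run_monte_carlo(specs, config)
npv = result.summaries[SimulationMetric.NPV]
print(f"mean NPV: {npv.mean:,.0f}")
print(f"P5/P50/P95: {npv.percentiles[5.0]:,.0f} / "
      f"{npv.percentiles[50.0]:,.0f} / {npv.percentiles[95.0]:,.0f}")
print(f"IRR failures (NaN runs): {result.summaries[SimulationMetric.IRR].failed_runs}")
```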
def _realise_cash_flow(
    spec: CashFlowSpec,
    generator: Generator,
    *,
    scenario_context: Mapping[str, Any] | None,
    metadata: Mapping[str, Any] | None,
) -> CashFlow:
    if spec.distribution is None:
        return spec.cash_flow

    distribution = spec.distribution
    base_amount = spec.cash_flow.amount
    params = _resolve_parameters(
        distribution,
        base_amount,
        scenario_context=scenario_context,
        metadata=metadata,
    )
    sample = _sample_distribution(
        distribution.type,
        params,
        generator,
    )
    return CashFlow(
        amount=float(sample),
        period_index=spec.cash_flow.period_index,
        date=spec.cash_flow.date,
    )


def _resolve_parameters(
    distribution: DistributionSpec,
    base_amount: float,
    *,
    scenario_context: Mapping[str, Any] | None,
    metadata: Mapping[str, Any] | None,
) -> Dict[str, Any]:
    params = dict(distribution.parameters)

    if distribution.source == DistributionSource.SCENARIO_FIELD:
        if distribution.source_key is None:
            raise DistributionConfigError(
                "source_key is required for scenario_field sourcing")
        if not scenario_context or distribution.source_key not in scenario_context:
            raise DistributionConfigError(
                f"scenario field '{distribution.source_key}' not found for distribution"
            )
        params.setdefault("mean", float(
            scenario_context[distribution.source_key]))
    elif distribution.source == DistributionSource.METADATA_KEY:
        if distribution.source_key is None:
            raise DistributionConfigError(
                "source_key is required for metadata_key sourcing")
        if not metadata or distribution.source_key not in metadata:
            raise DistributionConfigError(
                f"metadata key '{distribution.source_key}' not found for distribution"
            )
        params.setdefault("mean", float(metadata[distribution.source_key]))
    else:
        params.setdefault("mean", float(base_amount))

    return params
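Note that `params.setdefault("mean", ...)` never overwrites an explicit `mean` supplied in `parameters`; the dynamic sources only fill the gap. A small illustration against the private helper (values are hypothetical):

```python
# Hypothetical: pull the mean from a scenario field named "annual_revenue".
spec = DistributionSpec(
    type=DistributionType.NORMAL,
    parameters={"std_dev": 10_000.0},  # no "mean" -> sourced dynamically
    source=DistributionSource.SCENARIO_FIELD,
    source_key="annual_revenue",
)
params = _resolve_parameters(
    spec, base_amount=0.0,
    scenario_context={"annual_revenue": 120_000.0}, metadata=None,
)
assert params["mean"] == 120_000.0
```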
def _sample_distribution(
    distribution_type: DistributionType,
    params: Mapping[str, Any],
    generator: Generator,
) -> float:
    if distribution_type is DistributionType.NORMAL:
        return _sample_normal(params, generator)
    if distribution_type is DistributionType.LOGNORMAL:
        return _sample_lognormal(params, generator)
    if distribution_type is DistributionType.TRIANGULAR:
        return _sample_triangular(params, generator)
    if distribution_type is DistributionType.DISCRETE:
        return _sample_discrete(params, generator)
    raise DistributionConfigError(
        f"Unsupported distribution type: {distribution_type}")


def _sample_normal(params: Mapping[str, Any], generator: Generator) -> float:
    if "std_dev" not in params:
        raise DistributionConfigError("normal distribution requires 'std_dev'")
    std_dev = float(params["std_dev"])
    if std_dev < 0:
        raise DistributionConfigError("std_dev must be non-negative")
    mean = float(params.get("mean", 0.0))
    if std_dev == 0:
        return mean
    return float(generator.normal(loc=mean, scale=std_dev))


def _sample_lognormal(params: Mapping[str, Any], generator: Generator) -> float:
    if "sigma" not in params:
        raise DistributionConfigError(
            "lognormal distribution requires 'sigma'")
    sigma = float(params["sigma"])
    if sigma < 0:
        raise DistributionConfigError("sigma must be non-negative")
    if "mean" not in params:
        raise DistributionConfigError(
            "lognormal distribution requires 'mean' (mu in log space)")
    mean = float(params["mean"])
    return float(generator.lognormal(mean=mean, sigma=sigma))


def _sample_triangular(params: Mapping[str, Any], generator: Generator) -> float:
    required = {"min", "mode", "max"}
    if not required.issubset(params):
        missing = ", ".join(sorted(required - params.keys()))
        raise DistributionConfigError(
            f"triangular distribution missing parameters: {missing}")
    left = float(params["min"])
    mode = float(params["mode"])
    right = float(params["max"])
    if not (left <= mode <= right):
        raise DistributionConfigError(
            "triangular distribution requires min <= mode <= max")
    if left == right:
        return mode
    return float(generator.triangular(left=left, mode=mode, right=right))


def _sample_discrete(params: Mapping[str, Any], generator: Generator) -> float:
    values = params.get("values")
    probabilities = params.get("probabilities")
    if not isinstance(values, Sequence) or not isinstance(probabilities, Sequence):
        raise DistributionConfigError(
            "discrete distribution requires 'values' and 'probabilities' sequences")
    if len(values) != len(probabilities) or not values:
        raise DistributionConfigError(
            "values and probabilities must be non-empty and of equal length")
    probs = np.array(probabilities, dtype=float)
    if np.any(probs < 0):
        raise DistributionConfigError("probabilities must be non-negative")
    total = probs.sum()
    if not np.isclose(total, 1.0):
        raise DistributionConfigError("probabilities must sum to 1.0")
    probs = probs / total
    choices = np.array(values, dtype=float)
    return float(generator.choice(choices, p=probs))
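Each family validates its own parameter set: normal needs `std_dev`, lognormal needs `sigma` and a log-space `mean`, triangular needs `min <= mode <= max`, and discrete needs matched `values` and `probabilities` summing to 1.0. Two hypothetical specs that would pass validation:

```python
# Hypothetical parameter sets that satisfy the validators above.
tri = DistributionSpec(
    type=DistributionType.TRIANGULAR,
    parameters={"min": 80.0, "mode": 100.0, "max": 130.0},
)
disc = DistributionSpec(
    type=DistributionType.DISCRETE,
    parameters={"values": [90.0, 100.0, 120.0],
                "probabilities": [0.25, 0.50, 0.25]},
)
rng = default_rng(0)
sample = _sample_distribution(tri.type, tri.parameters, rng)
```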
def _summarise(values: np.ndarray, percentiles: Sequence[float]) -> MetricSummary:
    clean = values[~np.isnan(values)]
    sample_size = clean.size
    failed_runs = values.size - sample_size

    if sample_size == 0:
        percentile_map: Dict[float, float] = {
            pct: float("nan") for pct in percentiles}
        return MetricSummary(
            mean=float("nan"),
            std_dev=float("nan"),
            minimum=float("nan"),
            maximum=float("nan"),
            percentiles=percentile_map,
            sample_size=0,
            failed_runs=failed_runs,
        )

    percentile_map = {
        pct: float(np.percentile(clean, pct)) for pct in percentiles
    }
    return MetricSummary(
        mean=float(np.mean(clean)),
        std_dev=float(np.std(clean, ddof=1)) if sample_size > 1 else 0.0,
        minimum=float(np.min(clean)),
        maximum=float(np.max(clean)),
        percentiles=percentile_map,
        sample_size=sample_size,
        failed_runs=failed_runs,
    )
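NaN entries, produced by failed IRR or payback iterations, are excluded from the statistics but still surface through `failed_runs`. A quick check of that behaviour:

```python
# NaNs are dropped from the stats but counted as failed runs.
values = np.array([1.0, 2.0, np.nan, 4.0])
summary = _summarise(values, percentiles=(50.0,))
assert summary.sample_size == 3
assert summary.failed_runs == 1
assert summary.percentiles[50.0] == 2.0
```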
@@ -6,16 +6,28 @@ from typing import Callable, Sequence
 from sqlalchemy.orm import Session

 from config.database import SessionLocal
-from models import Role, Scenario
+from models import PricingSettings, Project, Role, Scenario
+from services.pricing import PricingMetadata
 from services.repositories import (
     FinancialInputRepository,
+    PricingSettingsRepository,
+    PricingSettingsSeedResult,
     ProjectRepository,
+    ProjectProfitabilityRepository,
+    ProjectOpexRepository,
+    ProjectCapexRepository,
     RoleRepository,
     ScenarioRepository,
+    ScenarioProfitabilityRepository,
+    ScenarioOpexRepository,
+    ScenarioCapexRepository,
     SimulationParameterRepository,
     UserRepository,
     ensure_admin_user as ensure_admin_user_record,
+    ensure_default_pricing_settings,
     ensure_default_roles,
+    pricing_settings_to_metadata,
+    NavigationRepository,
 )
 from services.scenario_validation import ScenarioComparisonValidator
@@ -31,8 +43,16 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
         self.scenarios: ScenarioRepository | None = None
         self.financial_inputs: FinancialInputRepository | None = None
         self.simulation_parameters: SimulationParameterRepository | None = None
+        self.project_profitability: ProjectProfitabilityRepository | None = None
+        self.project_capex: ProjectCapexRepository | None = None
+        self.project_opex: ProjectOpexRepository | None = None
+        self.scenario_profitability: ScenarioProfitabilityRepository | None = None
+        self.scenario_capex: ScenarioCapexRepository | None = None
+        self.scenario_opex: ScenarioOpexRepository | None = None
         self.users: UserRepository | None = None
         self.roles: RoleRepository | None = None
+        self.pricing_settings: PricingSettingsRepository | None = None
+        self.navigation: NavigationRepository | None = None

     def __enter__(self) -> "UnitOfWork":
         self.session = self._session_factory()
@@ -41,8 +61,21 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
         self.financial_inputs = FinancialInputRepository(self.session)
         self.simulation_parameters = SimulationParameterRepository(
            self.session)
+        self.project_profitability = ProjectProfitabilityRepository(
+            self.session)
+        self.project_capex = ProjectCapexRepository(self.session)
+        self.project_opex = ProjectOpexRepository(
+            self.session)
+        self.scenario_profitability = ScenarioProfitabilityRepository(
+            self.session
+        )
+        self.scenario_capex = ScenarioCapexRepository(self.session)
+        self.scenario_opex = ScenarioOpexRepository(
+            self.session)
         self.users = UserRepository(self.session)
         self.roles = RoleRepository(self.session)
+        self.pricing_settings = PricingSettingsRepository(self.session)
+        self.navigation = NavigationRepository(self.session)
         self._scenario_validator = ScenarioComparisonValidator()
         return self

@@ -58,8 +91,16 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
         self.scenarios = None
         self.financial_inputs = None
         self.simulation_parameters = None
+        self.project_profitability = None
+        self.project_capex = None
+        self.project_opex = None
+        self.scenario_profitability = None
+        self.scenario_capex = None
+        self.scenario_opex = None
         self.users = None
         self.roles = None
+        self.pricing_settings = None
+        self.navigation = None

     def flush(self) -> None:
         if not self.session:
@@ -116,3 +157,45 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
             username=username,
             password=password,
         )
+
+    def ensure_default_pricing_settings(
+        self,
+        *,
+        metadata: PricingMetadata,
+        slug: str = "default",
+        name: str | None = None,
+        description: str | None = None,
+    ) -> PricingSettingsSeedResult:
+        if not self.pricing_settings:
+            raise RuntimeError("UnitOfWork session is not initialised")
+        return ensure_default_pricing_settings(
+            self.pricing_settings,
+            metadata=metadata,
+            slug=slug,
+            name=name,
+            description=description,
+        )
+
+    def get_pricing_metadata(
+        self,
+        *,
+        slug: str = "default",
+    ) -> PricingMetadata | None:
+        if not self.pricing_settings:
+            raise RuntimeError("UnitOfWork session is not initialised")
+        settings = self.pricing_settings.find_by_slug(
+            slug,
+            include_children=True,
+        )
+        if settings is None:
+            return None
+        return pricing_settings_to_metadata(settings)
+
+    def set_project_pricing_settings(
+        self,
+        project: Project,
+        pricing_settings: PricingSettings | None,
+    ) -> Project:
+        if not self.projects:
+            raise RuntimeError("UnitOfWork session is not initialised")
+        return self.projects.set_pricing_settings(project, pricing_settings)
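A hedged usage sketch of the new pricing helpers. It assumes `UnitOfWork()` can be constructed with its default session factory and that `PricingMetadata()` has usable defaults; neither is shown in this diff.

```python
# Hedged sketch: UnitOfWork's constructor defaults and PricingMetadata's
# fields are assumptions, not taken from this diff.
with UnitOfWork() as uow:
    metadata = uow.get_pricing_metadata(slug="default")
    if metadata is None:
        result = uow.ensure_default_pricing_settings(
            metadata=PricingMetadata(),  # hypothetical default construction
        )
```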
Some files were not shown because too many files have changed in this diff.