Compare commits
78 Commits
feat/app-s
...
c6a0eb2588
| Author | SHA1 | Date | |
|---|---|---|---|
| c6a0eb2588 | |||
| d807a50f77 | |||
| 22ddfb671d | |||
| 971b4a19ea | |||
| 5b1278cbea | |||
| b6511e5273 | |||
| bcb15bd0e4 | |||
| 42f8714d71 | |||
| 1881ebe24f | |||
| d90aae3d0a | |||
| 9934d1483d | |||
| df1c971354 | |||
| 3a8aef04b0 | |||
| 45d746d80a | |||
| f1bc7f06b9 | |||
| 82e98efb1b | |||
| f91349dedd | |||
| efee50fdc7 | |||
| e254d50c0c | |||
| 6eef8424b7 | |||
| c1f4902cf4 | |||
| 52450bc487 | |||
| c3449f1986 | |||
| f863808940 | |||
| 37646b571a | |||
| 22f43bed56 | |||
| 72cf06a31d | |||
| b796a053d6 | |||
| 04d7f202b6 | |||
| 1f58de448c | |||
| 807204869f | |||
| ddb23b1da0 | |||
| 26e231d63f | |||
| d98d6ebe83 | |||
| e881be52b5 | |||
| cc8efa3eab | |||
| 29a17595da | |||
| a0431cb630 | |||
| f1afcaa78b | |||
| 36da0609ed | |||
| 26843104ee | |||
| eb509e3dd2 | |||
| 51aa2fa71d | |||
| e1689c3a31 | |||
| 99d9ea7770 | |||
| 2136dbdd44 | |||
| 3da8a50ac4 | |||
| a772960390 | |||
| 89a4f663b5 | |||
| 50446c4248 | |||
| c5a9a7c96f | |||
| 723f6a62b8 | |||
| dcb08ab1b8 | |||
| a6a5f630cc | |||
| b56045ca6a | |||
| 2f07e6fb75 | |||
| 1f8a595243 | |||
| 54137b88d7 | |||
| 7385bdad3e | |||
| 7d0c8bfc53 | |||
| a861efeabf | |||
| 2f5306b793 | |||
| 573e255769 | |||
| 8bb5456864 | |||
| b1d50a56e0 | |||
| e37488bcf6 | |||
| ee0a7a5bf5 | |||
| ef4fb7dcf0 | |||
| 7f4cd33b65 | |||
| 41156a87d1 | |||
| 3fc6a2a9d3 | |||
| f3da80885f | |||
| 97b1c0360b | |||
| e8a86b15e4 | |||
| 300ecebe23 | |||
| 70db34d088 | |||
| 0550928a2f | |||
| ec56099e2a |
@@ -10,6 +10,8 @@ venv/
|
||||
.vscode
|
||||
.git
|
||||
.gitignore
|
||||
.gitea
|
||||
.github
|
||||
.DS_Store
|
||||
dist
|
||||
build
|
||||
@@ -17,5 +19,9 @@ build
|
||||
*.sqlite3
|
||||
.env
|
||||
.env.*
|
||||
.Dockerfile
|
||||
.dockerignore
|
||||
coverage/
|
||||
logs/
|
||||
backups/
|
||||
tests/e2e/artifacts/
|
||||
scripts/__pycache__/
|
||||
reports/
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
name: Build and Push Docker Image
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DEFAULT_BRANCH: main
|
||||
REGISTRY_ORG: allucanget
|
||||
REGISTRY_IMAGE_NAME: calminer
|
||||
REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
|
||||
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
|
||||
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Collect workflow metadata
|
||||
id: meta
|
||||
shell: bash
|
||||
run: |
|
||||
ref_name="${GITHUB_REF_NAME:-${GITHUB_REF##*/}}"
|
||||
event_name="${GITHUB_EVENT_NAME:-}"
|
||||
sha="${GITHUB_SHA:-}"
|
||||
|
||||
if [ "$ref_name" = "${DEFAULT_BRANCH:-main}" ]; then
|
||||
echo "on_default=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "on_default=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
echo "ref_name=$ref_name" >> "$GITHUB_OUTPUT"
|
||||
echo "event_name=$event_name" >> "$GITHUB_OUTPUT"
|
||||
echo "sha=$sha" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Set up QEMU and Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Gitea registry
|
||||
if: ${{ steps.meta.outputs.on_default == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry: ${{ env.REGISTRY_URL }}
|
||||
username: ${{ env.REGISTRY_USERNAME }}
|
||||
password: ${{ env.REGISTRY_PASSWORD }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && (env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '') }}
|
||||
tags: |
|
||||
${{ env.REGISTRY_URL }}/${{ env.REGISTRY_ORG }}/${{ env.REGISTRY_IMAGE_NAME }}:latest
|
||||
${{ env.REGISTRY_URL }}/${{ env.REGISTRY_ORG }}/${{ env.REGISTRY_IMAGE_NAME }}:${{ steps.meta.outputs.sha }}
|
||||
141
.gitea/workflows/cicache.yml
Normal file
141
.gitea/workflows/cicache.yml
Normal file
@@ -0,0 +1,141 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, develop]
|
||||
pull_request:
|
||||
branches: [main, develop]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
env:
|
||||
APT_CACHER_NG: http://192.168.88.14:3142
|
||||
DB_DRIVER: postgresql+psycopg2
|
||||
DB_HOST: 192.168.88.35
|
||||
DB_NAME: calminer_test
|
||||
DB_USER: calminer
|
||||
DB_PASSWORD: calminer_password
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:17
|
||||
env:
|
||||
POSTGRES_USER: ${{ env.DB_USER }}
|
||||
POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
|
||||
POSTGRES_DB: ${{ env.DB_NAME }}
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Get pip cache dir
|
||||
id: pip-cache
|
||||
run: |
|
||||
echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
|
||||
echo "Pip cache dir: $(pip cache dir)"
|
||||
|
||||
- name: Cache pip dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.path }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pip-
|
||||
|
||||
- name: Update apt-cacher-ng config
|
||||
run: |-
|
||||
echo 'Acquire::http::Proxy "{{ env.APT_CACHER_NG }}";' | tee /etc/apt/apt.conf.d/01apt-cacher-ng
|
||||
apt-get update
|
||||
|
||||
- name: Update system packages
|
||||
run: apt-get upgrade -y
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
pip install -r requirements-test.txt
|
||||
|
||||
- name: Install Playwright system dependencies
|
||||
run: playwright install-deps
|
||||
|
||||
- name: Install Playwright browsers
|
||||
run: playwright install
|
||||
|
||||
- name: Run tests
|
||||
env:
|
||||
DATABASE_DRIVER: ${{ env.DB_DRIVER }}
|
||||
DATABASE_HOST: postgres
|
||||
DATABASE_PORT: 5432
|
||||
DATABASE_USER: ${{ env.DB_USER }}
|
||||
DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
|
||||
DATABASE_NAME: ${{ env.DB_NAME }}
|
||||
run: |
|
||||
pytest tests/ --cov=.
|
||||
|
||||
- name: Build Docker image
|
||||
run: |
|
||||
docker build -t calminer .
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: test
|
||||
env:
|
||||
DEFAULT_BRANCH: main
|
||||
REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
|
||||
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
|
||||
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
REGISTRY_CONTAINER_NAME: calminer
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Collect workflow metadata
|
||||
id: meta
|
||||
shell: bash
|
||||
run: |
|
||||
ref_name="${GITHUB_REF_NAME:-${GITHUB_REF##*/}}"
|
||||
event_name="${GITHUB_EVENT_NAME:-}"
|
||||
sha="${GITHUB_SHA:-}"
|
||||
|
||||
if [ "$ref_name" = "${DEFAULT_BRANCH:-main}" ]; then
|
||||
echo "on_default=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "on_default=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
echo "ref_name=$ref_name" >> "$GITHUB_OUTPUT"
|
||||
echo "event_name=$event_name" >> "$GITHUB_OUTPUT"
|
||||
echo "sha=$sha" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Set up QEMU and Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to gitea registry
|
||||
if: ${{ steps.meta.outputs.on_default == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
continue-on-error: true
|
||||
with:
|
||||
registry: ${{ env.REGISTRY_URL }}
|
||||
username: ${{ env.REGISTRY_USERNAME }}
|
||||
password: ${{ env.REGISTRY_PASSWORD }}
|
||||
|
||||
- name: Build and push image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && (env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '') }}
|
||||
tags: |
|
||||
${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:latest
|
||||
${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:${{ steps.meta.outputs.sha }}
|
||||
@@ -1,36 +0,0 @@
|
||||
name: Deploy to Server
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DEFAULT_BRANCH: main
|
||||
REGISTRY_ORG: allucanget
|
||||
REGISTRY_IMAGE_NAME: calminer
|
||||
REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
|
||||
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
|
||||
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
steps:
|
||||
- name: SSH and deploy
|
||||
uses: appleboy/ssh-action@master
|
||||
with:
|
||||
host: ${{ secrets.SSH_HOST }}
|
||||
username: ${{ secrets.SSH_USERNAME }}
|
||||
key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
script: |
|
||||
docker pull ${{ env.REGISTRY_URL }}/${{ env.REGISTRY_ORG }}/${{ env.REGISTRY_IMAGE_NAME }}:latest
|
||||
docker stop calminer || true
|
||||
docker rm calminer || true
|
||||
docker run -d --name calminer -p 8000:8000 \
|
||||
-e DATABASE_DRIVER=${{ secrets.DATABASE_DRIVER }} \
|
||||
-e DATABASE_HOST=${{ secrets.DATABASE_HOST }} \
|
||||
-e DATABASE_PORT=${{ secrets.DATABASE_PORT }} \
|
||||
-e DATABASE_USER=${{ secrets.DATABASE_USER }} \
|
||||
-e DATABASE_PASSWORD=${{ secrets.DATABASE_PASSWORD }} \
|
||||
-e DATABASE_NAME=${{ secrets.DATABASE_NAME }} \
|
||||
-e DATABASE_SCHEMA=${{ secrets.DATABASE_SCHEMA }} \
|
||||
${{ secrets.REGISTRY_URL }}/${{ secrets.REGISTRY_USERNAME }}/calminer:latest
|
||||
@@ -1,125 +0,0 @@
|
||||
name: Run Tests
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_DB: calminer_ci
|
||||
POSTGRES_USER: calminer
|
||||
POSTGRES_PASSWORD: secret
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U calminer -d calminer_ci"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 10
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
- name: Configure apt proxy
|
||||
run: |
|
||||
set -euo pipefail
|
||||
PROXY_HOST="http://apt-cacher:3142"
|
||||
if ! curl -fsS --connect-timeout 3 "${PROXY_HOST}" >/dev/null; then
|
||||
PROXY_HOST="http://192.168.88.14:3142"
|
||||
fi
|
||||
echo "Using APT proxy ${PROXY_HOST}"
|
||||
echo "http_proxy=${PROXY_HOST}" >> "$GITHUB_ENV"
|
||||
echo "https_proxy=${PROXY_HOST}" >> "$GITHUB_ENV"
|
||||
echo "HTTP_PROXY=${PROXY_HOST}" >> "$GITHUB_ENV"
|
||||
echo "HTTPS_PROXY=${PROXY_HOST}" >> "$GITHUB_ENV"
|
||||
sudo tee /etc/apt/apt.conf.d/01proxy >/dev/null <<EOF
|
||||
Acquire::http::Proxy "${PROXY_HOST}";
|
||||
Acquire::https::Proxy "${PROXY_HOST}";
|
||||
EOF
|
||||
# - name: Cache pip
|
||||
# uses: actions/cache@v4
|
||||
# with:
|
||||
# path: ~/.cache/pip
|
||||
# key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
|
||||
# restore-keys: |
|
||||
# ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
|
||||
# ${{ runner.os }}-pip-
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install -r requirements.txt
|
||||
pip install -r requirements-test.txt
|
||||
- name: Install Playwright browsers
|
||||
run: |
|
||||
python -m playwright install --with-deps
|
||||
- name: Wait for database service
|
||||
env:
|
||||
DATABASE_DRIVER: postgresql
|
||||
DATABASE_HOST: postgres
|
||||
DATABASE_PORT: "5432"
|
||||
DATABASE_NAME: calminer_ci
|
||||
DATABASE_USER: calminer
|
||||
DATABASE_PASSWORD: secret
|
||||
DATABASE_SCHEMA: public
|
||||
DATABASE_SUPERUSER: calminer
|
||||
DATABASE_SUPERUSER_PASSWORD: secret
|
||||
DATABASE_SUPERUSER_DB: calminer_ci
|
||||
run: |
|
||||
python - <<'PY'
|
||||
import os
|
||||
import time
|
||||
|
||||
import psycopg2
|
||||
|
||||
dsn = (
|
||||
f"dbname={os.environ['DATABASE_SUPERUSER_DB']} "
|
||||
f"user={os.environ['DATABASE_SUPERUSER']} "
|
||||
f"password={os.environ['DATABASE_SUPERUSER_PASSWORD']} "
|
||||
f"host={os.environ['DATABASE_HOST']} "
|
||||
f"port={os.environ['DATABASE_PORT']}"
|
||||
)
|
||||
|
||||
for attempt in range(30):
|
||||
try:
|
||||
with psycopg2.connect(dsn):
|
||||
break
|
||||
except psycopg2.OperationalError:
|
||||
time.sleep(2)
|
||||
else:
|
||||
raise SystemExit("Postgres service did not become available")
|
||||
PY
|
||||
- name: Run database setup (dry run)
|
||||
env:
|
||||
DATABASE_DRIVER: postgresql
|
||||
DATABASE_HOST: postgres
|
||||
DATABASE_PORT: "5432"
|
||||
DATABASE_NAME: calminer_ci
|
||||
DATABASE_USER: calminer
|
||||
DATABASE_PASSWORD: secret
|
||||
DATABASE_SCHEMA: public
|
||||
DATABASE_SUPERUSER: calminer
|
||||
DATABASE_SUPERUSER_PASSWORD: secret
|
||||
DATABASE_SUPERUSER_DB: calminer_ci
|
||||
run: python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v
|
||||
- name: Run database setup
|
||||
env:
|
||||
DATABASE_DRIVER: postgresql
|
||||
DATABASE_HOST: postgres
|
||||
DATABASE_PORT: "5432"
|
||||
DATABASE_NAME: calminer_ci
|
||||
DATABASE_USER: calminer
|
||||
DATABASE_PASSWORD: secret
|
||||
DATABASE_SCHEMA: public
|
||||
DATABASE_SUPERUSER: calminer
|
||||
DATABASE_SUPERUSER_PASSWORD: secret
|
||||
DATABASE_SUPERUSER_DB: calminer_ci
|
||||
run: python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v
|
||||
- name: Run tests
|
||||
env:
|
||||
DATABASE_URL: postgresql+psycopg2://calminer:secret@postgres:5432/calminer_ci
|
||||
DATABASE_SCHEMA: public
|
||||
run: pytest
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -38,6 +38,9 @@ htmlcov/
|
||||
# Mypy cache
|
||||
.mypy_cache/
|
||||
|
||||
# Linting cache
|
||||
.ruff_cache/
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
@@ -45,3 +48,6 @@ logs/
|
||||
# SQLite database
|
||||
*.sqlite3
|
||||
test*.db
|
||||
|
||||
# Act runner files
|
||||
.runner
|
||||
|
||||
128
Dockerfile
128
Dockerfile
@@ -1,35 +1,111 @@
|
||||
# Multi-stage Dockerfile to keep final image small
|
||||
FROM python:3.10-slim AS builder
|
||||
# syntax=docker/dockerfile:1.7
|
||||
|
||||
# Install build-time packages and Python dependencies in one layer
|
||||
WORKDIR /app
|
||||
COPY requirements.txt /app/requirements.txt
|
||||
RUN echo 'Acquire::http::Proxy "http://192.168.88.14:3142";' > /etc/apt/apt.conf.d/90proxy
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends build-essential gcc libpq-dev \
|
||||
&& python -m pip install --upgrade pip \
|
||||
&& pip install --no-cache-dir --prefix=/install -r /app/requirements.txt \
|
||||
&& apt-get purge -y --auto-remove build-essential gcc \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ARG PYTHON_VERSION=3.11-slim
|
||||
ARG APT_CACHE_URL=http://192.168.88.14:3142
|
||||
|
||||
FROM python:${PYTHON_VERSION} AS builder
|
||||
ARG APT_CACHE_URL
|
||||
|
||||
ENV \
|
||||
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
||||
PIP_NO_CACHE_DIR=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
FROM python:3.10-slim
|
||||
WORKDIR /app
|
||||
|
||||
# Copy installed packages from builder
|
||||
COPY --from=builder /install /usr/local
|
||||
COPY requirements.txt ./requirements.txt
|
||||
|
||||
# Assume environment variables for DB config will be set at runtime
|
||||
# ENV DATABASE_HOST=your_db_host
|
||||
# ENV DATABASE_PORT=your_db_port
|
||||
# ENV DATABASE_NAME=your_db_name
|
||||
# ENV DATABASE_USER=your_db_user
|
||||
# ENV DATABASE_PASSWORD=your_db_password
|
||||
RUN --mount=type=cache,target=/root/.cache/pip /bin/bash <<'EOF'
|
||||
set -e
|
||||
|
||||
python3 <<'PY'
|
||||
import os, socket, urllib.parse
|
||||
|
||||
url = os.environ.get('APT_CACHE_URL', '').strip()
|
||||
if url:
|
||||
parsed = urllib.parse.urlparse(url)
|
||||
host = parsed.hostname
|
||||
port = parsed.port or (80 if parsed.scheme == 'http' else 443)
|
||||
if host:
|
||||
sock = socket.socket()
|
||||
sock.settimeout(1)
|
||||
try:
|
||||
sock.connect((host, port))
|
||||
except OSError:
|
||||
pass
|
||||
else:
|
||||
with open('/etc/apt/apt.conf.d/01proxy', 'w', encoding='utf-8') as fh:
|
||||
fh.write(f"Acquire::http::Proxy \"{url}\";\n")
|
||||
fh.write(f"Acquire::https::Proxy \"{url}\";\n")
|
||||
finally:
|
||||
sock.close()
|
||||
PY
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends build-essential gcc libpq-dev
|
||||
pip install --upgrade pip
|
||||
pip wheel --no-deps --wheel-dir /wheels -r requirements.txt
|
||||
apt-get purge -y --auto-remove build-essential gcc
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
EOF
|
||||
|
||||
FROM python:${PYTHON_VERSION} AS runtime
|
||||
ARG APT_CACHE_URL
|
||||
|
||||
ENV \
|
||||
PIP_DISABLE_PIP_VERSION_CHECK=1 \
|
||||
PIP_NO_CACHE_DIR=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PATH="/home/appuser/.local/bin:${PATH}"
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN groupadd --system app && useradd --system --create-home --gid app appuser
|
||||
|
||||
RUN /bin/bash <<'EOF'
|
||||
set -e
|
||||
|
||||
python3 <<'PY'
|
||||
import os, socket, urllib.parse
|
||||
|
||||
url = os.environ.get('APT_CACHE_URL', '').strip()
|
||||
if url:
|
||||
parsed = urllib.parse.urlparse(url)
|
||||
host = parsed.hostname
|
||||
port = parsed.port or (80 if parsed.scheme == 'http' else 443)
|
||||
if host:
|
||||
sock = socket.socket()
|
||||
sock.settimeout(1)
|
||||
try:
|
||||
sock.connect((host, port))
|
||||
except OSError:
|
||||
pass
|
||||
else:
|
||||
with open('/etc/apt/apt.conf.d/01proxy', 'w', encoding='utf-8') as fh:
|
||||
fh.write(f"Acquire::http::Proxy \"{url}\";\n")
|
||||
fh.write(f"Acquire::https::Proxy \"{url}\";\n")
|
||||
finally:
|
||||
sock.close()
|
||||
PY
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends libpq5
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
EOF
|
||||
|
||||
COPY --from=builder /wheels /wheels
|
||||
COPY --from=builder /app/requirements.txt /tmp/requirements.txt
|
||||
|
||||
RUN pip install --upgrade pip \
|
||||
&& pip install --no-cache-dir --find-links=/wheels -r /tmp/requirements.txt \
|
||||
&& rm -rf /wheels /tmp/requirements.txt
|
||||
|
||||
# Copy application code
|
||||
COPY . /app
|
||||
|
||||
# Expose service port
|
||||
EXPOSE 8000
|
||||
RUN chown -R appuser:app /app
|
||||
|
||||
# Run the FastAPI app with uvicorn
|
||||
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
|
||||
USER appuser
|
||||
|
||||
EXPOSE 8003
|
||||
|
||||
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]
|
||||
|
||||
82
README.md
82
README.md
@@ -6,86 +6,6 @@ Focuses on ore mining operations and covering parameters such as capital and ope
|
||||
|
||||
The system is designed to help mining companies make informed decisions by simulating various scenarios and analyzing potential outcomes based on stochastic variables.
|
||||
|
||||
A range of features are implemented to support these functionalities.
|
||||
|
||||
## Features
|
||||
|
||||
- **Scenario Management**: Manage multiple mining scenarios with independent parameter sets and outputs.
|
||||
- **Process Parameters**: Define and persist process inputs via FastAPI endpoints and template-driven forms.
|
||||
- **Cost Tracking**: Capture capital (`capex`) and operational (`opex`) expenditures per scenario.
|
||||
- **Consumption Tracking**: Record resource consumption (chemicals, fuel, water, scrap) tied to scenarios.
|
||||
- **Production Output**: Store production metrics such as tonnage, recovery, and revenue drivers.
|
||||
- **Equipment Management**: Register scenario-specific equipment inventories.
|
||||
- **Maintenance Logging**: Log maintenance events against equipment with dates and costs.
|
||||
- **Reporting Dashboard**: Surface aggregated statistics for simulation outputs with an interactive Chart.js dashboard.
|
||||
- **Unified UI Shell**: Server-rendered templates extend a shared base layout with a persistent left sidebar linking scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting views.
|
||||
- **Operations Overview Dashboard**: The root route (`/`) surfaces cross-scenario KPIs, charts, and maintenance reminders with a one-click refresh backed by aggregated loaders.
|
||||
- **Theming Tokens**: Shared CSS variables in `static/css/main.css` centralize the UI color palette for consistent styling and rapid theme tweaks.
|
||||
- **Settings Center**: The Settings landing page exposes visual theme controls and links to currency administration, backed by persisted application settings and environment overrides.
|
||||
- **Modular Frontend Scripts**: Page-specific interactions in `static/js/` modules, keeping templates lean while enabling browser caching and reuse.
|
||||
- **Monte Carlo Simulation (in progress)**: Services and routes are scaffolded for future stochastic analysis.
|
||||
|
||||
## Documentation & quickstart
|
||||
|
||||
This repository contains detailed developer and architecture documentation in the `docs/` folder.
|
||||
|
||||
### Settings overview
|
||||
|
||||
The Settings page (`/ui/settings`) lets administrators adjust global theme colors stored in the `application_setting` table. Changes are instantly applied across the UI. Environment variables prefixed with `CALMINER_THEME_` (for example, `CALMINER_THEME_COLOR_PRIMARY`) automatically override individual CSS variables and render as read-only in the form, ensuring deployment-time overrides take precedence while remaining visible to operators.
|
||||
|
||||
[Quickstart](docs/quickstart.md) contains developer quickstart, migrations, testing and current status.
|
||||
|
||||
Key architecture documents: see [architecture](docs/architecture/README.md) for the arc42-based architecture documentation.
|
||||
|
||||
For contributors: the `routes/`, `models/` and `services/` folders contain the primary application code. Tests and E2E specs are in `tests/`.
|
||||
|
||||
## Run with Docker
|
||||
|
||||
The repository ships with a multi-stage `Dockerfile` that produces a slim runtime image.
|
||||
|
||||
### Build container
|
||||
|
||||
```powershell
|
||||
# Build the image locally
|
||||
docker build -t calminer:latest .
|
||||
```
|
||||
|
||||
### Push to registry
|
||||
|
||||
```powershell
|
||||
# Tag and push the image to your registry
|
||||
docker login your-registry.com -u your-username -p your-password
|
||||
docker tag calminer:latest your-registry.com/your-namespace/calminer:latest
|
||||
docker push your-registry.com/your-namespace/calminer:latest
|
||||
```
|
||||
|
||||
### Run container
|
||||
|
||||
Expose FastAPI on <http://localhost:8000> with database configuration via granular environment variables:
|
||||
|
||||
```powershell
|
||||
# Provide database configuration via granular environment variables
|
||||
docker run --rm -p 8000:8000 ^
|
||||
-e DATABASE_DRIVER="postgresql" ^
|
||||
-e DATABASE_HOST="db.host" ^
|
||||
-e DATABASE_PORT="5432" ^
|
||||
-e DATABASE_USER="calminer" ^
|
||||
-e DATABASE_PASSWORD="s3cret" ^
|
||||
-e DATABASE_NAME="calminer" ^
|
||||
-e DATABASE_SCHEMA="public" ^
|
||||
calminer:latest
|
||||
```
|
||||
|
||||
### Orchestrated Deployment
|
||||
|
||||
Use `docker compose` or an orchestrator of your choice to co-locate PostgreSQL/Redis alongside the app when needed. The image expects migrations to be applied before startup.
|
||||
|
||||
## CI/CD expectations
|
||||
|
||||
CalMiner uses Gitea Actions workflows stored in `.gitea/workflows/`:
|
||||
|
||||
- `test.yml` runs style/unit/e2e suites on every push with cached Python dependencies.
|
||||
- `build-and-push.yml` builds the Docker image, reuses cached layers, and pushes to the configured registry.
|
||||
- `deploy.yml` pulls the pushed image on the target host and restarts the container.
|
||||
|
||||
Pipelines assume the following secrets are provisioned in the Gitea instance: `REGISTRY_USERNAME`, `REGISTRY_PASSWORD`, `REGISTRY_URL`, `SSH_HOST`, `SSH_USERNAME`, and `SSH_PRIVATE_KEY`.
|
||||
This repository contains only code. See detailed developer and architecture documentation in the [Docs](https://git.allucanget.biz/allucanget/calminer-docs) repository.
|
||||
|
||||
@@ -56,3 +56,11 @@ DATABASE_URL = _build_database_url()
|
||||
engine = create_engine(DATABASE_URL, echo=True, future=True)
|
||||
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
def get_db():
|
||||
db = SessionLocal()
|
||||
try:
|
||||
yield db
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
# Sample environment configuration for staging deployment
|
||||
DATABASE_HOST=staging-db.internal
|
||||
DATABASE_PORT=5432
|
||||
DATABASE_NAME=calminer_staging
|
||||
DATABASE_USER=calminer_app
|
||||
DATABASE_PASSWORD=<app-password>
|
||||
|
||||
# Admin connection used for provisioning database and roles
|
||||
DATABASE_SUPERUSER=postgres
|
||||
DATABASE_SUPERUSER_PASSWORD=<admin-password>
|
||||
DATABASE_SUPERUSER_DB=postgres
|
||||
@@ -1,14 +0,0 @@
|
||||
# Sample environment configuration for running scripts/setup_database.py against a test instance
|
||||
DATABASE_DRIVER=postgresql
|
||||
DATABASE_HOST=postgres
|
||||
DATABASE_PORT=5432
|
||||
DATABASE_NAME=calminer_test
|
||||
DATABASE_USER=calminer_test
|
||||
DATABASE_PASSWORD=<test-password>
|
||||
# optional: specify schema if different from 'public'
|
||||
#DATABASE_SCHEMA=public
|
||||
|
||||
# Admin connection used for provisioning database and roles
|
||||
DATABASE_SUPERUSER=postgres
|
||||
DATABASE_SUPERUSER_PASSWORD=<superuser-password>
|
||||
DATABASE_SUPERUSER_DB=postgres
|
||||
@@ -1,23 +0,0 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
container_name: calminer_postgres_local
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: calminer_local
|
||||
POSTGRES_USER: calminer
|
||||
POSTGRES_PASSWORD: secret
|
||||
ports:
|
||||
- "5433:5432"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U calminer -d calminer_local"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
36
docker-compose.yml
Normal file
36
docker-compose.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
app:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- "8003:8003"
|
||||
environment:
|
||||
- DATABASE_HOST=postgres
|
||||
- DATABASE_PORT=5432
|
||||
- DATABASE_USER=calminer
|
||||
- DATABASE_PASSWORD=calminer_password
|
||||
- DATABASE_NAME=calminer_db
|
||||
- DATABASE_DRIVER=postgresql
|
||||
depends_on:
|
||||
- postgres
|
||||
volumes:
|
||||
- ./logs:/app/logs
|
||||
restart: unless-stopped
|
||||
|
||||
postgres:
|
||||
image: postgres:17
|
||||
environment:
|
||||
- POSTGRES_USER=calminer
|
||||
- POSTGRES_PASSWORD=calminer_password
|
||||
- POSTGRES_DB=calminer_db
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
@@ -1,62 +0,0 @@
|
||||
---
|
||||
title: "01 — Introduction and Goals"
|
||||
description: "System purpose, stakeholders, and high-level goals; project introduction and business/technical goals."
|
||||
status: draft
|
||||
---
|
||||
|
||||
# 01 — Introduction and Goals
|
||||
|
||||
## Purpose
|
||||
|
||||
CalMiner aims to provide a comprehensive platform for mining project scenario analysis, enabling stakeholders to make informed decisions based on data-driven insights.
|
||||
|
||||
## Stakeholders
|
||||
|
||||
- **Project Managers**: Require tools for scenario planning and risk assessment.
|
||||
- **Data Analysts**: Need access to historical data and simulation results for analysis.
|
||||
- **Executives**: Seek high-level insights and reporting for strategic decision-making.
|
||||
|
||||
## High-Level Goals
|
||||
|
||||
1. **Comprehensive Scenario Analysis**: Enable users to create and analyze multiple project scenarios to assess risks and opportunities.
|
||||
2. **Data-Driven Decision Making**: Provide stakeholders with the insights needed to make informed decisions based on simulation results.
|
||||
3. **User-Friendly Interface**: Ensure the platform is accessible and easy to use for all stakeholders, regardless of technical expertise.
|
||||
|
||||
## System Overview
|
||||
|
||||
FastAPI application that collects mining project inputs, persists scenario-specific records, and surfaces aggregated insights. The platform targets Monte Carlo driven planning, with deterministic CRUD features in place and simulation logic staged for future work.
|
||||
|
||||
Frontend components are server-rendered Jinja2 templates, with Chart.js powering the dashboard visualization. The backend leverages SQLAlchemy for ORM mapping to a PostgreSQL database.
|
||||
|
||||
### Runtime Flow
|
||||
|
||||
1. Users navigate to form templates or API clients to manage scenarios, parameters, and operational data.
|
||||
2. FastAPI routers validate payloads with Pydantic models, then delegate to SQLAlchemy sessions for persistence.
|
||||
3. Simulation runs (placeholder `services/simulation.py`) will consume stored parameters to emit iteration results via `/api/simulations/run`.
|
||||
4. Reporting requests POST simulation outputs to `/api/reporting/summary`; the reporting service calculates aggregates (count, min/max, mean, median, percentiles, standard deviation, variance, and tail-risk metrics at the 95% confidence level).
|
||||
5. `templates/Dashboard.html` fetches summaries, renders metric cards, and plots distribution charts with Chart.js for stakeholder review.
|
||||
|
||||
### Current implementation status (summary)
|
||||
|
||||
- Currency normalization, simulation scaffold, and reporting service exist; see [quickstart](../quickstart.md) for full status and migration instructions.
|
||||
|
||||
## MVP Features (migrated)
|
||||
|
||||
The following MVP features and priorities were defined during initial planning.
|
||||
|
||||
### Prioritized Features
|
||||
|
||||
1. **Scenario Creation and Management** (High Priority): Allow users to create, edit, and delete scenarios. Rationale: Core functionality for what-if analysis.
|
||||
1. **Parameter Input and Validation** (High Priority): Input process parameters with validation. Rationale: Ensures data integrity for simulations.
|
||||
1. **Monte Carlo Simulation Run** (High Priority): Execute simulations and store results. Rationale: Key differentiator for risk analysis.
|
||||
1. **Basic Reporting** (Medium Priority): Display NPV, IRR, EBITDA from simulation results. Rationale: Essential for decision-making.
|
||||
1. **Cost Tracking Dashboard** (Medium Priority): Visualize CAPEX and OPEX. Rationale: Helps monitor expenses.
|
||||
1. **Consumption Monitoring** (Low Priority): Track resource consumption. Rationale: Useful for optimization.
|
||||
1. **User Authentication** (Medium Priority): Basic login/logout. Rationale: Security for multi-user access.
|
||||
1. **Export Results** (Low Priority): Export simulation data to CSV/PDF. Rationale: For external analysis.
|
||||
|
||||
### Rationale for Prioritization
|
||||
|
||||
- High: Core simulation and scenario features first.
|
||||
- Medium: Reporting and auth for usability.
|
||||
- Low: Nice-to-haves after basics.
|
||||
@@ -1,175 +0,0 @@
|
||||
---
|
||||
title: "02 — Architecture Constraints"
|
||||
description: "Document imposed constraints: technical, organizational, regulatory, and environmental constraints that affect architecture decisions."
|
||||
status: skeleton
|
||||
---
|
||||
|
||||
# 02 — Architecture Constraints
|
||||
|
||||
## Technical Constraints
|
||||
|
||||
> e.g., choice of FastAPI, PostgreSQL, SQLAlchemy, Chart.js, Jinja2 templates.
|
||||
|
||||
The architecture of CalMiner is influenced by several technical constraints that shape its design and implementation:
|
||||
|
||||
1. **Framework Selection**: The choice of FastAPI as the web framework imposes constraints on how the application handles requests, routing, and middleware. FastAPI's asynchronous capabilities must be leveraged appropriately to ensure optimal performance.
|
||||
2. **Database Technology**: The use of PostgreSQL as the primary database system dictates the data modeling, querying capabilities, and transaction management strategies. SQLAlchemy ORM is used for database interactions, which requires adherence to its conventions and limitations.
|
||||
3. **Frontend Technologies**: The decision to use Jinja2 for server-side templating and Chart.js for data visualization influences the structure of the frontend code and the way dynamic content is rendered.
|
||||
4. **Simulation Logic**: The Monte Carlo simulation logic must be designed to efficiently handle large datasets and perform computations within the constraints of the chosen programming language (Python) and its libraries.
|
||||
|
||||
## Organizational Constraints
|
||||
|
||||
> e.g., team skillsets, development workflows, CI/CD pipelines.
|
||||
|
||||
Restrictions arising from organizational factors include:
|
||||
|
||||
1. **Team Expertise**: The development team’s familiarity with FastAPI, SQLAlchemy, and frontend technologies like Jinja2 and Chart.js influences the architecture choices to ensure maintainability and ease of development.
|
||||
2. **Development Processes**: The adoption of Agile methodologies and CI/CD pipelines (using Gitea Actions) shapes the architecture to support continuous integration, automated testing, and deployment practices.
|
||||
3. **Collaboration Tools**: The use of specific collaboration and version control tools (e.g., Gitea) affects how code is managed, reviewed, and integrated, impacting the overall architecture and development workflow.
|
||||
4. **Documentation Standards**: The requirement for comprehensive documentation (as seen in the `docs/` folder) necessitates an architecture that is well-structured and easy to understand for both current and future team members.
|
||||
5. **Knowledge Sharing**: The need for effective knowledge sharing and onboarding processes influences the architecture to ensure that it is accessible and understandable for new team members.
|
||||
6. **Resource Availability**: The availability of hardware, software, and human resources within the organization can impose constraints on the architecture, affecting decisions related to scalability, performance, and feature implementation.
|
||||
|
||||
## Regulatory Constraints
|
||||
|
||||
> e.g., data privacy laws, industry standards.
|
||||
|
||||
Regulatory constraints that impact the architecture of CalMiner include:
|
||||
|
||||
1. **Data Privacy Compliance**: The architecture must ensure compliance with data privacy regulations such as GDPR or CCPA, which may dictate how user data is collected, stored, and processed.
|
||||
2. **Industry Standards**: Adherence to industry-specific standards and best practices may influence the design of data models, security measures, and reporting functionalities.
|
||||
3. **Auditability**: The system may need to incorporate logging and auditing features to meet regulatory requirements, affecting the architecture of data storage and access controls.
|
||||
4. **Data Retention Policies**: Regulatory requirements regarding data retention and deletion may impose constraints on how long certain types of data can be stored, influencing database design and data lifecycle management.
|
||||
5. **Security Standards**: Compliance with security standards (e.g., ISO/IEC 27001) may necessitate the implementation of specific security measures, such as encryption, access controls, and vulnerability management, which impact the overall architecture.
|
||||
|
||||
## Environmental Constraints
|
||||
|
||||
> e.g., deployment environments, cloud provider limitations.
|
||||
|
||||
Environmental constraints affecting the architecture include:
|
||||
|
||||
1. **Deployment Environments**: The architecture must accommodate various deployment environments (development, testing, production) with differing configurations and resource allocations.
|
||||
2. **Cloud Provider Limitations**: If deployed on a specific cloud provider, the architecture may need to align with the provider's services, limitations, and best practices, such as using managed databases or specific container orchestration tools.
|
||||
3. **Containerization**: The use of Docker for containerization imposes constraints on how the application is packaged, deployed, and scaled, influencing the architecture to ensure compatibility with container orchestration platforms.
|
||||
4. **Scalability Requirements**: The architecture must be designed to scale efficiently based on anticipated load and usage patterns, considering the limitations of the chosen infrastructure.
|
||||
|
||||
## Performance Constraints
|
||||
|
||||
> e.g., response time requirements, scalability needs.
|
||||
|
||||
Current performance constraints include:
|
||||
|
||||
1. **Response Time Requirements**: The architecture must ensure that the system can respond to user requests within a specified time frame, which may impact design decisions related to caching, database queries, and API performance.
|
||||
2. **Scalability Needs**: The system should be able to handle increased load and user traffic without significant degradation in performance, necessitating a scalable architecture that can grow with demand.
|
||||
|
||||
## Security Constraints
|
||||
|
||||
> e.g., authentication mechanisms, data encryption standards.
|
||||
|
||||
## Budgetary Constraints
|
||||
|
||||
> e.g., licensing costs, infrastructure budgets.
|
||||
|
||||
## Time Constraints
|
||||
|
||||
> e.g., project deadlines, release schedules.
|
||||
|
||||
## Interoperability Constraints
|
||||
|
||||
> e.g., integration with existing systems, third-party services.
|
||||
|
||||
## Maintainability Constraints
|
||||
|
||||
> e.g., code modularity, documentation standards.
|
||||
|
||||
## Usability Constraints
|
||||
|
||||
> e.g., user interface design principles, accessibility requirements.
|
||||
|
||||
## Data Constraints
|
||||
|
||||
> e.g., data storage formats, data retention policies.
|
||||
|
||||
## Deployment Constraints
|
||||
|
||||
> e.g., deployment environments, cloud provider limitations.
|
||||
|
||||
## Testing Constraints
|
||||
|
||||
> e.g., testing frameworks, test coverage requirements.
|
||||
|
||||
## Localization Constraints
|
||||
|
||||
> e.g., multi-language support, regional settings.
|
||||
|
||||
## Versioning Constraints
|
||||
|
||||
> e.g., API versioning strategies, backward compatibility.
|
||||
|
||||
## Monitoring Constraints
|
||||
|
||||
> e.g., logging standards, performance monitoring tools.
|
||||
|
||||
## Backup and Recovery Constraints
|
||||
|
||||
> e.g., data backup frequency, disaster recovery plans.
|
||||
|
||||
## Development Constraints
|
||||
|
||||
> e.g., coding languages, frameworks, libraries to be used or avoided.
|
||||
|
||||
## Collaboration Constraints
|
||||
|
||||
> e.g., communication tools, collaboration platforms.
|
||||
|
||||
## Documentation Constraints
|
||||
|
||||
> e.g., documentation tools, style guides.
|
||||
|
||||
## Training Constraints
|
||||
|
||||
> e.g., training programs, skill development initiatives.
|
||||
|
||||
## Support Constraints
|
||||
|
||||
> e.g., support channels, response time expectations.
|
||||
|
||||
## Legal Constraints
|
||||
|
||||
> e.g., compliance requirements, intellectual property considerations.
|
||||
|
||||
## Ethical Constraints
|
||||
|
||||
> e.g., ethical considerations in data usage, user privacy.
|
||||
|
||||
## Environmental Impact Constraints
|
||||
|
||||
> e.g., energy consumption considerations, sustainability goals.
|
||||
|
||||
## Innovation Constraints
|
||||
|
||||
> e.g., limitations on adopting new technologies, risk tolerance for experimentation.
|
||||
|
||||
## Cultural Constraints
|
||||
|
||||
> e.g., organizational culture, team dynamics affecting development practices.
|
||||
|
||||
## Stakeholder Constraints
|
||||
|
||||
> e.g., stakeholder expectations, communication preferences.
|
||||
|
||||
## Change Management Constraints
|
||||
|
||||
> e.g., processes for handling changes, version control practices.
|
||||
|
||||
## Resource Constraints
|
||||
|
||||
> e.g., availability of hardware, software, and human resources.
|
||||
|
||||
## Process Constraints
|
||||
|
||||
> e.g., development methodologies (Agile, Scrum), project management tools.
|
||||
|
||||
## Quality Constraints
|
||||
|
||||
> e.g., code quality standards, testing requirements.
|
||||
@@ -1,57 +0,0 @@
|
||||
---
|
||||
title: "03 — Context and Scope"
|
||||
description: "Describe system context, external actors, and the scope of the architecture."
|
||||
status: draft
|
||||
---
|
||||
|
||||
# 03 — Context and Scope
|
||||
|
||||
## System Context
|
||||
|
||||
The CalMiner system operates within the context of mining project management, providing tools for scenario analysis and decision support. It interacts with various data sources, including historical project data and real-time operational metrics.
|
||||
|
||||
## External Actors
|
||||
|
||||
- **Project Managers**: Utilize the platform for scenario planning and risk assessment.
|
||||
- **Data Analysts**: Analyze simulation results and derive insights.
|
||||
- **Executives**: Review high-level reports and dashboards for strategic decision-making.
|
||||
|
||||
## Scope of the Architecture
|
||||
|
||||
The architecture encompasses the following key areas:
|
||||
|
||||
1. **Data Ingestion**: Mechanisms for collecting and processing data from various sources.
|
||||
2. **Data Storage**: Solutions for storing and managing historical and real-time data.
|
||||
3. **Simulation Engine**: Core algorithms and models for scenario analysis.
   1. **Modeling Framework**: Tools for defining and managing simulation models.
   2. **Parameter Management**: Systems for handling input parameters and configurations.
   3. **Execution Engine**: Infrastructure for running simulations and processing results.
   4. **Result Storage**: Systems for storing simulation outputs for analysis and reporting.
|
||||
4. **Financial Reporting**: Tools for generating reports and visualizations based on simulation outcomes.
|
||||
5. **Risk Assessment**: Frameworks for identifying and evaluating potential project risks.
|
||||
6. **Profitability Analysis**: Modules for calculating and analyzing project profitability metrics.
|
||||
7. **User Interface**: Design and implementation of the user-facing components of the system.
|
||||
8. **Security and Compliance**: Measures to ensure data security and regulatory compliance.
|
||||
9. **Scalability and Performance**: Strategies for ensuring the system can handle increasing data volumes and user loads.
|
||||
10. **Integration Points**: Interfaces for integrating with external systems and services.
|
||||
11. **Monitoring and Logging**: Systems for tracking system performance and user activity.
|
||||
12. **Maintenance and Support**: Processes for ongoing system maintenance and user support.
|
||||
|
||||
## Diagram
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant PM as Project Manager
|
||||
participant DA as Data Analyst
|
||||
participant EX as Executive
|
||||
participant CM as CalMiner System
|
||||
|
||||
PM->>CM: Create and manage scenarios
|
||||
DA->>CM: Analyze simulation results
|
||||
EX->>CM: Review reports and dashboards
|
||||
CM->>PM: Provide scenario planning tools
|
||||
CM->>DA: Deliver analysis insights
|
||||
CM->>EX: Generate high-level reports
|
||||
```
|
||||
|
||||
This diagram illustrates the key components of the CalMiner system and their interactions with external actors.
|
||||
@@ -1,49 +0,0 @@
|
||||
---
|
||||
title: "04 — Solution Strategy"
|
||||
description: "High-level solution strategy describing major approaches, technology choices, and trade-offs."
|
||||
status: draft
|
||||
---
|
||||
|
||||
# 04 — Solution Strategy
|
||||
|
||||
This section outlines the high-level solution strategy for implementing the CalMiner system, focusing on major approaches, technology choices, and trade-offs.
|
||||
|
||||
## Client-Server Architecture
|
||||
|
||||
- **Backend**: FastAPI serves as the backend framework, providing RESTful APIs for data management, simulation execution, and reporting. It leverages SQLAlchemy for ORM-based database interactions with PostgreSQL.
|
||||
- **Frontend**: Server-rendered Jinja2 templates deliver dynamic HTML views, enhanced with Chart.js for interactive data visualizations. This approach balances performance and simplicity, avoiding the complexity of a full SPA.
|
||||
- **Middleware**: Custom middleware handles JSON validation to ensure data integrity before processing requests.
|
||||
|
||||
## Technology Choices
|
||||
|
||||
- **FastAPI**: Chosen for its high performance, ease of use, and modern features like async support and automatic OpenAPI documentation.
|
||||
- **PostgreSQL**: Selected for its robustness, scalability, and support for complex queries, making it suitable for handling the diverse data needs of mining project management.
|
||||
- **SQLAlchemy**: Provides a flexible and powerful ORM layer, facilitating database interactions while maintaining code readability and maintainability.
|
||||
- **Chart.js**: Utilized for its simplicity and effectiveness in rendering interactive charts, enhancing the user experience on the dashboard.
|
||||
- **Jinja2**: Enables server-side rendering of HTML templates, allowing for dynamic content generation while keeping the frontend lightweight.
|
||||
- **Pydantic**: Used for data validation and serialization, ensuring that incoming request payloads conform to expected schemas.
|
||||
- **Docker**: Employed for containerization, ensuring consistent deployment across different environments and simplifying dependency management.
|
||||
- **Redis**: Used as an in-memory data store to cache frequently accessed data, improving application performance and reducing database load.
|
||||
|
||||
## Trade-offs
|
||||
|
||||
- **Server-Rendered vs. SPA**: Opted for server-rendered templates over a single-page application (SPA) to reduce complexity and improve initial load times, at the cost of some interactivity.
|
||||
- **Synchronous vs. Asynchronous**: While FastAPI supports async operations, the initial implementation focuses on synchronous request handling for simplicity, with plans to introduce async features as needed.
|
||||
- **Monolithic vs. Microservices**: The initial architecture follows a monolithic approach for ease of development and deployment, with the possibility of refactoring into microservices as the system scales.
|
||||
- **In-Memory Caching**: Implementing Redis for caching introduces additional infrastructure complexity but significantly enhances performance for read-heavy operations.
|
||||
- **Database Choice**: PostgreSQL was chosen over NoSQL alternatives due to the structured nature of the data and the need for complex querying capabilities, despite potential scalability challenges.
|
||||
- **Technology Familiarity**: Selected technologies align with the team's existing skill set to minimize the learning curve and accelerate development, even if some alternatives may offer marginally better performance or features.
|
||||
- **Extensibility vs. Simplicity**: The architecture is designed to be extensible for future features (e.g., Monte Carlo simulation engine) while maintaining simplicity in the initial implementation to ensure timely delivery of core functionalities.
|
||||
|
||||
## Future Considerations
|
||||
|
||||
- **Scalability**: As the user base grows, consider transitioning to a microservices architecture and implementing load balancing strategies.
|
||||
- **Asynchronous Processing**: Introduce asynchronous task queues (e.g., Celery) for long-running simulations to improve responsiveness.
|
||||
- **Enhanced Frontend**: Explore the possibility of integrating a frontend framework (e.g., React or Vue.js) for more dynamic user interactions in future iterations.
|
||||
- **Advanced Analytics**: Plan for integrating advanced analytics and machine learning capabilities to enhance simulation accuracy and reporting insights.
|
||||
- **Security Enhancements**: Implement robust authentication and authorization mechanisms to protect sensitive data and ensure compliance with industry standards.
|
||||
- **Continuous Integration/Continuous Deployment (CI/CD)**: Establish CI/CD pipelines to automate testing, building, and deployment processes for faster and more reliable releases.
|
||||
- **Monitoring and Logging**: Integrate monitoring tools (e.g., Prometheus, Grafana) and centralized logging solutions (e.g., ELK stack) to track application performance and troubleshoot issues effectively.
|
||||
- **User Feedback Loop**: Implement mechanisms for collecting user feedback to inform future development priorities and improve user experience.
|
||||
- **Documentation**: Maintain comprehensive documentation for both developers and end-users to facilitate onboarding and effective use of the system.
|
||||
- **Testing Strategy**: Develop a robust testing strategy, including unit, integration, and end-to-end tests, to ensure code quality and reliability as the system evolves.
|
||||
@@ -1,110 +0,0 @@
|
||||
# Implementation Plan 2025-10-20
|
||||
|
||||
This file contains the implementation plan (MVP features, steps, and estimates).
|
||||
|
||||
## Project Setup
|
||||
|
||||
1. Connect to PostgreSQL database with schema `calminer`.
|
||||
1. Create and activate a virtual environment and install dependencies via `requirements.txt`.
|
||||
1. Define database environment variables in `.env` (e.g., `DATABASE_DRIVER`, `DATABASE_HOST`, `DATABASE_PORT`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_NAME`, optional `DATABASE_SCHEMA`).
|
||||
1. Configure FastAPI entrypoint in `main.py` to include routers.
|
||||
|
||||
## Feature: Scenario Management
|
||||
|
||||
### Scenario Management — Steps
|
||||
|
||||
1. Create `models/scenario.py` for scenario CRUD.
|
||||
1. Implement API endpoints in `routes/scenarios.py` (GET, POST, PUT, DELETE).
|
||||
1. Write unit tests in `tests/unit/test_scenario.py`.
|
||||
1. Build UI component `components/ScenarioForm.html`.
|
||||
|
||||
## Feature: Process Parameters
|
||||
|
||||
### Parameters — Steps
|
||||
|
||||
1. Create `models/parameters.py` for process parameters.
|
||||
1. Implement Pydantic schemas in `routes/parameters.py`.
|
||||
1. Add validation middleware in `middleware/validation.py`.
|
||||
1. Write unit tests in `tests/unit/test_parameter.py`.
|
||||
1. Build UI component `components/ParameterInput.html`.
|
||||
|
||||
## Feature: Stochastic Variables
|
||||
|
||||
### Stochastic Variables — Steps
|
||||
|
||||
1. Create `models/distribution.py` for variable distributions.
|
||||
1. Implement API routes in `routes/distributions.py`.
|
||||
1. Write Pydantic schemas and validations.
|
||||
1. Write unit tests in `tests/unit/test_distribution.py`.
|
||||
1. Build UI component `components/DistributionEditor.html`.
|
||||
|
||||
## Feature: Cost Tracking
|
||||
|
||||
### Cost Tracking — Steps
|
||||
|
||||
1. Create `models/capex.py` and `models/opex.py`.
|
||||
1. Implement API routes in `routes/costs.py`.
|
||||
1. Write Pydantic schemas for CAPEX/OPEX.
|
||||
1. Write unit tests in `tests/unit/test_costs.py`.
|
||||
1. Build UI component `components/CostForm.html`.
|
||||
|
||||
## Feature: Consumption Tracking
|
||||
|
||||
### Consumption Tracking — Steps
|
||||
|
||||
1. Create models for consumption: `chemical_consumption.py`, `fuel_consumption.py`, `water_consumption.py`, `scrap_consumption.py`.
|
||||
1. Implement API routes in `routes/consumption.py`.
|
||||
1. Write Pydantic schemas for consumption data.
|
||||
1. Write unit tests in `tests/unit/test_consumption.py`.
|
||||
1. Build UI component `components/ConsumptionDashboard.html`.
|
||||
|
||||
## Feature: Production Output
|
||||
|
||||
### Production Output — Steps
|
||||
|
||||
1. Create `models/production_output.py`.
|
||||
1. Implement API routes in `routes/production.py`.
|
||||
1. Write Pydantic schemas for production output.
|
||||
1. Write unit tests in `tests/unit/test_production.py`.
|
||||
1. Build UI component `components/ProductionChart.html`.
|
||||
|
||||
## Feature: Equipment Management
|
||||
|
||||
### Equipment Management — Steps
|
||||
|
||||
1. Create `models/equipment.py` for equipment data.
|
||||
1. Implement API routes in `routes/equipment.py`.
|
||||
1. Write Pydantic schemas for equipment.
|
||||
1. Write unit tests in `tests/unit/test_equipment.py`.
|
||||
1. Build UI component `components/EquipmentList.html`.
|
||||
|
||||
## Feature: Maintenance Logging
|
||||
|
||||
### Maintenance Logging — Steps
|
||||
|
||||
1. Create `models/maintenance.py` for maintenance events.
|
||||
1. Implement API routes in `routes/maintenance.py`.
|
||||
1. Write Pydantic schemas for maintenance logs.
|
||||
1. Write unit tests in `tests/unit/test_maintenance.py`.
|
||||
1. Build UI component `components/MaintenanceLog.html`.
|
||||
|
||||
## Feature: Monte Carlo Simulation Engine
|
||||
|
||||
### Monte Carlo Engine — Steps
|
||||
|
||||
1. Implement Monte Carlo logic in `services/simulation.py`.
|
||||
1. Persist results in `models/simulation_result.py`.
|
||||
1. Expose endpoint in `routes/simulations.py`.
|
||||
1. Write unit tests in `tests/unit/test_simulation.py`.
|
||||
1. Build UI component `components/SimulationRunner.html`.
|
||||
|
||||
## Feature: Reporting / Dashboard
|
||||
|
||||
### Reporting / Dashboard — Steps
|
||||
|
||||
1. Implement report calculations in `services/reporting.py`.
|
||||
1. Add detailed and summary endpoints in `routes/reporting.py`.
|
||||
1. Write unit tests in `tests/unit/test_reporting.py`.
|
||||
1. Enhance UI in `components/Dashboard.html` with charts.
|
||||
|
||||
See [UI and Style](../13_ui_and_style.md) for the UI template audit, layout guidance, and next steps.
|
||||
@@ -1,62 +0,0 @@
|
||||
---
|
||||
title: "05 — Building Block View"
|
||||
description: "Explain the static structure: modules, components, services and their relationships."
|
||||
status: draft
|
||||
---
|
||||
|
||||
<!-- markdownlint-disable-next-line MD025 -->
|
||||
# 05 — Building Block View
|
||||
|
||||
## Architecture overview
|
||||
|
||||
This overview complements [architecture](README.md) with a high-level map of CalMiner's module layout and request flow.
|
||||
|
||||
Refer to the detailed architecture chapters in `docs/architecture/`:
|
||||
|
||||
- Module map & components: [Building Block View](05_building_block_view.md)
|
||||
- Request flow & runtime interactions: [Runtime View](06_runtime_view.md)
|
||||
- Simulation roadmap & strategy: [Solution Strategy](04_solution_strategy.md)
|
||||
|
||||
## System Components
|
||||
|
||||
### Backend
|
||||
|
||||
- **FastAPI application** (`main.py`): entry point that configures routers, middleware, and startup/shutdown events.
|
||||
- **Routers** (`routes/`): modular route handlers for scenarios, parameters, costs, consumption, production, equipment, maintenance, simulations, and reporting. Each router defines RESTful endpoints, request/response schemas, and orchestrates service calls.
|
||||
- leveraging a shared dependency module (`routes/dependencies.get_db`) for SQLAlchemy session management.
|
||||
- **Models** (`models/`): SQLAlchemy ORM models representing database tables and relationships, encapsulating domain entities like Scenario, CapEx, OpEx, Consumption, ProductionOutput, Equipment, Maintenance, and SimulationResult.
|
||||
- **Services** (`services/`): business logic layer that processes data, performs calculations, and interacts with models. Key services include reporting calculations and Monte Carlo simulation scaffolding.
|
||||
- `services/settings.py`: manages application settings backed by the `application_setting` table, including CSS variable defaults, persistence, and environment-driven overrides that surface in both the API and UI.
|
||||
- **Database** (`config/database.py`): sets up the SQLAlchemy engine and session management for PostgreSQL interactions.
|
||||
|
||||
### Frontend
|
||||
|
||||
- **Templates** (`templates/`): Jinja2 templates for server-rendered HTML views, extending a shared base layout with a persistent sidebar for navigation.
|
||||
- **Static Assets** (`static/`): CSS and JavaScript files for styling and interactivity. Shared CSS variables in `static/css/main.css` define the color palette, while page-specific JS modules in `static/js/` handle dynamic behaviors.
|
||||
- **Reusable partials** (`templates/partials/components.html`): macro library that standardises select inputs, feedback/empty states, and table wrappers so pages remain consistent while keeping DOM hooks stable for existing JavaScript modules.
|
||||
- `templates/settings.html`: Settings hub that renders theme controls and environment override tables using metadata provided by `routes/ui.py`.
|
||||
- `static/js/settings.js`: applies client-side validation, form submission, and live CSS updates for theme changes, respecting environment-managed variables returned by the API.
|
||||
|
||||
### Middleware & Utilities
|
||||
|
||||
- **Middleware** (`middleware/validation.py`): applies JSON validation before requests reach routers.
|
||||
- **Testing** (`tests/unit/`): pytest suite covering route and service behavior, including UI rendering checks and negative-path router validation tests to ensure consistent HTTP error semantics. Playwright end-to-end coverage is planned for core smoke flows (dashboard load, scenario inputs, reporting) and will be added to CI once the scaffolding is complete.
|
||||
|
||||
## Module Map (code)
|
||||
|
||||
- `scenario.py`: central scenario entity with relationships to cost, consumption, production, equipment, maintenance, and simulation results.
|
||||
- `capex.py`, `opex.py`: financial expenditures tied to scenarios.
|
||||
- `consumption.py`, `production_output.py`: operational data tables.
|
||||
- `equipment.py`, `maintenance.py`: asset management models.
|
||||
- `simulation_result.py`: stores Monte Carlo iteration outputs.
|
||||
- `application_setting.py`: persists editable application configuration, currently focused on theme variables but designed to store future settings categories.
|
||||
|
||||
## Service Layer
|
||||
|
||||
- `reporting.py`: computes aggregates (count, min/max, mean, median, percentiles, standard deviation, variance, tail-risk metrics) from simulation results.
|
||||
- `simulation.py`: scaffolds Monte Carlo simulation logic (currently in-memory; persistence planned).
|
||||
- `currency.py`: handles currency normalization for cost tables.
|
||||
- `utils.py`: shared helper functions (e.g., statistical calculations).
|
||||
- `validation.py`: JSON schema validation middleware.
|
||||
- `database.py`: SQLAlchemy engine and session setup.
|
||||
- `dependencies.py`: FastAPI dependency injection for DB sessions.
|
||||
@@ -1,288 +0,0 @@
|
||||
---
|
||||
title: "06 — Runtime View"
|
||||
description: "Describe runtime aspects: request flows, lifecycle of key interactions, and runtime components."
|
||||
status: draft
|
||||
---
|
||||
|
||||
# 06 — Runtime View
|
||||
|
||||
## Overview
|
||||
|
||||
The runtime view focuses on the dynamic behavior of the CalMiner application during execution. It illustrates how various components interact to fulfill user requests, process data, and generate outputs. Key runtime scenarios include scenario management, parameter input handling, cost tracking, consumption tracking, production output recording, equipment management, maintenance logging, Monte Carlo simulations, and reporting.
|
||||
|
||||
## Request Flow
|
||||
|
||||
1. **User Interaction**: A user interacts with the web application through the UI, triggering actions such as creating a scenario, inputting parameters, or generating reports.
|
||||
2. **API Request**: The frontend sends HTTP requests (GET, POST, PUT, DELETE) to the appropriate API endpoints defined in the `routes/` directory.
|
||||
3. **Routing**: The FastAPI framework routes the incoming requests to the corresponding route handlers.
|
||||
4. **Service Layer**: Route handlers invoke services from the `services/` directory to process the business logic.
|
||||
5. **Database Interaction**: Services interact with the database via ORM models defined in the `models/` directory to perform CRUD operations.
|
||||
6. **Response Generation**: After processing, services return data to the route handlers, which format the response (JSON or HTML) and send it back to the frontend.
|
||||
7. **UI Update**: The frontend updates the UI based on the response, rendering new data or updating existing views.
|
||||
8. **Reporting Pipeline**: For reporting, data is aggregated from various sources, processed to generate statistics, and presented in the dashboard using Chart.js.
|
||||
9. **Monte Carlo Simulations**: Stochastic simulations are executed in the backend, generating probabilistic outcomes that are stored temporarily and used for risk analysis in reports.
|
||||
10. **Error Handling**: Throughout the process, error handling mechanisms ensure that exceptions are caught and appropriate responses are sent back to the user.
|
||||
|
||||
Request flow diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant Service
|
||||
participant Database
|
||||
|
||||
User->>Frontend: Interact with UI
|
||||
Frontend->>API: Send HTTP Request
|
||||
API->>Service: Route to Handler
|
||||
Service->>Database: Perform CRUD Operation
|
||||
Database-->>Service: Return Data
|
||||
Service-->>API: Return Processed Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Update UI
|
||||
|
||||
participant Reporting
|
||||
|
||||
Service->>Reporting: Aggregate Data
|
||||
Reporting-->>Service: Return Report Data
|
||||
Service-->>API: Return Report Response
|
||||
API-->>Frontend: Send Report Data
|
||||
Frontend-->>User: Render Report
|
||||
|
||||
participant Simulation
|
||||
Service->>Simulation: Execute Monte Carlo Simulation
|
||||
Simulation-->>Service: Return Simulation Results
|
||||
|
||||
Service-->>API: Return Simulation Data
|
||||
API-->>Frontend: Send Simulation Data
|
||||
Frontend-->>User: Display Simulation Results
|
||||
```
|
||||
|
||||
## Key Runtime Scenarios
|
||||
|
||||
### Scenario Management
|
||||
|
||||
1. User accesses the scenario list via the UI.
|
||||
2. The frontend sends a GET request to `/api/scenarios`.
|
||||
3. The `ScenarioService` retrieves scenarios from the database.
|
||||
4. The response is rendered in the UI.
|
||||
5. For scenario creation, the user submits a form, triggering a POST request to `/api/scenarios`, which the `ScenarioService` processes to create a new scenario in the database.
|
||||
6. The UI updates to reflect the new scenario.
|
||||
|
||||
Scenario management diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant ScenarioService
|
||||
participant Database
|
||||
|
||||
User->>Frontend: Access Scenario List
|
||||
Frontend->>API: GET /api/scenarios
|
||||
API->>ScenarioService: Route to Handler
|
||||
ScenarioService->>Database: Retrieve Scenarios
|
||||
Database-->>ScenarioService: Return Scenarios
|
||||
ScenarioService-->>API: Return Scenario Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Render Scenario List
|
||||
|
||||
User->>Frontend: Submit New Scenario Form
|
||||
Frontend->>API: POST /api/scenarios
|
||||
API->>ScenarioService: Route to Handler
|
||||
ScenarioService->>Database: Create New Scenario
|
||||
Database-->>ScenarioService: Confirm Creation
|
||||
ScenarioService-->>API: Return New Scenario Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Update UI with New Scenario
|
||||
```
|
||||
|
||||
### Process Parameter Input
|
||||
|
||||
1. User navigates to the parameter input form.
|
||||
2. The frontend fetches existing parameters via a GET request to `/api/parameters`.
|
||||
3. The `ParameterService` retrieves parameters from the database.
|
||||
4. The response is rendered in the UI.
|
||||
5. For parameter updates, the user submits a form, triggering a PUT request to `/api/parameters/:id`, which the `ParameterService` processes to update the parameter in the database.
|
||||
6. The UI updates to reflect the changes.
|
||||
|
||||
Parameter input diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant ParameterService
|
||||
participant Database
|
||||
|
||||
User->>Frontend: Navigate to Parameter Input Form
|
||||
Frontend->>API: GET /api/parameters
|
||||
API->>ParameterService: Route to Handler
|
||||
ParameterService->>Database: Retrieve Parameters
|
||||
Database-->>ParameterService: Return Parameters
|
||||
ParameterService-->>API: Return Parameter Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Render Parameter Form
|
||||
|
||||
User->>Frontend: Submit Parameter Update Form
|
||||
Frontend->>API: PUT /api/parameters/:id
|
||||
API->>ParameterService: Route to Handler
|
||||
ParameterService->>Database: Update Parameter
|
||||
Database-->>ParameterService: Confirm Update
|
||||
ParameterService-->>API: Return Updated Parameter Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Update UI with Updated Parameter
|
||||
```
|
||||
|
||||
### Cost Tracking
|
||||
|
||||
1. User accesses the cost tracking view.
|
||||
2. The frontend sends a GET request to `/api/costs` to fetch existing cost records.
|
||||
3. The `CostService` retrieves cost data from the database.
|
||||
4. The response is rendered in the UI.
|
||||
5. For cost updates, the user submits a form, triggering a PUT request to `/api/costs/:id`, which the `CostService` processes to update the cost record in the database.
|
||||
6. The UI updates to reflect the changes.
|
||||
|
||||
Cost tracking diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant CostService
|
||||
participant Database
|
||||
|
||||
User->>Frontend: Access Cost Tracking View
|
||||
Frontend->>API: GET /api/costs
|
||||
API->>CostService: Route to Handler
|
||||
CostService->>Database: Retrieve Cost Records
|
||||
Database-->>CostService: Return Cost Data
|
||||
CostService-->>API: Return Cost Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Render Cost Tracking View
|
||||
|
||||
User->>Frontend: Submit Cost Update Form
|
||||
Frontend->>API: PUT /api/costs/:id
|
||||
API->>CostService: Route to Handler
|
||||
CostService->>Database: Update Cost Record
|
||||
Database-->>CostService: Confirm Update
|
||||
CostService-->>API: Return Updated Cost Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Update UI with Updated Cost Data
|
||||
```
|
||||
|
||||
## Reporting Pipeline and UI Integration
|
||||
|
||||
1. **Data Sources**
|
||||
|
||||
- Scenario-linked calculations (costs, consumption, production) produce raw figures stored in dedicated tables (`capex`, `opex`, `consumption`, `production_output`).
|
||||
- Monte Carlo simulations (currently transient) generate arrays of `{ "result": float }` tuples that the dashboard or downstream tooling passes directly to reporting endpoints.
|
||||
|
||||
2. **API Contract**
|
||||
|
||||
- `POST /api/reporting/summary` accepts a JSON array of result objects and validates shape through `_validate_payload` in `routes/reporting.py`.
|
||||
- On success it returns a structured payload (`ReportSummary`) containing count, mean, median, min/max, standard deviation, and percentile values, all as floats.
|
||||
|
||||
3. **Service Layer**
|
||||
|
||||
- `services/reporting.generate_report` converts the sanitized payload into descriptive statistics using Python’s standard library (`statistics` module) to avoid external dependencies.
|
||||
- The service remains stateless; no database read/write occurs, which keeps summary calculations deterministic and idempotent.
|
||||
- Extended KPIs (surfaced in the API and dashboard):
|
||||
- `variance`: population variance computed as the square of the population standard deviation.
|
||||
- `percentile_5` and `percentile_95`: lower and upper tail interpolated percentiles for sensitivity bounds.
|
||||
- `value_at_risk_95`: 5th percentile threshold representing the minimum outcome within a 95% confidence band.
|
||||
- `expected_shortfall_95`: mean of all outcomes at or below the `value_at_risk_95`, highlighting tail exposure.
|
||||
|
||||
4. **UI Consumption**
|
||||
|
||||
- `templates/Dashboard.html` posts the user-provided dataset to the summary endpoint, renders metric cards for each field, and charts the distribution using Chart.js.
|
||||
- `SUMMARY_FIELDS` now includes variance, 5th/10th/90th/95th percentiles, and tail-risk metrics (VaR/Expected Shortfall at 95%); tooltip annotations surface the tail metrics alongside the percentile line chart.
|
||||
- Error handling surfaces HTTP failures inline so users can address malformed JSON or backend availability issues without leaving the page.
|
||||
|
||||
Reporting pipeline diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant ReportingService
|
||||
|
||||
User->>Frontend: Input Data for Reporting
|
||||
Frontend->>API: POST /api/reporting/summary
|
||||
API->>ReportingService: Route to Handler
|
||||
ReportingService->>ReportingService: Validate Payload
|
||||
ReportingService->>ReportingService: Compute Statistics
|
||||
ReportingService-->>API: Return Report Summary
|
||||
API-->>Frontend: Send Report Summary
|
||||
Frontend-->>User: Render Report Metrics and Charts
|
||||
```
|
||||
|
||||
## Monte Carlo Simulation Execution
|
||||
|
||||
1. User initiates a Monte Carlo simulation via the UI.
|
||||
2. The frontend sends a POST request to `/api/simulations/run` with simulation parameters.
|
||||
3. The `SimulationService` executes the Monte Carlo logic, generating stochastic results.
|
||||
4. The results are temporarily stored and returned to the frontend.
|
||||
5. The UI displays the simulation results and allows users to trigger reporting based on these outcomes.
|
||||
6. The reporting pipeline processes the simulation results as described above.
|
||||
7. Error handling ensures that any issues during simulation execution are communicated back to the user.
|
||||
Monte Carlo simulation diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant SimulationService
|
||||
|
||||
User->>Frontend: Input Simulation Parameters
|
||||
Frontend->>API: POST /api/simulations/run
|
||||
API->>SimulationService: Route to Handler
|
||||
SimulationService->>SimulationService: Execute Monte Carlo Logic
|
||||
SimulationService-->>API: Return Simulation Results
|
||||
API-->>Frontend: Send Simulation Results
|
||||
Frontend-->>User: Render Simulation Results
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
Throughout the runtime processes, error handling mechanisms are implemented to catch exceptions and provide meaningful feedback to users. Common error scenarios include:
|
||||
|
||||
- Invalid input data
|
||||
- Database connection issues
|
||||
- Simulation execution errors
|
||||
- Reporting calculation failures
|
||||
- API endpoint unavailability
|
||||
- Timeouts during long-running operations
|
||||
- Unauthorized access attempts
|
||||
- Data validation failures
|
||||
- Resource not found errors
|
||||
|
||||
Error handling diagram:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant Frontend
|
||||
participant API
|
||||
participant Service
|
||||
|
||||
User->>Frontend: Perform Action
|
||||
Frontend->>API: Send Request
|
||||
API->>Service: Route to Handler
|
||||
Service->>Service: Process Request
|
||||
alt Success
|
||||
Service-->>API: Return Data
|
||||
API-->>Frontend: Send Response
|
||||
Frontend-->>User: Update UI
|
||||
else Error
|
||||
Service-->>API: Return Error
|
||||
API-->>Frontend: Send Error Response
|
||||
Frontend-->>User: Display Error Message
|
||||
end
|
||||
```
|
||||
@@ -1,103 +0,0 @@
|
||||
---
|
||||
title: "07 — Deployment View"
|
||||
description: "Describe deployment topology, infrastructure components, and environments (dev/stage/prod)."
|
||||
status: draft
|
||||
---
|
||||
|
||||
<!-- markdownlint-disable-next-line MD025 -->
|
||||
|
||||
# 07 — Deployment View
|
||||
|
||||
## Deployment Topology
|
||||
|
||||
The CalMiner application is deployed using a multi-tier architecture consisting of the following layers:
|
||||
|
||||
1. **Client Layer**: This layer consists of web browsers that interact with the application through a user interface rendered by Jinja2 templates and enhanced with JavaScript (Chart.js for dashboards).
|
||||
2. **Web Application Layer**: This layer hosts the FastAPI application, which handles API requests, business logic, and serves HTML templates. It communicates with the database layer for data persistence.
|
||||
3. **Database Layer**: This layer consists of a PostgreSQL database that stores all application data, including scenarios, parameters, costs, consumption, production outputs, equipment, maintenance logs, and simulation results.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A["Client Layer<br/>(Web Browsers)"] --> B["Web Application Layer<br/>(FastAPI)"]
|
||||
B --> C["Database Layer<br/>(PostgreSQL)"]
|
||||
```
|
||||
|
||||
## Infrastructure Components
|
||||
|
||||
The infrastructure components for the application include:
|
||||
|
||||
- **Web Server**: Hosts the FastAPI application and serves API endpoints.
|
||||
- **Database Server**: PostgreSQL database for persisting application data.
|
||||
- **Static File Server**: Serves static assets such as CSS, JavaScript, and image files.
|
||||
- **Reverse Proxy (optional)**: An Nginx or Apache server can be used as a reverse proxy.
|
||||
- **Containerization**: Docker images are generated via the repository `Dockerfile`, using a multi-stage build to keep the final runtime minimal.
|
||||
- **CI/CD Pipeline**: Automated pipelines (Gitea Actions) run tests, build/push Docker images, and trigger deployments.
|
||||
- **Cloud Infrastructure (optional)**: The application can be deployed on cloud platforms.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Web Server] --> B[Database Server]
|
||||
A --> C[Static File Server]
|
||||
A --> D[Reverse Proxy]
|
||||
A --> E[Containerization]
|
||||
A --> F[CI/CD Pipeline]
|
||||
A --> G[Cloud Infrastructure]
|
||||
```
|
||||
|
||||
## Environments
|
||||
|
||||
The application can be deployed in multiple environments to support development, testing, and production:
|
||||
|
||||
### Development Environment
|
||||
|
||||
The development environment is set up for local development and testing. It includes:
|
||||
|
||||
- Local PostgreSQL instance (docker compose recommended, script available at `docker-compose.postgres.yml`)
|
||||
- FastAPI server running in debug mode
|
||||
|
||||
### Testing Environment
|
||||
|
||||
The testing environment is set up for automated testing and quality assurance. It includes:
|
||||
|
||||
- Staging PostgreSQL instance
|
||||
- FastAPI server running in testing mode
|
||||
- Automated test suite (e.g., pytest) for running unit and integration tests
|
||||
|
||||
### Production Environment
|
||||
|
||||
The production environment is set up for serving live traffic and includes:
|
||||
|
||||
- Production PostgreSQL instance
|
||||
- FastAPI server running in production mode
|
||||
- Load balancer (e.g., Nginx) for distributing incoming requests
|
||||
- Monitoring and logging tools for tracking application performance
|
||||
|
||||
## Containerized Deployment Flow
|
||||
|
||||
The Docker-based deployment path aligns with the solution strategy documented in [04 — Solution Strategy](04_solution_strategy.md) and the CI practices captured in [14 — Testing & CI](14_testing_ci.md).
|
||||
|
||||
### Image Build
|
||||
|
||||
- The multi-stage `Dockerfile` installs dependencies in a builder layer (including system compilers and Python packages) and copies only the required runtime artifacts to the final image.
|
||||
- Build arguments are minimal; database configuration is supplied at runtime via granular variables (`DATABASE_DRIVER`, `DATABASE_HOST`, `DATABASE_PORT`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_NAME`, optional `DATABASE_SCHEMA`). Secrets and configuration should be passed via environment variables or an orchestrator.
|
||||
- The resulting image exposes port `8000` and starts `uvicorn main:app` (see [README.md](../../README.md)).
|
||||
|
||||
### Runtime Environment
|
||||
|
||||
- For single-node deployments, run the container alongside PostgreSQL/Redis using Docker Compose or an equivalent orchestrator.
|
||||
- A reverse proxy (e.g., Nginx) terminates TLS and forwards traffic to the container on port `8000`.
|
||||
- Migrations must be applied prior to rolling out a new image; automation can hook into the deploy step to run `scripts/run_migrations.py`.
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
- Gitea Actions workflows reside under `.gitea/workflows/`.
|
||||
- `test.yml` executes the pytest suite using cached pip dependencies.
|
||||
- `build-and-push.yml` logs into the container registry, rebuilds the Docker image using GitHub Actions cache-backed layers, and pushes `latest` (and additional tags as required).
|
||||
- `deploy.yml` connects to the target host via SSH, pulls the pushed tag, stops any existing container, and launches the new version.
|
||||
- Required secrets: `REGISTRY_URL`, `REGISTRY_USERNAME`, `REGISTRY_PASSWORD`, `SSH_HOST`, `SSH_USERNAME`, `SSH_PRIVATE_KEY`.
|
||||
- Extend these workflows when introducing staging/blue-green deployments; keep cross-links with [14 — Testing & CI](14_testing_ci.md) up to date.
|
||||
|
||||
## Integrations and Future Work (deployment-related)
|
||||
|
||||
- **Persistence of results**: `/api/simulations/run` currently returns in-memory results; next iteration should persist to `simulation_result` and reference scenarios.
|
||||
- **Deployment**: implement infrastructure-as-code (e.g., Terraform/Ansible) to provision the hosting environment and maintain parity across dev/stage/prod.
|
||||
@@ -1,64 +0,0 @@
|
||||
---
|
||||
title: "08 — Concepts"
|
||||
description: "Document key concepts, domain models, and terminology used throughout the architecture documentation."
|
||||
status: draft
|
||||
---
|
||||
|
||||
# 08 — Concepts
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Scenario
|
||||
|
||||
A `scenario` represents a distinct mining project configuration, encapsulating all relevant parameters, costs, consumption, production outputs, equipment, maintenance logs, and simulation results. Each scenario is independent, allowing users to model and analyze different mining strategies.
|
||||
|
||||
### Parameterization
|
||||
|
||||
Parameters are defined for each scenario to capture inputs such as resource consumption rates, production targets, cost factors, and equipment specifications. Parameters can have fixed values or be linked to probability distributions for stochastic simulations.
|
||||
|
||||
### Monte Carlo Simulation
|
||||
|
||||
The Monte Carlo simulation engine allows users to perform risk analysis by running multiple iterations of a scenario with varying input parameters based on defined probability distributions. This helps in understanding the range of possible outcomes and their associated probabilities.
|
||||
|
||||
## Domain Model
|
||||
|
||||
The domain model consists of the following key entities:
|
||||
|
||||
- `Scenario`: Represents a mining project configuration.
|
||||
- `Parameter`: Input values for scenarios, which can be fixed or probabilistic.
|
||||
- `Cost`: Tracks capital and operational expenditures.
|
||||
- `Consumption`: Records resource usage.
|
||||
- `ProductionOutput`: Captures production metrics.
|
||||
- `Equipment`: Represents mining equipment associated with a scenario.
|
||||
- `Maintenance`: Logs maintenance events for equipment.
|
||||
- `SimulationResult`: Stores results from Monte Carlo simulations.
|
||||
- `Distribution`: Defines probability distributions for stochastic parameters.
|
||||
- `User`: Represents application users and their roles.
|
||||
- `Report`: Generated reports summarizing scenario analyses.
|
||||
- `Dashboard`: Visual representation of key performance indicators and metrics.
|
||||
- `AuditLog`: Tracks changes and actions performed within the application.
|
||||
- `Notification`: Alerts and messages related to scenario events and updates.
|
||||
- `Tag`: Labels for categorizing scenarios and other entities.
|
||||
- `Attachment`: Files associated with scenarios, such as documents or images.
|
||||
- `Version`: Tracks different versions of scenarios and their configurations.
|
||||
|
||||
### Detailed Domain Models
|
||||
|
||||
See [Domain Models](08_concepts/08_01_domain_models.md) document for detailed class diagrams and entity relationships.
|
||||
|
||||
## Data Model Highlights
|
||||
|
||||
- `scenario`: central entity describing a mining scenario; owns relationships to cost, consumption, production, equipment, and maintenance tables.
|
||||
- `capex`, `opex`: monetary tracking linked to scenarios.
|
||||
- `consumption`: resource usage entries parameterized by scenario and description.
|
||||
- `parameter`: scenario inputs with base `value` and optional distribution linkage via `distribution_id`, `distribution_type`, and JSON `distribution_parameters` to support simulation sampling.
|
||||
- `production_output`: production metrics per scenario.
|
||||
- `equipment` and `maintenance`: equipment inventory and maintenance events with dates/costs.
|
||||
- `simulation_result`: staging table for future Monte Carlo outputs (not yet populated by `run_simulation`).
|
||||
- `application_setting`: centralized key/value store for UI and system configuration, supporting typed values, categories, and editability flags so administrators can manage theme variables and future global options without code changes.
|
||||
|
||||
Foreign keys secure referential integrity between domain tables and their scenarios, enabling per-scenario analytics.
|
||||
|
||||
### Detailed Data Models
|
||||
|
||||
See [Data Models](08_concepts/08_02_data_models.md) document for detailed ER diagrams and table descriptions.
|
||||
@@ -1,106 +0,0 @@
|
||||
# Data Models
|
||||
|
||||
## Data Model Highlights
|
||||
|
||||
- `scenario`: central entity describing a mining scenario; owns relationships to cost, consumption, production, equipment, and maintenance tables.
|
||||
- `capex`, `opex`: monetary tracking linked to scenarios.
|
||||
- `consumption`: resource usage entries parameterized by scenario and description.
|
||||
- `parameter`: scenario inputs with base `value` and optional distribution linkage via `distribution_id`, `distribution_type`, and JSON `distribution_parameters` to support simulation sampling.
|
||||
- `production_output`: production metrics per scenario.
|
||||
- `equipment` and `maintenance`: equipment inventory and maintenance events with dates/costs.
|
||||
- `simulation_result`: staging table for future Monte Carlo outputs (not yet populated by `run_simulation`).
|
||||
|
||||
Foreign keys secure referential integrity between domain tables and their scenarios, enabling per-scenario analytics.
|
||||
|
||||
## Schema Diagrams
|
||||
|
||||
```mermaid
|
||||
erDiagram
|
||||
SCENARIO ||--o{ CAPEX : has
|
||||
SCENARIO ||--o{ OPEX : has
|
||||
SCENARIO ||--o{ CONSUMPTION : has
|
||||
SCENARIO ||--o{ PARAMETER : has
|
||||
SCENARIO ||--o{ PRODUCTION_OUTPUT : has
|
||||
SCENARIO ||--o{ EQUIPMENT : has
|
||||
EQUIPMENT ||--o{ MAINTENANCE : has
|
||||
SCENARIO ||--o{ SIMULATION_RESULT : has
|
||||
|
||||
SCENARIO {
|
||||
int id PK
|
||||
string name
|
||||
string description
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
CAPEX {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
float amount
|
||||
string description
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
OPEX {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
float amount
|
||||
string description
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
CONSUMPTION {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
string resource_type
|
||||
float quantity
|
||||
string description
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
|
||||
PRODUCTION_OUTPUT {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
float tonnage
|
||||
float recovery_rate
|
||||
float revenue
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
EQUIPMENT {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
string name
|
||||
string type
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
MAINTENANCE {
|
||||
int id PK
|
||||
int equipment_id FK
|
||||
date maintenance_date
|
||||
float cost
|
||||
string description
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
SIMULATION_RESULT {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
json result_data
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
PARAMETER {
|
||||
int id PK
|
||||
int scenario_id FK
|
||||
string name
|
||||
float value
|
||||
int distribution_id FK
|
||||
string distribution_type
|
||||
json distribution_parameters
|
||||
datetime created_at
|
||||
datetime updated_at
|
||||
}
|
||||
|
||||
```
|
||||
@@ -1,5 +0,0 @@
|
||||
# 09 — Architecture Decisions
|
||||
|
||||
Status: skeleton
|
||||
|
||||
Record important architectural decisions, their rationale, and alternatives considered.
|
||||
@@ -1,5 +0,0 @@
|
||||
# 10 — Quality Requirements
|
||||
|
||||
Status: skeleton
|
||||
|
||||
List non-functional requirements (performance, scalability, reliability, security) and measurable acceptance criteria.
|
||||
@@ -1,5 +0,0 @@
|
||||
# 11 — Technical Risks
|
||||
|
||||
Status: skeleton
|
||||
|
||||
Document potential technical risks, mitigation strategies, and monitoring suggestions.
|
||||
@@ -1,5 +0,0 @@
|
||||
# 12 — Glossary
|
||||
|
||||
Status: skeleton
|
||||
|
||||
Project glossary and definitions for domain-specific terms.
|
||||
@@ -1,85 +0,0 @@
|
||||
# 13 — UI, templates and styling
|
||||
|
||||
Status: migrated
|
||||
|
||||
This chapter collects UI integration notes, reusable template components, styling audit points and per-page UI data/actions.
|
||||
|
||||
## Reusable Template Components
|
||||
|
||||
To reduce duplication across form-centric pages, shared Jinja macros live in `templates/partials/components.html`.
|
||||
|
||||
- `select_field(...)`: renders labeled `<select>` controls with consistent placeholder handling and optional preselection. Existing JavaScript modules continue to target the generated IDs, so template calls must pass the same identifiers (`consumption-form-scenario`, etc.).
|
||||
- `feedback(...)` and `empty_state(...)`: wrap status messages in standard classes (`feedback`, `empty-state`) with optional `hidden` toggles so scripts can control visibility without reimplementing markup.
|
||||
- `table_container(...)`: provides a semantic wrapper and optional heading around tabular content; the `{% call %}` body supplies the `<thead>`, `<tbody>`, and `<tfoot>` elements while the macro applies the `table-container` class and manages hidden state.
|
||||
|
||||
Pages like `templates/consumption.html` and `templates/costs.html` already consume these helpers to keep markup aligned while preserving existing JavaScript selectors.
|
||||
|
||||
Import macros via:
|
||||
|
||||
```jinja
|
||||
{% from "partials/components.html" import select_field, feedback, table_container with context %}
|
||||
```
|
||||
|
||||
## Styling Audit Notes (2025-10-21)
|
||||
|
||||
- **Spacing**: Panels (`section.panel`) sometimes lack consistent vertical rhythm between headings, form grids, and tables. Extra top/bottom margin utilities would help align content.
|
||||
- **Typography**: Headings rely on browser defaults; font-size scale is uneven between `<h2>` and `<h3>`. Define explicit scale tokens (e.g., `--font-size-lg`) for predictable sizing.
|
||||
- **Forms**: `.form-grid` uses fixed column gaps that collapse on small screens; introduce responsive grid rules to stack gracefully below ~768px.
|
||||
- **Tables**: `.table-container` wrappers need overflow handling for narrow viewports; consider `overflow-x: auto` with padding adjustments.
|
||||
- **Feedback/Empty states**: Messages use default font weight and spacing; a utility class for margin/padding would ensure consistent separation from forms or tables.
|
||||
|
||||
## Per-page data & actions
|
||||
|
||||
Short reference of per-page APIs and primary actions used by templates and scripts.
|
||||
|
||||
- Scenarios (`templates/ScenarioForm.html`):
|
||||
|
||||
- Data: `GET /api/scenarios/`
|
||||
- Actions: `POST /api/scenarios/`
|
||||
|
||||
- Parameters (`templates/ParameterInput.html`):
|
||||
|
||||
- Data: `GET /api/scenarios/`, `GET /api/parameters/`
|
||||
- Actions: `POST /api/parameters/`
|
||||
|
||||
- Costs (`templates/costs.html`):
|
||||
|
||||
- Data: `GET /api/costs/capex`, `GET /api/costs/opex`
|
||||
- Actions: `POST /api/costs/capex`, `POST /api/costs/opex`
|
||||
|
||||
- Consumption (`templates/consumption.html`):
|
||||
|
||||
- Data: `GET /api/consumption/`
|
||||
- Actions: `POST /api/consumption/`
|
||||
|
||||
- Production (`templates/production.html`):
|
||||
|
||||
- Data: `GET /api/production/`
|
||||
- Actions: `POST /api/production/`
|
||||
|
||||
- Equipment (`templates/equipment.html`):
|
||||
|
||||
- Data: `GET /api/equipment/`
|
||||
- Actions: `POST /api/equipment/`
|
||||
|
||||
- Maintenance (`templates/maintenance.html`):
|
||||
|
||||
- Data: `GET /api/maintenance/` (pagination support)
|
||||
- Actions: `POST /api/maintenance/`, `PUT /api/maintenance/{id}`, `DELETE /api/maintenance/{id}`
|
||||
|
||||
- Simulations (`templates/simulations.html`):
|
||||
|
||||
- Data: `GET /api/scenarios/`, `GET /api/parameters/`
|
||||
- Actions: `POST /api/simulations/run`
|
||||
|
||||
- Reporting (`templates/reporting.html` and `templates/Dashboard.html`):
|
||||
- Data: `POST /api/reporting/summary` (accepts arrays of `{ "result": float }` objects)
|
||||
- Actions: Trigger summary refreshes and export/download actions.
|
||||
|
||||
## UI Template Audit (2025-10-20)
|
||||
|
||||
- Existing HTML templates: `ScenarioForm.html`, `ParameterInput.html`, and `Dashboard.html` (reporting summary view).
|
||||
- Coverage gaps remain for costs, consumption, production, equipment, maintenance, and simulation workflows—no dedicated templates yet.
|
||||
- Shared layout primitives (navigation/header/footer) are absent; current pages duplicate boilerplate markup.
|
||||
- Dashboard currently covers reporting metrics but should be wired to a central `/` route once the shared layout lands.
|
||||
- Next steps: introduce a `base.html`, refactor existing templates to extend it, and scaffold placeholder pages for the remaining features.
|
||||
@@ -1,118 +0,0 @@
|
||||
# 14 Testing, CI and Quality Assurance
|
||||
|
||||
This chapter centralizes the project's testing strategy, CI configuration, and quality targets.
|
||||
|
||||
## Overview
|
||||
|
||||
CalMiner uses a combination of unit, integration, and end-to-end tests to ensure quality.
|
||||
|
||||
### Frameworks
|
||||
|
||||
- Backend: pytest for unit and integration tests.
|
||||
- Frontend: pytest with Playwright for E2E tests.
|
||||
- Database: pytest fixtures with psycopg2 for DB tests.
|
||||
|
||||
### Test Types
|
||||
|
||||
- Unit Tests: Test individual functions/modules.
|
||||
- Integration Tests: Test API endpoints and DB interactions.
|
||||
- E2E Tests: Playwright for full user flows.
|
||||
|
||||
### CI/CD
|
||||
|
||||
- Use Gitea Actions for CI/CD; workflows live under `.gitea/workflows/`.
|
||||
- `test.yml` runs on every push, provisions a temporary Postgres 16 service, waits for readiness, executes the setup script in dry-run and live modes, installs Playwright browsers, and finally runs the full pytest suite.
|
||||
- `build-and-push.yml` builds the Docker image with `docker/build-push-action@v2`, reusing GitHub Actions cache-backed layers, and pushes to the Gitea registry.
|
||||
- `deploy.yml` connects to the target host (via `appleboy/ssh-action`) to pull the freshly pushed image and restart the container.
|
||||
- Mandatory secrets: `REGISTRY_USERNAME`, `REGISTRY_PASSWORD`, `REGISTRY_URL`, `SSH_HOST`, `SSH_USERNAME`, `SSH_PRIVATE_KEY`.
|
||||
- Run tests on pull requests to shared branches; enforce coverage target ≥80% (pytest-cov).
|
||||
|
||||
### Running Tests
|
||||
|
||||
- Unit: `pytest tests/unit/`
|
||||
- E2E: `pytest tests/e2e/`
|
||||
- All: `pytest`
|
||||
|
||||
### Test Directory Structure
|
||||
|
||||
Organize tests under the `tests/` directory mirroring the application structure:
|
||||
|
||||
```text
tests/
  unit/
    test_<module>.py
  e2e/
    test_<flow>.py
  fixtures/
    conftest.py
```
|
||||
|
||||
### Fixtures and Test Data
|
||||
|
||||
- Define reusable fixtures in `tests/fixtures/conftest.py`.
|
||||
- Use temporary in-memory databases or isolated schemas for DB tests.
|
||||
- Load sample data via fixtures for consistent test environments.
|
||||
- Leverage the `seeded_ui_data` fixture in `tests/unit/conftest.py` to populate scenarios with related cost, maintenance, and simulation records for deterministic UI route checks.
|
||||
|
||||
### E2E (Playwright) Tests
|
||||
|
||||
The E2E test suite, located in `tests/e2e/`, uses Playwright to simulate user interactions in a live browser environment. These tests are designed to catch issues in the UI, frontend-backend integration, and overall application flow.
|
||||
|
||||
#### Fixtures
|
||||
|
||||
- `live_server`: A session-scoped fixture that launches the FastAPI application in a separate process, making it accessible to the browser.
|
||||
- `playwright_instance`, `browser`, `page`: Standard `pytest-playwright` fixtures for managing the Playwright instance, browser, and individual pages.
|
||||
|
||||
#### Smoke Tests
|
||||
|
||||
- UI Page Loading: `test_smoke.py` contains a parameterized test that systematically navigates to all UI routes to ensure they load without errors, have the correct title, and display a primary heading.
|
||||
- Form Submissions: Each major form in the application has a corresponding test file (e.g., `test_scenarios.py`, `test_costs.py`) that verifies: page loads, create item by filling the form, success message, and UI updates.
|
||||
|
||||
### Running E2E Tests
|
||||
|
||||
To run the Playwright tests:
|
||||
|
||||
```bash
|
||||
pytest tests/e2e/
|
||||
````
|
||||
|
||||
To run headed mode:
|
||||
|
||||
```bash
|
||||
pytest tests/e2e/ --headed
|
||||
```
|
||||
|
||||
### Mocking and Dependency Injection
|
||||
|
||||
- Use `unittest.mock` to mock external dependencies.
|
||||
- Inject dependencies via function parameters or FastAPI's dependency overrides in tests.
|
||||
|
||||
### Code Coverage
|
||||
|
||||
- Install `pytest-cov` to generate coverage reports.
|
||||
- Run with coverage: `pytest --cov --cov-report=term` (use `--cov-report=html` when visualizing hotspots).
|
||||
- Target 95%+ overall coverage. Focus on historically low modules: `services/simulation.py`, `services/reporting.py`, `middleware/validation.py`, and `routes/ui.py`.
|
||||
- Latest snapshot (2025-10-21): `pytest --cov=. --cov-report=term-missing` returns **91%** overall coverage.
|
||||
|
||||
### CI Integration
|
||||
|
||||
`test.yml` encapsulates the steps below:
|
||||
|
||||
- Check out the repository and set up Python 3.10.
|
||||
- Configure the runner's apt proxy (if available), install project dependencies (requirements + test extras), and download Playwright browsers.
|
||||
- Run `pytest` (extend with `--cov` flags when enforcing coverage).
|
||||
|
||||
> The pip cache step is temporarily disabled in `test.yml` until the self-hosted cache service is exposed (see `docs/ci-cache-troubleshooting.md`).
|
||||
|
||||
`build-and-push.yml` adds:
|
||||
|
||||
- Registry login using repository secrets.
|
||||
- Docker image build/push with GHA cache storage (`cache-from/cache-to` set to `type=gha`).
|
||||
|
||||
`deploy.yml` handles:
|
||||
|
||||
- SSH into the deployment host.
|
||||
- Pull the tagged image from the registry.
|
||||
- Stop, remove, and relaunch the `calminer` container exposing port 8000.
|
||||
|
||||
When adding new workflows, mirror this structure to ensure secrets, caching, and deployment steps remain aligned with the production environment.
|
||||
@@ -1,77 +0,0 @@
|
||||
# 15 Development Setup Guide
|
||||
|
||||
This document outlines the local development environment and steps to get the project running.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Python (version 3.10+)
|
||||
- PostgreSQL (version 13+)
|
||||
- Git
|
||||
|
||||
## Clone and Project Setup
|
||||
|
||||
```powershell
# Clone the repository
git clone https://git.allucanget.biz/allucanget/calminer.git
cd calminer
```
|
||||
|
||||
## Virtual Environment
|
||||
|
||||
```powershell
# Create and activate a virtual environment
python -m venv .venv
.\.venv\Scripts\Activate.ps1
```
|
||||
|
||||
## Install Dependencies
|
||||
|
||||
```powershell
pip install -r requirements.txt
```
|
||||
|
||||
## Database Setup
|
||||
|
||||
1. Create database user:
|
||||
|
||||
```sql
|
||||
CREATE USER calminer_user WITH PASSWORD 'your_password';
|
||||
````
|
||||
|
||||
1. Create database:
|
||||
|
||||
```sql
CREATE DATABASE calminer;
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
1. Copy `.env.example` to `.env` at project root.
|
||||
1. Edit `.env` to set database connection details:
|
||||
|
||||
```dotenv
|
||||
DATABASE_DRIVER=postgresql
|
||||
DATABASE_HOST=localhost
|
||||
DATABASE_PORT=5432
|
||||
DATABASE_USER=calminer_user
|
||||
DATABASE_PASSWORD=your_password
|
||||
DATABASE_NAME=calminer
|
||||
DATABASE_SCHEMA=public
|
||||
````
|
||||
|
||||
1. The application uses `python-dotenv` to load these variables. A legacy `DATABASE_URL` value is still accepted if the granular keys are omitted.
|
||||
|
||||
## Running the Application
|
||||
|
||||
```powershell
# Start the FastAPI server
uvicorn main:app --reload
```
|
||||
|
||||
## Testing
|
||||
|
||||
```powershell
|
||||
pytest
|
||||
````
|
||||
|
||||
E2E tests use Playwright and a session-scoped `live_server` fixture that starts the app at `http://localhost:8001` for browser-driven tests.
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
title: "CalMiner Architecture Documentation"
|
||||
description: "arc42-based architecture documentation for the CalMiner project"
|
||||
---
|
||||
|
||||
# Architecture documentation (arc42 mapping)
|
||||
|
||||
This folder mirrors the arc42 chapter structure (adapted to Markdown).
|
||||
|
||||
## Files
|
||||
|
||||
- [01 Introduction and Goals](01_introduction_and_goals.md)
|
||||
- [02 Architecture Constraints](02_architecture_constraints.md)
|
||||
- [03 Context and Scope](03_context_and_scope.md)
|
||||
- [04 Solution Strategy](04_solution_strategy.md)
|
||||
- [05 Building Block View](05_building_block_view.md)
|
||||
- [06 Runtime View](06_runtime_view.md)
|
||||
- [07 Deployment View](07_deployment_view.md)
|
||||
- [08 Concepts](08_concepts.md)
|
||||
- [09 Architecture Decisions](09_architecture_decisions.md)
|
||||
- [10 Quality Requirements](10_quality_requirements.md)
|
||||
- [11 Technical Risks](11_technical_risks.md)
|
||||
- [12 Glossary](12_glossary.md)
|
||||
- [13 UI and Style](13_ui_and_style.md)
|
||||
- [14 Testing & CI](14_testing_ci.md)
|
||||
- [15 Development Setup](15_development_setup.md)
|
||||
@@ -1,27 +0,0 @@
|
||||
# CI Cache Troubleshooting
|
||||
|
||||
## Background
|
||||
|
||||
The test workflow (`.gitea/workflows/test.yml`) uses the `actions/cache` action to reuse the pip download cache located at `~/.cache/pip`. The cache key now hashes both `requirements.txt` and `requirements-test.txt` so the cache stays aligned with dependency changes.
|
||||
|
||||
## Current Observation
|
||||
|
||||
Recent CI runs report the following warning when the cache step executes:
|
||||
|
||||
```text
|
||||
::warning::Failed to restore: getCacheEntry failed: connect ETIMEDOUT 172.17.0.5:40181
|
||||
Cache not found for input keys: Linux-pip-<hash>, Linux-pip-
|
||||
```
|
||||
|
||||
The timeout indicates the runner cannot reach the cache backend rather than a normal cache miss.
|
||||
|
||||
## Recommended Follow-Up
|
||||
|
||||
- Confirm that the Actions cache service is enabled for the CI environment (Gitea runners require the cache server URL to be provided via `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_URL`).
|
||||
- Verify network connectivity from the runner to the cache service endpoint and ensure required ports are open.
|
||||
- After connectivity is restored, rerun the workflow to allow the cache to be populated and confirm subsequent runs restore the cache without warnings.
|
||||
|
||||
## Interim Guidance
|
||||
|
||||
- The workflow will proceed without cached dependencies, but package installs may take longer.
|
||||
- Keep the cache step in place so it begins working automatically once the infrastructure is configured.
|
||||
@@ -1,31 +0,0 @@
|
||||
# Setup Script Idempotency Audit (2025-10-25)
|
||||
|
||||
This note captures the current evaluation of idempotent behaviour for `scripts/setup_database.py` and outlines follow-up actions.
|
||||
|
||||
## Admin Tasks
|
||||
|
||||
- **ensure_database**: guarded by `SELECT 1 FROM pg_database`; re-runs safely. Failure mode: network issues or lack of privileges surface as psycopg2 errors without additional context.
|
||||
- **ensure_role**: checks `pg_roles`, creates role if missing, reapplies grants each time. Subsequent runs execute grants again but PostgreSQL tolerates repeated grants.
|
||||
- **ensure_schema**: uses `information_schema` guard and respects `--dry-run`; idempotent when schema is `public` or already present.
|
||||
|
||||
## Application Tasks
|
||||
|
||||
- **initialize_schema**: relies on SQLAlchemy `create_all(checkfirst=True)`; repeatable. Dry-run output remains descriptive.
|
||||
- **run_migrations**: new baseline workflow applies `000_base.sql` once and records legacy scripts as applied. Subsequent runs detect the baseline in `schema_migrations` and skip reapplication.
|
||||
|
||||
## Seeding
|
||||
|
||||
- `seed_baseline_data` seeds currencies and measurement units with upsert logic. Verification now raises on missing data, preventing silent failures.
|
||||
- Running `--seed-data` repeatedly performs `ON CONFLICT` updates, making the operation safe.
|
||||
|
||||
## Outstanding Risks
|
||||
|
||||
1. Baseline migration relies on legacy files being present when first executed; if removed beforehand, old entries are never marked. (Low risk given repository state.)
|
||||
2. `ensure_database` and `ensure_role` do not wrap SQL execution errors with additional context beyond psycopg2 messages.
|
||||
3. Baseline verification assumes migrations and seeding run in the same process; manual runs of `scripts/seed_data.py` without the baseline could still fail.
|
||||
|
||||
## Recommended Actions
|
||||
|
||||
- Add regression tests ensuring repeated executions of key CLI paths (`--run-migrations`, `--seed-data`) result in no-op behaviour after the first run.
|
||||
- Extend logging/error handling for admin operations to provide clearer messages on repeated failures.
|
||||
- Consider a preflight check when migrations directory lacks legacy files but baseline is pending, warning about potential drift.
|
||||
@@ -1,29 +0,0 @@
|
||||
# Setup Script Logging Audit (2025-10-25)
|
||||
|
||||
The following observations capture current logging behaviour in `scripts/setup_database.py` and highlight areas requiring improved error handling and messaging.
|
||||
|
||||
## Connection Validation
|
||||
|
||||
- `validate_admin_connection` and `validate_application_connection` log entry/exit messages and raise `RuntimeError` with context if connection fails. This coverage is sufficient.
|
||||
- `ensure_database` logs creation states but does not surface connection or SQL exceptions beyond the initial connection acquisition. When the inner `cursor.execute` calls fail, the exceptions bubble without contextual logging.
|
||||
|
||||
## Migration Runner
|
||||
|
||||
- Lists pending migrations and logs each application attempt.
|
||||
- When the baseline is pending, the script logs whether it is a dry-run or live application and records legacy file marking. However, if `_apply_migration_file` raises an exception, the caller re-raises after logging the failure; there is no wrapping message guiding users toward manual cleanup.
|
||||
- Legacy migration marking happens silently (just info logs). Failures during the insert into `schema_migrations` would currently propagate without added guidance.
|
||||
|
||||
## Seeding Workflow
|
||||
|
||||
- `seed_baseline_data` announces each seeding phase and skips verification in dry-run mode with a log breadcrumb.
|
||||
- `_verify_seeded_data` warns about missing currencies/units and inactive defaults but does **not** raise errors, meaning CI can pass while the database is incomplete. There is no explicit log when verification succeeds.
|
||||
- `_seed_units` logs when the `measurement_unit` table is missing, which is helpful, but the warning is the only feedback; no exception is raised.
|
||||
|
||||
## Suggested Enhancements
|
||||
|
||||
1. Wrap baseline application and legacy marking in `try/except` blocks that log actionable remediation steps before re-raising.
|
||||
2. Promote seed verification failures (missing or inactive records) to exceptions so automated workflows fail fast; add success logs for clarity.
|
||||
3. Add contextual logging around currency/measurement-unit insert failures, particularly around `execute_values` calls, to aid debugging malformed data.
|
||||
4. Introduce structured logging (log codes or phases) for major steps (`CONNECT`, `MIGRATE`, `SEED`, `VERIFY`) to make scanning log files easier.
|
||||
|
||||
These findings inform the remaining TODO subtasks for enhanced error handling.
|
||||
@@ -1,53 +0,0 @@
|
||||
# Consolidated Migration Baseline Plan
|
||||
|
||||
This note outlines the content and structure of the planned baseline migration (`scripts/migrations/000_base.sql`). The objective is to capture the currently required schema changes in a single idempotent script so that fresh environments only need to apply one SQL file before proceeding with incremental migrations.
|
||||
|
||||
## Guiding Principles
|
||||
|
||||
1. **Idempotent DDL**: Every `CREATE` or `ALTER` statement must tolerate repeated execution. Use `IF NOT EXISTS` guards or existence checks (`information_schema`) where necessary.
|
||||
2. **Order of Operations**: Create reference tables first, then update dependent tables, finally enforce foreign keys and constraints.
|
||||
3. **Data Safety**: Default data seeded by migrations should be minimal and in ASCII-only form to avoid encoding issues in various shells and CI logs.
|
||||
4. **Compatibility**: The baseline must reflect the schema shape expected by the current SQLAlchemy models, API routes, and seeding scripts.
|
||||
|
||||
## Schema Elements to Include
|
||||
|
||||
### 1. `currency` Table
|
||||
|
||||
- Columns: `id SERIAL PRIMARY KEY`, `code VARCHAR(3) UNIQUE NOT NULL`, `name VARCHAR(128) NOT NULL`, `symbol VARCHAR(8)`, `is_active BOOLEAN NOT NULL DEFAULT TRUE`.
|
||||
- Index: implicit via unique constraint on `code`.
|
||||
- Seed rows matching `scripts.seed_data.CURRENCY_SEEDS` (ASCII-only symbols such as `USD$`, `CAD$`).
|
||||
- Upsert logic using `ON CONFLICT (code) DO UPDATE` to keep names/symbols in sync when rerun.
|
||||
|
||||
### 2. Currency Integration for CAPEX/OPEX
|
||||
|
||||
- Add `currency_id INTEGER` columns with `IF NOT EXISTS` guards.
|
||||
- Populate `currency_id` from legacy `currency_code` if the column exists.
|
||||
- Default null `currency_id` values to the USD row, then `ALTER` to `SET NOT NULL`.
|
||||
- Create `fk_capex_currency` and `fk_opex_currency` constraints with `ON DELETE RESTRICT` semantics.
|
||||
- Drop legacy `currency_code` column if it exists (safe because new column holds data).
|
||||
|
||||
### 3. Measurement Metadata on Consumption/Production
|
||||
|
||||
- Ensure `consumption` and `production_output` tables have `unit_name VARCHAR(64)` and `unit_symbol VARCHAR(16)` columns with `IF NOT EXISTS` guards.
|
||||
|
||||
### 4. `measurement_unit` Reference Table
|
||||
|
||||
- Columns: `id SERIAL PRIMARY KEY`, `code VARCHAR(64) UNIQUE NOT NULL`, `name VARCHAR(128) NOT NULL`, `symbol VARCHAR(16)`, `unit_type VARCHAR(32) NOT NULL`, `is_active BOOLEAN NOT NULL DEFAULT TRUE`, `created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()`, `updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()`.
|
||||
- Assume a simple trigger to maintain `updated_at` is deferred: automate via application layer later; for now, omit trigger.
|
||||
- Seed rows matching `MEASUREMENT_UNIT_SEEDS` (ASCII names/symbols). Use `ON CONFLICT (code) DO UPDATE` to keep descriptive fields aligned.
|
||||
|
||||
### 5. Transaction Handling
|
||||
|
||||
- Wrap the main operations in a single `BEGIN; ... COMMIT;` block.
|
||||
- Use subtransactions (`DO $$ ... $$;`) only where conditional logic is required (e.g., checking column existence before backfill).
|
||||
|
||||
## Migration Tracking Alignment
|
||||
|
||||
- Baseline file will be named `000_base.sql`. After execution, insert a row into `schema_migrations` with filename `000_base.sql` to keep the tracking table aligned.
|
||||
- Existing migrations (`20251021_add_currency_and_unit_fields.sql`, `20251022_create_currency_table_and_fks.sql`) remain for historical reference but will no longer be applied to new environments once the baseline is present.
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Draft `000_base.sql` reflecting the steps above.
|
||||
2. Update `run_migrations` to recognise the baseline file and mark older migrations as applied when the baseline exists.
|
||||
3. Provide documentation in `docs/quickstart.md` explaining how to reset an environment using the baseline plus seeds.
|
||||
@@ -1,253 +0,0 @@
|
||||
# Quickstart & Expanded Project Documentation
|
||||
|
||||
This document contains the expanded development, usage, testing, and migration guidance moved out of the top-level README for brevity.
|
||||
|
||||
## Development
|
||||
|
||||
To get started locally:
|
||||
|
||||
```powershell
|
||||
# Clone the repository
|
||||
git clone https://git.allucanget.biz/allucanget/calminer.git
|
||||
cd calminer
|
||||
|
||||
# Create and activate a virtual environment
|
||||
python -m venv .venv
|
||||
.\.venv\Scripts\Activate.ps1
|
||||
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Start the development server
|
||||
uvicorn main:app --reload
|
||||
```
|
||||
|
||||
## Docker-based setup
|
||||
|
||||
To build and run the application using Docker instead of a local Python environment:
|
||||
|
||||
```powershell
|
||||
# Build the application image (multi-stage build keeps runtime small)
|
||||
docker build -t calminer:latest .
|
||||
|
||||
# Start the container on port 8000
|
||||
docker run --rm -p 8000:8000 calminer:latest
|
||||
|
||||
# Supply environment variables (e.g., Postgres connection)
|
||||
docker run --rm -p 8000:8000 ^
|
||||
-e DATABASE_DRIVER="postgresql" ^
|
||||
-e DATABASE_HOST="db.host" ^
|
||||
-e DATABASE_PORT="5432" ^
|
||||
-e DATABASE_USER="calminer" ^
|
||||
-e DATABASE_PASSWORD="s3cret" ^
|
||||
-e DATABASE_NAME="calminer" ^
|
||||
-e DATABASE_SCHEMA="public" ^
|
||||
calminer:latest
|
||||
```
|
||||
|
||||
If you maintain a Postgres or Redis dependency locally, consider authoring a `docker compose` stack that pairs them with the app container. The Docker image expects the database to be reachable and migrations executed before serving traffic.
|
||||
|
||||
## Usage Overview
|
||||
|
||||
- **API base URL**: `http://localhost:8000/api`
|
||||
- Key routes include creating scenarios, parameters, costs, consumption, production, equipment, maintenance, and reporting summaries. See the `routes/` directory for full details.
|
||||
|
||||
### Theme configuration
|
||||
|
||||
- Open `/ui/settings` to access the Settings dashboard. The **Theme Colors** form lists every CSS variable persisted in the `application_setting` table. Updates apply immediately across the UI once saved.
|
||||
- Use the accompanying API endpoints for automation or integration tests:
|
||||
- `GET /api/settings/css` returns the active variables, defaults, and metadata describing any environment overrides.
|
||||
- `PUT /api/settings/css` accepts a payload such as `{"variables": {"--color-primary": "#112233"}}` and persists the change unless an environment override is in place.
|
||||
- Environment variables prefixed with `CALMINER_THEME_` win over database values. For example, setting `CALMINER_THEME_COLOR_PRIMARY="#112233"` renders the corresponding input read-only and surfaces the override in the Environment Overrides table.
|
||||
- Acceptable values include hex (`#rrggbb` or `#rrggbbaa`), `rgb()/rgba()`, and `hsl()/hsla()` expressions with the expected number of components. Invalid inputs trigger a validation error and the API responds with HTTP 422.
|
||||
|
||||
## Dashboard Preview
|
||||
|
||||
1. Start the FastAPI server and navigate to `/`.
|
||||
2. Review the headline metrics, scenario snapshot table, and cost/activity charts sourced from the current database state.
|
||||
3. Use the "Refresh Dashboard" button to pull freshly aggregated data via `/ui/dashboard/data` without reloading the page.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the unit test suite:
|
||||
|
||||
```powershell
|
||||
pytest
|
||||
```
|
||||
|
||||
E2E tests use Playwright and a session-scoped `live_server` fixture that starts the app at `http://localhost:8001` for browser-driven tests.
|
||||
|
||||
## Migrations & Baseline
|
||||
|
||||
A consolidated baseline migration (`scripts/migrations/000_base.sql`) captures all schema changes required for a fresh installation. The script is idempotent: it creates the `currency` and `measurement_unit` reference tables, provisions the `application_setting` store for configurable UI/system options, ensures consumption and production records expose unit metadata, and enforces the foreign keys used by CAPEX and OPEX.
|
||||
|
||||
Configure granular database settings in your PowerShell session before running migrations:
|
||||
|
||||
```powershell
|
||||
$env:DATABASE_DRIVER = 'postgresql'
|
||||
$env:DATABASE_HOST = 'localhost'
|
||||
$env:DATABASE_PORT = '5432'
|
||||
$env:DATABASE_USER = 'calminer'
|
||||
$env:DATABASE_PASSWORD = 's3cret'
|
||||
$env:DATABASE_NAME = 'calminer'
|
||||
$env:DATABASE_SCHEMA = 'public'
|
||||
python scripts/setup_database.py --run-migrations --seed-data --dry-run
|
||||
python scripts/setup_database.py --run-migrations --seed-data
|
||||
```
|
||||
|
||||
The dry-run invocation reports which steps would execute without making changes. The live run applies the baseline (if not already recorded in `schema_migrations`) and seeds the reference data relied upon by the UI and API.
|
||||
|
||||
> ℹ️ When `--seed-data` is supplied without `--run-migrations`, the bootstrap script automatically applies any pending SQL migrations first so the `application_setting` table (and future settings-backed features) are present before seeding.
|
||||
|
||||
> ℹ️ The application still accepts `DATABASE_URL` as a fallback if the granular variables are not set.
|
||||
|
||||
## Database bootstrap workflow
|
||||
|
||||
Provision or refresh a database instance with `scripts/setup_database.py`. Populate the required environment variables (an example lives at `config/setup_test.env.example`) and run:
|
||||
|
||||
```powershell
|
||||
# Load test credentials (PowerShell)
|
||||
Get-Content .\config\setup_test.env.example |
|
||||
ForEach-Object {
|
||||
if ($_ -and -not $_.StartsWith('#')) {
|
||||
$name, $value = $_ -split '=', 2
|
||||
Set-Item -Path Env:$name -Value $value
|
||||
}
|
||||
}
|
||||
|
||||
# Dry-run to inspect the planned actions
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v
|
||||
|
||||
# Execute the full workflow
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v
|
||||
```
|
||||
|
||||
Typical log output confirms:
|
||||
|
||||
- Admin and application connections succeed for the supplied credentials.
|
||||
- Database and role creation are idempotent (`already present` when rerun).
|
||||
- SQLAlchemy metadata either reports missing tables or `All tables already exist`.
|
||||
- Migrations list pending files and finish with `Applied N migrations` (a new database reports `Applied 1 migrations` for `000_base.sql`).
|
||||
|
||||
After a successful run the target database contains all application tables plus `schema_migrations`, and that table records each applied migration file. New installations only record `000_base.sql`; upgraded environments retain historical entries alongside the baseline.
|
||||
|
||||
### Local Postgres via Docker Compose
|
||||
|
||||
For local validation without installing Postgres directly, use the provided compose file:
|
||||
|
||||
```powershell
|
||||
docker compose -f docker-compose.postgres.yml up -d
|
||||
```
|
||||
|
||||
#### Summary
|
||||
|
||||
1. Start the Postgres container with `docker compose -f docker-compose.postgres.yml up -d`.
|
||||
2. Export the granular database environment variables (host `127.0.0.1`, port `5433`, database `calminer_local`, user/password `calminer`/`secret`).
|
||||
3. Run the setup script twice: first with `--dry-run` to preview actions, then without it to apply changes.
|
||||
4. When finished, stop and optionally remove the container/volume using `docker compose -f docker-compose.postgres.yml down`.
|
||||
|
||||
The service exposes Postgres 16 on `localhost:5433` with database `calminer_local` and role `calminer`/`secret`. When the container is running, set the granular environment variables before invoking the setup script:
|
||||
|
||||
```powershell
|
||||
$env:DATABASE_DRIVER = 'postgresql'
|
||||
$env:DATABASE_HOST = '127.0.0.1'
|
||||
$env:DATABASE_PORT = '5433'
|
||||
$env:DATABASE_USER = 'calminer'
|
||||
$env:DATABASE_PASSWORD = 'secret'
|
||||
$env:DATABASE_NAME = 'calminer_local'
|
||||
$env:DATABASE_SCHEMA = 'public'
|
||||
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v
|
||||
```
|
||||
|
||||
When testing is complete, shut down the container (and optional persistent volume) with:
|
||||
|
||||
```powershell
|
||||
docker compose -f docker-compose.postgres.yml down
|
||||
docker volume rm calminer_postgres_local_postgres_data # optional cleanup
|
||||
```
|
||||
|
||||
Document successful runs (or issues encountered) in `.github/instructions/DONE.TODO.md` for future reference.
|
||||
|
||||
### Seeding reference data
|
||||
|
||||
`scripts/seed_data.py` provides targeted control over the baseline datasets when the full setup script is not required:
|
||||
|
||||
```powershell
|
||||
python scripts/seed_data.py --currencies --units --dry-run
|
||||
python scripts/seed_data.py --currencies --units
|
||||
```
|
||||
|
||||
The seeder upserts the canonical currency catalog (`USD`, `EUR`, `CLP`, `RMB`, `GBP`, `CAD`, `AUD`) using ASCII-safe symbols (`USD$`, `EUR`, etc.) and the measurement units referenced by the UI (`tonnes`, `kilograms`, `pounds`, `liters`, `cubic_meters`, `kilowatt_hours`). The setup script invokes the same seeder when `--seed-data` is provided and verifies the expected rows afterward, warning if any are missing or inactive.
|
||||
|
||||
### Rollback guidance
|
||||
|
||||
`scripts/setup_database.py` now tracks compensating actions when it creates the database or application role. If a later step fails, the script replays those rollback actions (dropping the newly created database or role and revoking grants) before exiting. Dry runs never register rollback steps and remain read-only.
|
||||
|
||||
If the script reports that some rollback steps could not complete—for example because a connection cannot be established—rerun the script with `--dry-run` to confirm the desired end state and then apply the outstanding cleanup manually:
|
||||
|
||||
```powershell
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --dry-run -v
|
||||
|
||||
# Manual cleanup examples when automation cannot connect
|
||||
psql -d postgres -c "DROP DATABASE IF EXISTS calminer"
|
||||
psql -d postgres -c "DROP ROLE IF EXISTS calminer"
|
||||
```
|
||||
|
||||
After a failure and rollback, rerun the full setup once the environment issues are resolved.
|
||||
|
||||
### CI pipeline environment
|
||||
|
||||
The `.gitea/workflows/test.yml` job spins up a temporary PostgreSQL 16 container and runs the setup script twice: once with `--dry-run` to validate the plan and again without it to apply migrations and seeds. No external secrets are required; the workflow sets the following environment variables for both invocations and for pytest:
|
||||
|
||||
| Variable | Value | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `DATABASE_DRIVER` | `postgresql` | Signals the driver to the setup script |
|
||||
| `DATABASE_HOST` | `postgres` | Hostname of the Postgres job service container |
|
||||
| `DATABASE_PORT` | `5432` | Default service port |
|
||||
| `DATABASE_NAME` | `calminer_ci` | Target database created by the workflow |
|
||||
| `DATABASE_USER` | `calminer` | Application role used during tests |
|
||||
| `DATABASE_PASSWORD` | `secret` | Password for both admin and app role |
|
||||
| `DATABASE_SCHEMA` | `public` | Default schema for the tests |
|
||||
| `DATABASE_SUPERUSER` | `calminer` | Setup script uses the same role for admin actions |
|
||||
| `DATABASE_SUPERUSER_PASSWORD` | `secret` | Matches the Postgres service password |
|
||||
| `DATABASE_SUPERUSER_DB` | `calminer_ci` | Database to connect to for admin operations |
|
||||
|
||||
The workflow also updates `DATABASE_URL` for pytest to point at the CI Postgres instance. Existing tests continue to work unchanged, since SQLAlchemy reads the URL exactly as it does locally.
|
||||
|
||||
Because the workflow provisions everything inline, no repository or organization secrets need to be configured for basic CI runs. If you later move the setup step to staging or production pipelines, replace these inline values with secrets managed by the CI platform. When running on self-hosted runners behind an HTTP proxy or apt cache, ensure Playwright dependencies and OS packages inherit the same proxy settings that the workflow configures prior to installing browsers.
|
||||
|
||||
### Staging environment workflow
|
||||
|
||||
Use the staging checklist in `docs/staging_environment_setup.md` when running the setup script against the shared environment. A sample variable file (`config/setup_staging.env`) records the expected inputs (host, port, admin/application roles); copy it outside the repository or load the values securely via your shell before executing the workflow.
|
||||
|
||||
Recommended execution order:
|
||||
|
||||
1. Dry run with `--dry-run -v` to confirm connectivity and review planned operations. Capture the output to `reports/setup_staging_dry_run.log` (or similar) for auditing.
|
||||
2. Execute the live run with the same flags minus `--dry-run` to provision the database, role grants, migrations, and seed data. Save the log as `reports/setup_staging_apply.log`.
|
||||
3. Repeat the dry run to verify idempotency and record the result (for example `reports/setup_staging_post_apply.log`).
|
||||
|
||||
Record any issues in `.github/instructions/TODO.md` or `.github/instructions/DONE.TODO.md` as appropriate so the team can track follow-up actions.
|
||||
|
||||
## Database Objects
|
||||
|
||||
The database contains tables such as `capex`, `opex`, `chemical_consumption`, `fuel_consumption`, `water_consumption`, `scrap_consumption`, `production_output`, `equipment_operation`, `ore_batch`, `exchange_rate`, and `simulation_result`.
|
||||
|
||||
## Current implementation status (2025-10-21)
|
||||
|
||||
- Currency normalization: a `currency` table and backfill scripts exist; routes accept `currency_id` and `currency_code` for compatibility.
|
||||
- Simulation engine: scaffolding in `services/simulation.py` and `/api/simulations/run` return in-memory results; persistence to `models/simulation_result` is planned.
|
||||
- Reporting: `services/reporting.py` provides summary statistics used by `POST /api/reporting/summary`.
|
||||
- Tests & coverage: unit and E2E suites exist; recent local coverage is >90%.
|
||||
- Remaining work: authentication, persist simulation runs, CI/CD and containerization.
|
||||
|
||||
## Where to look next
|
||||
|
||||
- Architecture overview & chapters: [architecture](architecture/README.md) (per-chapter files under `docs/architecture/`)
|
||||
- [Testing & CI](architecture/14_testing_ci.md)
|
||||
- [Development setup](architecture/15_development_setup.md)
|
||||
- Implementation plan & roadmap: [Solution strategy](architecture/04_solution_strategy.md)
|
||||
- Routes: [routes](../routes/)
|
||||
- Services: [services](../services/)
|
||||
- Scripts: [scripts](../scripts/) (migrations and backfills)
|
||||
@@ -1,78 +0,0 @@
|
||||
# Baseline Seed Data Plan
|
||||
|
||||
This document captures the datasets that should be present in a fresh CalMiner installation and the structure required to manage them through `scripts/seed_data.py`.
|
||||
|
||||
## Currency Catalog
|
||||
|
||||
The `currency` table already exists and is seeded today via `scripts/seed_data.py`. The goal is to keep the canonical list in one place and ensure the default currency (USD) is always active.
|
||||
|
||||
| Code | Name | Symbol | Notes |
|
||||
| ---- | ------------------- | ------ | ---------------------------------------- |
|
||||
| USD | US Dollar | $ | Default currency (`DEFAULT_CURRENCY_CODE`) |
|
||||
| EUR | Euro | EUR symbol | |
|
||||
| CLP | Chilean Peso | $ | |
|
||||
| RMB | Chinese Yuan | RMB symbol | |
|
||||
| GBP | British Pound | GBP symbol | |
|
||||
| CAD | Canadian Dollar | $ | |
|
||||
| AUD | Australian Dollar | $ | |
|
||||
|
||||
Seeding behaviour:
|
||||
|
||||
- Upsert by ISO code; keep existing name/symbol when updated manually.
|
||||
- Ensure `is_active` remains true for USD and defaults to true for new rows.
|
||||
- Defer to runtime validation in `routes.currencies` for enforcing default behaviour.
|
||||
|
||||
## Measurement Units
|
||||
|
||||
UI routes (`routes/ui.py`) currently rely on the in-memory `MEASUREMENT_UNITS` list to populate dropdowns for consumption and production forms. To make this configurable and available to the API, introduce a dedicated `measurement_unit` table and seed it.
|
||||
|
||||
Proposed schema:
|
||||
|
||||
| Column | Type | Notes |
|
||||
| ------------- | -------------- | ------------------------------------ |
|
||||
| id | SERIAL / BIGINT | Primary key. |
|
||||
| code | TEXT | Stable slug (e.g. `tonnes`). Unique. |
|
||||
| name | TEXT | Display label. |
|
||||
| symbol | TEXT | Short symbol (nullable). |
|
||||
| unit_type | TEXT | Category (`mass`, `volume`, `energy`).|
|
||||
| is_active | BOOLEAN | Default `true` for soft disabling. |
|
||||
| created_at | TIMESTAMP | Optional `NOW()` default. |
|
||||
| updated_at | TIMESTAMP | Optional `NOW()` trigger/default. |
|
||||
|
||||
Initial seed set (mirrors existing UI list plus type categorisation):
|
||||
|
||||
| Code | Name | Symbol | Unit Type |
|
||||
| --------------- | ---------------- | ------ | --------- |
|
||||
| tonnes | Tonnes | t | mass |
|
||||
| kilograms | Kilograms | kg | mass |
|
||||
| pounds | Pounds | lb | mass |
|
||||
| liters | Liters | L | volume |
|
||||
| cubic_meters | Cubic Meters | m3 | volume |
|
||||
| kilowatt_hours | Kilowatt Hours | kWh | energy |
|
||||
|
||||
Seeding behaviour:
|
||||
|
||||
- Upsert rows by `code`.
|
||||
- Preserve `unit_type` and `symbol` unless explicitly changed via administration tooling.
|
||||
- Continue surfacing unit options to the UI by querying this table instead of the static constant.
|
||||
|
||||
## Default Settings
|
||||
|
||||
The application expects certain defaults to exist:
|
||||
|
||||
- **Default currency**: enforced by `routes.currencies._ensure_default_currency`; ensure seeds keep USD active.
|
||||
- **Fallback measurement unit**: UI currently auto-selects the first option in the list. Once units move to the database, expose an application setting to choose a fallback (future work tracked under "Application Settings management").
|
||||
|
||||
## Seeding Structure Updates
|
||||
|
||||
To support the datasets above:
|
||||
|
||||
1. Extend `scripts/seed_data.py` with a `SeedDataset` registry so each dataset (currencies, units, future defaults) can declare its loader/upsert function and optional dependencies.
|
||||
2. Add a `--dataset` CLI selector for targeted seeding while keeping `--all` as the default for `setup_database.py` integrations.
|
||||
3. Update `scripts/setup_database.py` to:
|
||||
- Run migration ensuring `measurement_unit` table exists.
|
||||
- Execute the unit seeder after currencies when `--seed-data` is supplied.
|
||||
- Verify post-seed counts, logging which dataset was inserted/updated.
|
||||
4. Adjust UI routes to load measurement units from the database and remove the hard-coded list once the table is available.
|
||||
|
||||
This plan aligns with the TODO item for seeding initial data and lays the groundwork for consolidating migrations around a single baseline file that introduces both the schema and seed data in an idempotent manner.
|
||||
@@ -1,101 +0,0 @@
|
||||
# Staging Environment Setup
|
||||
|
||||
This guide outlines how to provision and validate the CalMiner staging database using `scripts/setup_database.py`. It complements the local and CI-focused instructions in `docs/quickstart.md`.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Network access to the staging infrastructure (VPN or bastion, as required by ops).
|
||||
- Provisioned PostgreSQL instance with superuser or delegated admin credentials for maintenance.
|
||||
- Application credentials (role + password) dedicated to CalMiner staging.
|
||||
- The application repository checked out with Python dependencies installed (`pip install -r requirements.txt`).
|
||||
- Optional but recommended: a writable directory (for example `reports/`) to capture setup logs.
|
||||
|
||||
> Replace the placeholder values in the examples below with the actual host, port, and credential details supplied by ops.
|
||||
|
||||
## Environment Configuration
|
||||
|
||||
Populate the following environment variables before invoking the setup script. Store them in a secure location such as `config/setup_staging.env` (excluded from source control) and load them with `dotenv` or your shell profile.
|
||||
|
||||
| Variable | Description |
|
||||
| --- | --- |
|
||||
| `DATABASE_HOST` | Staging PostgreSQL hostname or IP (for example `staging-db.internal`). |
|
||||
| `DATABASE_PORT` | Port exposed by the staging PostgreSQL service (default `5432`). |
|
||||
| `DATABASE_NAME` | CalMiner staging database name (for example `calminer_staging`). |
|
||||
| `DATABASE_USER` | Application role used by the FastAPI app (for example `calminer_app`). |
|
||||
| `DATABASE_PASSWORD` | Password for the application role. |
|
||||
| `DATABASE_SCHEMA` | Optional non-public schema; omit or set to `public` otherwise. |
|
||||
| `DATABASE_SUPERUSER` | Administrative role with rights to create roles/databases (for example `calminer_admin`). |
|
||||
| `DATABASE_SUPERUSER_PASSWORD` | Password for the administrative role. |
|
||||
| `DATABASE_SUPERUSER_DB` | Database to connect to for admin tasks (default `postgres`). |
|
||||
| `DATABASE_ADMIN_URL` | Optional DSN that overrides the granular admin settings above. |
|
||||
|
||||
You may also set `DATABASE_URL` for application runtime convenience, but the setup script only requires the values listed in the table.
|
||||
|
||||
### Loading Variables (PowerShell example)
|
||||
|
||||
```powershell
|
||||
$env:DATABASE_HOST = "staging-db.internal"
|
||||
$env:DATABASE_PORT = "5432"
|
||||
$env:DATABASE_NAME = "calminer_staging"
|
||||
$env:DATABASE_USER = "calminer_app"
|
||||
$env:DATABASE_PASSWORD = "<app-password>"
|
||||
$env:DATABASE_SUPERUSER = "calminer_admin"
|
||||
$env:DATABASE_SUPERUSER_PASSWORD = "<admin-password>"
|
||||
$env:DATABASE_SUPERUSER_DB = "postgres"
|
||||
```
|
||||
|
||||
For bash shells, export the same variables using `export VARIABLE=value` or load them through `dotenv`.
|
||||
|
||||
## Setup Workflow
|
||||
|
||||
Run the setup script in three phases to validate idempotency and capture diagnostics:
|
||||
|
||||
1. **Dry run (diagnostic):**
|
||||
|
||||
```powershell
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v `
|
||||
2>&1 | Tee-Object -FilePath reports/setup_staging_dry_run.log
|
||||
```
|
||||
|
||||
Confirm that the script reports planned actions without failures. If the application role is missing, a dry run will log skip messages until a live run creates the role.
|
||||
|
||||
2. **Apply changes:**
|
||||
|
||||
```powershell
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data -v `
|
||||
2>&1 | Tee-Object -FilePath reports/setup_staging_apply.log
|
||||
```
|
||||
|
||||
Verify the log for successful database creation, role grants, migration execution, and seed verification.
|
||||
|
||||
3. **Post-apply dry run:**
|
||||
|
||||
```powershell
|
||||
python scripts/setup_database.py --ensure-database --ensure-role --ensure-schema --initialize-schema --run-migrations --seed-data --dry-run -v `
|
||||
2>&1 | Tee-Object -FilePath reports/setup_staging_post_apply.log
|
||||
```
|
||||
|
||||
This run should confirm that all schema objects, migrations, and seed data are already in place.
|
||||
|
||||
## Validation Checklist
|
||||
|
||||
- [ ] Confirm the staging application can connect using the application DSN (for example, run `pytest tests/e2e/test_smoke.py` against staging or trigger a smoke test workflow).
|
||||
- [ ] Inspect `schema_migrations` to ensure the baseline migration (`000_base.sql`) is recorded.
|
||||
- [ ] Spot-check seeded reference data (`currency`, `measurement_unit`) for correctness.
|
||||
- [ ] Capture and archive the three setup logs in a shared location for audit purposes.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- If the dry run reports skipped actions because the application role does not exist, proceed with the live run; subsequent dry runs will validate as expected.
|
||||
- Connection errors usually stem from network restrictions or incorrect credentials. Validate reachability with `psql` or `pg_isready` using the same host/port and credentials.
|
||||
- For permission issues during migrations or seeding, confirm the admin role has rights on the target database and that the application role inherits the expected privileges.
|
||||
|
||||
## Rollback Guidance
|
||||
|
||||
- Database creation and role grants register rollback actions when not running in dry-run mode. If a later step fails, rerun the script without `--dry-run`; it will automatically revoke grants or drop newly created resources as part of the rollback routine.
|
||||
- For staged environments where manual intervention is required, coordinate with ops before dropping databases or roles.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Keep this document updated as staging infrastructure evolves (for example, when migrating to managed services or rotating credentials).
|
||||
- Once staging validation is complete, summarize the outcome in `.github/instructions/DONE.TODO.md` and cross-link the relevant log files.
|
||||
8
main.py
8
main.py
@@ -17,6 +17,7 @@ from routes.currencies import router as currencies_router
|
||||
from routes.simulations import router as simulations_router
|
||||
from routes.maintenance import router as maintenance_router
|
||||
from routes.settings import router as settings_router
|
||||
from routes.users import router as users_router
|
||||
|
||||
# Initialize database schema
|
||||
Base.metadata.create_all(bind=engine)
|
||||
@@ -30,6 +31,12 @@ async def json_validation(
|
||||
) -> Response:
|
||||
return await validate_json(request, call_next)
|
||||
|
||||
|
||||
@app.get("/health", summary="Container health probe")
|
||||
async def health() -> dict[str, str]:
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
app.mount("/static", StaticFiles(directory="static"), name="static")
|
||||
|
||||
# Include API routers
|
||||
@@ -46,3 +53,4 @@ app.include_router(reporting_router)
|
||||
app.include_router(currencies_router)
|
||||
app.include_router(settings_router)
|
||||
app.include_router(ui_router)
|
||||
app.include_router(users_router)
|
||||
|
||||
@@ -4,7 +4,10 @@ from fastapi import HTTPException, Request, Response
|
||||
|
||||
MiddlewareCallNext = Callable[[Request], Awaitable[Response]]
|
||||
|
||||
async def validate_json(request: Request, call_next: MiddlewareCallNext) -> Response:
|
||||
|
||||
async def validate_json(
|
||||
request: Request, call_next: MiddlewareCallNext
|
||||
) -> Response:
|
||||
# Only validate JSON for requests with a body
|
||||
if request.method in ("POST", "PUT", "PATCH"):
|
||||
try:
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
"""
|
||||
models package initializer. Import key models so they're registered
|
||||
with the shared Base.metadata when the package is imported by tests.
|
||||
"""
|
||||
from . import application_setting # noqa: F401
|
||||
from . import currency # noqa: F401
|
||||
@@ -1,29 +0,0 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy import Boolean, DateTime, String, Text
|
||||
from sqlalchemy.orm import Mapped, mapped_column
|
||||
from sqlalchemy.sql import func
|
||||
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class ApplicationSetting(Base):
|
||||
__tablename__ = "application_setting"
|
||||
|
||||
id: Mapped[int] = mapped_column(primary_key=True, index=True)
|
||||
key: Mapped[str] = mapped_column(String(128), unique=True, nullable=False)
|
||||
value: Mapped[str] = mapped_column(Text, nullable=False)
|
||||
value_type: Mapped[str] = mapped_column(String(32), nullable=False, default="string")
|
||||
category: Mapped[str] = mapped_column(String(32), nullable=False, default="general")
|
||||
description: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
|
||||
is_editable: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)
|
||||
created_at: Mapped[datetime] = mapped_column(
|
||||
DateTime(timezone=True), server_default=func.now(), nullable=False
|
||||
)
|
||||
updated_at: Mapped[datetime] = mapped_column(
|
||||
DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False
|
||||
)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<ApplicationSetting key={self.key} category={self.category}>"
|
||||
@@ -1,65 +0,0 @@
|
||||
from sqlalchemy import event, text
|
||||
from sqlalchemy import Column, Integer, Float, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Capex(Base):
|
||||
__tablename__ = "capex"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
amount = Column(Float, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
currency_id = Column(Integer, ForeignKey("currency.id"), nullable=False)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="capex_items")
|
||||
currency = relationship("Currency", back_populates="capex_items")
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"<Capex id={self.id} scenario_id={self.scenario_id} "
|
||||
f"amount={self.amount} currency_id={self.currency_id}>"
|
||||
)
|
||||
|
||||
@property
|
||||
def currency_code(self) -> str:
|
||||
return self.currency.code if self.currency else None
|
||||
|
||||
@currency_code.setter
|
||||
def currency_code(self, value: str) -> None:
|
||||
# store pending code so application code or migrations can pick it up
|
||||
setattr(self, "_currency_code_pending",
|
||||
(value or "USD").strip().upper())
|
||||
|
||||
|
||||
# SQLAlchemy event handlers to ensure currency_id is set before insert/update
|
||||
|
||||
|
||||
def _resolve_currency(mapper, connection, target):
|
||||
# If currency_id already set, nothing to do
|
||||
if getattr(target, "currency_id", None):
|
||||
return
|
||||
code = getattr(target, "_currency_code_pending", None) or "USD"
|
||||
# Try to find existing currency id
|
||||
row = connection.execute(text("SELECT id FROM currency WHERE code = :code"), {
|
||||
"code": code}).fetchone()
|
||||
if row:
|
||||
cid = row[0]
|
||||
else:
|
||||
# Insert new currency and attempt to get lastrowid
|
||||
res = connection.execute(
|
||||
text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"),
|
||||
{"code": code, "name": code, "symbol": None, "active": True},
|
||||
)
|
||||
try:
|
||||
cid = res.lastrowid
|
||||
except Exception:
|
||||
# fallback: select after insert
|
||||
cid = connection.execute(text("SELECT id FROM currency WHERE code = :code"), {
|
||||
"code": code}).scalar()
|
||||
target.currency_id = cid
|
||||
|
||||
|
||||
event.listen(Capex, "before_insert", _resolve_currency)
|
||||
event.listen(Capex, "before_update", _resolve_currency)
|
||||
@@ -1,22 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, Float, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Consumption(Base):
|
||||
__tablename__ = "consumption"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
amount = Column(Float, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
unit_name = Column(String(64), nullable=True)
|
||||
unit_symbol = Column(String(16), nullable=True)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="consumption_items")
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"<Consumption id={self.id} scenario_id={self.scenario_id} "
|
||||
f"amount={self.amount} unit={self.unit_symbol or self.unit_name}>"
|
||||
)
|
||||
@@ -1,21 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, String, Boolean
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Currency(Base):
|
||||
__tablename__ = "currency"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
code = Column(String(3), nullable=False, unique=True, index=True)
|
||||
name = Column(String(128), nullable=False)
|
||||
symbol = Column(String(8), nullable=True)
|
||||
is_active = Column(Boolean, nullable=False, default=True)
|
||||
|
||||
# reverse relationships (optional)
|
||||
capex_items = relationship(
|
||||
"Capex", back_populates="currency", lazy="select")
|
||||
opex_items = relationship("Opex", back_populates="currency", lazy="select")
|
||||
|
||||
def __repr__(self):
|
||||
return f"<Currency code={self.code} name={self.name} symbol={self.symbol}>"
|
||||
@@ -1,14 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, String, JSON
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Distribution(Base):
|
||||
__tablename__ = "distribution"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
name = Column(String, nullable=False)
|
||||
distribution_type = Column(String, nullable=False)
|
||||
parameters = Column(JSON, nullable=True)
|
||||
|
||||
def __repr__(self):
|
||||
return f"<Distribution id={self.id} name={self.name} type={self.distribution_type}>"
|
||||
@@ -1,17 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Equipment(Base):
|
||||
__tablename__ = "equipment"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
name = Column(String, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="equipment_items")
|
||||
|
||||
def __repr__(self):
|
||||
return f"<Equipment id={self.id} scenario_id={self.scenario_id} name={self.name}>"
|
||||
@@ -1,23 +0,0 @@
|
||||
from sqlalchemy import Column, Date, Float, ForeignKey, Integer, String
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Maintenance(Base):
|
||||
__tablename__ = "maintenance"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
equipment_id = Column(Integer, ForeignKey("equipment.id"), nullable=False)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
maintenance_date = Column(Date, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
cost = Column(Float, nullable=False)
|
||||
|
||||
equipment = relationship("Equipment")
|
||||
scenario = relationship("Scenario", back_populates="maintenance_items")
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"<Maintenance id={self.id} equipment_id={self.equipment_id} "
|
||||
f"scenario_id={self.scenario_id} date={self.maintenance_date} cost={self.cost}>"
|
||||
)
|
||||
@@ -1,57 +0,0 @@
|
||||
from sqlalchemy import event, text
|
||||
from sqlalchemy import Column, Integer, Float, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Opex(Base):
|
||||
__tablename__ = "opex"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
amount = Column(Float, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
currency_id = Column(Integer, ForeignKey("currency.id"), nullable=False)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="opex_items")
|
||||
currency = relationship("Currency", back_populates="opex_items")
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"<Opex id={self.id} scenario_id={self.scenario_id} "
|
||||
f"amount={self.amount} currency_id={self.currency_id}>"
|
||||
)
|
||||
|
||||
@property
|
||||
def currency_code(self) -> str:
|
||||
return self.currency.code if self.currency else None
|
||||
|
||||
@currency_code.setter
|
||||
def currency_code(self, value: str) -> None:
|
||||
setattr(self, "_currency_code_pending",
|
||||
(value or "USD").strip().upper())
|
||||
|
||||
|
||||
def _resolve_currency_opex(mapper, connection, target):
|
||||
if getattr(target, "currency_id", None):
|
||||
return
|
||||
code = getattr(target, "_currency_code_pending", None) or "USD"
|
||||
row = connection.execute(text("SELECT id FROM currency WHERE code = :code"), {
|
||||
"code": code}).fetchone()
|
||||
if row:
|
||||
cid = row[0]
|
||||
else:
|
||||
res = connection.execute(
|
||||
text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:code, :name, :symbol, :active)"),
|
||||
{"code": code, "name": code, "symbol": None, "active": True},
|
||||
)
|
||||
try:
|
||||
cid = res.lastrowid
|
||||
except Exception:
|
||||
cid = connection.execute(text("SELECT id FROM currency WHERE code = :code"), {
|
||||
"code": code}).scalar()
|
||||
target.currency_id = cid
|
||||
|
||||
|
||||
event.listen(Opex, "before_insert", _resolve_currency_opex)
|
||||
event.listen(Opex, "before_update", _resolve_currency_opex)
|
||||
@@ -1,26 +0,0 @@
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from sqlalchemy import ForeignKey, JSON
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Parameter(Base):
|
||||
__tablename__ = "parameter"
|
||||
|
||||
id: Mapped[int] = mapped_column(primary_key=True, index=True)
|
||||
scenario_id: Mapped[int] = mapped_column(
|
||||
ForeignKey("scenario.id"), nullable=False)
|
||||
name: Mapped[str] = mapped_column(nullable=False)
|
||||
value: Mapped[float] = mapped_column(nullable=False)
|
||||
distribution_id: Mapped[Optional[int]] = mapped_column(
|
||||
ForeignKey("distribution.id"), nullable=True)
|
||||
distribution_type: Mapped[Optional[str]] = mapped_column(nullable=True)
|
||||
distribution_parameters: Mapped[Optional[Dict[str, Any]]] = mapped_column(
|
||||
JSON, nullable=True)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="parameters")
|
||||
distribution = relationship("Distribution")
|
||||
|
||||
def __repr__(self):
|
||||
return f"<Parameter id={self.id} name={self.name} value={self.value}>"
|
||||
@@ -1,23 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, Float, String, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class ProductionOutput(Base):
|
||||
__tablename__ = "production_output"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
amount = Column(Float, nullable=False)
|
||||
description = Column(String, nullable=True)
|
||||
unit_name = Column(String(64), nullable=True)
|
||||
unit_symbol = Column(String(16), nullable=True)
|
||||
|
||||
scenario = relationship(
|
||||
"Scenario", back_populates="production_output_items")
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"<ProductionOutput id={self.id} scenario_id={self.scenario_id} "
|
||||
f"amount={self.amount} unit={self.unit_symbol or self.unit_name}>"
|
||||
)
|
||||
@@ -1,39 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, String, DateTime, func
|
||||
from sqlalchemy.orm import relationship
|
||||
from models.simulation_result import SimulationResult
|
||||
from models.capex import Capex
|
||||
from models.opex import Opex
|
||||
from models.consumption import Consumption
|
||||
from models.production_output import ProductionOutput
|
||||
from models.equipment import Equipment
|
||||
from models.maintenance import Maintenance
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class Scenario(Base):
|
||||
__tablename__ = "scenario"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
name = Column(String, unique=True, nullable=False)
|
||||
description = Column(String)
|
||||
created_at = Column(DateTime(timezone=True), server_default=func.now())
|
||||
updated_at = Column(DateTime(timezone=True), onupdate=func.now())
|
||||
parameters = relationship("Parameter", back_populates="scenario")
|
||||
simulation_results = relationship(
|
||||
SimulationResult, back_populates="scenario")
|
||||
capex_items = relationship(
|
||||
Capex, back_populates="scenario")
|
||||
opex_items = relationship(
|
||||
Opex, back_populates="scenario")
|
||||
consumption_items = relationship(
|
||||
Consumption, back_populates="scenario")
|
||||
production_output_items = relationship(
|
||||
ProductionOutput, back_populates="scenario")
|
||||
equipment_items = relationship(
|
||||
Equipment, back_populates="scenario")
|
||||
maintenance_items = relationship(
|
||||
Maintenance, back_populates="scenario")
|
||||
|
||||
# relationships can be defined later
|
||||
def __repr__(self):
|
||||
return f"<Scenario id={self.id} name={self.name}>"
|
||||
@@ -1,14 +0,0 @@
|
||||
from sqlalchemy import Column, Integer, Float, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from config.database import Base
|
||||
|
||||
|
||||
class SimulationResult(Base):
|
||||
__tablename__ = "simulation_result"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False)
|
||||
iteration = Column(Integer, nullable=False)
|
||||
result = Column(Float, nullable=False)
|
||||
|
||||
scenario = relationship("Scenario", back_populates="simulation_results")
|
||||
16
pyproject.toml
Normal file
16
pyproject.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[tool.black]
|
||||
line-length = 80
|
||||
target-version = ['py310']
|
||||
include = '\\.pyi?$'
|
||||
exclude = '''
|
||||
/(
|
||||
.git
|
||||
| .hg
|
||||
| .mypy_cache
|
||||
| .tox
|
||||
| .venv
|
||||
| build
|
||||
| dist
|
||||
)/
|
||||
'''
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
pytest
|
||||
pytest-cov
|
||||
pytest-httpx
|
||||
playwright
|
||||
pytest-playwright
|
||||
python-jose
|
||||
ruff
|
||||
black
|
||||
mypy
|
||||
@@ -1,4 +1,5 @@
|
||||
fastapi
|
||||
pydantic
|
||||
uvicorn
|
||||
sqlalchemy
|
||||
psycopg2-binary
|
||||
@@ -7,3 +8,5 @@ httpx
|
||||
jinja2
|
||||
pandas
|
||||
numpy
|
||||
passlib
|
||||
python-jose
|
||||
@@ -1,50 +0,0 @@
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, status
|
||||
from pydantic import BaseModel, ConfigDict, PositiveFloat, field_validator
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.consumption import Consumption
|
||||
from routes.dependencies import get_db
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/consumption", tags=["Consumption"])
|
||||
|
||||
|
||||
class ConsumptionBase(BaseModel):
|
||||
scenario_id: int
|
||||
amount: PositiveFloat
|
||||
description: Optional[str] = None
|
||||
unit_name: Optional[str] = None
|
||||
unit_symbol: Optional[str] = None
|
||||
|
||||
@field_validator("unit_name", "unit_symbol")
|
||||
@classmethod
|
||||
def _normalize_text(cls, value: Optional[str]) -> Optional[str]:
|
||||
if value is None:
|
||||
return None
|
||||
stripped = value.strip()
|
||||
return stripped or None
|
||||
|
||||
|
||||
class ConsumptionCreate(ConsumptionBase):
|
||||
pass
|
||||
|
||||
|
||||
class ConsumptionRead(ConsumptionBase):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
@router.post("/", response_model=ConsumptionRead, status_code=status.HTTP_201_CREATED)
|
||||
def create_consumption(item: ConsumptionCreate, db: Session = Depends(get_db)):
|
||||
db_item = Consumption(**item.model_dump())
|
||||
db.add(db_item)
|
||||
db.commit()
|
||||
db.refresh(db_item)
|
||||
return db_item
|
||||
|
||||
|
||||
@router.get("/", response_model=List[ConsumptionRead])
|
||||
def list_consumption(db: Session = Depends(get_db)):
|
||||
return db.query(Consumption).all()
|
||||
119
routes/costs.py
119
routes/costs.py
@@ -1,119 +0,0 @@
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from pydantic import BaseModel, ConfigDict, field_validator
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.capex import Capex
|
||||
from models.opex import Opex
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/costs", tags=["Costs"])
|
||||
# Pydantic schemas for CAPEX and OPEX
|
||||
|
||||
|
||||
class _CostBase(BaseModel):
|
||||
scenario_id: int
|
||||
amount: float
|
||||
description: Optional[str] = None
|
||||
currency_code: Optional[str] = "USD"
|
||||
currency_id: Optional[int] = None
|
||||
|
||||
@field_validator("currency_code")
|
||||
@classmethod
|
||||
def _normalize_currency(cls, value: Optional[str]) -> str:
|
||||
code = (value or "USD").strip().upper()
|
||||
return code[:3] if len(code) > 3 else code
|
||||
|
||||
|
||||
class CapexCreate(_CostBase):
|
||||
pass
|
||||
|
||||
|
||||
class CapexRead(_CostBase):
|
||||
id: int
|
||||
# use from_attributes so Pydantic reads attributes off SQLAlchemy model
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
# optionally include nested currency info
|
||||
currency: Optional["CurrencyRead"] = None
|
||||
|
||||
|
||||
class OpexCreate(_CostBase):
|
||||
pass
|
||||
|
||||
|
||||
class OpexRead(_CostBase):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
currency: Optional["CurrencyRead"] = None
|
||||
|
||||
|
||||
class CurrencyRead(BaseModel):
|
||||
id: int
|
||||
code: str
|
||||
name: Optional[str] = None
|
||||
symbol: Optional[str] = None
|
||||
is_active: Optional[bool] = True
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
# forward refs
|
||||
CapexRead.model_rebuild()
|
||||
OpexRead.model_rebuild()
|
||||
|
||||
|
||||
# Capex endpoints
|
||||
@router.post("/capex", response_model=CapexRead)
def create_capex(item: CapexCreate, db: Session = Depends(get_db)):
    """Create a CAPEX entry.

    Resolves ``currency_code`` to a ``currency_id`` when the caller did not
    supply one, lazily creating the currency row on first use.
    """
    payload = item.model_dump()
    # currency_code looks like an input-only convenience, not a mapped
    # column (the original popped it before constructing the model, and
    # other code reads it via getattr(..., "currency_code", "USD")), so
    # remove it from the payload unconditionally.  The original only popped
    # it when currency_id was absent, which made Capex(**payload) receive
    # an unexpected `currency_code` keyword whenever the caller supplied
    # currency_id.
    code = (payload.pop("currency_code", "USD") or "USD").strip().upper()
    if not payload.get("currency_id"):
        # Local import (instead of the original __import__ hack) keeps the
        # same lazy-import semantics while staying idiomatic.
        from models.currency import Currency
        currency = db.query(Currency).filter_by(code=code).one_or_none()
        if currency is None:
            # First use of this code: create the currency row on the fly.
            currency = Currency(code=code, name=code, symbol=None)
            db.add(currency)
            db.flush()
        payload["currency_id"] = currency.id
    db_item = Capex(**payload)
    db.add(db_item)
    db.commit()
    db.refresh(db_item)
    return db_item
|
||||
|
||||
|
||||
@router.get("/capex", response_model=List[CapexRead])
def list_capex(db: Session = Depends(get_db)):
    """Return every CAPEX row."""
    query = db.query(Capex)
    return query.all()
|
||||
|
||||
|
||||
# Opex endpoints
|
||||
@router.post("/opex", response_model=OpexRead)
def create_opex(item: OpexCreate, db: Session = Depends(get_db)):
    """Create an OPEX entry.

    Resolves ``currency_code`` to a ``currency_id`` when the caller did not
    supply one, lazily creating the currency row on first use.
    """
    payload = item.model_dump()
    # Always strip currency_code from the payload: the original only popped
    # it when currency_id was absent, so Opex(**payload) received an
    # unexpected `currency_code` keyword when currency_id was supplied
    # (elsewhere the code reads it via getattr(..., "currency_code", "USD"),
    # suggesting it is not a mapped attribute).
    code = (payload.pop("currency_code", "USD") or "USD").strip().upper()
    if not payload.get("currency_id"):
        # Local import replaces the original __import__ hack with the same
        # lazy-import behavior.
        from models.currency import Currency
        currency = db.query(Currency).filter_by(code=code).one_or_none()
        if currency is None:
            currency = Currency(code=code, name=code, symbol=None)
            db.add(currency)
            db.flush()
        payload["currency_id"] = currency.id
    db_item = Opex(**payload)
    db.add(db_item)
    db.commit()
    db.refresh(db_item)
    return db_item
|
||||
|
||||
|
||||
@router.get("/opex", response_model=List[OpexRead])
def list_opex(db: Session = Depends(get_db)):
    """Return every OPEX row."""
    query = db.query(Opex)
    return query.all()
|
||||
@@ -1,191 +0,0 @@
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, status
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
|
||||
from models.currency import Currency
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/currencies", tags=["Currencies"])
|
||||
|
||||
|
||||
DEFAULT_CURRENCY_CODE = "USD"
|
||||
DEFAULT_CURRENCY_NAME = "US Dollar"
|
||||
DEFAULT_CURRENCY_SYMBOL = "$"
|
||||
|
||||
|
||||
class CurrencyBase(BaseModel):
    """Shared name/symbol fields and normalization for currency schemas."""

    name: str = Field(..., min_length=1, max_length=128)
    symbol: Optional[str] = Field(default=None, max_length=8)

    @staticmethod
    def _normalize_symbol(value: Optional[str]) -> Optional[str]:
        # Collapse whitespace-only symbols to None.  Also called directly by
        # the create/update endpoints, so the name must stay stable.
        if value is None:
            return None
        value = value.strip()
        return value or None

    @field_validator("name")
    @classmethod
    def _strip_name(cls, value: str) -> str:
        # Trim surrounding whitespace; min_length=1 already rejects empties.
        return value.strip()

    @field_validator("symbol")
    @classmethod
    def _strip_symbol(cls, value: Optional[str]) -> Optional[str]:
        return cls._normalize_symbol(value)
|
||||
|
||||
|
||||
class CurrencyCreate(CurrencyBase):
|
||||
code: str = Field(..., min_length=3, max_length=3)
|
||||
is_active: bool = True
|
||||
|
||||
@field_validator("code")
|
||||
@classmethod
|
||||
def _normalize_code(cls, value: str) -> str:
|
||||
return value.strip().upper()
|
||||
|
||||
|
||||
class CurrencyUpdate(CurrencyBase):
|
||||
is_active: Optional[bool] = None
|
||||
|
||||
|
||||
class CurrencyActivation(BaseModel):
|
||||
is_active: bool
|
||||
|
||||
|
||||
class CurrencyRead(CurrencyBase):
|
||||
id: int
|
||||
code: str
|
||||
is_active: bool
|
||||
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
def _ensure_default_currency(db: Session) -> Currency:
    """Return the default (USD) currency row, creating it if missing.

    Handles the concurrent-create race: if another transaction inserts the
    row between our existence check and commit, the IntegrityError path
    rolls back and re-reads the winner's row.
    """
    existing = (
        db.query(Currency)
        .filter(Currency.code == DEFAULT_CURRENCY_CODE)
        .one_or_none()
    )
    if existing:
        return existing

    default_currency = Currency(
        code=DEFAULT_CURRENCY_CODE,
        name=DEFAULT_CURRENCY_NAME,
        symbol=DEFAULT_CURRENCY_SYMBOL,
        is_active=True,
    )
    db.add(default_currency)
    try:
        db.commit()
    except IntegrityError:
        # Lost the race to another writer: discard our insert and fetch
        # theirs (one() is safe here — the unique violation proves it exists).
        db.rollback()
        existing = (
            db.query(Currency)
            .filter(Currency.code == DEFAULT_CURRENCY_CODE)
            .one()
        )
        return existing
    db.refresh(default_currency)
    return default_currency
|
||||
|
||||
|
||||
def _get_currency_or_404(db: Session, code: str) -> Currency:
    """Look up a currency by (case-insensitively normalized) code or raise 404."""
    normalized = code.strip().upper()
    match = db.query(Currency).filter(Currency.code == normalized).one_or_none()
    if match is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail="Currency not found")
    return match
|
||||
|
||||
|
||||
@router.get("/", response_model=List[CurrencyRead])
def list_currencies(
    include_inactive: bool = Query(
        False, description="Include inactive currencies"),
    db: Session = Depends(get_db),
):
    """List currencies ordered by code; inactive rows only on request."""
    _ensure_default_currency(db)  # guarantee the default currency exists
    query = db.query(Currency)
    if not include_inactive:
        query = query.filter(Currency.is_active.is_(True))
    return query.order_by(Currency.code).all()
|
||||
|
||||
|
||||
@router.post("/", response_model=CurrencyRead, status_code=status.HTTP_201_CREATED)
def create_currency(payload: CurrencyCreate, db: Session = Depends(get_db)):
    """Create a new currency; responds 409 when the code is already taken."""
    code = payload.code
    duplicate = (
        db.query(Currency)
        .filter(Currency.code == code)
        .one_or_none()
    )
    if duplicate is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Currency '{code}' already exists",
        )

    new_currency = Currency(
        code=code,
        name=payload.name,
        symbol=CurrencyBase._normalize_symbol(payload.symbol),
        is_active=payload.is_active,
    )
    db.add(new_currency)
    db.commit()
    db.refresh(new_currency)
    return new_currency
|
||||
|
||||
|
||||
@router.put("/{code}", response_model=CurrencyRead)
def update_currency(code: str, payload: CurrencyUpdate, db: Session = Depends(get_db)):
    """Update a currency's name, symbol and/or active flag.

    Raises 404 for an unknown code and 400 when attempting to deactivate
    the default currency.
    """
    currency = _get_currency_or_404(db, code)

    if payload.name is not None:
        setattr(currency, "name", payload.name)
    # NOTE: the original guard was `payload.symbol is not None or
    # payload.symbol == ""`; the second clause was dead code because an
    # empty string already satisfies `is not None`.
    if payload.symbol is not None:
        setattr(
            currency,
            "symbol",
            CurrencyBase._normalize_symbol(payload.symbol),
        )
    if payload.is_active is not None:
        code_value = getattr(currency, "code")
        # The default currency must always remain usable.
        if code_value == DEFAULT_CURRENCY_CODE and payload.is_active is False:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="The default currency cannot be deactivated.",
            )
        setattr(currency, "is_active", payload.is_active)

    db.add(currency)
    db.commit()
    db.refresh(currency)
    return currency
|
||||
|
||||
|
||||
@router.patch("/{code}/activation", response_model=CurrencyRead)
def toggle_currency_activation(code: str, body: CurrencyActivation, db: Session = Depends(get_db)):
    """Activate or deactivate a currency (the default one cannot be turned off)."""
    currency = _get_currency_or_404(db, code)
    deactivating_default = (
        getattr(currency, "code") == DEFAULT_CURRENCY_CODE
        and body.is_active is False
    )
    if deactivating_default:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="The default currency cannot be deactivated.",
        )

    setattr(currency, "is_active", body.is_active)
    db.add(currency)
    db.commit()
    db.refresh(currency)
    return currency
|
||||
@@ -1,13 +0,0 @@
|
||||
from collections.abc import Generator
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from config.database import SessionLocal
|
||||
|
||||
|
||||
def get_db() -> Generator[Session, None, None]:
    """Dependency that yields a request-scoped SQLAlchemy session.

    The session is always closed when the request finishes, even if the
    handler raised.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
|
||||
@@ -1,36 +0,0 @@
|
||||
from typing import Dict, List
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.distribution import Distribution
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/distributions", tags=["Distributions"])
|
||||
|
||||
|
||||
class DistributionCreate(BaseModel):
|
||||
name: str
|
||||
distribution_type: str
|
||||
parameters: Dict[str, float | int]
|
||||
|
||||
|
||||
class DistributionRead(DistributionCreate):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
@router.post("/", response_model=DistributionRead)
|
||||
async def create_distribution(dist: DistributionCreate, db: Session = Depends(get_db)):
|
||||
db_dist = Distribution(**dist.model_dump())
|
||||
db.add(db_dist)
|
||||
db.commit()
|
||||
db.refresh(db_dist)
|
||||
return db_dist
|
||||
|
||||
|
||||
@router.get("/", response_model=List[DistributionRead])
|
||||
async def list_distributions(db: Session = Depends(get_db)):
|
||||
dists = db.query(Distribution).all()
|
||||
return dists
|
||||
@@ -1,36 +0,0 @@
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.equipment import Equipment
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/equipment", tags=["Equipment"])
|
||||
# Pydantic schemas
|
||||
|
||||
|
||||
class EquipmentCreate(BaseModel):
|
||||
scenario_id: int
|
||||
name: str
|
||||
description: Optional[str] = None
|
||||
|
||||
|
||||
class EquipmentRead(EquipmentCreate):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
@router.post("/", response_model=EquipmentRead)
|
||||
async def create_equipment(item: EquipmentCreate, db: Session = Depends(get_db)):
|
||||
db_item = Equipment(**item.model_dump())
|
||||
db.add(db_item)
|
||||
db.commit()
|
||||
db.refresh(db_item)
|
||||
return db_item
|
||||
|
||||
|
||||
@router.get("/", response_model=List[EquipmentRead])
|
||||
async def list_equipment(db: Session = Depends(get_db)):
|
||||
return db.query(Equipment).all()
|
||||
@@ -1,84 +0,0 @@
|
||||
from datetime import date
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, ConfigDict, PositiveFloat
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.maintenance import Maintenance
|
||||
from routes.dependencies import get_db
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/maintenance", tags=["Maintenance"])
|
||||
|
||||
|
||||
class MaintenanceBase(BaseModel):
|
||||
equipment_id: int
|
||||
scenario_id: int
|
||||
maintenance_date: date
|
||||
description: Optional[str] = None
|
||||
cost: PositiveFloat
|
||||
|
||||
|
||||
class MaintenanceCreate(MaintenanceBase):
|
||||
pass
|
||||
|
||||
|
||||
class MaintenanceUpdate(MaintenanceBase):
|
||||
pass
|
||||
|
||||
|
||||
class MaintenanceRead(MaintenanceBase):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
def _get_maintenance_or_404(db: Session, maintenance_id: int) -> Maintenance:
    """Fetch a maintenance record by primary key or raise a 404."""
    record = (
        db.query(Maintenance)
        .filter(Maintenance.id == maintenance_id)
        .first()
    )
    if record is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Maintenance record {maintenance_id} not found",
        )
    return record
|
||||
|
||||
|
||||
@router.post("/", response_model=MaintenanceRead, status_code=status.HTTP_201_CREATED)
|
||||
def create_maintenance(maintenance: MaintenanceCreate, db: Session = Depends(get_db)):
|
||||
db_maintenance = Maintenance(**maintenance.model_dump())
|
||||
db.add(db_maintenance)
|
||||
db.commit()
|
||||
db.refresh(db_maintenance)
|
||||
return db_maintenance
|
||||
|
||||
|
||||
@router.get("/", response_model=List[MaintenanceRead])
|
||||
def list_maintenance(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
|
||||
return db.query(Maintenance).offset(skip).limit(limit).all()
|
||||
|
||||
|
||||
@router.get("/{maintenance_id}", response_model=MaintenanceRead)
|
||||
def get_maintenance(maintenance_id: int, db: Session = Depends(get_db)):
|
||||
return _get_maintenance_or_404(db, maintenance_id)
|
||||
|
||||
|
||||
@router.put("/{maintenance_id}", response_model=MaintenanceRead)
def update_maintenance(
    maintenance_id: int,
    payload: MaintenanceUpdate,
    db: Session = Depends(get_db),
):
    """Replace all mutable fields of an existing maintenance record."""
    record = _get_maintenance_or_404(db, maintenance_id)
    updates = payload.model_dump()
    for name in updates:
        setattr(record, name, updates[name])
    db.commit()
    db.refresh(record)
    return record
|
||||
|
||||
|
||||
@router.delete("/{maintenance_id}", status_code=status.HTTP_204_NO_CONTENT)
|
||||
def delete_maintenance(maintenance_id: int, db: Session = Depends(get_db)):
|
||||
db_maintenance = _get_maintenance_or_404(db, maintenance_id)
|
||||
db.delete(db_maintenance)
|
||||
db.commit()
|
||||
@@ -1,82 +0,0 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel, ConfigDict, field_validator
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.distribution import Distribution
|
||||
from models.parameters import Parameter
|
||||
from models.scenario import Scenario
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/parameters", tags=["parameters"])
|
||||
|
||||
|
||||
class ParameterCreate(BaseModel):
|
||||
scenario_id: int
|
||||
name: str
|
||||
value: float
|
||||
distribution_id: Optional[int] = None
|
||||
distribution_type: Optional[str] = None
|
||||
distribution_parameters: Optional[Dict[str, Any]] = None
|
||||
|
||||
@field_validator("distribution_type")
|
||||
@classmethod
|
||||
def normalize_type(cls, value: Optional[str]) -> Optional[str]:
|
||||
if value is None:
|
||||
return value
|
||||
normalized = value.strip().lower()
|
||||
if not normalized:
|
||||
return None
|
||||
if normalized not in {"normal", "uniform", "triangular"}:
|
||||
raise ValueError(
|
||||
"distribution_type must be normal, uniform, or triangular")
|
||||
return normalized
|
||||
|
||||
@field_validator("distribution_parameters")
|
||||
@classmethod
|
||||
def empty_dict_to_none(cls, value: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||
if value is None:
|
||||
return None
|
||||
return value or None
|
||||
|
||||
|
||||
class ParameterRead(ParameterCreate):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
@router.post("/", response_model=ParameterRead)
def create_parameter(param: ParameterCreate, db: Session = Depends(get_db)):
    """Create a parameter, copying distribution details from a linked
    Distribution row when one is referenced."""
    if db.query(Scenario).filter(Scenario.id == param.scenario_id).first() is None:
        raise HTTPException(status_code=404, detail="Scenario not found")

    dist_type = param.distribution_type
    dist_params = param.distribution_parameters
    if param.distribution_id is not None:
        linked = db.query(Distribution).filter(
            Distribution.id == param.distribution_id).first()
        if linked is None:
            raise HTTPException(
                status_code=404, detail="Distribution not found")
        # A referenced Distribution row is authoritative for type/parameters.
        dist_type = linked.distribution_type
        dist_params = linked.parameters or None

    record = Parameter(
        scenario_id=param.scenario_id,
        name=param.name,
        value=param.value,
        distribution_id=param.distribution_id,
        distribution_type=dist_type,
        distribution_parameters=dist_params,
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
|
||||
|
||||
|
||||
@router.get("/", response_model=List[ParameterRead])
|
||||
def list_parameters(db: Session = Depends(get_db)):
|
||||
return db.query(Parameter).all()
|
||||
@@ -1,50 +0,0 @@
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, status
|
||||
from pydantic import BaseModel, ConfigDict, PositiveFloat, field_validator
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.production_output import ProductionOutput
|
||||
from routes.dependencies import get_db
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/production", tags=["Production"])
|
||||
|
||||
|
||||
class ProductionOutputBase(BaseModel):
|
||||
scenario_id: int
|
||||
amount: PositiveFloat
|
||||
description: Optional[str] = None
|
||||
unit_name: Optional[str] = None
|
||||
unit_symbol: Optional[str] = None
|
||||
|
||||
@field_validator("unit_name", "unit_symbol")
|
||||
@classmethod
|
||||
def _normalize_text(cls, value: Optional[str]) -> Optional[str]:
|
||||
if value is None:
|
||||
return None
|
||||
stripped = value.strip()
|
||||
return stripped or None
|
||||
|
||||
|
||||
class ProductionOutputCreate(ProductionOutputBase):
|
||||
pass
|
||||
|
||||
|
||||
class ProductionOutputRead(ProductionOutputBase):
|
||||
id: int
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
@router.post("/", response_model=ProductionOutputRead, status_code=status.HTTP_201_CREATED)
|
||||
def create_production(item: ProductionOutputCreate, db: Session = Depends(get_db)):
|
||||
db_item = ProductionOutput(**item.model_dump())
|
||||
db.add(db_item)
|
||||
db.commit()
|
||||
db.refresh(db_item)
|
||||
return db_item
|
||||
|
||||
|
||||
@router.get("/", response_model=List[ProductionOutputRead])
|
||||
def list_production(db: Session = Depends(get_db)):
|
||||
return db.query(ProductionOutput).all()
|
||||
@@ -1,73 +0,0 @@
|
||||
from typing import Any, Dict, List, cast
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request, status
|
||||
from pydantic import BaseModel
|
||||
|
||||
from services.reporting import generate_report
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/reporting", tags=["Reporting"])
|
||||
|
||||
|
||||
def _validate_payload(payload: Any) -> List[Dict[str, float]]:
    """Check that payload is a list of objects each carrying a numeric 'result'.

    Returns the normalized rows; raises HTTP 400 on any shape violation.
    """
    if not isinstance(payload, list):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid input format",
        )

    items = cast(List[Any], payload)

    normalized: List[Dict[str, float]] = []
    for index, entry in enumerate(items):
        if not isinstance(entry, dict):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Entry at index {index} must be an object",
            )
        result = cast(Dict[str, Any], entry).get("result")
        if not isinstance(result, (int, float)):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Entry at index {index} must include numeric 'result'",
            )
        normalized.append({"result": float(result)})
    return normalized
|
||||
|
||||
|
||||
class ReportSummary(BaseModel):
|
||||
count: int
|
||||
mean: float
|
||||
median: float
|
||||
min: float
|
||||
max: float
|
||||
std_dev: float
|
||||
variance: float
|
||||
percentile_10: float
|
||||
percentile_90: float
|
||||
percentile_5: float
|
||||
percentile_95: float
|
||||
value_at_risk_95: float
|
||||
expected_shortfall_95: float
|
||||
|
||||
|
||||
@router.post("/summary", response_model=ReportSummary)
async def summary_report(request: Request):
    """Validate the raw JSON body and return descriptive statistics."""
    body = await request.json()
    rows = _validate_payload(body)
    summary = generate_report(rows)
    # Same fields and casts as before, built via a comprehension instead of
    # a field-by-field constructor call.
    float_fields = (
        "mean", "median", "min", "max", "std_dev", "variance",
        "percentile_10", "percentile_90", "percentile_5", "percentile_95",
        "value_at_risk_95", "expected_shortfall_95",
    )
    kwargs = {name: float(summary[name]) for name in float_fields}
    return ReportSummary(count=int(summary["count"]), **kwargs)
|
||||
@@ -1,41 +0,0 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.scenario import Scenario
|
||||
from routes.dependencies import get_db
|
||||
|
||||
router = APIRouter(prefix="/api/scenarios", tags=["scenarios"])
|
||||
|
||||
# Pydantic schemas
|
||||
|
||||
|
||||
class ScenarioCreate(BaseModel):
|
||||
name: str
|
||||
description: Optional[str] = None
|
||||
|
||||
|
||||
class ScenarioRead(ScenarioCreate):
|
||||
id: int
|
||||
created_at: datetime
|
||||
updated_at: Optional[datetime] = None
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
@router.post("/", response_model=ScenarioRead)
def create_scenario(scenario: ScenarioCreate, db: Session = Depends(get_db)):
    """Create a scenario; names must be unique (400 on duplicates)."""
    duplicate = db.query(Scenario).filter(Scenario.name == scenario.name).first()
    if duplicate:
        raise HTTPException(status_code=400, detail="Scenario already exists")
    record = Scenario(name=scenario.name, description=scenario.description)
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
|
||||
|
||||
|
||||
@router.get("/", response_model=list[ScenarioRead])
|
||||
def list_scenarios(db: Session = Depends(get_db)):
|
||||
return db.query(Scenario).all()
|
||||
@@ -1,85 +0,0 @@
|
||||
from typing import Dict, List
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from routes.dependencies import get_db
|
||||
from services.settings import (
|
||||
CSS_COLOR_DEFAULTS,
|
||||
get_css_color_settings,
|
||||
list_css_env_override_rows,
|
||||
read_css_color_env_overrides,
|
||||
update_css_color_settings,
|
||||
)
|
||||
|
||||
router = APIRouter(prefix="/api/settings", tags=["Settings"])
|
||||
|
||||
|
||||
class CSSSettingsPayload(BaseModel):
|
||||
variables: Dict[str, str] = Field(default_factory=dict)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _validate_allowed_keys(self) -> "CSSSettingsPayload":
|
||||
invalid = set(self.variables.keys()) - set(CSS_COLOR_DEFAULTS.keys())
|
||||
if invalid:
|
||||
invalid_keys = ", ".join(sorted(invalid))
|
||||
raise ValueError(
|
||||
f"Unsupported CSS variables: {invalid_keys}."
|
||||
" Accepted keys align with the default theme variables."
|
||||
)
|
||||
return self
|
||||
|
||||
|
||||
class EnvOverride(BaseModel):
|
||||
css_key: str
|
||||
env_var: str
|
||||
value: str
|
||||
|
||||
|
||||
class CSSSettingsResponse(BaseModel):
|
||||
variables: Dict[str, str]
|
||||
env_overrides: Dict[str, str] = Field(default_factory=dict)
|
||||
env_sources: List[EnvOverride] = Field(default_factory=list)
|
||||
|
||||
|
||||
@router.get("/css", response_model=CSSSettingsResponse)
|
||||
def read_css_settings(db: Session = Depends(get_db)) -> CSSSettingsResponse:
|
||||
try:
|
||||
values = get_css_color_settings(db)
|
||||
env_overrides = read_css_color_env_overrides()
|
||||
env_sources = [
|
||||
EnvOverride(**row)
|
||||
for row in list_css_env_override_rows()
|
||||
]
|
||||
except ValueError as exc:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(exc),
|
||||
) from exc
|
||||
return CSSSettingsResponse(
|
||||
variables=values,
|
||||
env_overrides=env_overrides,
|
||||
env_sources=env_sources,
|
||||
)
|
||||
|
||||
|
||||
@router.put("/css", response_model=CSSSettingsResponse, status_code=status.HTTP_200_OK)
|
||||
def update_css_settings(payload: CSSSettingsPayload, db: Session = Depends(get_db)) -> CSSSettingsResponse:
|
||||
try:
|
||||
values = update_css_color_settings(db, payload.variables)
|
||||
env_overrides = read_css_color_env_overrides()
|
||||
env_sources = [
|
||||
EnvOverride(**row)
|
||||
for row in list_css_env_override_rows()
|
||||
]
|
||||
except ValueError as exc:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
|
||||
detail=str(exc),
|
||||
) from exc
|
||||
return CSSSettingsResponse(
|
||||
variables=values,
|
||||
env_overrides=env_overrides,
|
||||
env_sources=env_sources,
|
||||
)
|
||||
@@ -1,122 +0,0 @@
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, PositiveInt
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.parameters import Parameter
|
||||
from models.scenario import Scenario
|
||||
from models.simulation_result import SimulationResult
|
||||
from routes.dependencies import get_db
|
||||
from services.reporting import generate_report
|
||||
from services.simulation import run_simulation
|
||||
|
||||
router = APIRouter(prefix="/api/simulations", tags=["Simulations"])
|
||||
|
||||
|
||||
class SimulationParameterInput(BaseModel):
|
||||
name: str
|
||||
value: float
|
||||
distribution: Optional[str] = "normal"
|
||||
std_dev: Optional[float] = None
|
||||
min: Optional[float] = None
|
||||
max: Optional[float] = None
|
||||
mode: Optional[float] = None
|
||||
|
||||
|
||||
class SimulationRunRequest(BaseModel):
|
||||
scenario_id: int
|
||||
iterations: PositiveInt = 1000
|
||||
parameters: Optional[List[SimulationParameterInput]] = None
|
||||
seed: Optional[int] = None
|
||||
|
||||
|
||||
class SimulationResultItem(BaseModel):
|
||||
iteration: int
|
||||
result: float
|
||||
|
||||
|
||||
class SimulationRunResponse(BaseModel):
|
||||
scenario_id: int
|
||||
iterations: int
|
||||
results: List[SimulationResultItem]
|
||||
summary: Dict[str, float | int]
|
||||
|
||||
|
||||
def _load_parameters(db: Session, scenario_id: int) -> List[SimulationParameterInput]:
|
||||
db_params = (
|
||||
db.query(Parameter)
|
||||
.filter(Parameter.scenario_id == scenario_id)
|
||||
.order_by(Parameter.id)
|
||||
.all()
|
||||
)
|
||||
return [
|
||||
SimulationParameterInput(
|
||||
name=item.name,
|
||||
value=item.value,
|
||||
)
|
||||
for item in db_params
|
||||
]
|
||||
|
||||
|
||||
@router.post("/run", response_model=SimulationRunResponse)
async def simulate(payload: SimulationRunRequest, db: Session = Depends(get_db)):
    """Run a simulation for a scenario and persist its results.

    Parameters come from the request body when supplied, otherwise from the
    scenario's stored Parameter rows.  Existing SimulationResult rows for
    the scenario are replaced, and a summary report is returned alongside
    the per-iteration results.

    Raises 404 for an unknown scenario; 400 when no parameters are
    available or the run yields no results.
    """
    scenario = db.query(Scenario).filter(
        Scenario.id == payload.scenario_id).first()
    if scenario is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Scenario not found",
        )

    # Request-supplied parameters take precedence over stored ones.
    parameters = payload.parameters or _load_parameters(
        db, payload.scenario_id)
    if not parameters:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="No parameters provided",
        )

    # exclude_none keeps unset optional distribution fields out of the
    # engine's input dicts.
    raw_results = run_simulation(
        [param.model_dump(exclude_none=True) for param in parameters],
        iterations=payload.iterations,
        seed=payload.seed,
    )

    if not raw_results:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Simulation produced no results",
        )

    # Persist results (replace existing values for scenario): delete must
    # run before the bulk insert so reruns don't accumulate rows.
    db.query(SimulationResult).filter(
        SimulationResult.scenario_id == payload.scenario_id
    ).delete()
    db.bulk_save_objects(
        [
            SimulationResult(
                scenario_id=payload.scenario_id,
                iteration=item["iteration"],
                result=item["result"],
            )
            for item in raw_results
        ]
    )
    db.commit()

    summary = generate_report(raw_results)

    response = SimulationRunResponse(
        scenario_id=payload.scenario_id,
        iterations=payload.iterations,
        results=[
            SimulationResultItem(
                iteration=int(item["iteration"]),
                result=float(item["result"]),
            )
            for item in raw_results
        ],
        summary=summary,
    )
    return response
|
||||
706
routes/ui.py
706
routes/ui.py
@@ -1,706 +0,0 @@
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi.responses import HTMLResponse, JSONResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.capex import Capex
|
||||
from models.consumption import Consumption
|
||||
from models.equipment import Equipment
|
||||
from models.maintenance import Maintenance
|
||||
from models.opex import Opex
|
||||
from models.parameters import Parameter
|
||||
from models.production_output import ProductionOutput
|
||||
from models.scenario import Scenario
|
||||
from models.simulation_result import SimulationResult
|
||||
from routes.dependencies import get_db
|
||||
from services.reporting import generate_report
|
||||
from models.currency import Currency
|
||||
from routes.currencies import DEFAULT_CURRENCY_CODE, _ensure_default_currency
|
||||
from services.settings import (
|
||||
CSS_COLOR_DEFAULTS,
|
||||
get_css_color_settings,
|
||||
list_css_env_override_rows,
|
||||
read_css_color_env_overrides,
|
||||
)
|
||||
|
||||
|
||||
CURRENCY_CHOICES: list[Dict[str, Any]] = [
|
||||
{"id": "USD", "name": "US Dollar (USD)"},
|
||||
{"id": "EUR", "name": "Euro (EUR)"},
|
||||
{"id": "CLP", "name": "Chilean Peso (CLP)"},
|
||||
{"id": "RMB", "name": "Chinese Yuan (RMB)"},
|
||||
{"id": "GBP", "name": "British Pound (GBP)"},
|
||||
{"id": "CAD", "name": "Canadian Dollar (CAD)"},
|
||||
{"id": "AUD", "name": "Australian Dollar (AUD)"},
|
||||
]
|
||||
|
||||
MEASUREMENT_UNITS: list[Dict[str, Any]] = [
|
||||
{"id": "tonnes", "name": "Tonnes", "symbol": "t"},
|
||||
{"id": "kilograms", "name": "Kilograms", "symbol": "kg"},
|
||||
{"id": "pounds", "name": "Pounds", "symbol": "lb"},
|
||||
{"id": "liters", "name": "Liters", "symbol": "L"},
|
||||
{"id": "cubic_meters", "name": "Cubic Meters", "symbol": "m3"},
|
||||
{"id": "kilowatt_hours", "name": "Kilowatt Hours", "symbol": "kWh"},
|
||||
]
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
# Set up Jinja2 templates directory
|
||||
templates = Jinja2Templates(directory="templates")
|
||||
|
||||
|
||||
def _context(request: Request, extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
||||
payload: Dict[str, Any] = {
|
||||
"request": request,
|
||||
"current_year": datetime.now(timezone.utc).year,
|
||||
}
|
||||
if extra:
|
||||
payload.update(extra)
|
||||
return payload
|
||||
|
||||
|
||||
def _render(
|
||||
request: Request,
|
||||
template_name: str,
|
||||
extra: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
context = _context(request, extra)
|
||||
return templates.TemplateResponse(request, template_name, context)
|
||||
|
||||
|
||||
def _format_currency(value: float) -> str:
|
||||
return f"${value:,.2f}"
|
||||
|
||||
|
||||
def _format_decimal(value: float) -> str:
|
||||
return f"{value:,.2f}"
|
||||
|
||||
|
||||
def _format_int(value: int) -> str:
|
||||
return f"{value:,}"
|
||||
|
||||
|
||||
def _load_scenarios(db: Session) -> Dict[str, Any]:
    """Return every scenario (id/name/description) ordered by name."""
    rows = db.query(Scenario).order_by(Scenario.name).all()
    return {
        "scenarios": [
            {"id": row.id, "name": row.name, "description": row.description}
            for row in rows
        ]
    }
|
||||
|
||||
|
||||
def _load_parameters(db: Session) -> Dict[str, Any]:
    """Group all parameters by scenario_id, ordered by scenario then id."""
    grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    for row in db.query(Parameter).order_by(Parameter.scenario_id, Parameter.id):
        entry = {
            "id": row.id,
            "name": row.name,
            "value": row.value,
            "distribution_type": row.distribution_type,
            "distribution_parameters": row.distribution_parameters,
        }
        grouped[row.scenario_id].append(entry)
    return {"parameters_by_scenario": dict(grouped)}
|
||||
|
||||
|
||||
def _load_costs(db: Session) -> Dict[str, Any]:
    """Group CAPEX and OPEX rows by scenario_id, ordered by scenario then id."""

    def collect(model) -> Dict[int, list[Dict[str, Any]]]:
        # Both cost tables share the same column set, so one helper serves both.
        grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
        for row in db.query(model).order_by(model.scenario_id, model.id).all():
            grouped[int(getattr(row, "scenario_id"))].append(
                {
                    "id": int(getattr(row, "id")),
                    "scenario_id": int(getattr(row, "scenario_id")),
                    "amount": float(getattr(row, "amount", 0.0)),
                    "description": getattr(row, "description", "") or "",
                    "currency_code": getattr(row, "currency_code", "USD") or "USD",
                }
            )
        return dict(grouped)

    return {
        "capex_by_scenario": collect(Capex),
        "opex_by_scenario": collect(Opex),
    }
|
||||
|
||||
|
||||
def _load_currencies(db: Session) -> Dict[str, Any]:
    """List active currencies as dropdown options; fall back to USD when none exist."""
    options = [
        {"id": c.code, "name": f"{c.name} ({c.code})", "symbol": c.symbol}
        for c in db.query(Currency).filter_by(is_active=True).order_by(Currency.code).all()
    ]
    if not options:
        options = [{"id": "USD", "name": "US Dollar (USD)", "symbol": "$"}]
    return {"currency_options": options}
|
||||
|
||||
|
||||
def _load_currency_settings(db: Session) -> Dict[str, Any]:
    """Build the currency-admin context: all rows, active/inactive stats, default code."""
    _ensure_default_currency(db)  # guarantee the default currency row exists
    currencies: list[Dict[str, Any]] = []
    for record in db.query(Currency).order_by(Currency.code).all():
        code_value = getattr(record, "code")
        currencies.append(
            {
                "id": int(getattr(record, "id")),
                "code": code_value,
                "name": getattr(record, "name"),
                "symbol": getattr(record, "symbol"),
                "is_active": bool(getattr(record, "is_active", True)),
                "is_default": code_value == DEFAULT_CURRENCY_CODE,
            }
        )

    active_count = sum(1 for item in currencies if item["is_active"])

    return {
        "currencies": currencies,
        "currency_stats": {
            "total": len(currencies),
            "active": active_count,
            "inactive": len(currencies) - active_count,
        },
        "default_currency_code": DEFAULT_CURRENCY_CODE,
        "currency_api_base": "/api/currencies",
    }
|
||||
|
||||
|
||||
def _load_css_settings(db: Session) -> Dict[str, Any]:
    """CSS color settings context: resolved variables, defaults, and env overrides."""
    variables = get_css_color_settings(db)
    env_overrides = read_css_color_env_overrides()
    env_rows = list_css_env_override_rows()
    return {
        "css_variables": variables,
        "css_defaults": CSS_COLOR_DEFAULTS,
        "css_env_overrides": env_overrides,
        "css_env_override_rows": env_rows,
        # Index rows by css_key for direct lookups in the template.
        "css_env_override_meta": {row["css_key"]: row for row in env_rows},
    }
|
||||
|
||||
|
||||
def _load_consumption(db: Session) -> Dict[str, Any]:
    """Group consumption rows by scenario_id, ordered by scenario then id."""
    grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    rows = (
        db.query(Consumption)
        .order_by(Consumption.scenario_id, Consumption.id)
        .all()
    )
    for row in rows:
        scenario_id = int(getattr(row, "scenario_id"))
        grouped[scenario_id].append(
            {
                "id": int(getattr(row, "id")),
                "scenario_id": scenario_id,
                "amount": float(getattr(row, "amount", 0.0)),
                "description": getattr(row, "description", "") or "",
                "unit_name": getattr(row, "unit_name", None),
                "unit_symbol": getattr(row, "unit_symbol", None),
            }
        )
    return {"consumption_by_scenario": dict(grouped)}
|
||||
|
||||
|
||||
def _load_production(db: Session) -> Dict[str, Any]:
    """Group production output rows by scenario_id, ordered by scenario then id."""
    grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    rows = (
        db.query(ProductionOutput)
        .order_by(ProductionOutput.scenario_id, ProductionOutput.id)
        .all()
    )
    for row in rows:
        scenario_id = int(getattr(row, "scenario_id"))
        grouped[scenario_id].append(
            {
                "id": int(getattr(row, "id")),
                "scenario_id": scenario_id,
                "amount": float(getattr(row, "amount", 0.0)),
                "description": getattr(row, "description", "") or "",
                "unit_name": getattr(row, "unit_name", None),
                "unit_symbol": getattr(row, "unit_symbol", None),
            }
        )
    return {"production_by_scenario": dict(grouped)}
|
||||
|
||||
|
||||
def _load_equipment(db: Session) -> Dict[str, Any]:
    """Group equipment rows by scenario_id, ordered by scenario then id."""
    grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    for row in db.query(Equipment).order_by(Equipment.scenario_id, Equipment.id).all():
        scenario_id = int(getattr(row, "scenario_id"))
        grouped[scenario_id].append(
            {
                "id": int(getattr(row, "id")),
                "scenario_id": scenario_id,
                "name": getattr(row, "name", "") or "",
                "description": getattr(row, "description", "") or "",
            }
        )
    return {"equipment_by_scenario": dict(grouped)}
|
||||
|
||||
|
||||
def _load_maintenance(db: Session) -> Dict[str, Any]:
    """Group maintenance events by scenario_id, ordered by scenario then date."""
    grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    rows = (
        db.query(Maintenance)
        .order_by(Maintenance.scenario_id, Maintenance.maintenance_date)
        .all()
    )
    for row in rows:
        scenario_id = int(getattr(row, "scenario_id"))
        equipment_obj = getattr(row, "equipment", None)
        when = getattr(row, "maintenance_date", None)
        grouped[scenario_id].append(
            {
                "id": int(getattr(row, "id")),
                "scenario_id": scenario_id,
                "equipment_id": int(getattr(row, "equipment_id")),
                # The related equipment may be missing; fall back to "".
                "equipment_name": getattr(equipment_obj, "name", "") if equipment_obj else "",
                "maintenance_date": when.isoformat() if when else "",
                "cost": float(getattr(row, "cost", 0.0)),
                "description": getattr(row, "description", "") or "",
            }
        )
    return {"maintenance_by_scenario": dict(grouped)}
|
||||
|
||||
|
||||
def _load_simulations(db: Session) -> Dict[str, Any]:
    """Build the simulations context: scenario list plus per-scenario run summaries.

    Each run entry carries the summary dict produced by generate_report, the
    iteration count taken from that summary, and up to `sample_limit` raw
    result rows for display.
    """
    scenarios: list[Dict[str, Any]] = [
        {"id": item.id, "name": item.name}
        for item in db.query(Scenario).order_by(Scenario.name).all()
    ]

    # Group raw simulation results by scenario, ordered by iteration.
    results_grouped: defaultdict[int, list[Dict[str, Any]]] = defaultdict(list)
    for record in (
        db.query(SimulationResult)
        .order_by(SimulationResult.scenario_id, SimulationResult.iteration)
        .all()
    ):
        results_grouped[int(getattr(record, "scenario_id"))].append(
            {
                "iteration": int(getattr(record, "iteration")),
                "result": float(getattr(record, "result", 0.0)),
            }
        )

    runs: list[Dict[str, Any]] = []
    sample_limit = 20  # cap on raw rows shipped to the template per scenario
    for item in scenarios:
        scenario_id = int(item["id"])
        scenario_results = results_grouped.get(scenario_id, [])
        # Simplified: the old code read
        # `generate_report(scenario_results) if scenario_results else generate_report([])`
        # — both branches call generate_report, so the conditional was redundant.
        summary = generate_report(scenario_results)
        runs.append(
            {
                "scenario_id": scenario_id,
                "scenario_name": item["name"],
                "iterations": int(summary.get("count", 0)),
                "summary": summary,
                "sample_results": scenario_results[:sample_limit],
            }
        )

    return {
        "simulation_scenarios": scenarios,
        "simulation_runs": runs,
    }
|
||||
|
||||
|
||||
def _load_reporting(db: Session) -> Dict[str, Any]:
    """Per-scenario KPI summaries for the reporting page."""
    all_scenarios = _load_scenarios(db)["scenarios"]
    runs_by_scenario = {
        run["scenario_id"]: run
        for run in _load_simulations(db)["simulation_runs"]
    }

    summaries: list[Dict[str, Any]] = []
    for scenario in all_scenarios:
        run = runs_by_scenario.get(scenario["id"])
        # Scenarios with no runs still get a row, backed by an empty report.
        summaries.append(
            {
                "scenario_id": scenario["id"],
                "scenario_name": scenario["name"],
                "summary": run["summary"] if run else generate_report([]),
                "iterations": run["iterations"] if run else 0,
            }
        )

    return {
        "report_summaries": summaries,
    }
|
||||
|
||||
|
||||
def _load_dashboard(db: Session) -> Dict[str, Any]:
    """Aggregate every dashboard widget's data into a single template context.

    Pulls all per-scenario datasets via the _load_* helpers, then computes
    global totals, per-scenario table rows, chart series, overall report
    metrics, the top recent simulations, and upcoming maintenance events.
    """
    scenarios = _load_scenarios(db)["scenarios"]
    parameters_by_scenario = _load_parameters(db)["parameters_by_scenario"]
    costs_context = _load_costs(db)
    capex_by_scenario = costs_context["capex_by_scenario"]
    opex_by_scenario = costs_context["opex_by_scenario"]
    consumption_by_scenario = _load_consumption(db)["consumption_by_scenario"]
    production_by_scenario = _load_production(db)["production_by_scenario"]
    equipment_by_scenario = _load_equipment(db)["equipment_by_scenario"]
    maintenance_by_scenario = _load_maintenance(db)["maintenance_by_scenario"]
    simulation_context = _load_simulations(db)
    simulation_runs = simulation_context["simulation_runs"]

    # Index runs by scenario id for O(1) lookups in the per-scenario loop.
    runs_by_scenario = {
        run["scenario_id"]: run for run in simulation_runs
    }

    def sum_amounts(grouped: Dict[int, list[Dict[str, Any]]], field: str = "amount") -> float:
        # Sum a numeric field across all scenarios, skipping non-numeric values.
        total = 0.0
        for items in grouped.values():
            for item in items:
                value = item.get(field, 0.0)
                if isinstance(value, (int, float)):
                    total += float(value)
        return total

    # Global totals for the summary metric cards.
    total_capex = sum_amounts(capex_by_scenario)
    total_opex = sum_amounts(opex_by_scenario)
    total_consumption = sum_amounts(consumption_by_scenario)
    total_production = sum_amounts(production_by_scenario)
    total_maintenance_cost = sum_amounts(maintenance_by_scenario, field="cost")

    total_parameters = sum(len(items)
                           for items in parameters_by_scenario.values())
    total_equipment = sum(len(items)
                          for items in equipment_by_scenario.values())
    total_maintenance_events = sum(len(items)
                                   for items in maintenance_by_scenario.values())
    total_simulation_iterations = sum(
        run["iterations"] for run in simulation_runs)

    # Per-scenario table rows plus parallel series for the two charts.
    scenario_rows: list[Dict[str, Any]] = []
    scenario_labels: list[str] = []
    scenario_capex: list[float] = []
    scenario_opex: list[float] = []
    activity_labels: list[str] = []
    activity_production: list[float] = []
    activity_consumption: list[float] = []

    for scenario in scenarios:
        scenario_id = scenario["id"]
        scenario_name = scenario["name"]
        param_count = len(parameters_by_scenario.get(scenario_id, []))
        equipment_count = len(equipment_by_scenario.get(scenario_id, []))
        maintenance_count = len(maintenance_by_scenario.get(scenario_id, []))

        capex_total = sum(
            float(item.get("amount", 0.0))
            for item in capex_by_scenario.get(scenario_id, [])
        )
        opex_total = sum(
            float(item.get("amount", 0.0))
            for item in opex_by_scenario.get(scenario_id, [])
        )
        consumption_total = sum(
            float(item.get("amount", 0.0))
            for item in consumption_by_scenario.get(scenario_id, [])
        )
        production_total = sum(
            float(item.get("amount", 0.0))
            for item in production_by_scenario.get(scenario_id, [])
        )

        run = runs_by_scenario.get(scenario_id)
        # Scenarios without runs get an empty report so the row still renders.
        summary = run["summary"] if run else generate_report([])
        iterations = run["iterations"] if run else 0
        mean_value = float(summary.get("mean", 0.0))

        scenario_rows.append(
            {
                "scenario_name": scenario_name,
                "parameter_count": param_count,
                "parameter_display": _format_int(param_count),
                "equipment_count": equipment_count,
                "equipment_display": _format_int(equipment_count),
                "capex_total": capex_total,
                "capex_display": _format_currency(capex_total),
                "opex_total": opex_total,
                "opex_display": _format_currency(opex_total),
                "production_total": production_total,
                "production_display": _format_decimal(production_total),
                "consumption_total": consumption_total,
                "consumption_display": _format_decimal(consumption_total),
                "maintenance_count": maintenance_count,
                "maintenance_display": _format_int(maintenance_count),
                "iterations": iterations,
                "iterations_display": _format_int(iterations),
                "simulation_mean": mean_value,
                "simulation_mean_display": _format_decimal(mean_value),
            }
        )

        scenario_labels.append(scenario_name)
        scenario_capex.append(capex_total)
        scenario_opex.append(opex_total)

        activity_labels.append(scenario_name)
        activity_production.append(production_total)
        activity_consumption.append(consumption_total)

    # Case-insensitive sort keeps table ordering stable regardless of casing.
    scenario_rows.sort(key=lambda row: row["scenario_name"].lower())

    # Overall report across every stored simulation result.
    all_simulation_results = [
        {"result": float(getattr(item, "result", 0.0))}
        for item in db.query(SimulationResult).all()
    ]
    overall_report = generate_report(all_simulation_results)

    overall_report_metrics = [
        {"label": "Runs", "value": _format_int(
            int(overall_report.get("count", 0)))},
        {"label": "Mean", "value": _format_decimal(
            float(overall_report.get("mean", 0.0)))},
        {"label": "Median", "value": _format_decimal(
            float(overall_report.get("median", 0.0)))},
        {"label": "Std Dev", "value": _format_decimal(
            float(overall_report.get("std_dev", 0.0)))},
        {"label": "95th Percentile", "value": _format_decimal(
            float(overall_report.get("percentile_95", 0.0)))},
        {"label": "VaR (95%)", "value": _format_decimal(
            float(overall_report.get("value_at_risk_95", 0.0)))},
        {"label": "Expected Shortfall (95%)", "value": _format_decimal(
            float(overall_report.get("expected_shortfall_95", 0.0)))},
    ]

    # Top five runs by iteration count; runs with zero iterations excluded.
    recent_simulations: list[Dict[str, Any]] = [
        {
            "scenario_name": run["scenario_name"],
            "iterations": run["iterations"],
            "iterations_display": _format_int(run["iterations"]),
            "mean_display": _format_decimal(float(run["summary"].get("mean", 0.0))),
            "p95_display": _format_decimal(float(run["summary"].get("percentile_95", 0.0))),
        }
        for run in simulation_runs
        if run["iterations"] > 0
    ]
    recent_simulations.sort(key=lambda item: item["iterations"], reverse=True)
    recent_simulations = recent_simulations[:5]

    # First five maintenance events by date, earliest first.
    upcoming_maintenance: list[Dict[str, Any]] = []
    for record in (
        db.query(Maintenance)
        .order_by(Maintenance.maintenance_date.asc())
        .limit(5)
        .all()
    ):
        maintenance_date = getattr(record, "maintenance_date", None)
        upcoming_maintenance.append(
            {
                "scenario_name": getattr(getattr(record, "scenario", None), "name", "Unknown"),
                "equipment_name": getattr(getattr(record, "equipment", None), "name", "Unknown"),
                "date_display": maintenance_date.strftime("%Y-%m-%d") if maintenance_date else "—",
                "cost_display": _format_currency(float(getattr(record, "cost", 0.0))),
                "description": getattr(record, "description", "") or "—",
            }
        )

    # Flags let the template hide charts whose series are all zero.
    cost_chart_has_data = any(value > 0 for value in scenario_capex) or any(
        value > 0 for value in scenario_opex
    )
    activity_chart_has_data = any(value > 0 for value in activity_production) or any(
        value > 0 for value in activity_consumption
    )

    scenario_cost_chart: Dict[str, list[Any]] = {
        "labels": scenario_labels,
        "capex": scenario_capex,
        "opex": scenario_opex,
    }
    scenario_activity_chart: Dict[str, list[Any]] = {
        "labels": activity_labels,
        "production": activity_production,
        "consumption": activity_consumption,
    }

    summary_metrics = [
        {"label": "Active Scenarios", "value": _format_int(len(scenarios))},
        {"label": "Parameters", "value": _format_int(total_parameters)},
        {"label": "CAPEX Total", "value": _format_currency(total_capex)},
        {"label": "OPEX Total", "value": _format_currency(total_opex)},
        {"label": "Equipment Assets", "value": _format_int(total_equipment)},
        {"label": "Maintenance Events",
         "value": _format_int(total_maintenance_events)},
        {"label": "Consumption", "value": _format_decimal(total_consumption)},
        {"label": "Production", "value": _format_decimal(total_production)},
        {"label": "Simulation Iterations",
         "value": _format_int(total_simulation_iterations)},
        {"label": "Maintenance Cost",
         "value": _format_currency(total_maintenance_cost)},
    ]

    return {
        "summary_metrics": summary_metrics,
        "scenario_rows": scenario_rows,
        "overall_report_metrics": overall_report_metrics,
        "recent_simulations": recent_simulations,
        "upcoming_maintenance": upcoming_maintenance,
        "scenario_cost_chart": scenario_cost_chart,
        "scenario_activity_chart": scenario_activity_chart,
        "cost_chart_has_data": cost_chart_has_data,
        "activity_chart_has_data": activity_chart_has_data,
        "report_available": overall_report.get("count", 0) > 0,
    }
|
||||
|
||||
|
||||
@router.get("/", response_class=HTMLResponse)
async def dashboard_root(request: Request, db: Session = Depends(get_db)):
    """Render the primary dashboard landing page."""
    context = _load_dashboard(db)
    return _render(request, "Dashboard.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/dashboard", response_class=HTMLResponse)
async def dashboard(request: Request, db: Session = Depends(get_db)):
    """Render the legacy dashboard route for backward compatibility."""
    context = _load_dashboard(db)
    return _render(request, "Dashboard.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/dashboard/data", response_class=JSONResponse)
async def dashboard_data(db: Session = Depends(get_db)) -> JSONResponse:
    """Expose dashboard aggregates as JSON for client-side refreshes."""
    payload = _load_dashboard(db)
    return JSONResponse(payload)
|
||||
|
||||
|
||||
@router.get("/ui/scenarios", response_class=HTMLResponse)
async def scenario_form(request: Request, db: Session = Depends(get_db)):
    """Render the scenario creation form."""
    return _render(request, "ScenarioForm.html", _load_scenarios(db))
|
||||
|
||||
|
||||
@router.get("/ui/parameters", response_class=HTMLResponse)
async def parameter_form(request: Request, db: Session = Depends(get_db)):
    """Render the parameter input form."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_parameters(db),
    }
    return _render(request, "ParameterInput.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/costs", response_class=HTMLResponse)
async def costs_view(request: Request, db: Session = Depends(get_db)):
    """Render the costs view with CAPEX and OPEX data."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_costs(db),
        **_load_currencies(db),
    }
    return _render(request, "costs.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/consumption", response_class=HTMLResponse)
async def consumption_view(request: Request, db: Session = Depends(get_db)):
    """Render the consumption view with scenario consumption data."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_consumption(db),
        "unit_options": MEASUREMENT_UNITS,
    }
    return _render(request, "consumption.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/production", response_class=HTMLResponse)
async def production_view(request: Request, db: Session = Depends(get_db)):
    """Render the production view with scenario production data."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_production(db),
        "unit_options": MEASUREMENT_UNITS,
    }
    return _render(request, "production.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/equipment", response_class=HTMLResponse)
async def equipment_view(request: Request, db: Session = Depends(get_db)):
    """Render the equipment view with scenario equipment data."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_equipment(db),
    }
    return _render(request, "equipment.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/maintenance", response_class=HTMLResponse)
async def maintenance_view(request: Request, db: Session = Depends(get_db)):
    """Render the maintenance view with scenario maintenance data."""
    context: Dict[str, Any] = {
        **_load_scenarios(db),
        **_load_equipment(db),
        **_load_maintenance(db),
    }
    return _render(request, "maintenance.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/simulations", response_class=HTMLResponse)
async def simulations_view(request: Request, db: Session = Depends(get_db)):
    """Render the simulations view with scenario information and recent runs."""
    context = _load_simulations(db)
    return _render(request, "simulations.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/reporting", response_class=HTMLResponse)
async def reporting_view(request: Request, db: Session = Depends(get_db)):
    """Render the reporting view with scenario KPI summaries."""
    context = _load_reporting(db)
    return _render(request, "reporting.html", context)
|
||||
|
||||
|
||||
@router.get("/ui/settings", response_class=HTMLResponse)
async def settings_view(request: Request, db: Session = Depends(get_db)):
    """Render the settings landing page."""
    return _render(request, "settings.html", _load_css_settings(db))
|
||||
|
||||
|
||||
@router.get("/ui/currencies", response_class=HTMLResponse)
async def currencies_view(request: Request, db: Session = Depends(get_db)):
    """Render the currency administration page with full currency context."""
    return _render(request, "currencies.html", _load_currency_settings(db))
|
||||
@@ -1,116 +0,0 @@
|
||||
"""
|
||||
Backfill script to populate currency_id for capex and opex rows using existing currency_code.
|
||||
|
||||
Usage:
|
||||
python scripts/backfill_currency.py --dry-run
|
||||
python scripts/backfill_currency.py --create-missing
|
||||
|
||||
This script is intentionally cautious: it defaults to dry-run mode and will refuse to run
|
||||
if database connection settings are missing. It supports creating missing currency rows when `--create-missing`
|
||||
is provided. Always run against a development/staging database first.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
import argparse
|
||||
import importlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from sqlalchemy import text, create_engine
|
||||
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
|
||||
def load_database_url() -> str:
    """Resolve DATABASE_URL via config.database, with a friendlier error on failure."""
    try:
        db_module = importlib.import_module("config.database")
    except RuntimeError as exc:
        # config.database raises RuntimeError when its env vars are missing;
        # re-raise with setup guidance for the operator.
        message = (
            "Database configuration missing: set DATABASE_URL or provide granular "
            "variables (DATABASE_DRIVER, DATABASE_HOST, DATABASE_PORT, DATABASE_USER, "
            "DATABASE_PASSWORD, DATABASE_NAME, optional DATABASE_SCHEMA)."
        )
        raise RuntimeError(message) from exc

    return getattr(db_module, "DATABASE_URL")
|
||||
|
||||
|
||||
def backfill(db_url: str, dry_run: bool = True, create_missing: bool = False) -> None:
    """Populate capex/opex `currency_id` from each row's `currency_code`.

    Runs in a single transaction. In dry-run mode only prints the updates it
    would perform. Rows with unknown currency codes are skipped unless
    `create_missing` is True, in which case a stub currency row is inserted.
    """
    engine = create_engine(db_url)
    with engine.begin() as conn:
        # Sanity probe that the currency table exists. The result is
        # intentionally unused: we assume the migration was already applied.
        if db_url.startswith('sqlite:'):
            conn.execute(text(
                "SELECT name FROM sqlite_master WHERE type='table' AND name='currency';"))
        else:
            conn.execute(text("SELECT to_regclass('public.currency');"))

        def find_currency_id(code: str):
            """Return the currency id for `code`; insert a stub row when allowed."""
            r = conn.execute(
                text("SELECT id FROM currency WHERE code = :code"),
                {"code": code},
            ).fetchone()
            if r:
                return r[0]
            if not create_missing:
                return None
            conn.execute(
                text("INSERT INTO currency (code, name, symbol, is_active) VALUES (:c, :n, NULL, TRUE)"),
                {"c": code, "n": code},
            )
            r2 = conn.execute(
                text("SELECT id FROM currency WHERE code = :code"),
                {"code": code},
            ).fetchone()
            if not r2:
                raise RuntimeError(
                    f"Unable to determine currency ID for '{code}' after insert"
                )
            return r2[0]

        # Table names come from this fixed tuple, so the f-string SQL below is
        # not an injection vector.
        for table in ("capex", "opex"):
            # Check whether the currency_id column exists.
            # BUG FIX: the old code kept the raw SQLAlchemy Result object,
            # which is always truthy, so `if not cols` could never skip a
            # table that lacked the column. Fetch the rows instead.
            try:
                if db_url.startswith('sqlite:'):
                    cols = [(1,)]  # sqlite path assumes the column exists
                else:
                    cols = conn.execute(text(
                        f"SELECT 1 FROM information_schema.columns WHERE table_name = '{table}' AND column_name = 'currency_id'"
                    )).fetchall()
            except Exception:
                cols = [(1,)]  # best-effort: assume the column exists

            if not cols:
                print(f"Skipping {table}: no currency_id column found")
                continue

            # Rows still missing a currency_id.
            # NOTE(review): `currency_id = ''` will type-error on an integer
            # column in Postgres; preserved from the original — confirm intent.
            rows = conn.execute(text(
                f"SELECT id, currency_code FROM {table} WHERE currency_id IS NULL OR currency_id = ''"))
            changed = 0
            for r in rows:
                rid = r[0]
                code = (r[1] or "USD").strip().upper()
                cid = find_currency_id(code)
                if cid is None:
                    print(
                        f"Row {table}:{rid} has unknown currency code '{code}' and create_missing=False; skipping")
                    continue
                if dry_run:
                    print(
                        f"[DRY RUN] Would set {table}.currency_id = {cid} for row id={rid} (code={code})")
                else:
                    conn.execute(
                        text(f"UPDATE {table} SET currency_id = :cid WHERE id = :rid"),
                        {"cid": cid, "rid": rid},
                    )
                # Counts rows updated (or that would be updated in dry-run).
                changed += 1

            print(f"{table}: processed, changed={changed} (dry_run={dry_run})")
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: parse flags, resolve the DB URL, and run the backfill."""
    parser = argparse.ArgumentParser(
        description="Backfill currency_id from currency_code for capex/opex tables")
    # BUG FIX: the old flag used action="store_true" with default=True, so
    # dry_run was ALWAYS True and the script could never write changes.
    # BooleanOptionalAction keeps `--dry-run` working unchanged and adds
    # `--no-dry-run` to actually apply updates.
    parser.add_argument("--dry-run", action=argparse.BooleanOptionalAction,
                        default=True, help="Show actions without writing")
    parser.add_argument("--create-missing", action="store_true",
                        help="Create missing currency rows in the currency table")
    args = parser.parse_args()

    db = load_database_url()
    backfill(db, dry_run=args.dry_run, create_missing=args.create_missing)


if __name__ == "__main__":
    main()
|
||||
@@ -1,43 +0,0 @@
|
||||
"""Simple Markdown link checker for local docs/ files.
|
||||
|
||||
Checks only local file links (relative paths) and reports missing targets.
|
||||
|
||||
Run from the repository root using the project's Python environment.
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Repository root and docs directory, resolved relative to this script.
ROOT = Path(__file__).resolve().parent.parent
DOCS = ROOT / 'docs'

# Matches markdown inline links: [label](target)
MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")

errors = []

for md in DOCS.rglob('*.md'):
    content = md.read_text(encoding='utf-8')
    for link in MD_LINK_RE.finditer(content):
        label, target = link.groups()
        # External URLs and same-page anchors are out of scope.
        if target.startswith('http://') or target.startswith('https://') or target.startswith('#'):
            continue
        # Drop any anchor suffix before resolving the path.
        target_path = target.split('#')[0]
        candidate = (md.parent / target_path).resolve()
        if candidate.exists():
            continue
        # Directory links with an implicit index file are acceptable.
        candidate_dir = md.parent / target_path
        if candidate_dir.is_dir() and (
            (candidate_dir / 'README.md').exists()
            or (candidate_dir / 'index.md').exists()
        ):
            continue
        errors.append((str(md.relative_to(ROOT)), target, label))

if errors:
    print('Broken local links found:')
    for src, tgt, label in errors:
        print(f'- {src} -> {tgt} ({label})')
    exit(2)

print('No broken local links detected.')
|
||||
@@ -1,79 +0,0 @@
|
||||
"""Lightweight Markdown formatter: normalizes first-line H1, adds code-fence language hints for common shebangs, trims trailing whitespace.
|
||||
|
||||
This is intentionally small and non-destructive; it touches only files under docs/ and makes safe changes.
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
DOCS = Path(__file__).resolve().parents[1] / "docs"
|
||||
|
||||
CODE_LANG_HINTS = {
|
||||
'powershell': ('powershell',),
|
||||
'bash': ('bash', 'sh'),
|
||||
'sql': ('sql',),
|
||||
'python': ('python',),
|
||||
}
|
||||
|
||||
|
||||
def add_code_fence_language(match):
    """re.sub callback: tag a bare ``` fence with a language guessed from its body."""
    fence = match.group(0)
    inner = match.group(1)
    # Opening fence already carries a language hint — leave it untouched.
    if fence.startswith('```') and len(fence.splitlines()[0].strip()) > 3:
        return fence
    # Guess the language from the first line of the fenced content.
    first_line = inner.strip().splitlines()[0] if inner.strip() else ''
    if first_line.startswith('$') or first_line.startswith('PS') or first_line.lower().startswith('powershell'):
        lang = 'powershell'
    elif first_line.startswith('#') or first_line.startswith('import') or first_line.startswith('from'):
        lang = 'python'
    elif re.match(r'^(select|insert|update|create)\b', first_line.strip(), re.I):
        lang = 'sql'
    elif first_line.startswith('git') or first_line.startswith('./') or first_line.startswith('sudo'):
        lang = 'bash'
    else:
        lang = ''
    # Unknown language: return the fence unchanged.
    return f'```{lang}\n{inner}\n```' if lang else fence
|
||||
|
||||
|
||||
def normalize_file(path: Path):
|
||||
text = path.read_text(encoding='utf-8')
|
||||
orig = text
|
||||
# Trim trailing whitespace and ensure single trailing newline
|
||||
text = '\n'.join(line.rstrip() for line in text.splitlines()) + '\n'
|
||||
# Ensure first non-empty line is H1
|
||||
lines = text.splitlines()
|
||||
for i, ln in enumerate(lines):
|
||||
if ln.strip():
|
||||
if not ln.startswith('#'):
|
||||
lines[i] = '# ' + ln
|
||||
break
|
||||
text = '\n'.join(lines) + '\n'
|
||||
# Add basic code fence languages where missing (simple heuristic)
|
||||
text = re.sub(r'```\n([\s\S]*?)\n```', add_code_fence_language, text)
|
||||
if text != orig:
|
||||
path.write_text(text, encoding='utf-8')
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
changed = []
|
||||
for p in DOCS.rglob('*.md'):
|
||||
if p.is_file():
|
||||
try:
|
||||
if normalize_file(p):
|
||||
changed.append(str(p.relative_to(Path.cwd())))
|
||||
except Exception as e:
|
||||
print(f"Failed to format {p}: {e}")
|
||||
if changed:
|
||||
print('Formatted files:')
|
||||
for c in changed:
|
||||
print(' -', c)
|
||||
else:
|
||||
print('No formatting changes required.')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,161 +0,0 @@
|
||||
-- Baseline migration for CalMiner database schema
|
||||
-- Date: 2025-10-25
|
||||
-- Purpose: Consolidate foundational tables and reference data
|
||||
|
||||
BEGIN;
|
||||
|
||||
-- Currency reference table
|
||||
CREATE TABLE IF NOT EXISTS currency (
|
||||
id SERIAL PRIMARY KEY,
|
||||
code VARCHAR(3) NOT NULL UNIQUE,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
symbol VARCHAR(8),
|
||||
is_active BOOLEAN NOT NULL DEFAULT TRUE
|
||||
);
|
||||
|
||||
INSERT INTO currency (code, name, symbol, is_active)
|
||||
VALUES
|
||||
('USD', 'United States Dollar', 'USD$', TRUE),
|
||||
('EUR', 'Euro', 'EUR', TRUE),
|
||||
('CLP', 'Chilean Peso', 'CLP$', TRUE),
|
||||
('RMB', 'Chinese Yuan', 'RMB', TRUE),
|
||||
('GBP', 'British Pound', 'GBP', TRUE),
|
||||
('CAD', 'Canadian Dollar', 'CAD$', TRUE),
|
||||
('AUD', 'Australian Dollar', 'AUD$', TRUE)
|
||||
ON CONFLICT (code) DO UPDATE
|
||||
SET name = EXCLUDED.name,
|
||||
symbol = EXCLUDED.symbol,
|
||||
is_active = EXCLUDED.is_active;
|
||||
|
||||
-- Application-level settings table
|
||||
CREATE TABLE IF NOT EXISTS application_setting (
|
||||
id SERIAL PRIMARY KEY,
|
||||
key VARCHAR(128) NOT NULL UNIQUE,
|
||||
value TEXT NOT NULL,
|
||||
value_type VARCHAR(32) NOT NULL DEFAULT 'string',
|
||||
category VARCHAR(32) NOT NULL DEFAULT 'general',
|
||||
description TEXT,
|
||||
is_editable BOOLEAN NOT NULL DEFAULT TRUE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS ux_application_setting_key
|
||||
ON application_setting (key);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_application_setting_category
|
||||
ON application_setting (category);
|
||||
|
||||
-- Measurement unit reference table
|
||||
CREATE TABLE IF NOT EXISTS measurement_unit (
|
||||
id SERIAL PRIMARY KEY,
|
||||
code VARCHAR(64) NOT NULL UNIQUE,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
symbol VARCHAR(16),
|
||||
unit_type VARCHAR(32) NOT NULL,
|
||||
is_active BOOLEAN NOT NULL DEFAULT TRUE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
INSERT INTO measurement_unit (code, name, symbol, unit_type, is_active)
|
||||
VALUES
|
||||
('tonnes', 'Tonnes', 't', 'mass', TRUE),
|
||||
('kilograms', 'Kilograms', 'kg', 'mass', TRUE),
|
||||
('pounds', 'Pounds', 'lb', 'mass', TRUE),
|
||||
('liters', 'Liters', 'L', 'volume', TRUE),
|
||||
('cubic_meters', 'Cubic Meters', 'm3', 'volume', TRUE),
|
||||
('kilowatt_hours', 'Kilowatt Hours', 'kWh', 'energy', TRUE)
|
||||
ON CONFLICT (code) DO UPDATE
|
||||
SET name = EXCLUDED.name,
|
||||
symbol = EXCLUDED.symbol,
|
||||
unit_type = EXCLUDED.unit_type,
|
||||
is_active = EXCLUDED.is_active;
|
||||
|
||||
-- Consumption and production measurement metadata
|
||||
ALTER TABLE consumption
|
||||
ADD COLUMN IF NOT EXISTS unit_name VARCHAR(64);
|
||||
ALTER TABLE consumption
|
||||
ADD COLUMN IF NOT EXISTS unit_symbol VARCHAR(16);
|
||||
|
||||
ALTER TABLE production_output
|
||||
ADD COLUMN IF NOT EXISTS unit_name VARCHAR(64);
|
||||
ALTER TABLE production_output
|
||||
ADD COLUMN IF NOT EXISTS unit_symbol VARCHAR(16);
|
||||
|
||||
-- Currency integration for CAPEX and OPEX
|
||||
ALTER TABLE capex
|
||||
ADD COLUMN IF NOT EXISTS currency_id INTEGER;
|
||||
ALTER TABLE opex
|
||||
ADD COLUMN IF NOT EXISTS currency_id INTEGER;
|
||||
|
||||
DO $$
|
||||
DECLARE
|
||||
usd_id INTEGER;
|
||||
BEGIN
|
||||
-- Ensure currency_id columns align with legacy currency_code values when present
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'capex' AND column_name = 'currency_code'
|
||||
) THEN
|
||||
UPDATE capex AS c
|
||||
SET currency_id = cur.id
|
||||
FROM currency AS cur
|
||||
WHERE c.currency_code = cur.code
|
||||
AND (c.currency_id IS DISTINCT FROM cur.id);
|
||||
END IF;
|
||||
|
||||
IF EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = 'opex' AND column_name = 'currency_code'
|
||||
) THEN
|
||||
UPDATE opex AS o
|
||||
SET currency_id = cur.id
|
||||
FROM currency AS cur
|
||||
WHERE o.currency_code = cur.code
|
||||
AND (o.currency_id IS DISTINCT FROM cur.id);
|
||||
END IF;
|
||||
|
||||
SELECT id INTO usd_id FROM currency WHERE code = 'USD';
|
||||
IF usd_id IS NOT NULL THEN
|
||||
UPDATE capex SET currency_id = usd_id WHERE currency_id IS NULL;
|
||||
UPDATE opex SET currency_id = usd_id WHERE currency_id IS NULL;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
ALTER TABLE capex
|
||||
ALTER COLUMN currency_id SET NOT NULL;
|
||||
ALTER TABLE opex
|
||||
ALTER COLUMN currency_id SET NOT NULL;
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM information_schema.table_constraints
|
||||
WHERE table_schema = current_schema()
|
||||
AND table_name = 'capex'
|
||||
AND constraint_name = 'fk_capex_currency'
|
||||
) THEN
|
||||
ALTER TABLE capex
|
||||
ADD CONSTRAINT fk_capex_currency FOREIGN KEY (currency_id)
|
||||
REFERENCES currency (id) ON DELETE RESTRICT;
|
||||
END IF;
|
||||
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM information_schema.table_constraints
|
||||
WHERE table_schema = current_schema()
|
||||
AND table_name = 'opex'
|
||||
AND constraint_name = 'fk_opex_currency'
|
||||
) THEN
|
||||
ALTER TABLE opex
|
||||
ADD CONSTRAINT fk_opex_currency FOREIGN KEY (currency_id)
|
||||
REFERENCES currency (id) ON DELETE RESTRICT;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
ALTER TABLE capex
|
||||
DROP COLUMN IF EXISTS currency_code;
|
||||
ALTER TABLE opex
|
||||
DROP COLUMN IF EXISTS currency_code;
|
||||
|
||||
COMMIT;
|
||||
@@ -1,25 +0,0 @@
|
||||
-- Migration: Create application_setting table for configurable application options
|
||||
-- Date: 2025-10-25
|
||||
-- Description: Introduces persistent storage for application-level settings such as theme colors.
|
||||
|
||||
BEGIN;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS application_setting (
|
||||
id SERIAL PRIMARY KEY,
|
||||
key VARCHAR(128) NOT NULL UNIQUE,
|
||||
value TEXT NOT NULL,
|
||||
value_type VARCHAR(32) NOT NULL DEFAULT 'string',
|
||||
category VARCHAR(32) NOT NULL DEFAULT 'general',
|
||||
description TEXT,
|
||||
is_editable BOOLEAN NOT NULL DEFAULT TRUE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS ux_application_setting_key
|
||||
ON application_setting (key);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_application_setting_category
|
||||
ON application_setting (category);
|
||||
|
||||
COMMIT;
|
||||
@@ -1,162 +0,0 @@
|
||||
"""Seed baseline data for CalMiner in an idempotent manner.
|
||||
|
||||
Usage examples
|
||||
--------------
|
||||
|
||||
```powershell
|
||||
# Use existing environment variables (or load from setup_test.env.example)
|
||||
python scripts/seed_data.py --currencies --units --defaults
|
||||
|
||||
# Dry-run to preview actions
|
||||
python scripts/seed_data.py --currencies --dry-run
|
||||
```
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
from typing import Iterable, Optional
|
||||
|
||||
import psycopg2
|
||||
from psycopg2 import errors
|
||||
from psycopg2.extras import execute_values
|
||||
|
||||
from scripts.setup_database import DatabaseConfig
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CURRENCY_SEEDS = (
|
||||
("USD", "United States Dollar", "USD$", True),
|
||||
("EUR", "Euro", "EUR", True),
|
||||
("CLP", "Chilean Peso", "CLP$", True),
|
||||
("RMB", "Chinese Yuan", "RMB", True),
|
||||
("GBP", "British Pound", "GBP", True),
|
||||
("CAD", "Canadian Dollar", "CAD$", True),
|
||||
("AUD", "Australian Dollar", "AUD$", True),
|
||||
)
|
||||
|
||||
MEASUREMENT_UNIT_SEEDS = (
|
||||
("tonnes", "Tonnes", "t", "mass", True),
|
||||
("kilograms", "Kilograms", "kg", "mass", True),
|
||||
("pounds", "Pounds", "lb", "mass", True),
|
||||
("liters", "Liters", "L", "volume", True),
|
||||
("cubic_meters", "Cubic Meters", "m3", "volume", True),
|
||||
("kilowatt_hours", "Kilowatt Hours", "kWh", "energy", True),
|
||||
)
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser(description="Seed baseline CalMiner data")
|
||||
parser.add_argument("--currencies", action="store_true", help="Seed currency table")
|
||||
parser.add_argument("--units", action="store_true", help="Seed unit table")
|
||||
parser.add_argument("--defaults", action="store_true", help="Seed default records")
|
||||
parser.add_argument("--dry-run", action="store_true", help="Print actions without executing")
|
||||
parser.add_argument(
|
||||
"--verbose", "-v", action="count", default=0, help="Increase logging verbosity"
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def _configure_logging(args: argparse.Namespace) -> None:
|
||||
level = logging.WARNING - (10 * min(args.verbose, 2))
|
||||
logging.basicConfig(level=max(level, logging.INFO), format="%(levelname)s %(message)s")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
args = parse_args()
|
||||
run_with_namespace(args)
|
||||
|
||||
|
||||
def run_with_namespace(
|
||||
args: argparse.Namespace,
|
||||
*,
|
||||
config: Optional[DatabaseConfig] = None,
|
||||
) -> None:
|
||||
_configure_logging(args)
|
||||
|
||||
if not any((args.currencies, args.units, args.defaults)):
|
||||
logger.info("No seeding options provided; exiting")
|
||||
return
|
||||
|
||||
config = config or DatabaseConfig.from_env()
|
||||
with psycopg2.connect(config.application_dsn()) as conn:
|
||||
conn.autocommit = True
|
||||
with conn.cursor() as cursor:
|
||||
if args.currencies:
|
||||
_seed_currencies(cursor, dry_run=args.dry_run)
|
||||
if args.units:
|
||||
_seed_units(cursor, dry_run=args.dry_run)
|
||||
if args.defaults:
|
||||
_seed_defaults(cursor, dry_run=args.dry_run)
|
||||
|
||||
|
||||
def _seed_currencies(cursor, *, dry_run: bool) -> None:
|
||||
logger.info("Seeding currency table (%d rows)", len(CURRENCY_SEEDS))
|
||||
if dry_run:
|
||||
for code, name, symbol, active in CURRENCY_SEEDS:
|
||||
logger.info("Dry run: would upsert currency %s (%s)", code, name)
|
||||
return
|
||||
|
||||
execute_values(
|
||||
cursor,
|
||||
"""
|
||||
INSERT INTO currency (code, name, symbol, is_active)
|
||||
VALUES %s
|
||||
ON CONFLICT (code) DO UPDATE
|
||||
SET name = EXCLUDED.name,
|
||||
symbol = EXCLUDED.symbol,
|
||||
is_active = EXCLUDED.is_active
|
||||
""",
|
||||
CURRENCY_SEEDS,
|
||||
)
|
||||
logger.info("Currency seed complete")
|
||||
|
||||
|
||||
def _seed_units(cursor, *, dry_run: bool) -> None:
|
||||
total = len(MEASUREMENT_UNIT_SEEDS)
|
||||
logger.info("Seeding measurement_unit table (%d rows)", total)
|
||||
if dry_run:
|
||||
for code, name, symbol, unit_type, _ in MEASUREMENT_UNIT_SEEDS:
|
||||
logger.info(
|
||||
"Dry run: would upsert measurement unit %s (%s - %s)",
|
||||
code,
|
||||
name,
|
||||
unit_type,
|
||||
)
|
||||
return
|
||||
|
||||
try:
|
||||
execute_values(
|
||||
cursor,
|
||||
"""
|
||||
INSERT INTO measurement_unit (code, name, symbol, unit_type, is_active)
|
||||
VALUES %s
|
||||
ON CONFLICT (code) DO UPDATE
|
||||
SET name = EXCLUDED.name,
|
||||
symbol = EXCLUDED.symbol,
|
||||
unit_type = EXCLUDED.unit_type,
|
||||
is_active = EXCLUDED.is_active
|
||||
""",
|
||||
MEASUREMENT_UNIT_SEEDS,
|
||||
)
|
||||
except errors.UndefinedTable:
|
||||
logger.warning(
|
||||
"measurement_unit table does not exist; skipping unit seeding."
|
||||
)
|
||||
cursor.connection.rollback()
|
||||
return
|
||||
|
||||
logger.info("Measurement unit seed complete")
|
||||
|
||||
|
||||
def _seed_defaults(cursor, *, dry_run: bool) -> None:
|
||||
logger.info("Seeding default records - not yet implemented")
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,77 +0,0 @@
|
||||
from statistics import mean, median, pstdev
|
||||
from typing import Any, Dict, Iterable, List, Mapping, Union, cast
|
||||
|
||||
|
||||
def _extract_results(simulation_results: Iterable[object]) -> List[float]:
|
||||
values: List[float] = []
|
||||
for item in simulation_results:
|
||||
if not isinstance(item, Mapping):
|
||||
continue
|
||||
mapping_item = cast(Mapping[str, Any], item)
|
||||
value = mapping_item.get("result")
|
||||
if isinstance(value, (int, float)):
|
||||
values.append(float(value))
|
||||
return values
|
||||
|
||||
|
||||
def _percentile(values: List[float], percentile: float) -> float:
|
||||
if not values:
|
||||
return 0.0
|
||||
sorted_values = sorted(values)
|
||||
if len(sorted_values) == 1:
|
||||
return sorted_values[0]
|
||||
index = (percentile / 100) * (len(sorted_values) - 1)
|
||||
lower = int(index)
|
||||
upper = min(lower + 1, len(sorted_values) - 1)
|
||||
weight = index - lower
|
||||
return sorted_values[lower] * (1 - weight) + sorted_values[upper] * weight
|
||||
|
||||
|
||||
def generate_report(simulation_results: List[Dict[str, float]]) -> Dict[str, Union[float, int]]:
|
||||
"""Aggregate basic statistics for simulation outputs."""
|
||||
|
||||
values = _extract_results(simulation_results)
|
||||
|
||||
if not values:
|
||||
return {
|
||||
"count": 0,
|
||||
"mean": 0.0,
|
||||
"median": 0.0,
|
||||
"min": 0.0,
|
||||
"max": 0.0,
|
||||
"std_dev": 0.0,
|
||||
"variance": 0.0,
|
||||
"percentile_10": 0.0,
|
||||
"percentile_90": 0.0,
|
||||
"percentile_5": 0.0,
|
||||
"percentile_95": 0.0,
|
||||
"value_at_risk_95": 0.0,
|
||||
"expected_shortfall_95": 0.0,
|
||||
}
|
||||
|
||||
summary: Dict[str, Union[float, int]] = {
|
||||
"count": len(values),
|
||||
"mean": mean(values),
|
||||
"median": median(values),
|
||||
"min": min(values),
|
||||
"max": max(values),
|
||||
"percentile_10": _percentile(values, 10),
|
||||
"percentile_90": _percentile(values, 90),
|
||||
"percentile_5": _percentile(values, 5),
|
||||
"percentile_95": _percentile(values, 95),
|
||||
}
|
||||
|
||||
std_dev = pstdev(values) if len(values) > 1 else 0.0
|
||||
summary["std_dev"] = std_dev
|
||||
summary["variance"] = std_dev ** 2
|
||||
|
||||
var_95 = summary["percentile_5"]
|
||||
summary["value_at_risk_95"] = var_95
|
||||
|
||||
tail_values = [value for value in values if value <= var_95]
|
||||
if tail_values:
|
||||
summary["expected_shortfall_95"] = mean(tail_values)
|
||||
else:
|
||||
summary["expected_shortfall_95"] = var_95
|
||||
|
||||
return summary
|
||||
@@ -1,208 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, Mapping
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.application_setting import ApplicationSetting
|
||||
|
||||
CSS_COLOR_CATEGORY = "theme"
|
||||
CSS_COLOR_VALUE_TYPE = "color"
|
||||
CSS_ENV_PREFIX = "CALMINER_THEME_"
|
||||
|
||||
CSS_COLOR_DEFAULTS: Dict[str, str] = {
|
||||
"--color-background": "#f4f5f7",
|
||||
"--color-surface": "#ffffff",
|
||||
"--color-text-primary": "#2a1f33",
|
||||
"--color-text-secondary": "#624769",
|
||||
"--color-text-muted": "#64748b",
|
||||
"--color-text-subtle": "#94a3b8",
|
||||
"--color-text-invert": "#ffffff",
|
||||
"--color-text-dark": "#0f172a",
|
||||
"--color-text-strong": "#111827",
|
||||
"--color-primary": "#5f320d",
|
||||
"--color-primary-strong": "#7e4c13",
|
||||
"--color-primary-stronger": "#837c15",
|
||||
"--color-accent": "#bff838",
|
||||
"--color-border": "#e2e8f0",
|
||||
"--color-border-strong": "#cbd5e1",
|
||||
"--color-highlight": "#eef2ff",
|
||||
"--color-panel-shadow": "rgba(15, 23, 42, 0.08)",
|
||||
"--color-panel-shadow-deep": "rgba(15, 23, 42, 0.12)",
|
||||
"--color-surface-alt": "#f8fafc",
|
||||
"--color-success": "#047857",
|
||||
"--color-error": "#b91c1c",
|
||||
}
|
||||
|
||||
_COLOR_VALUE_PATTERN = re.compile(
|
||||
r"^(#([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})|rgba?\([^)]+\)|hsla?\([^)]+\))$",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def ensure_css_color_settings(db: Session) -> Dict[str, ApplicationSetting]:
|
||||
"""Ensure the CSS color defaults exist in the settings table."""
|
||||
|
||||
existing = (
|
||||
db.query(ApplicationSetting)
|
||||
.filter(ApplicationSetting.key.in_(CSS_COLOR_DEFAULTS.keys()))
|
||||
.all()
|
||||
)
|
||||
by_key = {setting.key: setting for setting in existing}
|
||||
|
||||
created = False
|
||||
for key, default_value in CSS_COLOR_DEFAULTS.items():
|
||||
if key in by_key:
|
||||
continue
|
||||
setting = ApplicationSetting(
|
||||
key=key,
|
||||
value=default_value,
|
||||
value_type=CSS_COLOR_VALUE_TYPE,
|
||||
category=CSS_COLOR_CATEGORY,
|
||||
description=f"CSS variable {key}",
|
||||
is_editable=True,
|
||||
)
|
||||
db.add(setting)
|
||||
by_key[key] = setting
|
||||
created = True
|
||||
|
||||
if created:
|
||||
db.commit()
|
||||
for key, setting in by_key.items():
|
||||
db.refresh(setting)
|
||||
|
||||
return by_key
|
||||
|
||||
|
||||
def get_css_color_settings(db: Session) -> Dict[str, str]:
|
||||
"""Return CSS color variables, filling missing values with defaults."""
|
||||
|
||||
settings = ensure_css_color_settings(db)
|
||||
values: Dict[str, str] = {
|
||||
key: settings[key].value if key in settings else default
|
||||
for key, default in CSS_COLOR_DEFAULTS.items()
|
||||
}
|
||||
|
||||
env_overrides = read_css_color_env_overrides(os.environ)
|
||||
if env_overrides:
|
||||
values.update(env_overrides)
|
||||
|
||||
return values
|
||||
|
||||
|
||||
def update_css_color_settings(db: Session, updates: Mapping[str, str]) -> Dict[str, str]:
|
||||
"""Persist provided CSS color overrides and return the final values."""
|
||||
|
||||
if not updates:
|
||||
return get_css_color_settings(db)
|
||||
|
||||
invalid_keys = sorted(set(updates.keys()) - set(CSS_COLOR_DEFAULTS.keys()))
|
||||
if invalid_keys:
|
||||
invalid_list = ", ".join(invalid_keys)
|
||||
raise ValueError(f"Unsupported CSS variables: {invalid_list}")
|
||||
|
||||
normalized: Dict[str, str] = {}
|
||||
for key, value in updates.items():
|
||||
normalized[key] = _normalize_color_value(value)
|
||||
|
||||
settings = ensure_css_color_settings(db)
|
||||
changed = False
|
||||
|
||||
for key, value in normalized.items():
|
||||
setting = settings[key]
|
||||
if setting.value != value:
|
||||
setting.value = value
|
||||
changed = True
|
||||
if setting.value_type != CSS_COLOR_VALUE_TYPE:
|
||||
setting.value_type = CSS_COLOR_VALUE_TYPE
|
||||
changed = True
|
||||
if setting.category != CSS_COLOR_CATEGORY:
|
||||
setting.category = CSS_COLOR_CATEGORY
|
||||
changed = True
|
||||
if not setting.is_editable:
|
||||
setting.is_editable = True
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
db.commit()
|
||||
for key in normalized.keys():
|
||||
db.refresh(settings[key])
|
||||
|
||||
return get_css_color_settings(db)
|
||||
|
||||
|
||||
def read_css_color_env_overrides(
|
||||
env: Mapping[str, str] | None = None,
|
||||
) -> Dict[str, str]:
|
||||
"""Return validated CSS overrides sourced from environment variables."""
|
||||
|
||||
if env is None:
|
||||
env = os.environ
|
||||
|
||||
overrides: Dict[str, str] = {}
|
||||
for css_key in CSS_COLOR_DEFAULTS.keys():
|
||||
env_name = css_key_to_env_var(css_key)
|
||||
raw_value = env.get(env_name)
|
||||
if raw_value is None:
|
||||
continue
|
||||
overrides[css_key] = _normalize_color_value(raw_value)
|
||||
|
||||
return overrides
|
||||
|
||||
|
||||
def _normalize_color_value(value: str) -> str:
|
||||
if not isinstance(value, str):
|
||||
raise ValueError("Color value must be a string")
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
raise ValueError("Color value cannot be empty")
|
||||
if not _COLOR_VALUE_PATTERN.match(trimmed):
|
||||
raise ValueError(
|
||||
"Color value must be a hex code or an rgb/rgba/hsl/hsla expression"
|
||||
)
|
||||
_validate_functional_color(trimmed)
|
||||
return trimmed
|
||||
|
||||
|
||||
def _validate_functional_color(value: str) -> None:
|
||||
lowered = value.lower()
|
||||
if lowered.startswith("rgb(") or lowered.startswith("hsl("):
|
||||
_ensure_component_count(value, expected=3)
|
||||
elif lowered.startswith("rgba(") or lowered.startswith("hsla("):
|
||||
_ensure_component_count(value, expected=4)
|
||||
|
||||
|
||||
def _ensure_component_count(value: str, expected: int) -> None:
|
||||
if not value.endswith(")"):
|
||||
raise ValueError("Color function expressions must end with a closing parenthesis")
|
||||
inner = value[value.index("(") + 1 : -1]
|
||||
parts = [segment.strip() for segment in inner.split(",")]
|
||||
if len(parts) != expected:
|
||||
raise ValueError(
|
||||
"Color function expressions must provide the expected number of components"
|
||||
)
|
||||
if any(not component for component in parts):
|
||||
raise ValueError("Color function components cannot be empty")
|
||||
|
||||
|
||||
def css_key_to_env_var(css_key: str) -> str:
|
||||
sanitized = css_key.lstrip("-").replace("-", "_").upper()
|
||||
return f"{CSS_ENV_PREFIX}{sanitized}"
|
||||
|
||||
|
||||
def list_css_env_override_rows(
|
||||
env: Mapping[str, str] | None = None,
|
||||
) -> list[Dict[str, str]]:
|
||||
overrides = read_css_color_env_overrides(env)
|
||||
rows: list[Dict[str, str]] = []
|
||||
for css_key, value in overrides.items():
|
||||
rows.append(
|
||||
{
|
||||
"css_key": css_key,
|
||||
"env_var": css_key_to_env_var(css_key),
|
||||
"value": value,
|
||||
}
|
||||
)
|
||||
return rows
|
||||
@@ -1,140 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from random import Random
|
||||
from typing import Dict, List, Literal, Optional, Sequence
|
||||
|
||||
|
||||
DEFAULT_STD_DEV_RATIO = 0.1
|
||||
DEFAULT_UNIFORM_SPAN_RATIO = 0.15
|
||||
DistributionType = Literal["normal", "uniform", "triangular"]
|
||||
|
||||
|
||||
@dataclass
|
||||
class SimulationParameter:
|
||||
name: str
|
||||
base_value: float
|
||||
distribution: DistributionType
|
||||
std_dev: Optional[float] = None
|
||||
minimum: Optional[float] = None
|
||||
maximum: Optional[float] = None
|
||||
mode: Optional[float] = None
|
||||
|
||||
|
||||
def _ensure_positive_span(span: float, fallback: float) -> float:
|
||||
return span if span and span > 0 else fallback
|
||||
|
||||
|
||||
def _compile_parameters(parameters: Sequence[Dict[str, float]]) -> List[SimulationParameter]:
|
||||
compiled: List[SimulationParameter] = []
|
||||
for index, item in enumerate(parameters):
|
||||
if "value" not in item:
|
||||
raise ValueError(
|
||||
f"Parameter at index {index} must include 'value'")
|
||||
name = str(item.get("name", f"param_{index}"))
|
||||
base_value = float(item["value"])
|
||||
distribution = str(item.get("distribution", "normal")).lower()
|
||||
if distribution not in {"normal", "uniform", "triangular"}:
|
||||
raise ValueError(
|
||||
f"Parameter '{name}' has unsupported distribution '{distribution}'"
|
||||
)
|
||||
|
||||
span_default = abs(base_value) * DEFAULT_UNIFORM_SPAN_RATIO or 1.0
|
||||
|
||||
if distribution == "normal":
|
||||
std_dev = item.get("std_dev")
|
||||
std_dev_value = float(std_dev) if std_dev is not None else abs(
|
||||
base_value) * DEFAULT_STD_DEV_RATIO or 1.0
|
||||
compiled.append(
|
||||
SimulationParameter(
|
||||
name=name,
|
||||
base_value=base_value,
|
||||
distribution="normal",
|
||||
std_dev=_ensure_positive_span(std_dev_value, 1.0),
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
minimum = item.get("min")
|
||||
maximum = item.get("max")
|
||||
if minimum is None or maximum is None:
|
||||
minimum = base_value - span_default
|
||||
maximum = base_value + span_default
|
||||
minimum = float(minimum)
|
||||
maximum = float(maximum)
|
||||
if minimum >= maximum:
|
||||
raise ValueError(
|
||||
f"Parameter '{name}' requires 'min' < 'max' for {distribution} distribution"
|
||||
)
|
||||
|
||||
if distribution == "uniform":
|
||||
compiled.append(
|
||||
SimulationParameter(
|
||||
name=name,
|
||||
base_value=base_value,
|
||||
distribution="uniform",
|
||||
minimum=minimum,
|
||||
maximum=maximum,
|
||||
)
|
||||
)
|
||||
else: # triangular
|
||||
mode = item.get("mode")
|
||||
if mode is None:
|
||||
mode = base_value
|
||||
mode_value = float(mode)
|
||||
if not (minimum <= mode_value <= maximum):
|
||||
raise ValueError(
|
||||
f"Parameter '{name}' mode must be within min/max bounds for triangular distribution"
|
||||
)
|
||||
compiled.append(
|
||||
SimulationParameter(
|
||||
name=name,
|
||||
base_value=base_value,
|
||||
distribution="triangular",
|
||||
minimum=minimum,
|
||||
maximum=maximum,
|
||||
mode=mode_value,
|
||||
)
|
||||
)
|
||||
return compiled
|
||||
|
||||
|
||||
def _sample_parameter(rng: Random, param: SimulationParameter) -> float:
|
||||
if param.distribution == "normal":
|
||||
assert param.std_dev is not None
|
||||
return rng.normalvariate(param.base_value, param.std_dev)
|
||||
if param.distribution == "uniform":
|
||||
assert param.minimum is not None and param.maximum is not None
|
||||
return rng.uniform(param.minimum, param.maximum)
|
||||
# triangular
|
||||
assert (
|
||||
param.minimum is not None
|
||||
and param.maximum is not None
|
||||
and param.mode is not None
|
||||
)
|
||||
return rng.triangular(param.minimum, param.maximum, param.mode)
|
||||
|
||||
|
||||
def run_simulation(
|
||||
parameters: Sequence[Dict[str, float]],
|
||||
iterations: int = 1000,
|
||||
seed: Optional[int] = None,
|
||||
) -> List[Dict[str, float]]:
|
||||
"""Run a lightweight Monte Carlo simulation using configurable distributions."""
|
||||
|
||||
if iterations <= 0:
|
||||
return []
|
||||
|
||||
compiled_params = _compile_parameters(parameters)
|
||||
if not compiled_params:
|
||||
return []
|
||||
|
||||
rng = Random(seed)
|
||||
results: List[Dict[str, float]] = []
|
||||
for iteration in range(1, iterations + 1):
|
||||
total = 0.0
|
||||
for param in compiled_params:
|
||||
sample = _sample_parameter(rng, param)
|
||||
total += sample
|
||||
results.append({"iteration": iteration, "result": total})
|
||||
return results
|
||||
@@ -1,25 +1,29 @@
|
||||
:root {
|
||||
--color-background: #f4f5f7;
|
||||
--color-surface: #ffffff;
|
||||
--color-text-primary: #2a1f33;
|
||||
--color-text-secondary: #624769;
|
||||
--color-text-muted: #64748b;
|
||||
--color-text-subtle: #94a3b8;
|
||||
--bg: #0b0f14;
|
||||
--bg-2: #0f141b;
|
||||
--card: #151b23;
|
||||
--text: #e6edf3;
|
||||
--muted: #a9b4c0;
|
||||
--brand: #f1b21a;
|
||||
--brand-2: #f6c648;
|
||||
--brand-3: #f9d475;
|
||||
--accent: #2ba58f;
|
||||
--danger: #d14b4b;
|
||||
--shadow: 0 10px 30px rgba(0, 0, 0, 0.35);
|
||||
--radius: 14px;
|
||||
--radius-sm: 10px;
|
||||
--container: 1180px;
|
||||
--muted: var(--muted);
|
||||
--color-text-subtle: rgba(169, 180, 192, 0.6);
|
||||
--color-text-invert: #ffffff;
|
||||
--color-text-dark: #0f172a;
|
||||
--color-text-strong: #111827;
|
||||
--color-primary: #5f320d;
|
||||
--color-primary-strong: #7e4c13;
|
||||
--color-primary-stronger: #837c15;
|
||||
--color-accent: #bff838;
|
||||
--color-border: #e2e8f0;
|
||||
--color-border-strong: #cbd5e1;
|
||||
--color-highlight: #eef2ff;
|
||||
--color-panel-shadow: rgba(15, 23, 42, 0.08);
|
||||
--color-panel-shadow-deep: rgba(15, 23, 42, 0.12);
|
||||
--color-surface-alt: #f8fafc;
|
||||
--color-success: #047857;
|
||||
--color-error: #b91c1c;
|
||||
--color-border: rgba(255, 255, 255, 0.08);
|
||||
--color-border-strong: rgba(255, 255, 255, 0.12);
|
||||
--color-highlight: rgba(241, 178, 26, 0.08);
|
||||
--color-panel-shadow: rgba(0, 0, 0, 0.25);
|
||||
--color-panel-shadow-deep: rgba(0, 0, 0, 0.35);
|
||||
--color-surface-alt: rgba(21, 27, 35, 0.7);
|
||||
--space-2xs: 0.25rem;
|
||||
--space-xs: 0.5rem;
|
||||
--space-sm: 0.75rem;
|
||||
@@ -33,15 +37,30 @@
|
||||
--font-size-lg: 1.25rem;
|
||||
--font-size-xl: 1.5rem;
|
||||
--font-size-2xl: 2rem;
|
||||
--panel-radius: 12px;
|
||||
--table-radius: 10px;
|
||||
--panel-radius: var(--radius);
|
||||
--table-radius: var(--radius-sm);
|
||||
}
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
html,
|
||||
body {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
|
||||
background-color: var(--color-background);
|
||||
color: var(--color-text-primary);
|
||||
font-family: ui-sans-serif, system-ui, -apple-system, 'Segoe UI', 'Roboto',
|
||||
Helvetica, Arial, 'Apple Color Emoji', 'Segoe UI Emoji';
|
||||
color: var(--text);
|
||||
background: linear-gradient(180deg, var(--bg) 0%, var(--bg-2) 100%);
|
||||
line-height: 1.45;
|
||||
}
|
||||
|
||||
a {
|
||||
color: var(--brand);
|
||||
}
|
||||
|
||||
.app-layout {
|
||||
@@ -51,7 +70,7 @@ body {
|
||||
|
||||
.app-sidebar {
|
||||
width: 264px;
|
||||
background-color: var(--color-primary);
|
||||
background-color: var(--card);
|
||||
color: var(--color-text-invert);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
@@ -59,6 +78,7 @@ body {
|
||||
position: sticky;
|
||||
top: 0;
|
||||
height: 100vh;
|
||||
border-right: 1px solid var(--color-border);
|
||||
}
|
||||
|
||||
.sidebar-inner {
|
||||
@@ -82,11 +102,7 @@ body {
|
||||
width: 44px;
|
||||
height: 44px;
|
||||
border-radius: 12px;
|
||||
background: linear-gradient(
|
||||
0deg,
|
||||
var(--color-primary-stronger),
|
||||
var(--color-accent)
|
||||
);
|
||||
background: linear-gradient(0deg, var(--brand-3), var(--accent));
|
||||
color: var(--color-text-invert);
|
||||
font-weight: 700;
|
||||
font-size: 1.1rem;
|
||||
@@ -207,7 +223,7 @@ body {
|
||||
}
|
||||
|
||||
.app-main {
|
||||
background-color: var(--color-background);
|
||||
background-color: var(--bg);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
flex: 1;
|
||||
@@ -240,7 +256,7 @@ body {
|
||||
|
||||
.dashboard-subtitle {
|
||||
margin: 0.35rem 0 0;
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.dashboard-actions {
|
||||
@@ -259,7 +275,7 @@ body {
|
||||
|
||||
.page-subtitle {
|
||||
margin-top: 0.35rem;
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
font-size: 0.95rem;
|
||||
}
|
||||
|
||||
@@ -271,13 +287,14 @@ body {
|
||||
}
|
||||
|
||||
.settings-card {
|
||||
background: var(--color-surface);
|
||||
border-radius: 12px;
|
||||
background: var(--card);
|
||||
border-radius: var(--radius);
|
||||
padding: 1.5rem;
|
||||
box-shadow: 0 4px 14px var(--color-panel-shadow);
|
||||
box-shadow: var(--shadow);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.75rem;
|
||||
border: 1px solid var(--color-border);
|
||||
}
|
||||
|
||||
.settings-card h2 {
|
||||
@@ -287,7 +304,7 @@ body {
|
||||
|
||||
.settings-card p {
|
||||
margin: 0;
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.settings-card-note {
|
||||
@@ -311,7 +328,7 @@ body {
|
||||
|
||||
.color-form-field.is-env-override {
|
||||
background: rgba(191, 248, 56, 0.12);
|
||||
border-color: var(--color-accent);
|
||||
border-color: var(--accent);
|
||||
}
|
||||
|
||||
.color-field-header {
|
||||
@@ -319,13 +336,13 @@ body {
|
||||
justify-content: space-between;
|
||||
gap: var(--space-sm);
|
||||
font-weight: 600;
|
||||
color: var(--color-text-strong);
|
||||
font-family: "Fira Code", "Consolas", "Courier New", monospace;
|
||||
color: var(--text);
|
||||
font-family: 'Fira Code', 'Consolas', 'Courier New', monospace;
|
||||
font-size: 0.85rem;
|
||||
}
|
||||
|
||||
.color-field-default {
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
@@ -337,7 +354,7 @@ body {
|
||||
.color-env-flag {
|
||||
font-size: 0.78rem;
|
||||
font-weight: 600;
|
||||
color: var(--color-accent);
|
||||
color: var(--accent);
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
@@ -349,7 +366,7 @@ body {
|
||||
}
|
||||
|
||||
.color-value-input {
|
||||
font-family: "Fira Code", "Consolas", "Courier New", monospace;
|
||||
font-family: 'Fira Code', 'Consolas', 'Courier New', monospace;
|
||||
}
|
||||
|
||||
.color-value-input[disabled] {
|
||||
@@ -378,7 +395,7 @@ body {
|
||||
}
|
||||
|
||||
.env-overrides-table code {
|
||||
font-family: "Fira Code", "Consolas", "Courier New", monospace;
|
||||
font-family: 'Fira Code', 'Consolas', 'Courier New', monospace;
|
||||
font-size: 0.85rem;
|
||||
}
|
||||
|
||||
@@ -391,7 +408,7 @@ body {
|
||||
border-radius: 999px;
|
||||
font-weight: 600;
|
||||
text-decoration: none;
|
||||
background: var(--color-primary);
|
||||
background: var(--brand);
|
||||
color: var(--color-text-invert);
|
||||
transition: transform 0.2s ease, box-shadow 0.2s ease;
|
||||
}
|
||||
@@ -410,26 +427,27 @@ body {
|
||||
}
|
||||
|
||||
.metric-card {
|
||||
background: var(--color-surface);
|
||||
border-radius: 12px;
|
||||
background: var(--card);
|
||||
border-radius: var(--radius);
|
||||
padding: 1.2rem 1.4rem;
|
||||
box-shadow: 0 4px 14px var(--color-panel-shadow);
|
||||
box-shadow: var(--shadow);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.35rem;
|
||||
border: 1px solid var(--color-border);
|
||||
}
|
||||
|
||||
.metric-label {
|
||||
font-size: 0.82rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.metric-value {
|
||||
font-size: 1.45rem;
|
||||
font-weight: 700;
|
||||
color: var(--color-text-dark);
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.dashboard-charts {
|
||||
@@ -522,7 +540,7 @@ body {
|
||||
}
|
||||
|
||||
.list-detail {
|
||||
color: var(--color-text-secondary);
|
||||
color: var(--muted);
|
||||
font-size: 0.95rem;
|
||||
}
|
||||
|
||||
@@ -532,7 +550,7 @@ body {
|
||||
}
|
||||
|
||||
.btn.is-loading::after {
|
||||
content: "";
|
||||
content: '';
|
||||
width: 0.85rem;
|
||||
height: 0.85rem;
|
||||
border: 2px solid rgba(255, 255, 255, 0.6);
|
||||
@@ -550,7 +568,7 @@ body {
|
||||
}
|
||||
|
||||
.panel {
|
||||
background-color: var(--color-surface);
|
||||
background-color: var(--card);
|
||||
border-radius: var(--panel-radius);
|
||||
padding: var(--space-xl);
|
||||
box-shadow: 0 2px 8px var(--color-panel-shadow);
|
||||
@@ -560,7 +578,7 @@ body {
|
||||
.panel h2,
|
||||
.panel h3 {
|
||||
font-weight: 700;
|
||||
color: var(--color-text-dark);
|
||||
color: var(--text);
|
||||
margin: 0 0 var(--space-sm);
|
||||
}
|
||||
|
||||
@@ -583,7 +601,7 @@ body {
|
||||
flex-direction: column;
|
||||
gap: var(--space-sm);
|
||||
font-weight: 600;
|
||||
color: var(--color-text-strong);
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
.form-grid input,
|
||||
@@ -598,7 +616,7 @@ body {
|
||||
.form-grid input:focus,
|
||||
.form-grid textarea:focus,
|
||||
.form-grid select:focus {
|
||||
outline: 2px solid var(--color-primary-strong);
|
||||
outline: 2px solid var(--brand-2);
|
||||
outline-offset: 1px;
|
||||
}
|
||||
|
||||
@@ -624,13 +642,13 @@ body {
|
||||
}
|
||||
|
||||
.btn.primary {
|
||||
background-color: var(--color-primary-strong);
|
||||
background-color: var(--brand-2);
|
||||
color: var(--color-text-invert);
|
||||
}
|
||||
|
||||
.btn.primary:hover,
|
||||
.btn.primary:focus {
|
||||
background-color: var(--color-primary-stronger);
|
||||
background-color: var(--brand-3);
|
||||
}
|
||||
|
||||
.result-output {
|
||||
@@ -638,14 +656,14 @@ body {
|
||||
color: var(--color-surface-alt);
|
||||
padding: 1rem;
|
||||
border-radius: 8px;
|
||||
font-family: "Fira Code", "Consolas", "Courier New", monospace;
|
||||
font-family: 'Fira Code', 'Consolas', 'Courier New', monospace;
|
||||
overflow-x: auto;
|
||||
margin-top: 1.5rem;
|
||||
}
|
||||
|
||||
.monospace-input {
|
||||
width: 100%;
|
||||
font-family: "Fira Code", "Consolas", "Courier New", monospace;
|
||||
font-family: 'Fira Code', 'Consolas', 'Courier New', monospace;
|
||||
min-height: 120px;
|
||||
}
|
||||
|
||||
@@ -670,7 +688,7 @@ table {
|
||||
}
|
||||
|
||||
thead {
|
||||
background-color: var(--color-primary);
|
||||
background-color: var(--brand);
|
||||
color: var(--color-text-invert);
|
||||
}
|
||||
|
||||
@@ -687,7 +705,7 @@ tbody tr:nth-child(even) {
|
||||
|
||||
.empty-state {
|
||||
margin-top: 1.5rem;
|
||||
color: var(--color-text-muted);
|
||||
color: var(--muted);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
@@ -701,15 +719,15 @@ tbody tr:nth-child(even) {
|
||||
}
|
||||
|
||||
.feedback.success {
|
||||
color: var(--color-success);
|
||||
color: var(--accent);
|
||||
}
|
||||
|
||||
.feedback.error {
|
||||
color: var(--color-error);
|
||||
color: var(--danger);
|
||||
}
|
||||
|
||||
.site-footer {
|
||||
background-color: var(--color-primary);
|
||||
background-color: var(--brand);
|
||||
color: var(--color-text-invert);
|
||||
margin-top: 3rem;
|
||||
}
|
||||
|
||||
@@ -1,205 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("consumption-data");
|
||||
let data = { scenarios: [], consumption: {}, unit_options: [] };
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
data = {
|
||||
scenarios: Array.isArray(parsed.scenarios) ? parsed.scenarios : [],
|
||||
consumption:
|
||||
parsed.consumption && typeof parsed.consumption === "object"
|
||||
? parsed.consumption
|
||||
: {},
|
||||
unit_options: Array.isArray(parsed.unit_options)
|
||||
? parsed.unit_options
|
||||
: [],
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse consumption data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const consumptionByScenario = data.consumption;
|
||||
const filterSelect = document.getElementById("consumption-scenario-filter");
|
||||
const tableWrapper = document.getElementById("consumption-table-wrapper");
|
||||
const tableBody = document.getElementById("consumption-table-body");
|
||||
const emptyState = document.getElementById("consumption-empty");
|
||||
const form = document.getElementById("consumption-form");
|
||||
const feedbackEl = document.getElementById("consumption-feedback");
|
||||
const unitSelect = document.getElementById("consumption-form-unit");
|
||||
const unitSymbolInput = document.getElementById(
|
||||
"consumption-form-unit-symbol"
|
||||
);
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
feedbackEl.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.textContent = "";
|
||||
};
|
||||
|
||||
const formatAmount = (value) =>
|
||||
Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: 2,
|
||||
maximumFractionDigits: 2,
|
||||
});
|
||||
|
||||
const formatMeasurement = (amount, symbol, name) => {
|
||||
if (symbol) {
|
||||
return `${formatAmount(amount)} ${symbol}`;
|
||||
}
|
||||
if (name) {
|
||||
return `${formatAmount(amount)} ${name}`;
|
||||
}
|
||||
return formatAmount(amount);
|
||||
};
|
||||
|
||||
const renderConsumptionRows = (scenarioId) => {
|
||||
if (!tableBody || !tableWrapper || !emptyState) {
|
||||
return;
|
||||
}
|
||||
|
||||
const key = String(scenarioId);
|
||||
const records = consumptionByScenario[key] || [];
|
||||
|
||||
tableBody.innerHTML = "";
|
||||
|
||||
if (!records.length) {
|
||||
emptyState.textContent = "No consumption records for this scenario yet.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
return;
|
||||
}
|
||||
|
||||
emptyState.classList.add("hidden");
|
||||
tableWrapper.classList.remove("hidden");
|
||||
|
||||
records.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${formatMeasurement(
|
||||
record.amount,
|
||||
record.unit_symbol,
|
||||
record.unit_name
|
||||
)}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
tableBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (!value) {
|
||||
if (emptyState && tableWrapper && tableBody) {
|
||||
emptyState.textContent =
|
||||
"Choose a scenario to review its consumption records.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
tableBody.innerHTML = "";
|
||||
}
|
||||
return;
|
||||
}
|
||||
renderConsumptionRows(value);
|
||||
});
|
||||
}
|
||||
|
||||
const submitConsumption = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
|
||||
if (!form) {
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = new FormData(form);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const unitName = formData.get("unit_name");
|
||||
const unitSymbol = formData.get("unit_symbol");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
amount: Number(formData.get("amount")),
|
||||
description: formData.get("description") || null,
|
||||
unit_name: unitName ? String(unitName) : null,
|
||||
unit_symbol: unitSymbol ? String(unitSymbol) : null,
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await fetch("/api/consumption/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(
|
||||
errorDetail.detail || "Unable to add consumption record."
|
||||
);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
|
||||
if (!Array.isArray(consumptionByScenario[mapKey])) {
|
||||
consumptionByScenario[mapKey] = [];
|
||||
}
|
||||
consumptionByScenario[mapKey].push(result);
|
||||
|
||||
form.reset();
|
||||
syncUnitSelection();
|
||||
showFeedback("Consumption record saved.", "success");
|
||||
|
||||
if (filterSelect && filterSelect.value === String(result.scenario_id)) {
|
||||
renderConsumptionRows(filterSelect.value);
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
}
|
||||
};
|
||||
|
||||
if (form) {
|
||||
form.addEventListener("submit", submitConsumption);
|
||||
}
|
||||
|
||||
const syncUnitSelection = () => {
|
||||
if (!unitSelect || !unitSymbolInput) {
|
||||
return;
|
||||
}
|
||||
if (!unitSelect.value && unitSelect.options.length > 0) {
|
||||
const firstOption = Array.from(unitSelect.options).find(
|
||||
(option) => option.value
|
||||
);
|
||||
if (firstOption) {
|
||||
firstOption.selected = true;
|
||||
}
|
||||
}
|
||||
const selectedOption = unitSelect.options[unitSelect.selectedIndex];
|
||||
unitSymbolInput.value = selectedOption
|
||||
? selectedOption.getAttribute("data-symbol") || ""
|
||||
: "";
|
||||
};
|
||||
|
||||
if (unitSelect) {
|
||||
unitSelect.addEventListener("change", syncUnitSelection);
|
||||
syncUnitSelection();
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
renderConsumptionRows(filterSelect.value);
|
||||
}
|
||||
});
|
||||
@@ -1,339 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("costs-payload");
|
||||
let capexByScenario = {};
|
||||
let opexByScenario = {};
|
||||
let currencyOptions = [];
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
if (parsed.capex && typeof parsed.capex === "object") {
|
||||
capexByScenario = parsed.capex;
|
||||
}
|
||||
if (parsed.opex && typeof parsed.opex === "object") {
|
||||
opexByScenario = parsed.opex;
|
||||
}
|
||||
if (Array.isArray(parsed.currency_options)) {
|
||||
currencyOptions = parsed.currency_options;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse cost data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const filterSelect = document.getElementById("costs-scenario-filter");
|
||||
const costsEmptyState = document.getElementById("costs-empty");
|
||||
const costsDataWrapper = document.getElementById("costs-data");
|
||||
const capexTableBody = document.getElementById("capex-table-body");
|
||||
const opexTableBody = document.getElementById("opex-table-body");
|
||||
const capexEmpty = document.getElementById("capex-empty");
|
||||
const opexEmpty = document.getElementById("opex-empty");
|
||||
const capexTotal = document.getElementById("capex-total");
|
||||
const opexTotal = document.getElementById("opex-total");
|
||||
const capexForm = document.getElementById("capex-form");
|
||||
const opexForm = document.getElementById("opex-form");
|
||||
const capexFeedback = document.getElementById("capex-feedback");
|
||||
const opexFeedback = document.getElementById("opex-feedback");
|
||||
const capexFormScenario = document.getElementById("capex-form-scenario");
|
||||
const opexFormScenario = document.getElementById("opex-form-scenario");
|
||||
const capexCurrencySelect = document.getElementById("capex-form-currency");
|
||||
const opexCurrencySelect = document.getElementById("opex-form-currency");
|
||||
|
||||
// If no currency options were injected server-side, fetch from API
|
||||
const fetchCurrencyOptions = async () => {
|
||||
try {
|
||||
const resp = await fetch("/api/currencies/");
|
||||
if (!resp.ok) return;
|
||||
const list = await resp.json();
|
||||
if (Array.isArray(list) && list.length) {
|
||||
currencyOptions = list;
|
||||
populateCurrencySelects();
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn("Unable to fetch currency options", err);
|
||||
}
|
||||
};
|
||||
|
||||
const populateCurrencySelects = () => {
|
||||
const selectElements = [capexCurrencySelect, opexCurrencySelect].filter(Boolean);
|
||||
selectElements.forEach((sel) => {
|
||||
if (!sel) return;
|
||||
// Clear non-empty options except the empty placeholder
|
||||
const placeholder = sel.querySelector("option[value='']");
|
||||
sel.innerHTML = "";
|
||||
if (placeholder) sel.appendChild(placeholder);
|
||||
currencyOptions.forEach((opt) => {
|
||||
const option = document.createElement("option");
|
||||
option.value = opt.id;
|
||||
option.textContent = opt.name || opt.id;
|
||||
sel.appendChild(option);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
// populate from injected options first, then fetch to refresh
|
||||
if (currencyOptions && currencyOptions.length) populateCurrencySelects();
|
||||
else fetchCurrencyOptions();
|
||||
|
||||
const showFeedback = (element, message, type = "success") => {
|
||||
if (!element) {
|
||||
return;
|
||||
}
|
||||
element.textContent = message;
|
||||
element.classList.remove("hidden", "success", "error");
|
||||
element.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = (element) => {
|
||||
if (!element) {
|
||||
return;
|
||||
}
|
||||
element.classList.add("hidden");
|
||||
element.textContent = "";
|
||||
};
|
||||
|
||||
const formatAmount = (value) =>
|
||||
Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: 2,
|
||||
maximumFractionDigits: 2,
|
||||
});
|
||||
|
||||
const formatCurrencyAmount = (value, currencyCode) => {
|
||||
if (!currencyCode) {
|
||||
return formatAmount(value);
|
||||
}
|
||||
try {
|
||||
return new Intl.NumberFormat(undefined, {
|
||||
style: "currency",
|
||||
currency: currencyCode,
|
||||
minimumFractionDigits: 2,
|
||||
maximumFractionDigits: 2,
|
||||
}).format(Number(value));
|
||||
} catch (error) {
|
||||
return `${currencyCode} ${formatAmount(value)}`;
|
||||
}
|
||||
};
|
||||
|
||||
const sumAmount = (records) =>
|
||||
records.reduce((total, record) => total + Number(record.amount || 0), 0);
|
||||
|
||||
const describeTotal = (records) => {
|
||||
if (!records || records.length === 0) {
|
||||
return "—";
|
||||
}
|
||||
const total = sumAmount(records);
|
||||
const currencyCodes = Array.from(
|
||||
new Set(
|
||||
records
|
||||
.map((record) => (record.currency_code || "").trim().toUpperCase())
|
||||
.filter(Boolean)
|
||||
)
|
||||
);
|
||||
|
||||
if (currencyCodes.length === 1) {
|
||||
return formatCurrencyAmount(total, currencyCodes[0]);
|
||||
}
|
||||
return `${formatAmount(total)} (mixed)`;
|
||||
};
|
||||
|
||||
const renderCostTables = (scenarioId) => {
|
||||
if (
|
||||
!capexTableBody ||
|
||||
!opexTableBody ||
|
||||
!capexEmpty ||
|
||||
!opexEmpty ||
|
||||
!capexTotal ||
|
||||
!opexTotal
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
const capexRecords = capexByScenario[String(scenarioId)] || [];
|
||||
const opexRecords = opexByScenario[String(scenarioId)] || [];
|
||||
|
||||
capexTableBody.innerHTML = "";
|
||||
opexTableBody.innerHTML = "";
|
||||
|
||||
if (!capexRecords.length) {
|
||||
capexEmpty.classList.remove("hidden");
|
||||
} else {
|
||||
capexEmpty.classList.add("hidden");
|
||||
capexRecords.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${formatCurrencyAmount(record.amount, record.currency_code)}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
capexTableBody.appendChild(row);
|
||||
});
|
||||
}
|
||||
|
||||
if (!opexRecords.length) {
|
||||
opexEmpty.classList.remove("hidden");
|
||||
} else {
|
||||
opexEmpty.classList.add("hidden");
|
||||
opexRecords.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${formatCurrencyAmount(record.amount, record.currency_code)}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
opexTableBody.appendChild(row);
|
||||
});
|
||||
}
|
||||
|
||||
capexTotal.textContent = describeTotal(capexRecords);
|
||||
opexTotal.textContent = describeTotal(opexRecords);
|
||||
};
|
||||
|
||||
const toggleCostView = (show) => {
|
||||
if (
|
||||
!costsEmptyState ||
|
||||
!costsDataWrapper ||
|
||||
!capexTableBody ||
|
||||
!opexTableBody
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (show) {
|
||||
costsEmptyState.classList.add("hidden");
|
||||
costsDataWrapper.classList.remove("hidden");
|
||||
} else {
|
||||
costsEmptyState.classList.remove("hidden");
|
||||
costsDataWrapper.classList.add("hidden");
|
||||
capexTableBody.innerHTML = "";
|
||||
opexTableBody.innerHTML = "";
|
||||
if (capexTotal) {
|
||||
capexTotal.textContent = "—";
|
||||
}
|
||||
if (opexTotal) {
|
||||
opexTotal.textContent = "—";
|
||||
}
|
||||
if (capexEmpty) {
|
||||
capexEmpty.classList.add("hidden");
|
||||
}
|
||||
if (opexEmpty) {
|
||||
opexEmpty.classList.add("hidden");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const syncFormSelections = (value) => {
|
||||
if (capexFormScenario) {
|
||||
capexFormScenario.value = value || "";
|
||||
}
|
||||
if (opexFormScenario) {
|
||||
opexFormScenario.value = value || "";
|
||||
}
|
||||
};
|
||||
|
||||
const ensureCurrencySelection = (selectElement) => {
|
||||
if (!selectElement || selectElement.value) {
|
||||
return;
|
||||
}
|
||||
const firstOption = selectElement.querySelector(
|
||||
"option[value]:not([value=''])"
|
||||
);
|
||||
if (firstOption && firstOption.value) {
|
||||
selectElement.value = firstOption.value;
|
||||
}
|
||||
};
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (!value) {
|
||||
toggleCostView(false);
|
||||
syncFormSelections("");
|
||||
return;
|
||||
}
|
||||
toggleCostView(true);
|
||||
renderCostTables(value);
|
||||
syncFormSelections(value);
|
||||
});
|
||||
}
|
||||
|
||||
const submitCostEntry = async (event, targetUrl, storageMap, feedbackEl) => {
|
||||
event.preventDefault();
|
||||
hideFeedback(feedbackEl);
|
||||
|
||||
const formData = new FormData(event.target);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const currencyCode = formData.get("currency_code");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
amount: Number(formData.get("amount")),
|
||||
description: formData.get("description") || null,
|
||||
currency_code: currencyCode ? String(currencyCode).toUpperCase() : null,
|
||||
};
|
||||
|
||||
if (!payload.scenario_id) {
|
||||
showFeedback(feedbackEl, "Select a scenario before submitting.", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!payload.currency_code) {
|
||||
showFeedback(feedbackEl, "Choose a currency before submitting.", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(targetUrl, {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(errorDetail.detail || "Unable to save cost entry.");
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
|
||||
if (!Array.isArray(storageMap[mapKey])) {
|
||||
storageMap[mapKey] = [];
|
||||
}
|
||||
|
||||
storageMap[mapKey].push(result);
|
||||
|
||||
event.target.reset();
|
||||
ensureCurrencySelection(event.target.querySelector("select[name='currency_code']"));
|
||||
showFeedback(feedbackEl, "Entry saved successfully.", "success");
|
||||
|
||||
if (filterSelect && filterSelect.value === mapKey) {
|
||||
renderCostTables(mapKey);
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(
|
||||
feedbackEl,
|
||||
error.message || "An unexpected error occurred.",
|
||||
"error"
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if (capexForm) {
|
||||
ensureCurrencySelection(capexCurrencySelect);
|
||||
capexForm.addEventListener("submit", (event) =>
|
||||
submitCostEntry(event, "/api/costs/capex", capexByScenario, capexFeedback)
|
||||
);
|
||||
}
|
||||
|
||||
if (opexForm) {
|
||||
ensureCurrencySelection(opexCurrencySelect);
|
||||
opexForm.addEventListener("submit", (event) =>
|
||||
submitCostEntry(event, "/api/costs/opex", opexByScenario, opexFeedback)
|
||||
);
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
toggleCostView(true);
|
||||
renderCostTables(filterSelect.value);
|
||||
syncFormSelections(filterSelect.value);
|
||||
}
|
||||
});
|
||||
@@ -1,537 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("currencies-data");
|
||||
const editorSection = document.getElementById("currencies-editor");
|
||||
const tableBody = document.getElementById("currencies-table-body");
|
||||
const tableEmptyState = document.getElementById("currencies-table-empty");
|
||||
const metrics = {
|
||||
total: document.getElementById("currency-metric-total"),
|
||||
active: document.getElementById("currency-metric-active"),
|
||||
inactive: document.getElementById("currency-metric-inactive"),
|
||||
};
|
||||
|
||||
const form = document.getElementById("currency-form");
|
||||
const existingSelect = document.getElementById("currency-form-existing");
|
||||
const codeInput = document.getElementById("currency-form-code");
|
||||
const nameInput = document.getElementById("currency-form-name");
|
||||
const symbolInput = document.getElementById("currency-form-symbol");
|
||||
const statusSelect = document.getElementById("currency-form-status");
|
||||
const resetButton = document.getElementById("currency-form-reset");
|
||||
const feedbackElement = document.getElementById("currency-form-feedback");
|
||||
|
||||
const saveButton = form ? form.querySelector("button[type='submit']") : null;
|
||||
|
||||
const uppercaseCode = (value) =>
|
||||
(value || "").toString().trim().toUpperCase();
|
||||
const normalizeSymbol = (value) => {
|
||||
if (value === undefined || value === null) {
|
||||
return null;
|
||||
}
|
||||
const trimmed = String(value).trim();
|
||||
return trimmed ? trimmed : null;
|
||||
};
|
||||
|
||||
const normalizeApiBase = (value) => {
|
||||
if (!value || typeof value !== "string") {
|
||||
return "/api/currencies";
|
||||
}
|
||||
return value.endsWith("/") ? value.slice(0, -1) : value;
|
||||
};
|
||||
|
||||
let currencies = [];
|
||||
let apiBase = "/api/currencies";
|
||||
let defaultCurrencyCode = "USD";
|
||||
|
||||
const buildCurrencyRecord = (record) => {
|
||||
if (!record || typeof record !== "object") {
|
||||
return null;
|
||||
}
|
||||
const code = uppercaseCode(record.code);
|
||||
return {
|
||||
id: record.id ?? null,
|
||||
code,
|
||||
name: record.name || "",
|
||||
symbol: record.symbol || "",
|
||||
is_active: Boolean(record.is_active),
|
||||
is_default: code === defaultCurrencyCode,
|
||||
};
|
||||
};
|
||||
|
||||
const findCurrencyIndex = (code) => {
|
||||
return currencies.findIndex((item) => item.code === code);
|
||||
};
|
||||
|
||||
const upsertCurrency = (record) => {
|
||||
const normalized = buildCurrencyRecord(record);
|
||||
if (!normalized) {
|
||||
return null;
|
||||
}
|
||||
const existingIndex = findCurrencyIndex(normalized.code);
|
||||
if (existingIndex >= 0) {
|
||||
currencies[existingIndex] = normalized;
|
||||
} else {
|
||||
currencies.push(normalized);
|
||||
}
|
||||
currencies.sort((a, b) => a.code.localeCompare(b.code));
|
||||
return normalized;
|
||||
};
|
||||
|
||||
const replaceCurrencyList = (records) => {
|
||||
if (!Array.isArray(records)) {
|
||||
return;
|
||||
}
|
||||
currencies = records
|
||||
.map((record) => buildCurrencyRecord(record))
|
||||
.filter((record) => record !== null)
|
||||
.sort((a, b) => a.code.localeCompare(b.code));
|
||||
};
|
||||
|
||||
const applyPayload = () => {
|
||||
if (!dataElement) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
if (parsed.default_currency_code) {
|
||||
defaultCurrencyCode = uppercaseCode(parsed.default_currency_code);
|
||||
}
|
||||
if (parsed.currency_api_base) {
|
||||
apiBase = normalizeApiBase(parsed.currency_api_base);
|
||||
}
|
||||
if (Array.isArray(parsed.currencies)) {
|
||||
replaceCurrencyList(parsed.currencies);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse currencies payload", error);
|
||||
}
|
||||
};
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackElement) {
|
||||
return;
|
||||
}
|
||||
feedbackElement.textContent = message;
|
||||
feedbackElement.classList.remove("hidden", "success", "error");
|
||||
feedbackElement.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackElement) {
|
||||
return;
|
||||
}
|
||||
feedbackElement.classList.add("hidden");
|
||||
feedbackElement.classList.remove("success", "error");
|
||||
feedbackElement.textContent = "";
|
||||
};
|
||||
|
||||
const setButtonLoading = (button, isLoading) => {
|
||||
if (!button) {
|
||||
return;
|
||||
}
|
||||
button.disabled = isLoading;
|
||||
button.classList.toggle("is-loading", isLoading);
|
||||
};
|
||||
|
||||
const updateMetrics = () => {
|
||||
const total = currencies.length;
|
||||
const active = currencies.filter((item) => item.is_active).length;
|
||||
const inactive = total - active;
|
||||
if (metrics.total) {
|
||||
metrics.total.textContent = String(total);
|
||||
}
|
||||
if (metrics.active) {
|
||||
metrics.active.textContent = String(active);
|
||||
}
|
||||
if (metrics.inactive) {
|
||||
metrics.inactive.textContent = String(inactive);
|
||||
}
|
||||
};
|
||||
|
||||
const renderExistingOptions = (
|
||||
selectedCode = existingSelect ? existingSelect.value : ""
|
||||
) => {
|
||||
if (!existingSelect) {
|
||||
return;
|
||||
}
|
||||
const placeholder = existingSelect.querySelector("option[value='']");
|
||||
const placeholderClone = placeholder ? placeholder.cloneNode(true) : null;
|
||||
existingSelect.innerHTML = "";
|
||||
if (placeholderClone) {
|
||||
existingSelect.appendChild(placeholderClone);
|
||||
}
|
||||
const fragment = document.createDocumentFragment();
|
||||
currencies.forEach((currency) => {
|
||||
const option = document.createElement("option");
|
||||
option.value = currency.code;
|
||||
option.textContent = currency.name
|
||||
? `${currency.name} (${currency.code})`
|
||||
: currency.code;
|
||||
if (selectedCode === currency.code) {
|
||||
option.selected = true;
|
||||
}
|
||||
fragment.appendChild(option);
|
||||
});
|
||||
existingSelect.appendChild(fragment);
|
||||
if (
|
||||
selectedCode &&
|
||||
!currencies.some((item) => item.code === selectedCode)
|
||||
) {
|
||||
existingSelect.value = "";
|
||||
}
|
||||
};
|
||||
|
||||
const renderTable = () => {
|
||||
if (!tableBody) {
|
||||
return;
|
||||
}
|
||||
tableBody.innerHTML = "";
|
||||
if (!currencies.length) {
|
||||
if (tableEmptyState) {
|
||||
tableEmptyState.classList.remove("hidden");
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (tableEmptyState) {
|
||||
tableEmptyState.classList.add("hidden");
|
||||
}
|
||||
const fragment = document.createDocumentFragment();
|
||||
currencies.forEach((currency) => {
|
||||
const row = document.createElement("tr");
|
||||
|
||||
const codeCell = document.createElement("td");
|
||||
codeCell.textContent = currency.code;
|
||||
row.appendChild(codeCell);
|
||||
|
||||
const nameCell = document.createElement("td");
|
||||
nameCell.textContent = currency.name || "—";
|
||||
row.appendChild(nameCell);
|
||||
|
||||
const symbolCell = document.createElement("td");
|
||||
symbolCell.textContent = currency.symbol || "—";
|
||||
row.appendChild(symbolCell);
|
||||
|
||||
const statusCell = document.createElement("td");
|
||||
statusCell.textContent = currency.is_active ? "Active" : "Inactive";
|
||||
if (currency.is_default) {
|
||||
statusCell.textContent += " (Default)";
|
||||
}
|
||||
row.appendChild(statusCell);
|
||||
|
||||
const actionsCell = document.createElement("td");
|
||||
const editButton = document.createElement("button");
|
||||
editButton.type = "button";
|
||||
editButton.className = "btn";
|
||||
editButton.dataset.action = "edit";
|
||||
editButton.dataset.code = currency.code;
|
||||
editButton.textContent = "Edit";
|
||||
editButton.style.marginRight = "0.5rem";
|
||||
|
||||
const toggleButton = document.createElement("button");
|
||||
toggleButton.type = "button";
|
||||
toggleButton.className = "btn";
|
||||
toggleButton.dataset.action = "toggle";
|
||||
toggleButton.dataset.code = currency.code;
|
||||
toggleButton.textContent = currency.is_active ? "Deactivate" : "Activate";
|
||||
if (currency.is_default && currency.is_active) {
|
||||
toggleButton.disabled = true;
|
||||
toggleButton.title = "The default currency must remain active.";
|
||||
}
|
||||
|
||||
actionsCell.appendChild(editButton);
|
||||
actionsCell.appendChild(toggleButton);
|
||||
|
||||
row.appendChild(actionsCell);
|
||||
fragment.appendChild(row);
|
||||
});
|
||||
tableBody.appendChild(fragment);
|
||||
};
|
||||
|
||||
const refreshUI = (selectedCode) => {
|
||||
currencies.sort((a, b) => a.code.localeCompare(b.code));
|
||||
renderTable();
|
||||
renderExistingOptions(selectedCode);
|
||||
updateMetrics();
|
||||
};
|
||||
|
||||
const findCurrency = (code) =>
|
||||
currencies.find((item) => item.code === code) || null;
|
||||
|
||||
const setFormForCurrency = (currency) => {
|
||||
if (!form || !codeInput || !nameInput || !symbolInput || !statusSelect) {
|
||||
return;
|
||||
}
|
||||
if (!currency) {
|
||||
form.reset();
|
||||
if (existingSelect) {
|
||||
existingSelect.value = "";
|
||||
}
|
||||
codeInput.readOnly = false;
|
||||
codeInput.value = "";
|
||||
nameInput.value = "";
|
||||
symbolInput.value = "";
|
||||
statusSelect.disabled = false;
|
||||
statusSelect.value = "true";
|
||||
statusSelect.title = "";
|
||||
return;
|
||||
}
|
||||
|
||||
if (existingSelect) {
|
||||
existingSelect.value = currency.code;
|
||||
}
|
||||
codeInput.readOnly = true;
|
||||
codeInput.value = currency.code;
|
||||
nameInput.value = currency.name || "";
|
||||
symbolInput.value = currency.symbol || "";
|
||||
statusSelect.value = currency.is_active ? "true" : "false";
|
||||
if (currency.is_default) {
|
||||
statusSelect.disabled = true;
|
||||
statusSelect.value = "true";
|
||||
statusSelect.title = "The default currency must remain active.";
|
||||
} else {
|
||||
statusSelect.disabled = false;
|
||||
statusSelect.title = "";
|
||||
}
|
||||
};
|
||||
|
||||
const resetFormState = () => {
|
||||
setFormForCurrency(null);
|
||||
};
|
||||
|
||||
const parseError = async (response, fallbackMessage) => {
|
||||
try {
|
||||
const detail = await response.json();
|
||||
if (detail && typeof detail === "object" && detail.detail) {
|
||||
return detail.detail;
|
||||
}
|
||||
} catch (error) {
|
||||
// ignore JSON parse errors
|
||||
}
|
||||
return fallbackMessage;
|
||||
};
|
||||
|
||||
const fetchCurrenciesFromApi = async () => {
|
||||
const url = `${apiBase}/?include_inactive=true`;
|
||||
try {
|
||||
const response = await fetch(url);
|
||||
if (!response.ok) {
|
||||
return;
|
||||
}
|
||||
const list = await response.json();
|
||||
if (Array.isArray(list)) {
|
||||
replaceCurrencyList(list);
|
||||
refreshUI(existingSelect ? existingSelect.value : undefined);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn("Unable to refresh currency list", error);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSubmit = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
if (!form || !codeInput || !nameInput || !statusSelect) {
|
||||
return;
|
||||
}
|
||||
|
||||
const editingCode = existingSelect
|
||||
? uppercaseCode(existingSelect.value)
|
||||
: "";
|
||||
const codeValue = uppercaseCode(codeInput.value);
|
||||
const nameValue = (nameInput.value || "").trim();
|
||||
const symbolValue = normalizeSymbol(symbolInput ? symbolInput.value : "");
|
||||
const isActive = statusSelect.value !== "false";
|
||||
|
||||
if (!nameValue) {
|
||||
showFeedback("Provide a currency name.", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!editingCode) {
|
||||
if (!codeValue || codeValue.length !== 3) {
|
||||
showFeedback("Provide a three-letter currency code.", "error");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const payload = editingCode
|
||||
? {
|
||||
name: nameValue,
|
||||
symbol: symbolValue,
|
||||
is_active: isActive,
|
||||
}
|
||||
: {
|
||||
code: codeValue,
|
||||
name: nameValue,
|
||||
symbol: symbolValue,
|
||||
is_active: isActive,
|
||||
};
|
||||
|
||||
const targetCode = editingCode || codeValue;
|
||||
const url = editingCode
|
||||
? `${apiBase}/${encodeURIComponent(editingCode)}`
|
||||
: `${apiBase}/`;
|
||||
|
||||
setButtonLoading(saveButton, true);
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: editingCode ? "PUT" : "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const message = await parseError(
|
||||
response,
|
||||
editingCode
|
||||
? "Unable to update the currency."
|
||||
: "Unable to create the currency."
|
||||
);
|
||||
throw new Error(message);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const updated = upsertCurrency(result);
|
||||
defaultCurrencyCode = uppercaseCode(defaultCurrencyCode);
|
||||
refreshUI(updated ? updated.code : targetCode);
|
||||
|
||||
if (editingCode) {
|
||||
showFeedback("Currency updated successfully.");
|
||||
if (updated) {
|
||||
setFormForCurrency(updated);
|
||||
}
|
||||
} else {
|
||||
showFeedback("Currency created successfully.");
|
||||
resetFormState();
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
} finally {
|
||||
setButtonLoading(saveButton, false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleToggle = async (code, button) => {
|
||||
const record = findCurrency(code);
|
||||
if (!record) {
|
||||
return;
|
||||
}
|
||||
hideFeedback();
|
||||
const nextState = !record.is_active;
|
||||
const url = `${apiBase}/${encodeURIComponent(code)}/activation`;
|
||||
setButtonLoading(button, true);
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: "PATCH",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({ is_active: nextState }),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const message = await parseError(
|
||||
response,
|
||||
nextState
|
||||
? "Unable to activate the currency."
|
||||
: "Unable to deactivate the currency."
|
||||
);
|
||||
throw new Error(message);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const updated = upsertCurrency(result);
|
||||
refreshUI(updated ? updated.code : code);
|
||||
if (existingSelect && existingSelect.value === code && updated) {
|
||||
setFormForCurrency(updated);
|
||||
}
|
||||
const actionMessage = nextState
|
||||
? `Currency ${code} activated.`
|
||||
: `Currency ${code} deactivated.`;
|
||||
showFeedback(actionMessage);
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
} finally {
|
||||
setButtonLoading(button, false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleTableClick = (event) => {
|
||||
const button = event.target.closest("button[data-action]");
|
||||
if (!button) {
|
||||
return;
|
||||
}
|
||||
const code = uppercaseCode(button.dataset.code);
|
||||
const action = button.dataset.action;
|
||||
if (!code || !action) {
|
||||
return;
|
||||
}
|
||||
if (action === "edit") {
|
||||
const currency = findCurrency(code);
|
||||
if (currency) {
|
||||
setFormForCurrency(currency);
|
||||
hideFeedback();
|
||||
if (nameInput) {
|
||||
nameInput.focus();
|
||||
}
|
||||
}
|
||||
} else if (action === "toggle") {
|
||||
handleToggle(code, button);
|
||||
}
|
||||
};
|
||||
|
||||
applyPayload();
|
||||
if (editorSection && editorSection.dataset.defaultCode) {
|
||||
defaultCurrencyCode = uppercaseCode(editorSection.dataset.defaultCode);
|
||||
currencies = currencies.map((record) => {
|
||||
return record
|
||||
? {
|
||||
...record,
|
||||
is_default: record.code === defaultCurrencyCode,
|
||||
}
|
||||
: record;
|
||||
});
|
||||
}
|
||||
apiBase = normalizeApiBase(apiBase);
|
||||
|
||||
refreshUI();
|
||||
|
||||
if (form) {
|
||||
form.addEventListener("submit", handleSubmit);
|
||||
}
|
||||
|
||||
if (existingSelect) {
|
||||
existingSelect.addEventListener("change", (event) => {
|
||||
const selectedCode = uppercaseCode(event.target.value);
|
||||
if (!selectedCode) {
|
||||
hideFeedback();
|
||||
resetFormState();
|
||||
return;
|
||||
}
|
||||
const currency = findCurrency(selectedCode);
|
||||
if (currency) {
|
||||
setFormForCurrency(currency);
|
||||
hideFeedback();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (resetButton) {
|
||||
resetButton.addEventListener("click", (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
resetFormState();
|
||||
});
|
||||
}
|
||||
|
||||
if (codeInput) {
|
||||
codeInput.addEventListener("input", () => {
|
||||
const value = uppercaseCode(codeInput.value).slice(0, 3);
|
||||
codeInput.value = value;
|
||||
});
|
||||
}
|
||||
|
||||
if (tableBody) {
|
||||
tableBody.addEventListener("click", handleTableClick);
|
||||
}
|
||||
|
||||
fetchCurrenciesFromApi();
|
||||
});
|
||||
@@ -1,289 +0,0 @@
|
||||
(() => {
|
||||
const dataElement = document.getElementById("dashboard-data");
|
||||
if (!dataElement) {
|
||||
return;
|
||||
}
|
||||
|
||||
let state = {};
|
||||
try {
|
||||
state = JSON.parse(dataElement.textContent || "{}");
|
||||
} catch (error) {
|
||||
console.error("Failed to parse dashboard data", error);
|
||||
return;
|
||||
}
|
||||
|
||||
const statusElement = document.getElementById("dashboard-status");
|
||||
const summaryContainer = document.getElementById("summary-metrics");
|
||||
const summaryEmpty = document.getElementById("summary-empty");
|
||||
const scenarioTableBody = document.querySelector("#scenario-table tbody");
|
||||
const scenarioEmpty = document.getElementById("scenario-table-empty");
|
||||
const overallMetricsList = document.getElementById("overall-metrics");
|
||||
const overallMetricsEmpty = document.getElementById("overall-metrics-empty");
|
||||
const recentList = document.getElementById("recent-simulations");
|
||||
const recentEmpty = document.getElementById("recent-simulations-empty");
|
||||
const maintenanceList = document.getElementById("upcoming-maintenance");
|
||||
const maintenanceEmpty = document.getElementById(
|
||||
"upcoming-maintenance-empty"
|
||||
);
|
||||
const refreshButton = document.getElementById("refresh-dashboard");
|
||||
const costChartCanvas = document.getElementById("cost-chart");
|
||||
const costChartEmpty = document.getElementById("cost-chart-empty");
|
||||
const activityChartCanvas = document.getElementById("activity-chart");
|
||||
const activityChartEmpty = document.getElementById("activity-chart-empty");
|
||||
|
||||
let costChartInstance = null;
|
||||
let activityChartInstance = null;
|
||||
|
||||
const setStatus = (message, variant = "success") => {
|
||||
if (!statusElement) {
|
||||
return;
|
||||
}
|
||||
if (!message) {
|
||||
statusElement.hidden = true;
|
||||
statusElement.textContent = "";
|
||||
statusElement.classList.remove("success", "error");
|
||||
return;
|
||||
}
|
||||
statusElement.textContent = message;
|
||||
statusElement.hidden = false;
|
||||
statusElement.classList.toggle("success", variant === "success");
|
||||
statusElement.classList.toggle("error", variant !== "success");
|
||||
};
|
||||
|
||||
const renderSummaryMetrics = () => {
|
||||
if (!summaryContainer || !summaryEmpty) {
|
||||
return;
|
||||
}
|
||||
summaryContainer.innerHTML = "";
|
||||
const metrics = Array.isArray(state.summary_metrics)
|
||||
? state.summary_metrics
|
||||
: [];
|
||||
metrics.forEach((metric) => {
|
||||
const card = document.createElement("article");
|
||||
card.className = "metric-card";
|
||||
card.innerHTML = `
|
||||
<span class="metric-label">${metric.label}</span>
|
||||
<span class="metric-value">${metric.value}</span>
|
||||
`;
|
||||
summaryContainer.appendChild(card);
|
||||
});
|
||||
summaryEmpty.hidden = metrics.length > 0;
|
||||
};
|
||||
|
||||
const renderScenarioTable = () => {
|
||||
if (!scenarioTableBody || !scenarioEmpty) {
|
||||
return;
|
||||
}
|
||||
scenarioTableBody.innerHTML = "";
|
||||
const rows = Array.isArray(state.scenario_rows) ? state.scenario_rows : [];
|
||||
rows.forEach((row) => {
|
||||
const tr = document.createElement("tr");
|
||||
tr.innerHTML = `
|
||||
<td>${row.scenario_name}</td>
|
||||
<td>${row.parameter_display}</td>
|
||||
<td>${row.equipment_display}</td>
|
||||
<td>${row.capex_display}</td>
|
||||
<td>${row.opex_display}</td>
|
||||
<td>${row.production_display}</td>
|
||||
<td>${row.consumption_display}</td>
|
||||
<td>${row.maintenance_display}</td>
|
||||
<td>${row.iterations_display}</td>
|
||||
<td>${row.simulation_mean_display}</td>
|
||||
`;
|
||||
scenarioTableBody.appendChild(tr);
|
||||
});
|
||||
scenarioEmpty.hidden = rows.length > 0;
|
||||
};
|
||||
|
||||
const renderOverallMetrics = () => {
|
||||
if (!overallMetricsList || !overallMetricsEmpty) {
|
||||
return;
|
||||
}
|
||||
overallMetricsList.innerHTML = "";
|
||||
const items = Array.isArray(state.overall_report_metrics)
|
||||
? state.overall_report_metrics
|
||||
: [];
|
||||
items.forEach((item) => {
|
||||
const li = document.createElement("li");
|
||||
li.className = "metric-list-item";
|
||||
li.textContent = `${item.label}: ${item.value}`;
|
||||
overallMetricsList.appendChild(li);
|
||||
});
|
||||
overallMetricsEmpty.hidden = items.length > 0;
|
||||
};
|
||||
|
||||
const renderRecentSimulations = () => {
|
||||
if (!recentList || !recentEmpty) {
|
||||
return;
|
||||
}
|
||||
recentList.innerHTML = "";
|
||||
const runs = Array.isArray(state.recent_simulations)
|
||||
? state.recent_simulations
|
||||
: [];
|
||||
runs.forEach((run) => {
|
||||
const item = document.createElement("li");
|
||||
item.className = "metric-list-item";
|
||||
item.textContent = `${run.scenario_name} · ${run.iterations_display} iterations · ${run.mean_display}`;
|
||||
recentList.appendChild(item);
|
||||
});
|
||||
recentEmpty.hidden = runs.length > 0;
|
||||
};
|
||||
|
||||
const renderMaintenanceReminders = () => {
|
||||
if (!maintenanceList || !maintenanceEmpty) {
|
||||
return;
|
||||
}
|
||||
maintenanceList.innerHTML = "";
|
||||
const items = Array.isArray(state.upcoming_maintenance)
|
||||
? state.upcoming_maintenance
|
||||
: [];
|
||||
items.forEach((item) => {
|
||||
const li = document.createElement("li");
|
||||
li.innerHTML = `
|
||||
<span class="list-title">${item.equipment_name} · ${item.scenario_name}</span>
|
||||
<span class="list-detail">${item.date_display} · ${item.cost_display} · ${item.description}</span>
|
||||
`;
|
||||
maintenanceList.appendChild(li);
|
||||
});
|
||||
maintenanceEmpty.hidden = items.length > 0;
|
||||
};
|
||||
|
||||
const buildChartConfig = (dataset, overrides = {}) => ({
|
||||
type: dataset.type || "bar",
|
||||
data: {
|
||||
labels: dataset.labels || [],
|
||||
datasets: dataset.datasets || [],
|
||||
},
|
||||
options: Object.assign(
|
||||
{
|
||||
responsive: true,
|
||||
maintainAspectRatio: false,
|
||||
plugins: {
|
||||
legend: { position: "top" },
|
||||
tooltip: { enabled: true },
|
||||
},
|
||||
scales: {
|
||||
x: { stacked: dataset.stacked ?? false },
|
||||
y: { stacked: dataset.stacked ?? false, beginAtZero: true },
|
||||
},
|
||||
},
|
||||
overrides.options || {}
|
||||
),
|
||||
});
|
||||
|
||||
const renderCharts = () => {
|
||||
if (costChartInstance) {
|
||||
costChartInstance.destroy();
|
||||
}
|
||||
if (activityChartInstance) {
|
||||
activityChartInstance.destroy();
|
||||
}
|
||||
|
||||
const costData = state.scenario_cost_chart || {};
|
||||
const activityData = state.scenario_activity_chart || {};
|
||||
|
||||
if (costChartCanvas && state.cost_chart_has_data) {
|
||||
costChartInstance = new Chart(
|
||||
costChartCanvas,
|
||||
buildChartConfig(costData, {
|
||||
options: {
|
||||
scales: {
|
||||
y: {
|
||||
beginAtZero: true,
|
||||
ticks: {
|
||||
callback: (value) =>
|
||||
typeof value === "number"
|
||||
? value.toLocaleString(undefined, {
|
||||
maximumFractionDigits: 0,
|
||||
})
|
||||
: value,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
);
|
||||
if (costChartEmpty) {
|
||||
costChartEmpty.hidden = true;
|
||||
}
|
||||
costChartCanvas.classList.remove("hidden");
|
||||
} else if (costChartEmpty && costChartCanvas) {
|
||||
costChartEmpty.hidden = false;
|
||||
costChartCanvas.classList.add("hidden");
|
||||
}
|
||||
|
||||
if (activityChartCanvas && state.activity_chart_has_data) {
|
||||
activityChartInstance = new Chart(
|
||||
activityChartCanvas,
|
||||
buildChartConfig(activityData, {
|
||||
options: {
|
||||
scales: {
|
||||
y: {
|
||||
beginAtZero: true,
|
||||
ticks: {
|
||||
callback: (value) =>
|
||||
typeof value === "number"
|
||||
? value.toLocaleString(undefined, {
|
||||
maximumFractionDigits: 0,
|
||||
})
|
||||
: value,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
);
|
||||
if (activityChartEmpty) {
|
||||
activityChartEmpty.hidden = true;
|
||||
}
|
||||
activityChartCanvas.classList.remove("hidden");
|
||||
} else if (activityChartEmpty && activityChartCanvas) {
|
||||
activityChartEmpty.hidden = false;
|
||||
activityChartCanvas.classList.add("hidden");
|
||||
}
|
||||
};
|
||||
|
||||
const renderView = () => {
|
||||
renderSummaryMetrics();
|
||||
renderScenarioTable();
|
||||
renderOverallMetrics();
|
||||
renderRecentSimulations();
|
||||
renderMaintenanceReminders();
|
||||
renderCharts();
|
||||
};
|
||||
|
||||
const refreshDashboard = async () => {
|
||||
setStatus("Refreshing dashboard…", "success");
|
||||
if (refreshButton) {
|
||||
refreshButton.classList.add("is-loading");
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch("/ui/dashboard/data", {
|
||||
headers: { "X-Requested-With": "XMLHttpRequest" },
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error("Unable to refresh dashboard data.");
|
||||
}
|
||||
|
||||
const payload = await response.json();
|
||||
state = payload || {};
|
||||
renderView();
|
||||
setStatus("Dashboard updated.", "success");
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
setStatus(error.message || "Failed to refresh dashboard.", "error");
|
||||
} finally {
|
||||
if (refreshButton) {
|
||||
refreshButton.classList.remove("is-loading");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
renderView();
|
||||
|
||||
if (refreshButton) {
|
||||
refreshButton.addEventListener("click", refreshDashboard);
|
||||
}
|
||||
})();
|
||||
@@ -1,145 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("equipment-data");
|
||||
let equipmentByScenario = {};
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
if (parsed.equipment && typeof parsed.equipment === "object") {
|
||||
equipmentByScenario = parsed.equipment;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse equipment data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const filterSelect = document.getElementById("equipment-scenario-filter");
|
||||
const tableWrapper = document.getElementById("equipment-table-wrapper");
|
||||
const tableBody = document.getElementById("equipment-table-body");
|
||||
const emptyState = document.getElementById("equipment-empty");
|
||||
const form = document.getElementById("equipment-form");
|
||||
const feedbackEl = document.getElementById("equipment-feedback");
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
feedbackEl.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.textContent = "";
|
||||
};
|
||||
|
||||
const renderEquipmentRows = (scenarioId) => {
|
||||
if (!tableBody || !tableWrapper || !emptyState) {
|
||||
return;
|
||||
}
|
||||
|
||||
const key = String(scenarioId);
|
||||
const records = equipmentByScenario[key] || [];
|
||||
|
||||
tableBody.innerHTML = "";
|
||||
|
||||
if (!records.length) {
|
||||
emptyState.textContent = "No equipment recorded for this scenario yet.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
return;
|
||||
}
|
||||
|
||||
emptyState.classList.add("hidden");
|
||||
tableWrapper.classList.remove("hidden");
|
||||
|
||||
records.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${record.name || "—"}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
tableBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (!value) {
|
||||
if (emptyState && tableWrapper && tableBody) {
|
||||
emptyState.textContent =
|
||||
"Choose a scenario to review the equipment list.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
tableBody.innerHTML = "";
|
||||
}
|
||||
return;
|
||||
}
|
||||
renderEquipmentRows(value);
|
||||
});
|
||||
}
|
||||
|
||||
const submitEquipment = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
|
||||
if (!form) {
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = new FormData(form);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
name: formData.get("name"),
|
||||
description: formData.get("description") || null,
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await fetch("/api/equipment/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(
|
||||
errorDetail.detail || "Unable to add equipment record."
|
||||
);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
|
||||
if (!Array.isArray(equipmentByScenario[mapKey])) {
|
||||
equipmentByScenario[mapKey] = [];
|
||||
}
|
||||
equipmentByScenario[mapKey].push(result);
|
||||
|
||||
form.reset();
|
||||
showFeedback("Equipment saved.", "success");
|
||||
|
||||
if (filterSelect && filterSelect.value === String(result.scenario_id)) {
|
||||
renderEquipmentRows(filterSelect.value);
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
}
|
||||
};
|
||||
|
||||
if (form) {
|
||||
form.addEventListener("submit", submitEquipment);
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
renderEquipmentRows(filterSelect.value);
|
||||
}
|
||||
});
|
||||
@@ -1,243 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("maintenance-data");
|
||||
let equipmentByScenario = {};
|
||||
let maintenanceByScenario = {};
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
if (parsed.equipment && typeof parsed.equipment === "object") {
|
||||
equipmentByScenario = parsed.equipment;
|
||||
}
|
||||
if (parsed.maintenance && typeof parsed.maintenance === "object") {
|
||||
maintenanceByScenario = parsed.maintenance;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse maintenance data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const filterSelect = document.getElementById("maintenance-scenario-filter");
|
||||
const tableWrapper = document.getElementById("maintenance-table-wrapper");
|
||||
const tableBody = document.getElementById("maintenance-table-body");
|
||||
const emptyState = document.getElementById("maintenance-empty");
|
||||
const form = document.getElementById("maintenance-form");
|
||||
const feedbackEl = document.getElementById("maintenance-feedback");
|
||||
const formScenarioSelect = document.getElementById(
|
||||
"maintenance-form-scenario"
|
||||
);
|
||||
const equipmentSelect = document.getElementById("maintenance-form-equipment");
|
||||
const equipmentEmptyState = document.getElementById(
|
||||
"maintenance-equipment-empty"
|
||||
);
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
feedbackEl.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.textContent = "";
|
||||
};
|
||||
|
||||
const formatCost = (value) =>
|
||||
Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: 2,
|
||||
maximumFractionDigits: 2,
|
||||
});
|
||||
|
||||
const formatDate = (value) => {
|
||||
if (!value) {
|
||||
return "—";
|
||||
}
|
||||
const parsed = new Date(value);
|
||||
if (Number.isNaN(parsed.getTime())) {
|
||||
return value;
|
||||
}
|
||||
return parsed.toLocaleDateString();
|
||||
};
|
||||
|
||||
const renderMaintenanceRows = (scenarioId) => {
|
||||
if (!tableBody || !tableWrapper || !emptyState) {
|
||||
return;
|
||||
}
|
||||
|
||||
const key = String(scenarioId);
|
||||
const records = maintenanceByScenario[key] || [];
|
||||
|
||||
tableBody.innerHTML = "";
|
||||
|
||||
if (!records.length) {
|
||||
emptyState.textContent =
|
||||
"No maintenance entries recorded for this scenario yet.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
return;
|
||||
}
|
||||
|
||||
emptyState.classList.add("hidden");
|
||||
tableWrapper.classList.remove("hidden");
|
||||
|
||||
records.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${formatDate(record.maintenance_date)}</td>
|
||||
<td>${record.equipment_name || "—"}</td>
|
||||
<td>${formatCost(record.cost)}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
tableBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
const populateEquipmentOptions = (scenarioId) => {
|
||||
if (!equipmentSelect) {
|
||||
return;
|
||||
}
|
||||
|
||||
equipmentSelect.innerHTML =
|
||||
'<option value="" disabled selected>Select equipment</option>';
|
||||
equipmentSelect.disabled = true;
|
||||
|
||||
if (equipmentEmptyState) {
|
||||
equipmentEmptyState.classList.add("hidden");
|
||||
}
|
||||
|
||||
if (!scenarioId) {
|
||||
return;
|
||||
}
|
||||
|
||||
const list = equipmentByScenario[String(scenarioId)] || [];
|
||||
if (!list.length) {
|
||||
if (equipmentEmptyState) {
|
||||
equipmentEmptyState.textContent =
|
||||
"Add equipment for this scenario before scheduling maintenance.";
|
||||
equipmentEmptyState.classList.remove("hidden");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
list.forEach((item) => {
|
||||
const option = document.createElement("option");
|
||||
option.value = item.id;
|
||||
option.textContent = item.name || `Equipment ${item.id}`;
|
||||
equipmentSelect.appendChild(option);
|
||||
});
|
||||
|
||||
equipmentSelect.disabled = false;
|
||||
};
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (!value) {
|
||||
if (emptyState && tableWrapper && tableBody) {
|
||||
emptyState.textContent =
|
||||
"Choose a scenario to review upcoming or completed maintenance.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
tableBody.innerHTML = "";
|
||||
}
|
||||
return;
|
||||
}
|
||||
renderMaintenanceRows(value);
|
||||
});
|
||||
}
|
||||
|
||||
if (formScenarioSelect) {
|
||||
formScenarioSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
populateEquipmentOptions(value);
|
||||
});
|
||||
}
|
||||
|
||||
const submitMaintenance = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
|
||||
if (!form) {
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = new FormData(form);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const equipmentId = formData.get("equipment_id");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
equipment_id: equipmentId ? Number(equipmentId) : null,
|
||||
maintenance_date: formData.get("maintenance_date"),
|
||||
cost: Number(formData.get("cost")),
|
||||
description: formData.get("description") || null,
|
||||
};
|
||||
|
||||
if (!payload.scenario_id || !payload.equipment_id) {
|
||||
showFeedback(
|
||||
"Select a scenario and equipment before submitting.",
|
||||
"error"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch("/api/maintenance/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(
|
||||
errorDetail.detail || "Unable to add maintenance entry."
|
||||
);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
|
||||
if (!Array.isArray(maintenanceByScenario[mapKey])) {
|
||||
maintenanceByScenario[mapKey] = [];
|
||||
}
|
||||
|
||||
const equipmentList = equipmentByScenario[mapKey] || [];
|
||||
const matchedEquipment = equipmentList.find(
|
||||
(item) => Number(item.id) === Number(result.equipment_id)
|
||||
);
|
||||
result.equipment_name = matchedEquipment ? matchedEquipment.name : "";
|
||||
|
||||
maintenanceByScenario[mapKey].push(result);
|
||||
|
||||
form.reset();
|
||||
populateEquipmentOptions(null);
|
||||
showFeedback("Maintenance entry saved.", "success");
|
||||
|
||||
if (filterSelect && filterSelect.value === String(result.scenario_id)) {
|
||||
renderMaintenanceRows(filterSelect.value);
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
}
|
||||
};
|
||||
|
||||
if (form) {
|
||||
form.addEventListener("submit", submitMaintenance);
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
renderMaintenanceRows(filterSelect.value);
|
||||
}
|
||||
|
||||
if (formScenarioSelect && formScenarioSelect.value) {
|
||||
populateEquipmentOptions(formScenarioSelect.value);
|
||||
}
|
||||
});
|
||||
@@ -1,124 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("parameters-data");
|
||||
let parametersByScenario = {};
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
parametersByScenario = parsed;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse parameter data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const form = document.getElementById("parameter-form");
|
||||
const scenarioSelect = /** @type {HTMLSelectElement | null} */ (
|
||||
document.getElementById("scenario_id")
|
||||
);
|
||||
const nameInput = /** @type {HTMLInputElement | null} */ (
|
||||
document.getElementById("name")
|
||||
);
|
||||
const valueInput = /** @type {HTMLInputElement | null} */ (
|
||||
document.getElementById("value")
|
||||
);
|
||||
const feedback = document.getElementById("parameter-feedback");
|
||||
const tableBody = document.getElementById("parameter-table-body");
|
||||
|
||||
const setFeedback = (message, variant) => {
|
||||
if (!feedback) {
|
||||
return;
|
||||
}
|
||||
feedback.textContent = message;
|
||||
feedback.classList.remove("success", "error");
|
||||
if (variant) {
|
||||
feedback.classList.add(variant);
|
||||
}
|
||||
};
|
||||
|
||||
const renderTable = (scenarioId) => {
|
||||
if (!tableBody) {
|
||||
return;
|
||||
}
|
||||
tableBody.innerHTML = "";
|
||||
const rows = parametersByScenario[String(scenarioId)] || [];
|
||||
if (!rows.length) {
|
||||
const emptyRow = document.createElement("tr");
|
||||
emptyRow.id = "parameter-empty-state";
|
||||
emptyRow.innerHTML =
|
||||
'<td colspan="4">No parameters recorded for this scenario yet.</td>';
|
||||
tableBody.appendChild(emptyRow);
|
||||
return;
|
||||
}
|
||||
rows.forEach((row) => {
|
||||
const tr = document.createElement("tr");
|
||||
tr.innerHTML = `
|
||||
<td>${row.name}</td>
|
||||
<td>${row.value}</td>
|
||||
<td>${row.distribution_type ?? "—"}</td>
|
||||
<td>${
|
||||
row.distribution_parameters
|
||||
? JSON.stringify(row.distribution_parameters)
|
||||
: "—"
|
||||
}</td>
|
||||
`;
|
||||
tableBody.appendChild(tr);
|
||||
});
|
||||
};
|
||||
|
||||
if (scenarioSelect) {
|
||||
renderTable(scenarioSelect.value);
|
||||
scenarioSelect.addEventListener("change", () =>
|
||||
renderTable(scenarioSelect.value)
|
||||
);
|
||||
}
|
||||
|
||||
if (!form || !scenarioSelect || !nameInput || !valueInput) {
|
||||
return;
|
||||
}
|
||||
|
||||
form.addEventListener("submit", async (event) => {
|
||||
event.preventDefault();
|
||||
|
||||
const scenarioId = scenarioSelect.value;
|
||||
const payload = {
|
||||
scenario_id: Number(scenarioId),
|
||||
name: nameInput.value.trim(),
|
||||
value: Number(valueInput.value),
|
||||
};
|
||||
|
||||
if (!payload.name) {
|
||||
setFeedback("Parameter name is required.", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Number.isFinite(payload.value)) {
|
||||
setFeedback("Enter a numeric value.", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
const response = await fetch("/api/parameters/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text();
|
||||
setFeedback(`Error saving parameter: ${errorText}`, "error");
|
||||
return;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const scenarioKey = String(scenarioId);
|
||||
parametersByScenario[scenarioKey] = parametersByScenario[scenarioKey] || [];
|
||||
parametersByScenario[scenarioKey].push(data);
|
||||
|
||||
form.reset();
|
||||
scenarioSelect.value = scenarioKey;
|
||||
renderTable(scenarioKey);
|
||||
nameInput.focus();
|
||||
setFeedback("Parameter saved.", "success");
|
||||
});
|
||||
});
|
||||
@@ -1,204 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("production-data");
|
||||
let data = { scenarios: [], production: {}, unit_options: [] };
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
data = {
|
||||
scenarios: Array.isArray(parsed.scenarios) ? parsed.scenarios : [],
|
||||
production:
|
||||
parsed.production && typeof parsed.production === "object"
|
||||
? parsed.production
|
||||
: {},
|
||||
unit_options: Array.isArray(parsed.unit_options)
|
||||
? parsed.unit_options
|
||||
: [],
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse production data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const productionByScenario = data.production;
|
||||
const filterSelect = document.getElementById("production-scenario-filter");
|
||||
const tableWrapper = document.getElementById("production-table-wrapper");
|
||||
const tableBody = document.getElementById("production-table-body");
|
||||
const emptyState = document.getElementById("production-empty");
|
||||
const form = document.getElementById("production-form");
|
||||
const feedbackEl = document.getElementById("production-feedback");
|
||||
const unitSelect = document.getElementById("production-form-unit");
|
||||
const unitSymbolInput = document.getElementById("production-form-unit-symbol");
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
feedbackEl.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.textContent = "";
|
||||
};
|
||||
|
||||
const formatAmount = (value) =>
|
||||
Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: 2,
|
||||
maximumFractionDigits: 2,
|
||||
});
|
||||
|
||||
const formatMeasurement = (amount, symbol, name) => {
|
||||
if (symbol) {
|
||||
return `${formatAmount(amount)} ${symbol}`;
|
||||
}
|
||||
if (name) {
|
||||
return `${formatAmount(amount)} ${name}`;
|
||||
}
|
||||
return formatAmount(amount);
|
||||
};
|
||||
|
||||
const renderProductionRows = (scenarioId) => {
|
||||
if (!tableBody || !tableWrapper || !emptyState) {
|
||||
return;
|
||||
}
|
||||
|
||||
const key = String(scenarioId);
|
||||
const records = productionByScenario[key] || [];
|
||||
|
||||
tableBody.innerHTML = "";
|
||||
|
||||
if (!records.length) {
|
||||
emptyState.textContent =
|
||||
"No production output recorded for this scenario yet.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
return;
|
||||
}
|
||||
|
||||
emptyState.classList.add("hidden");
|
||||
tableWrapper.classList.remove("hidden");
|
||||
|
||||
records.forEach((record) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${formatMeasurement(
|
||||
record.amount,
|
||||
record.unit_symbol,
|
||||
record.unit_name
|
||||
)}</td>
|
||||
<td>${record.description || "—"}</td>
|
||||
`;
|
||||
tableBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (!value) {
|
||||
if (emptyState && tableWrapper && tableBody) {
|
||||
emptyState.textContent =
|
||||
"Choose a scenario to review its production output.";
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
tableBody.innerHTML = "";
|
||||
}
|
||||
return;
|
||||
}
|
||||
renderProductionRows(value);
|
||||
});
|
||||
}
|
||||
|
||||
const submitProduction = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback();
|
||||
|
||||
if (!form) {
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = new FormData(form);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const unitName = formData.get("unit_name");
|
||||
const unitSymbol = formData.get("unit_symbol");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
amount: Number(formData.get("amount")),
|
||||
description: formData.get("description") || null,
|
||||
unit_name: unitName ? String(unitName) : null,
|
||||
unit_symbol: unitSymbol ? String(unitSymbol) : null,
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await fetch("/api/production/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(
|
||||
errorDetail.detail || "Unable to add production output record."
|
||||
);
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
|
||||
if (!Array.isArray(productionByScenario[mapKey])) {
|
||||
productionByScenario[mapKey] = [];
|
||||
}
|
||||
productionByScenario[mapKey].push(result);
|
||||
|
||||
form.reset();
|
||||
syncUnitSelection();
|
||||
showFeedback("Production output saved.", "success");
|
||||
|
||||
if (filterSelect && filterSelect.value === String(result.scenario_id)) {
|
||||
renderProductionRows(filterSelect.value);
|
||||
}
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
}
|
||||
};
|
||||
|
||||
if (form) {
|
||||
form.addEventListener("submit", submitProduction);
|
||||
}
|
||||
|
||||
const syncUnitSelection = () => {
|
||||
if (!unitSelect || !unitSymbolInput) {
|
||||
return;
|
||||
}
|
||||
if (!unitSelect.value && unitSelect.options.length > 0) {
|
||||
const firstOption = Array.from(unitSelect.options).find(
|
||||
(option) => option.value
|
||||
);
|
||||
if (firstOption) {
|
||||
firstOption.selected = true;
|
||||
}
|
||||
}
|
||||
const selectedOption = unitSelect.options[unitSelect.selectedIndex];
|
||||
unitSymbolInput.value = selectedOption
|
||||
? selectedOption.getAttribute("data-symbol") || ""
|
||||
: "";
|
||||
};
|
||||
|
||||
if (unitSelect) {
|
||||
unitSelect.addEventListener("change", syncUnitSelection);
|
||||
syncUnitSelection();
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
renderProductionRows(filterSelect.value);
|
||||
}
|
||||
});
|
||||
@@ -1,149 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("reporting-data");
|
||||
let reportingSummaries = [];
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "[]");
|
||||
if (Array.isArray(parsed)) {
|
||||
reportingSummaries = parsed;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse reporting data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const REPORT_FIELDS = [
|
||||
{ key: "iterations", label: "Iterations", decimals: 0 },
|
||||
{ key: "mean", label: "Mean Result", decimals: 2 },
|
||||
{ key: "variance", label: "Variance", decimals: 2 },
|
||||
{ key: "std_dev", label: "Std. Dev", decimals: 2 },
|
||||
{ key: "percentile_5", label: "Percentile 5", decimals: 2 },
|
||||
{ key: "percentile_95", label: "Percentile 95", decimals: 2 },
|
||||
{ key: "value_at_risk_95", label: "Value at Risk (95%)", decimals: 2 },
|
||||
{
|
||||
key: "expected_shortfall_95",
|
||||
label: "Expected Shortfall (95%)",
|
||||
decimals: 2,
|
||||
},
|
||||
];
|
||||
|
||||
const tableWrapper = document.getElementById("reporting-table-wrapper");
|
||||
const tableBody = document.getElementById("reporting-table-body");
|
||||
const emptyState = document.getElementById("reporting-empty");
|
||||
const refreshButton = document.getElementById("report-refresh");
|
||||
const feedbackEl = document.getElementById("report-feedback");
|
||||
|
||||
const formatNumber = (value, decimals = 2) => {
|
||||
if (value === null || value === undefined || Number.isNaN(Number(value))) {
|
||||
return "—";
|
||||
}
|
||||
return Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: decimals,
|
||||
maximumFractionDigits: decimals,
|
||||
});
|
||||
};
|
||||
|
||||
const showFeedback = (message, type = "success") => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
feedbackEl.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = () => {
|
||||
if (!feedbackEl) {
|
||||
return;
|
||||
}
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.textContent = "";
|
||||
};
|
||||
|
||||
const renderReportingTable = (summaryData) => {
|
||||
if (!tableBody || !tableWrapper || !emptyState) {
|
||||
return;
|
||||
}
|
||||
|
||||
tableBody.innerHTML = "";
|
||||
|
||||
if (!summaryData.length) {
|
||||
emptyState.classList.remove("hidden");
|
||||
tableWrapper.classList.add("hidden");
|
||||
return;
|
||||
}
|
||||
|
||||
emptyState.classList.add("hidden");
|
||||
tableWrapper.classList.remove("hidden");
|
||||
|
||||
summaryData.forEach((entry) => {
|
||||
const row = document.createElement("tr");
|
||||
const scenarioCell = document.createElement("td");
|
||||
scenarioCell.textContent = entry.scenario_name;
|
||||
row.appendChild(scenarioCell);
|
||||
|
||||
REPORT_FIELDS.forEach((field) => {
|
||||
const cell = document.createElement("td");
|
||||
const source = field.key === "iterations" ? entry : entry.summary || {};
|
||||
cell.textContent = formatNumber(source[field.key], field.decimals);
|
||||
row.appendChild(cell);
|
||||
});
|
||||
|
||||
tableBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
const refreshMetrics = async () => {
|
||||
hideFeedback();
|
||||
showFeedback("Refreshing metrics…", "success");
|
||||
|
||||
try {
|
||||
const response = await fetch("/ui/reporting", {
|
||||
method: "GET",
|
||||
headers: { "X-Requested-With": "XMLHttpRequest" },
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error("Unable to refresh reporting data.");
|
||||
}
|
||||
|
||||
const text = await response.text();
|
||||
const parser = new DOMParser();
|
||||
const doc = parser.parseFromString(text, "text/html");
|
||||
const newTable = doc.querySelector("#reporting-table-wrapper");
|
||||
const newFeedback = doc.querySelector("#report-feedback");
|
||||
|
||||
if (!newTable) {
|
||||
throw new Error("Unexpected response while refreshing.");
|
||||
}
|
||||
|
||||
const newEmptyState = doc.querySelector("#reporting-empty");
|
||||
|
||||
if (emptyState && newEmptyState) {
|
||||
emptyState.className = newEmptyState.className;
|
||||
emptyState.textContent = newEmptyState.textContent;
|
||||
}
|
||||
|
||||
if (tableWrapper) {
|
||||
tableWrapper.className = newTable.className;
|
||||
tableWrapper.innerHTML = newTable.innerHTML;
|
||||
}
|
||||
|
||||
if (newFeedback && feedbackEl) {
|
||||
feedbackEl.className = newFeedback.className;
|
||||
feedbackEl.textContent = newFeedback.textContent;
|
||||
}
|
||||
|
||||
showFeedback("Metrics refreshed.", "success");
|
||||
} catch (error) {
|
||||
showFeedback(error.message || "An unexpected error occurred.", "error");
|
||||
}
|
||||
};
|
||||
|
||||
renderReportingTable(reportingSummaries);
|
||||
|
||||
if (refreshButton) {
|
||||
refreshButton.addEventListener("click", refreshMetrics);
|
||||
}
|
||||
});
|
||||
@@ -1,78 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const form = document.getElementById("scenario-form");
|
||||
if (!form) {
|
||||
return;
|
||||
}
|
||||
|
||||
const nameInput = /** @type {HTMLInputElement | null} */ (
|
||||
document.getElementById("name")
|
||||
);
|
||||
const descriptionInput = /** @type {HTMLInputElement | null} */ (
|
||||
document.getElementById("description")
|
||||
);
|
||||
const table = document.getElementById("scenario-table");
|
||||
const tableBody = document.getElementById("scenario-table-body");
|
||||
const emptyState = document.getElementById("empty-state");
|
||||
|
||||
form.addEventListener("submit", async (event) => {
|
||||
event.preventDefault();
|
||||
|
||||
if (!nameInput || !descriptionInput) {
|
||||
return;
|
||||
}
|
||||
|
||||
const payload = {
|
||||
name: nameInput.value.trim(),
|
||||
description: descriptionInput.value.trim() || null,
|
||||
};
|
||||
|
||||
if (!payload.name) {
|
||||
return;
|
||||
}
|
||||
|
||||
const response = await fetch("/api/scenarios/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text();
|
||||
console.error("Scenario creation failed", errorText);
|
||||
return;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const row = document.createElement("tr");
|
||||
row.dataset.scenarioId = String(data.id);
|
||||
row.innerHTML = `
|
||||
<td>${data.name}</td>
|
||||
<td>${data.description ?? "—"}</td>
|
||||
`;
|
||||
|
||||
if (emptyState) {
|
||||
emptyState.remove();
|
||||
}
|
||||
|
||||
if (table) {
|
||||
table.classList.remove("hidden");
|
||||
table.removeAttribute("aria-hidden");
|
||||
}
|
||||
|
||||
if (tableBody) {
|
||||
tableBody.appendChild(row);
|
||||
}
|
||||
|
||||
form.reset();
|
||||
nameInput.focus();
|
||||
|
||||
const feedback = document.getElementById("feedback");
|
||||
if (feedback) {
|
||||
feedback.textContent = `Scenario "${data.name}" created successfully.`;
|
||||
feedback.classList.remove("hidden");
|
||||
setTimeout(() => {
|
||||
feedback.classList.add("hidden");
|
||||
}, 3000);
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -1,200 +0,0 @@
|
||||
(function () {
|
||||
const dataScript = document.getElementById("theme-settings-data");
|
||||
const form = document.getElementById("theme-settings-form");
|
||||
const feedbackEl = document.getElementById("theme-settings-feedback");
|
||||
const resetBtn = document.getElementById("theme-settings-reset");
|
||||
const panel = document.getElementById("theme-settings");
|
||||
|
||||
if (!dataScript || !form || !feedbackEl || !panel) {
|
||||
return;
|
||||
}
|
||||
|
||||
const apiUrl = panel.getAttribute("data-api");
|
||||
if (!apiUrl) {
|
||||
return;
|
||||
}
|
||||
|
||||
const parsed = JSON.parse(dataScript.textContent || "{}");
|
||||
const currentValues = { ...(parsed.variables || {}) };
|
||||
const defaultValues = parsed.defaults || {};
|
||||
let envOverrides = { ...(parsed.envOverrides || {}) };
|
||||
|
||||
const previewElements = new Map();
|
||||
const inputs = Array.from(form.querySelectorAll(".color-value-input"));
|
||||
|
||||
inputs.forEach((input) => {
|
||||
const key = input.name;
|
||||
const field = input.closest(".color-form-field");
|
||||
const preview = field ? field.querySelector(".color-preview") : null;
|
||||
if (preview) {
|
||||
previewElements.set(input, preview);
|
||||
}
|
||||
|
||||
if (Object.prototype.hasOwnProperty.call(envOverrides, key)) {
|
||||
const overrideValue = envOverrides[key];
|
||||
input.value = overrideValue;
|
||||
input.disabled = true;
|
||||
input.setAttribute("aria-disabled", "true");
|
||||
input.dataset.envOverride = "true";
|
||||
if (field) {
|
||||
field.classList.add("is-env-override");
|
||||
}
|
||||
if (preview) {
|
||||
preview.style.background = overrideValue;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
input.addEventListener("input", () => {
|
||||
const previewEl = previewElements.get(input);
|
||||
if (previewEl) {
|
||||
previewEl.style.background = input.value || defaultValues[key] || "";
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
function setFeedback(message, type) {
|
||||
feedbackEl.textContent = message;
|
||||
feedbackEl.classList.remove("hidden", "success", "error");
|
||||
if (type) {
|
||||
feedbackEl.classList.add(type);
|
||||
}
|
||||
}
|
||||
|
||||
function clearFeedback() {
|
||||
feedbackEl.textContent = "";
|
||||
feedbackEl.classList.add("hidden");
|
||||
feedbackEl.classList.remove("success", "error");
|
||||
}
|
||||
|
||||
function updateRootVariables(values) {
|
||||
if (!values) {
|
||||
return;
|
||||
}
|
||||
const root = document.documentElement;
|
||||
Object.entries(values).forEach(([key, value]) => {
|
||||
if (typeof key === "string" && typeof value === "string") {
|
||||
root.style.setProperty(key, value);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function resetTo(source) {
|
||||
inputs.forEach((input) => {
|
||||
const key = input.name;
|
||||
if (input.disabled) {
|
||||
const previewEl = previewElements.get(input);
|
||||
const fallback = envOverrides[key] || currentValues[key];
|
||||
if (previewEl && fallback) {
|
||||
previewEl.style.background = fallback;
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (Object.prototype.hasOwnProperty.call(source, key)) {
|
||||
input.value = source[key];
|
||||
const previewEl = previewElements.get(input);
|
||||
if (previewEl) {
|
||||
previewEl.style.background = source[key];
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Initialize previews to current values after page load.
|
||||
resetTo(currentValues);
|
||||
|
||||
resetBtn?.addEventListener("click", () => {
|
||||
resetTo(defaultValues);
|
||||
clearFeedback();
|
||||
setFeedback("Reverted to default values. Submit to save.", "success");
|
||||
});
|
||||
|
||||
form.addEventListener("submit", async (event) => {
|
||||
event.preventDefault();
|
||||
clearFeedback();
|
||||
|
||||
const payload = {};
|
||||
inputs.forEach((input) => {
|
||||
if (input.disabled) {
|
||||
return;
|
||||
}
|
||||
payload[input.name] = input.value.trim();
|
||||
});
|
||||
|
||||
try {
|
||||
const response = await fetch(apiUrl, {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({ variables: payload }),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
let detail = "Unable to save theme settings.";
|
||||
try {
|
||||
const errorData = await response.json();
|
||||
if (errorData?.detail) {
|
||||
detail = Array.isArray(errorData.detail)
|
||||
? errorData.detail.map((item) => item.msg || item).join("; ")
|
||||
: errorData.detail;
|
||||
}
|
||||
} catch (parseError) {
|
||||
// Ignore JSON parse errors and use default detail message.
|
||||
}
|
||||
setFeedback(detail, "error");
|
||||
return;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
const variables = data?.variables || {};
|
||||
const responseOverrides = data?.env_overrides || {};
|
||||
|
||||
Object.assign(currentValues, variables);
|
||||
envOverrides = { ...responseOverrides };
|
||||
|
||||
inputs.forEach((input) => {
|
||||
const key = input.name;
|
||||
const field = input.closest(".color-form-field");
|
||||
const previewEl = previewElements.get(input);
|
||||
const isOverride = Object.prototype.hasOwnProperty.call(
|
||||
envOverrides,
|
||||
key,
|
||||
);
|
||||
|
||||
if (isOverride) {
|
||||
const overrideValue = envOverrides[key];
|
||||
input.value = overrideValue;
|
||||
if (!input.disabled) {
|
||||
input.disabled = true;
|
||||
input.setAttribute("aria-disabled", "true");
|
||||
}
|
||||
if (field) {
|
||||
field.classList.add("is-env-override");
|
||||
}
|
||||
if (previewEl) {
|
||||
previewEl.style.background = overrideValue;
|
||||
}
|
||||
} else if (input.disabled) {
|
||||
input.disabled = false;
|
||||
input.removeAttribute("aria-disabled");
|
||||
if (field) {
|
||||
field.classList.remove("is-env-override");
|
||||
}
|
||||
if (
|
||||
previewEl &&
|
||||
Object.prototype.hasOwnProperty.call(variables, key)
|
||||
) {
|
||||
previewEl.style.background = variables[key];
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
updateRootVariables(variables);
|
||||
resetTo(variables);
|
||||
setFeedback("Theme colors updated successfully.", "success");
|
||||
} catch (error) {
|
||||
setFeedback("Network error: unable to save settings.", "error");
|
||||
}
|
||||
});
|
||||
})();
|
||||
@@ -1,354 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", () => {
|
||||
const dataElement = document.getElementById("simulations-data");
|
||||
let simulationScenarios = [];
|
||||
let initialRuns = [];
|
||||
|
||||
if (dataElement) {
|
||||
try {
|
||||
const parsed = JSON.parse(dataElement.textContent || "{}");
|
||||
if (parsed && typeof parsed === "object") {
|
||||
if (Array.isArray(parsed.scenarios)) {
|
||||
simulationScenarios = parsed.scenarios;
|
||||
}
|
||||
if (Array.isArray(parsed.runs)) {
|
||||
initialRuns = parsed.runs;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("Unable to parse simulations data", error);
|
||||
}
|
||||
}
|
||||
|
||||
const SUMMARY_FIELDS = [
|
||||
{ key: "count", label: "Iterations", decimals: 0 },
|
||||
{ key: "mean", label: "Mean Result", decimals: 2 },
|
||||
{ key: "median", label: "Median Result", decimals: 2 },
|
||||
{ key: "min", label: "Minimum", decimals: 2 },
|
||||
{ key: "max", label: "Maximum", decimals: 2 },
|
||||
{ key: "variance", label: "Variance", decimals: 2 },
|
||||
{ key: "std_dev", label: "Standard Deviation", decimals: 2 },
|
||||
{ key: "percentile_5", label: "Percentile 5", decimals: 2 },
|
||||
{ key: "percentile_95", label: "Percentile 95", decimals: 2 },
|
||||
{ key: "value_at_risk_95", label: "Value at Risk (95%)", decimals: 2 },
|
||||
{
|
||||
key: "expected_shortfall_95",
|
||||
label: "Expected Shortfall (95%)",
|
||||
decimals: 2,
|
||||
},
|
||||
];
|
||||
const SAMPLE_RESULT_LIMIT = 20;
|
||||
|
||||
const filterSelect = document.getElementById("simulations-scenario-filter");
|
||||
const overviewWrapper = document.getElementById(
|
||||
"simulations-overview-wrapper"
|
||||
);
|
||||
const overviewBody = document.getElementById("simulations-overview-body");
|
||||
const overviewEmpty = document.getElementById("simulations-overview-empty");
|
||||
const emptyState = document.getElementById("simulations-empty");
|
||||
const summaryWrapper = document.getElementById("simulations-summary-wrapper");
|
||||
const summaryBody = document.getElementById("simulations-summary-body");
|
||||
const summaryEmpty = document.getElementById("simulations-summary-empty");
|
||||
const resultsWrapper = document.getElementById("simulations-results-wrapper");
|
||||
const resultsBody = document.getElementById("simulations-results-body");
|
||||
const resultsEmpty = document.getElementById("simulations-results-empty");
|
||||
const simulationForm = document.getElementById("simulation-run-form");
|
||||
const simulationFeedback = document.getElementById("simulation-feedback");
|
||||
const formScenarioSelect = document.getElementById(
|
||||
"simulation-form-scenario"
|
||||
);
|
||||
|
||||
const simulationRunsMap = Object.create(null);
|
||||
|
||||
const getScenarioName = (id) => {
|
||||
const match = simulationScenarios.find(
|
||||
(scenario) => String(scenario.id) === String(id)
|
||||
);
|
||||
return match ? match.name : `Scenario ${id}`;
|
||||
};
|
||||
|
||||
const formatNumber = (value, decimals = 2) => {
|
||||
if (value === null || value === undefined || Number.isNaN(Number(value))) {
|
||||
return "—";
|
||||
}
|
||||
return Number(value).toLocaleString(undefined, {
|
||||
minimumFractionDigits: decimals,
|
||||
maximumFractionDigits: decimals,
|
||||
});
|
||||
};
|
||||
|
||||
const showFeedback = (element, message, type = "success") => {
|
||||
if (!element) {
|
||||
return;
|
||||
}
|
||||
element.textContent = message;
|
||||
element.classList.remove("hidden", "success", "error");
|
||||
element.classList.add(type);
|
||||
};
|
||||
|
||||
const hideFeedback = (element) => {
|
||||
if (!element) {
|
||||
return;
|
||||
}
|
||||
element.classList.add("hidden");
|
||||
element.textContent = "";
|
||||
};
|
||||
|
||||
const initializeRunsMap = () => {
|
||||
simulationScenarios.forEach((scenario) => {
|
||||
const key = String(scenario.id);
|
||||
simulationRunsMap[key] = {
|
||||
scenario_id: scenario.id,
|
||||
scenario_name: scenario.name,
|
||||
iterations: 0,
|
||||
summary: null,
|
||||
sample_results: [],
|
||||
};
|
||||
});
|
||||
|
||||
initialRuns.forEach((run) => {
|
||||
const key = String(run.scenario_id);
|
||||
simulationRunsMap[key] = {
|
||||
scenario_id: run.scenario_id,
|
||||
scenario_name: run.scenario_name || getScenarioName(key),
|
||||
iterations: run.iterations || 0,
|
||||
summary: run.summary || null,
|
||||
sample_results: Array.isArray(run.sample_results)
|
||||
? run.sample_results
|
||||
: [],
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
const renderOverviewTable = () => {
|
||||
if (!overviewBody) {
|
||||
return;
|
||||
}
|
||||
|
||||
overviewBody.innerHTML = "";
|
||||
|
||||
if (!simulationScenarios.length) {
|
||||
if (overviewWrapper) {
|
||||
overviewWrapper.classList.add("hidden");
|
||||
}
|
||||
if (overviewEmpty) {
|
||||
overviewEmpty.classList.remove("hidden");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (overviewWrapper) {
|
||||
overviewWrapper.classList.remove("hidden");
|
||||
}
|
||||
if (overviewEmpty) {
|
||||
overviewEmpty.classList.add("hidden");
|
||||
}
|
||||
|
||||
simulationScenarios.forEach((scenario) => {
|
||||
const key = String(scenario.id);
|
||||
const run = simulationRunsMap[key];
|
||||
const iterations = run && run.iterations ? run.iterations : 0;
|
||||
const meanValue =
|
||||
iterations && run && run.summary ? run.summary.mean : null;
|
||||
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${scenario.name}</td>
|
||||
<td>${iterations || 0}</td>
|
||||
<td>${iterations ? formatNumber(meanValue) : "—"}</td>
|
||||
`;
|
||||
overviewBody.appendChild(row);
|
||||
});
|
||||
};
|
||||
|
||||
const renderScenarioDetails = (scenarioId) => {
|
||||
if (!scenarioId) {
|
||||
if (emptyState) {
|
||||
emptyState.classList.remove("hidden");
|
||||
}
|
||||
if (summaryWrapper) {
|
||||
summaryWrapper.classList.add("hidden");
|
||||
}
|
||||
if (summaryEmpty) {
|
||||
summaryEmpty.classList.add("hidden");
|
||||
}
|
||||
if (resultsWrapper) {
|
||||
resultsWrapper.classList.add("hidden");
|
||||
}
|
||||
if (resultsEmpty) {
|
||||
resultsEmpty.classList.add("hidden");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (emptyState) {
|
||||
emptyState.classList.add("hidden");
|
||||
}
|
||||
|
||||
const run = simulationRunsMap[String(scenarioId)];
|
||||
const summary = run ? run.summary : null;
|
||||
const samples = run ? run.sample_results || [] : [];
|
||||
|
||||
if (!summary) {
|
||||
if (summaryWrapper) {
|
||||
summaryWrapper.classList.add("hidden");
|
||||
}
|
||||
if (summaryEmpty) {
|
||||
summaryEmpty.classList.remove("hidden");
|
||||
}
|
||||
} else {
|
||||
if (summaryWrapper) {
|
||||
summaryWrapper.classList.remove("hidden");
|
||||
}
|
||||
if (summaryEmpty) {
|
||||
summaryEmpty.classList.add("hidden");
|
||||
}
|
||||
|
||||
if (summaryBody) {
|
||||
summaryBody.innerHTML = "";
|
||||
SUMMARY_FIELDS.forEach((field) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${field.label}</td>
|
||||
<td>${formatNumber(summary[field.key], field.decimals)}</td>
|
||||
`;
|
||||
summaryBody.appendChild(row);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (!samples.length) {
|
||||
if (resultsWrapper) {
|
||||
resultsWrapper.classList.add("hidden");
|
||||
}
|
||||
if (resultsEmpty) {
|
||||
resultsEmpty.classList.remove("hidden");
|
||||
}
|
||||
} else {
|
||||
if (resultsWrapper) {
|
||||
resultsWrapper.classList.remove("hidden");
|
||||
}
|
||||
if (resultsEmpty) {
|
||||
resultsEmpty.classList.add("hidden");
|
||||
}
|
||||
|
||||
if (resultsBody) {
|
||||
resultsBody.innerHTML = "";
|
||||
samples.slice(0, SAMPLE_RESULT_LIMIT).forEach((item, index) => {
|
||||
const row = document.createElement("tr");
|
||||
row.innerHTML = `
|
||||
<td>${index + 1}</td>
|
||||
<td>${formatNumber(item)}</td>
|
||||
`;
|
||||
resultsBody.appendChild(row);
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const runSimulation = async (event) => {
|
||||
event.preventDefault();
|
||||
hideFeedback(simulationFeedback);
|
||||
|
||||
if (!simulationForm) {
|
||||
return;
|
||||
}
|
||||
|
||||
const formData = new FormData(simulationForm);
|
||||
const scenarioId = formData.get("scenario_id");
|
||||
const payload = {
|
||||
scenario_id: scenarioId ? Number(scenarioId) : null,
|
||||
iterations: Number(formData.get("iterations")),
|
||||
seed: formData.get("seed") ? Number(formData.get("seed")) : null,
|
||||
};
|
||||
|
||||
if (!payload.scenario_id) {
|
||||
showFeedback(
|
||||
simulationFeedback,
|
||||
"Select a scenario before running a simulation.",
|
||||
"error"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch("/api/simulations/", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorDetail = await response.json().catch(() => ({}));
|
||||
throw new Error(errorDetail.detail || "Unable to run simulation.");
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
const mapKey = String(result.scenario_id);
|
||||
const summary =
|
||||
result.summary && typeof result.summary === "object"
|
||||
? result.summary
|
||||
: null;
|
||||
const iterations =
|
||||
summary && typeof summary.count === "number"
|
||||
? summary.count
|
||||
: payload.iterations || 0;
|
||||
|
||||
simulationRunsMap[mapKey] = {
|
||||
scenario_id: result.scenario_id,
|
||||
scenario_name: getScenarioName(mapKey),
|
||||
iterations,
|
||||
summary,
|
||||
sample_results: Array.isArray(result.sample_results)
|
||||
? result.sample_results
|
||||
: [],
|
||||
};
|
||||
|
||||
renderOverviewTable();
|
||||
renderScenarioDetails(mapKey);
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.value = mapKey;
|
||||
}
|
||||
if (formScenarioSelect) {
|
||||
formScenarioSelect.value = mapKey;
|
||||
}
|
||||
|
||||
simulationForm.reset();
|
||||
showFeedback(simulationFeedback, "Simulation completed.", "success");
|
||||
} catch (error) {
|
||||
showFeedback(
|
||||
simulationFeedback,
|
||||
error.message || "An unexpected error occurred.",
|
||||
"error"
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
initializeRunsMap();
|
||||
renderOverviewTable();
|
||||
|
||||
if (filterSelect) {
|
||||
filterSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
renderScenarioDetails(value);
|
||||
});
|
||||
}
|
||||
|
||||
if (formScenarioSelect) {
|
||||
formScenarioSelect.addEventListener("change", (event) => {
|
||||
const value = event.target.value;
|
||||
if (filterSelect) {
|
||||
filterSelect.value = value;
|
||||
}
|
||||
renderScenarioDetails(value);
|
||||
});
|
||||
}
|
||||
|
||||
if (simulationForm) {
|
||||
simulationForm.addEventListener("submit", runSimulation);
|
||||
}
|
||||
|
||||
if (filterSelect && filterSelect.value) {
|
||||
renderScenarioDetails(filterSelect.value);
|
||||
}
|
||||
});
|
||||
134
static/js/theme.js
Normal file
134
static/js/theme.js
Normal file
@@ -0,0 +1,134 @@
|
||||
// static/js/theme.js
|
||||
|
||||
document.addEventListener('DOMContentLoaded', () => {
|
||||
const themeSettingsForm = document.getElementById('theme-settings-form');
|
||||
const colorInputs = themeSettingsForm
|
||||
? themeSettingsForm.querySelectorAll('input[type="color"]')
|
||||
: [];
|
||||
|
||||
// Function to apply theme settings to CSS variables
|
||||
function applyTheme(theme) {
|
||||
const root = document.documentElement;
|
||||
if (theme.primary_color)
|
||||
root.style.setProperty('--color-primary', theme.primary_color);
|
||||
if (theme.secondary_color)
|
||||
root.style.setProperty('--color-secondary', theme.secondary_color);
|
||||
if (theme.accent_color)
|
||||
root.style.setProperty('--color-accent', theme.accent_color);
|
||||
if (theme.background_color)
|
||||
root.style.setProperty('--color-background', theme.background_color);
|
||||
if (theme.text_color)
|
||||
root.style.setProperty('--color-text-primary', theme.text_color);
|
||||
// Add other theme properties as needed
|
||||
}
|
||||
|
||||
// Save theme to local storage
|
||||
function saveTheme(theme) {
|
||||
localStorage.setItem('user-theme', JSON.stringify(theme));
|
||||
}
|
||||
|
||||
// Load theme from local storage
|
||||
function loadTheme() {
|
||||
const savedTheme = localStorage.getItem('user-theme');
|
||||
return savedTheme ? JSON.parse(savedTheme) : null;
|
||||
}
|
||||
|
||||
// Real-time preview for color inputs
|
||||
colorInputs.forEach((input) => {
|
||||
input.addEventListener('input', (event) => {
|
||||
const cssVar = `--color-${event.target.id.replace('-', '_')}`;
|
||||
document.documentElement.style.setProperty(cssVar, event.target.value);
|
||||
});
|
||||
});
|
||||
|
||||
const THEME_API_URL = '/api/settings/theme';
|
||||
|
||||
const normalizeTheme = (theme) => {
|
||||
if (!theme || typeof theme !== 'object') {
|
||||
return {};
|
||||
}
|
||||
const {
|
||||
theme_name,
|
||||
primary_color,
|
||||
secondary_color,
|
||||
accent_color,
|
||||
background_color,
|
||||
text_color,
|
||||
} = theme;
|
||||
return {
|
||||
theme_name,
|
||||
primary_color,
|
||||
secondary_color,
|
||||
accent_color,
|
||||
background_color,
|
||||
text_color,
|
||||
};
|
||||
};
|
||||
|
||||
if (themeSettingsForm) {
|
||||
themeSettingsForm.addEventListener('submit', async (event) => {
|
||||
event.preventDefault();
|
||||
|
||||
const formData = new FormData(themeSettingsForm);
|
||||
const themeData = Object.fromEntries(formData.entries());
|
||||
|
||||
try {
|
||||
const response = await fetch(THEME_API_URL, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(themeData),
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const payload = await response.json();
|
||||
const savedTheme = normalizeTheme(payload?.theme ?? themeData);
|
||||
alert('Theme settings saved successfully!');
|
||||
applyTheme(savedTheme);
|
||||
saveTheme(savedTheme);
|
||||
} else {
|
||||
const errorData = await response.json();
|
||||
alert(`Error saving theme settings: ${errorData.detail}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
alert('An error occurred while saving theme settings.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Load and apply theme on page load
|
||||
const initialTheme = loadTheme();
|
||||
if (initialTheme) {
|
||||
applyTheme(initialTheme);
|
||||
// Populate form fields if on the theme settings page
|
||||
if (themeSettingsForm) {
|
||||
for (const key in initialTheme) {
|
||||
const input = themeSettingsForm.querySelector(
|
||||
`#${key.replace('_', '-')}`
|
||||
);
|
||||
if (input) {
|
||||
input.value = initialTheme[key];
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If no saved theme, load from backend (if available)
|
||||
async function loadAndApplyThemeFromServer() {
|
||||
try {
|
||||
const response = await fetch(THEME_API_URL);
|
||||
if (response.ok) {
|
||||
const theme = normalizeTheme(await response.json());
|
||||
applyTheme(theme);
|
||||
saveTheme(theme); // Save to local storage for future use
|
||||
} else {
|
||||
console.error('Failed to load theme settings from server');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error loading theme settings from server:', error);
|
||||
}
|
||||
}
|
||||
loadAndApplyThemeFromServer();
|
||||
}
|
||||
});
|
||||
@@ -1,94 +0,0 @@
|
||||
{% extends "base.html" %} {% block title %}Dashboard · CalMiner{% endblock %} {%
|
||||
block content %}
|
||||
<div class="dashboard-header">
|
||||
<div>
|
||||
<h2>Operations Overview</h2>
|
||||
<p class="dashboard-subtitle">
|
||||
Unified insight across scenarios, costs, production, maintenance, and
|
||||
simulations.
|
||||
</p>
|
||||
</div>
|
||||
<div class="dashboard-actions">
|
||||
<button id="refresh-dashboard" type="button" class="btn primary">
|
||||
Refresh Dashboard
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p id="dashboard-status" class="feedback" hidden></p>
|
||||
|
||||
<section>
|
||||
<div id="summary-metrics" class="dashboard-metrics-grid">
|
||||
{% for metric in summary_metrics %}
|
||||
<article class="metric-card">
|
||||
<span class="metric-label">{{ metric.label }}</span>
|
||||
<span class="metric-value">{{ metric.value }}</span>
|
||||
</article>
|
||||
{% endfor %}
|
||||
</div>
|
||||
<p id="summary-empty" class="empty-state" {% if summary_metrics|length>
|
||||
0 %} hidden{% endif %}> Add project inputs to populate summary metrics.
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<section class="dashboard-charts">
|
||||
<article class="panel chart-card">
|
||||
<header class="panel-header">
|
||||
<div>
|
||||
<h3>Scenario Cost Mix</h3>
|
||||
<p class="chart-subtitle">CAPEX vs OPEX totals per scenario</p>
|
||||
</div>
|
||||
</header>
|
||||
<canvas
|
||||
id="cost-chart"
|
||||
height="220"
|
||||
{%
|
||||
if
|
||||
not
|
||||
cost_chart_has_data
|
||||
%}
|
||||
hidden{%
|
||||
endif
|
||||
%}
|
||||
></canvas>
|
||||
<p
|
||||
id="cost-chart-empty"
|
||||
class="empty-state"
|
||||
{%
|
||||
if
|
||||
cost_chart_has_data
|
||||
%}
|
||||
hidden{%
|
||||
endif
|
||||
%}
|
||||
>
|
||||
Add CAPEX or OPEX entries to display this chart.
|
||||
</p>
|
||||
</article>
|
||||
<article class="panel chart-card">
|
||||
<header class="panel-header">
|
||||
<div>
|
||||
<h3>Production vs Consumption</h3>
|
||||
<p class="chart-subtitle">Throughput comparison by scenario</p>
|
||||
</div>
|
||||
</header>
|
||||
<canvas
|
||||
id="activity-chart"
|
||||
height="220"
|
||||
{%
|
||||
if
|
||||
not
|
||||
activity_chart_has_data
|
||||
%}
|
||||
hidden{%
|
||||
endif
|
||||
%}
|
||||
></canvas>
|
||||
</article>
|
||||
</section>
|
||||
{% endblock %} {% block scripts %} {{ super() }}
|
||||
<script id="dashboard-data" type="application/json">
|
||||
{{ {"summary_metrics": summary_metrics, "scenario_rows": scenario_rows, "overall_report_metrics": overall_report_metrics, "recent_simulations": recent_simulations, "upcoming_maintenance": upcoming_maintenance} | tojson }}
|
||||
</script>
|
||||
<script src="/static/js/dashboard.js"></script>
|
||||
{% endblock %}
|
||||
@@ -1,51 +0,0 @@
|
||||
{% extends "base.html" %} {% block title %}Process Parameters · CalMiner{%
|
||||
endblock %} {% block content %}
|
||||
<section class="panel">
|
||||
<h2>Scenario Parameters</h2>
|
||||
{% if scenarios %}
|
||||
<form id="parameter-form" class="form-grid">
|
||||
<label>
|
||||
<span>Scenario</span>
|
||||
<select name="scenario_id" id="scenario_id">
|
||||
{% for scenario in scenarios %}
|
||||
<option value="{{ scenario.id }}">{{ scenario.name }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
</label>
|
||||
<label>
|
||||
<span>Name</span>
|
||||
<input type="text" name="name" id="name" required />
|
||||
</label>
|
||||
<label>
|
||||
<span>Value</span>
|
||||
<input type="number" name="value" id="value" step="any" required />
|
||||
</label>
|
||||
<button type="submit" class="btn primary">Add Parameter</button>
|
||||
</form>
|
||||
<p id="parameter-feedback" class="feedback" role="status"></p>
|
||||
<div class="table-container">
|
||||
<table id="parameter-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Parameter</th>
|
||||
<th scope="col">Value</th>
|
||||
<th scope="col">Distribution</th>
|
||||
<th scope="col">Details</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="parameter-table-body"></tbody>
|
||||
</table>
|
||||
</div>
|
||||
{% else %}
|
||||
<p class="empty-state">
|
||||
No scenarios available. Create a <a href="scenarios">scenario</a> before
|
||||
adding parameters.
|
||||
</p>
|
||||
{% endif %}
|
||||
</section>
|
||||
{% endblock %} {% block scripts %} {{ super() }}
|
||||
<script id="parameters-data" type="application/json">
|
||||
{{ parameters_by_scenario | tojson }}
|
||||
</script>
|
||||
<script src="/static/js/parameters.js"></script>
|
||||
{% endblock %}
|
||||
@@ -1,53 +0,0 @@
|
||||
{% extends "base.html" %} {% block title %}Scenario Management · CalMiner{%
|
||||
endblock %} {% block content %}
|
||||
<section class="panel">
|
||||
<h2>Create a New Scenario</h2>
|
||||
<form id="scenario-form" class="form-grid">
|
||||
<label>
|
||||
<span>Name</span>
|
||||
<input type="text" name="name" id="name" required />
|
||||
</label>
|
||||
<label>
|
||||
<span>Description</span>
|
||||
<input type="text" name="description" id="description" />
|
||||
</label>
|
||||
<button type="submit" class="btn primary">Create Scenario</button>
|
||||
</form>
|
||||
<div id="feedback" class="feedback hidden" aria-live="polite"></div>
|
||||
<div class="table-container">
|
||||
{% if scenarios %}
|
||||
<table id="scenario-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="scenario-table-body">
|
||||
{% for scenario in scenarios %}
|
||||
<tr data-scenario-id="{{ scenario.id }}">
|
||||
<td>{{ scenario.name }}</td>
|
||||
<td>{{ scenario.description or "—" }}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
{% else %}
|
||||
<p id="empty-state" class="empty-state">
|
||||
No scenarios yet. Create one to get started.
|
||||
</p>
|
||||
<table id="scenario-table" class="hidden" aria-hidden="true">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="scenario-table-body"></tbody>
|
||||
</table>
|
||||
{% endif %}
|
||||
</div>
|
||||
</section>
|
||||
{% endblock %} {% block scripts %} {{ super() }}
|
||||
<script src="/static/js/scenario-form.js"></script>
|
||||
{% endblock %}
|
||||
@@ -20,5 +20,6 @@
|
||||
</div>
|
||||
</div>
|
||||
{% block scripts %}{% endblock %}
|
||||
<script src="/static/js/theme.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
{% extends "base.html" %} {% from "partials/components.html" import
|
||||
select_field, feedback, empty_state, table_container with context %} {% block
|
||||
title %}Consumption · CalMiner{% endblock %} {% block content %}
|
||||
<section class="panel">
|
||||
<h2>Consumption Tracking</h2>
|
||||
<div class="form-grid">
|
||||
{{ select_field( "Scenario filter", "consumption-scenario-filter",
|
||||
options=scenarios, placeholder="Select a scenario" ) }}
|
||||
</div>
|
||||
{{ empty_state( "consumption-empty", "Choose a scenario to review its
|
||||
consumption records." ) }} {% call table_container(
|
||||
"consumption-table-wrapper", hidden=True, aria_label="Scenario consumption
|
||||
records" ) %}
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">Amount</th>
|
||||
<th scope="col">Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="consumption-table-body"></tbody>
|
||||
{% endcall %}
|
||||
</section>
|
||||
|
||||
<section class="panel">
|
||||
<h2>Add Consumption Record</h2>
|
||||
{% if scenarios %}
|
||||
<form id="consumption-form" class="form-grid">
|
||||
{{ select_field( "Scenario", "consumption-form-scenario",
|
||||
name="scenario_id", options=scenarios, required=True, placeholder="Select a
|
||||
scenario", placeholder_disabled=True ) }}
|
||||
<label for="consumption-form-unit">
|
||||
Unit
|
||||
<select id="consumption-form-unit" name="unit_name" required>
|
||||
<option value="" disabled selected>Select unit</option>
|
||||
{% for unit in unit_options %}
|
||||
<option value="{{ unit.name }}" data-symbol="{{ unit.symbol }}">
|
||||
{{ unit.name }} ({{ unit.symbol }})
|
||||
</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
</label>
|
||||
<input id="consumption-form-unit-symbol" type="hidden" name="unit_symbol" />
|
||||
<label for="consumption-form-amount">
|
||||
Amount
|
||||
<input
|
||||
id="consumption-form-amount"
|
||||
type="number"
|
||||
name="amount"
|
||||
min="0"
|
||||
step="0.01"
|
||||
required
|
||||
/>
|
||||
</label>
|
||||
<label for="consumption-form-description">
|
||||
Description (optional)
|
||||
<textarea
|
||||
id="consumption-form-description"
|
||||
name="description"
|
||||
rows="3"
|
||||
></textarea>
|
||||
</label>
|
||||
<button type="submit" class="btn primary">Add Record</button>
|
||||
</form>
|
||||
{{ feedback("consumption-feedback") }} {% else %}
|
||||
<p class="empty-state">
|
||||
Create a <a href="scenarios">scenario</a> before adding consumption records.
|
||||
</p>
|
||||
{% endif %}
|
||||
</section>
|
||||
|
||||
{% endblock %} {% block scripts %} {{ super() }}
|
||||
<script id="consumption-data" type="application/json">
|
||||
{{ {"scenarios": scenarios, "consumption": consumption_by_scenario, "unit_options": unit_options} | tojson }}
|
||||
</script>
|
||||
<script src="/static/js/consumption.js"></script>
|
||||
{% endblock %}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user