feat: Enhance project and scenario creation with monitoring metrics
- Added monitoring metrics for project creation success and error handling in `ProjectRepository`.
- Implemented similar monitoring for scenario creation in `ScenarioRepository`.
- Refactored `run_monte_carlo` in `simulation.py` to include timing and success/error metrics.
- Introduced new CSS styles for headers, alerts, and navigation buttons in `main.css` and `projects.css`.
- Created a new JavaScript file for the navigation logic behind the chevron buttons.
- Updated HTML templates to include the new navigation buttons and improved button styling.
- Added tests for the reporting service and routes to verify functionality and access control.
- Removed unused imports and tidied existing test files for clarity and performance.
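The repository changes summarised above are not shown in the excerpt below; as a rough illustration, the `ProjectRepository` instrumentation could look like the following sketch, built on the `observe_project_operation` helper that `monitoring/metrics.py` adds in this commit (the repository internals here are assumptions, not the committed code):

from monitoring.metrics import observe_project_operation


class ProjectRepository:
    """Illustrative repository wrapper; only the metric calls mirror this commit."""

    def __init__(self, session):
        self.session = session

    def create(self, project):
        try:
            self.session.add(project)
            self.session.flush()
        except Exception:
            # Count the failed creation before re-raising.
            observe_project_operation("create", status="error")
            raise
        # Count the successful creation.
        observe_project_operation("create", status="success")
        return project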
.env.development (new file, 25 lines)
@@ -0,0 +1,25 @@
# Development Environment Configuration
ENVIRONMENT=development
DEBUG=true
LOG_LEVEL=DEBUG

# Database Configuration
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer
DATABASE_PASSWORD=calminer_password
DATABASE_NAME=calminer_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=1000
CALMINER_IMPORT_MAX_ROWS=10000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=300

# Admin Seeding (for development)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.local
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=ChangeMe123!
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
.env.production (new file, 25 lines)
@@ -0,0 +1,25 @@
# Production Environment Configuration
ENVIRONMENT=production
DEBUG=false
LOG_LEVEL=WARNING

# Database Configuration (MUST be set externally - no defaults)
DATABASE_HOST=
DATABASE_PORT=5432
DATABASE_USER=
DATABASE_PASSWORD=
DATABASE_NAME=
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=100000
CALMINER_IMPORT_MAX_ROWS=100000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=3600

# Admin Seeding (for production - set strong password)
CALMINER_SEED_ADMIN_EMAIL=admin@calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_VERY_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
.env.staging (new file, 25 lines)
@@ -0,0 +1,25 @@
# Staging Environment Configuration
ENVIRONMENT=staging
DEBUG=false
LOG_LEVEL=INFO

# Database Configuration (override with actual staging values)
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_USER=calminer_staging
DATABASE_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
DATABASE_NAME=calminer_staging_db
DATABASE_DRIVER=postgresql

# Application Settings
CALMINER_EXPORT_MAX_ROWS=50000
CALMINER_IMPORT_MAX_ROWS=50000
CALMINER_EXPORT_METADATA=true
CALMINER_IMPORT_STAGING_TTL=600

# Admin Seeding (for staging)
CALMINER_SEED_ADMIN_EMAIL=admin@staging.calminer.com
CALMINER_SEED_ADMIN_USERNAME=admin
CALMINER_SEED_ADMIN_PASSWORD=CHANGE_THIS_STRONG_PASSWORD
CALMINER_SEED_ADMIN_ROLES=admin
CALMINER_SEED_FORCE=false
@@ -7,30 +7,10 @@ on:
    branches: [main, develop]

jobs:
  test:
  lint:
    runs-on: ubuntu-latest
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
      DB_DRIVER: postgresql+psycopg2
      DB_HOST: 192.168.88.35
      DB_NAME: calminer_test
      DB_USER: calminer
      DB_PASSWORD: calminer_password
      REGISTRY_CONTAINER_NAME: calminer
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:17
        env:
          POSTGRES_USER: ${{ env.DB_USER }}
          POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
          POSTGRES_DB: ${{ env.DB_NAME }}
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v4

@@ -49,17 +29,90 @@ jobs:
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.path }}
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt') }}
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt', 'pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Update apt-cacher-ng config
        run: |-
          echo 'Acquire::http::Proxy "{{ env.APT_CACHER_NG }}";' | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          apt-get update
      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Update system packages
        run: apt-get upgrade -y
      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-test.txt

      - name: Run Ruff
        run: ruff check .

      - name: Run Black
        run: black --check .

      - name: Run Bandit
        run: bandit -r . -c pyproject.toml

  test:
    runs-on: ubuntu-latest
    needs: lint
    env:
      APT_CACHER_NG: http://192.168.88.14:3142
      DB_DRIVER: postgresql+psycopg2
      DB_HOST: 192.168.88.35
      DB_NAME: calminer_test
      DB_USER: calminer
      DB_PASSWORD: calminer_password
    services:
      postgres:
        image: postgres:17
        env:
          POSTGRES_USER: ${{ env.DB_USER }}
          POSTGRES_PASSWORD: ${{ env.DB_PASSWORD }}
          POSTGRES_DB: ${{ env.DB_NAME }}
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"

      - name: Get pip cache dir
        id: pip-cache
        run: |
          echo "path=$(pip cache dir)" >> $GITEA_OUTPUT
          echo "Pip cache dir: $(pip cache dir)"

      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.path }}
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt', 'requirements-test.txt', 'pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Configure apt proxy
        run: |
          if [ -n "${APT_CACHER_NG}" ]; then
            echo "Acquire::http::Proxy \"${APT_CACHER_NG}\";" | tee /etc/apt/apt.conf.d/01apt-cacher-ng
          fi

      - name: Install system packages
        run: |
          apt-get update
          apt-get install -y build-essential libpq-dev

      - name: Install dependencies
        run: |
@@ -82,15 +135,22 @@ jobs:
          DATABASE_PASSWORD: ${{ env.DB_PASSWORD }}
          DATABASE_NAME: ${{ env.DB_NAME }}
        run: |
          pytest tests/ --cov=.
          pytest --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80 --junitxml=pytest-report.xml

      - name: Build Docker image
        run: |
          docker build -t ${{ env.REGISTRY_CONTAINER_NAME }} .
      - name: Upload test artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-artifacts
          path: |
            coverage.xml
            pytest-report.xml

  build:
    runs-on: ubuntu-latest
    needs: test
    needs:
      - lint
      - test
    env:
      DEFAULT_BRANCH: main
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
@@ -131,12 +191,84 @@ jobs:
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}

      - name: Build and push image
        uses: docker/build-push-action@v5
      - name: Build image
        id: build-image
        env:
          REGISTRY_URL: ${{ env.REGISTRY_URL }}
          REGISTRY_CONTAINER_NAME: ${{ env.REGISTRY_CONTAINER_NAME }}
          SHA_TAG: ${{ steps.meta.outputs.sha }}
          PUSH_IMAGE: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}
        run: |
          set -eo pipefail
          LOG_FILE=build.log
          if [ "${PUSH_IMAGE}" = "true" ]; then
            docker buildx build \
              --push \
              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest" \
              --tag "${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:${SHA_TAG}" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          else
            docker buildx build \
              --load \
              --tag "${REGISTRY_CONTAINER_NAME}:ci" \
              --file Dockerfile \
              . 2>&1 | tee "${LOG_FILE}"
          fi

      - name: Upload docker build logs
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          context: .
          file: Dockerfile
          push: ${{ steps.meta.outputs.on_default == 'true' && steps.meta.outputs.event_name != 'pull_request' && (env.REGISTRY_URL != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '') }}
          tags: |
            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:latest
            ${{ env.REGISTRY_URL }}/allucanget/${{ env.REGISTRY_CONTAINER_NAME }}:${{ steps.meta.outputs.sha }}
          name: docker-build-logs
          path: build.log

  deploy:
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request'
    env:
      REGISTRY_URL: ${{ secrets.REGISTRY_URL }}
      REGISTRY_CONTAINER_NAME: calminer
      KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
      STAGING_KUBE_CONFIG: ${{ secrets.STAGING_KUBE_CONFIG }}
      PROD_KUBE_CONFIG: ${{ secrets.PROD_KUBE_CONFIG }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up kubectl for staging
        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy staging]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.STAGING_KUBE_CONFIG }}

      - name: Set up kubectl for production
        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy production]')
        uses: azure/k8s-set-context@v3
        with:
          method: kubeconfig
          kubeconfig: ${{ env.PROD_KUBE_CONFIG }}

      - name: Deploy to staging
        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy staging]')
        run: |
          # Update image in deployment
          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
          # Apply any config changes
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          # Wait for rollout
          kubectl rollout status deployment/calminer-app

      - name: Deploy to production
        if: github.ref == 'refs/heads/main' && contains(github.event.head_commit.message, '[deploy production]')
        run: |
          # Update image in deployment
          kubectl set image deployment/calminer-app calminer=${REGISTRY_URL}/allucanget/${REGISTRY_CONTAINER_NAME}:latest
          # Apply any config changes
          kubectl apply -f k8s/configmap.yaml
          kubectl apply -f k8s/secret.yaml
          # Wait for rollout
          kubectl rollout status deployment/calminer-app

@@ -1,7 +1,6 @@
from __future__ import annotations

from logging.config import fileConfig
from typing import Iterable

from alembic import context
from sqlalchemy import engine_from_config, pool

@@ -0,0 +1,38 @@
"""Add performance_metrics table"""

from __future__ import annotations

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "20251111_01"
down_revision = "20251111_00"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "performance_metrics",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("timestamp", sa.DateTime(), nullable=True),
        sa.Column("metric_name", sa.String(), nullable=True),
        sa.Column("value", sa.Float(), nullable=True),
        sa.Column("labels", sa.String(), nullable=True),
        sa.Column("endpoint", sa.String(), nullable=True),
        sa.Column("method", sa.String(), nullable=True),
        sa.Column("status_code", sa.Integer(), nullable=True),
        sa.Column("duration_seconds", sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_performance_metrics_timestamp"), "performance_metrics", ["timestamp"], unique=False)
    op.create_index(op.f("ix_performance_metrics_metric_name"), "performance_metrics", ["metric_name"], unique=False)
    op.create_index(op.f("ix_performance_metrics_endpoint"), "performance_metrics", ["endpoint"], unique=False)


def downgrade() -> None:
    op.drop_index(op.f("ix_performance_metrics_endpoint"), table_name="performance_metrics")
    op.drop_index(op.f("ix_performance_metrics_metric_name"), table_name="performance_metrics")
    op.drop_index(op.f("ix_performance_metrics_timestamp"), table_name="performance_metrics")
    op.drop_table("performance_metrics")
alembic/versions/20251112_00_add_roles_metadata_columns.py (new file, 134 lines)
@@ -0,0 +1,134 @@
"""Add metadata columns to roles table"""

from __future__ import annotations

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "20251112_00_add_roles_metadata_columns"
down_revision = "20251111_01"
branch_labels = None
depends_on = None


ROLE_BACKFILL = (
    ("admin", "Administrator", "Full platform access with user management rights."),
    (
        "project_manager",
        "Project Manager",
        "Manage projects, scenarios, and associated data.",
    ),
    ("analyst", "Analyst", "Review dashboards and scenario outputs."),
    (
        "viewer",
        "Viewer",
        "Read-only access to assigned projects and reports.",
    ),
)


def upgrade() -> None:
    op.add_column(
        "roles",
        sa.Column("display_name", sa.String(length=128), nullable=True),
    )
    op.add_column(
        "roles",
        sa.Column("description", sa.Text(), nullable=True),
    )
    op.add_column(
        "roles",
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            nullable=True,
            server_default=sa.text("timezone('UTC', now())"),
        ),
    )
    op.add_column(
        "roles",
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            nullable=True,
            server_default=sa.text("timezone('UTC', now())"),
        ),
    )

    connection = op.get_bind()

    for name, display_name, description in ROLE_BACKFILL:
        connection.execute(
            sa.text(
                """
                UPDATE roles
                SET display_name = :display_name,
                    description = COALESCE(description, :description)
                WHERE name = :name
                  AND display_name IS NULL
                """
            ),
            {
                "name": name,
                "display_name": display_name,
                "description": description,
            },
        )

    connection.execute(
        sa.text(
            """
            UPDATE roles
            SET display_name = INITCAP(REPLACE(name, '_', ' '))
            WHERE display_name IS NULL
            """
        )
    )

    connection.execute(
        sa.text(
            """
            UPDATE roles
            SET created_at = timezone('UTC', now())
            WHERE created_at IS NULL
            """
        )
    )
    connection.execute(
        sa.text(
            """
            UPDATE roles
            SET updated_at = timezone('UTC', now())
            WHERE updated_at IS NULL
            """
        )
    )

    op.alter_column(
        "roles",
        "display_name",
        existing_type=sa.String(length=128),
        nullable=False,
    )
    op.alter_column(
        "roles",
        "created_at",
        existing_type=sa.DateTime(timezone=True),
        nullable=False,
        server_default=sa.text("timezone('UTC', now())"),
    )
    op.alter_column(
        "roles",
        "updated_at",
        existing_type=sa.DateTime(timezone=True),
        nullable=False,
        server_default=sa.text("timezone('UTC', now())"),
    )


def downgrade() -> None:
    op.drop_column("roles", "updated_at")
    op.drop_column("roles", "created_at")
    op.drop_column("roles", "description")
    op.drop_column("roles", "display_name")
changelog.md (81 changed lines)
@@ -1,5 +1,58 @@
# Changelog

## 2025-11-12

- Diagnosed admin bootstrap failure caused by legacy `roles` schema, added Alembic migration `20251112_00_add_roles_metadata_columns.py` to backfill `display_name`, `description`, `created_at`, and `updated_at`, and verified the migration via full pytest run in the activated `.venv`.

## 2025-11-11

- Implemented base URL routing to redirect unauthenticated users to login and authenticated users to dashboard.
- Added comprehensive end-to-end tests for login flow, including redirects, session handling, and error messaging for invalid/inactive accounts.
- Updated header and footer templates to consistently use `logo_big.png` image instead of text logo, with appropriate CSS styling for sizing.
- Centralised ISO-4217 currency validation across scenarios, imports, and export filters (`models/scenario.py`, `routes/scenarios.py`, `schemas/scenario.py`, `schemas/imports.py`, `services/export_query.py`) so malformed codes are rejected consistently at every entry point.
- Updated scenario services and UI flows to surface friendly validation errors and added regression coverage for imports, exports, API creation, and lifecycle flows ensuring currencies are normalised end-to-end.
- Recorded the completed “Ensure currency is used consistently” work in `.github/instructions/DONE.md` and ran the full pytest suite (150 tests) to verify the refactor.
- Linked projects to their pricing settings by updating SQLAlchemy models, repositories, seeding utilities, and migrations, and added regression tests to cover the new association and default backfill.
- Bootstrapped database-stored pricing settings at application startup, aligned initial data seeding with the database-first metadata flow, and added tests covering pricing bootstrap creation, project assignment, and idempotency.
- Extended pricing configuration support to prefer persisted metadata via `dependencies.get_pricing_metadata`, added retrieval tests for project/default fallbacks, and refreshed docs (`calminer-docs/specifications/price_calculation.md`, `pricing_settings_data_model.md`) to describe the database-backed workflow and bootstrap behaviour.
- Added `services/financial.py` NPV, IRR, and payback helpers with robust cash-flow normalisation, convergence safeguards, and fractional period support, plus comprehensive pytest coverage exercising representative project scenarios and failure modes.
- Authored `calminer-docs/specifications/financial_metrics.md` capturing DCF assumptions, solver behaviours, and worked examples, and cross-linked the architecture concepts to the new reference for consistent navigation.
- Implemented `services/simulation.py` Monte Carlo engine with configurable distributions, summary aggregation, and reproducible RNG seeding, introduced regression tests in `tests/test_simulation.py`, and documented configuration/usage in `calminer-docs/specifications/monte_carlo_simulation.md` with architecture cross-links.
- Polished reporting HTML contexts by cleaning stray fragments in `routes/reports.py`, adding download action metadata for project and scenario pages, and generating scenario comparison download URLs with correctly serialised repeated `scenario_ids` parameters.
- Consolidated Alembic history into a single initial migration (`20251111_00_initial_schema.py`), removed superseded revision files, and ensured Alembic metadata still references the project metadata for clean bootstrap.
- Added `scripts/run_migrations.py` and a Docker entrypoint wrapper to run Alembic migrations before `uvicorn` starts, removed the fallback `Base.metadata.create_all` call, and updated `calminer-docs/admin/installation.md` so developers know how to apply migrations locally or via Docker.
- Configured pytest defaults to collect coverage (`--cov`) with an 80% fail-under gate, excluded entrypoint/reporting scaffolds from the calculation, updated contributor docs with the standard `pytest` command, and verified the suite now reports 83% coverage.
- Standardized color scheme and typography by moving alert styles to `main.css`, adding typography rules with CSS variables, updating auth templates for consistent button classes, and ensuring all templates use centralized color and spacing variables.
- Improved navigation flow by adding two big chevron buttons on top of the navigation sidebar to allow users to navigate to the previous and next page in the page navigation list, including JavaScript logic for determining current page and handling navigation.
- Established pytest-based unit and integration test suites with coverage thresholds, achieving 83% coverage across 181 tests, with configuration in pyproject.toml and documentation in CONTRIBUTING.md.
- Configured CI pipelines to run tests, linting, and security checks on each change, adding Bandit security scanning to the workflow and verifying execution on pushes and PRs to main/develop branches.
- Added deployment automation with Docker Compose for local development and Kubernetes manifests for production, ensuring environment parity and documenting processes in calminer-docs/admin/installation.md.
- Completed monitoring instrumentation by adding business metrics observation to project and scenario repository operations, and simulation performance tracking to Monte Carlo service with success/error status and duration metrics.
- Updated TODO list to reflect completed monitoring implementation tasks and validated changes with passing simulation tests.
- Implemented comprehensive performance monitoring for scalability (FR-006) with Prometheus metrics collection for HTTP requests, import/export operations, and general application metrics.
- Added database model for persistent metric storage with aggregation endpoints for KPIs like request latency, error rates, and throughput.
- Created FastAPI middleware for automatic request metric collection and background persistence to database.
- Extended monitoring router with performance metrics API endpoints and detailed health checks.
- Added Alembic migration for performance_metrics table and updated model imports.
- Completed concurrent interaction testing implementation, validating database transaction isolation under threading and establishing async testing framework for future concurrency enhancements.
- Implemented comprehensive deployment automation with Docker Compose configurations for development, staging, and production environments ensuring environment parity.
- Set up Kubernetes manifests with resource limits, health checks, and secrets management for production deployment.
- Configured CI/CD workflows for automated Docker image building, registry pushing, and Kubernetes deployment to staging/production environments.
- Documented deployment processes, environment configurations, and CI/CD workflows in project documentation.
- Validated deployment automation through Docker Compose configuration testing and CI/CD pipeline structure.

## 2025-11-10

- Added dedicated pytest coverage for guard dependencies, exercising success plus failure paths (missing session, inactive user, missing roles, project/scenario access errors) via `tests/test_dependencies_guards.py`.
- Added integration tests in `tests/test_authorization_integration.py` verifying anonymous 401 responses, role-based 403s, and authorized project manager flows across API and UI endpoints.
- Implemented environment-driven admin bootstrap settings, wired the `bootstrap_admin` helper into FastAPI startup, added pytest coverage for creation/idempotency/reset logic, and documented operational guidance in the RBAC plan and security concept.
- Retired the legacy authentication RBAC implementation plan document after migrating its guidance into live documentation and synchronized the contributor instructions to reflect the removal.
- Completed the Authentication & RBAC checklist by shipping the new models, migrations, repositories, guard dependencies, and integration tests.
- Documented the project/scenario import/export field mapping and file format guidelines in `calminer-docs/requirements/FR-008.md`, and introduced `schemas/imports.py` with Pydantic models that normalise incoming CSV/Excel rows for projects and scenarios.
- Added `services/importers.py` to load CSV/XLSX files into the new import schemas, pulled in `openpyxl` for Excel support, and covered the parsing behaviour with `tests/test_import_parsing.py`.
- Expanded the import ingestion workflow with staging previews, transactional persistence commits, FastAPI preview/commit endpoints under `/imports`, and new API tests (`tests/test_import_ingestion.py`, `tests/test_import_api.py`) ensuring end-to-end coverage.
- Added persistent audit logging via `ImportExportLog`, structured log emission, Prometheus metrics instrumentation, `/metrics` endpoint exposure, and updated operator/deployment documentation to guide monitoring setup.

## 2025-11-09

- Captured current implementation status, requirements coverage, missing features, and prioritized roadmap in `calminer-docs/implementation_status.md` to guide future development.
@@ -21,31 +74,3 @@
- Implemented cookie-based authentication session middleware with automatic access token refresh, logout handling, navigation adjustments, and documentation/test updates capturing the new behaviour.
- Delivered idempotent seeding utilities with `scripts/initial_data.py`, entry-point runner `scripts/00_initial_data.py`, documentation updates, and pytest coverage to verify role/admin provisioning.
- Secured project and scenario routers with RBAC guard dependencies, enforced repository access checks via helper utilities, and aligned template routes with FastAPI dependency injection patterns.

## 2025-11-10

- Added dedicated pytest coverage for guard dependencies, exercising success plus failure paths (missing session, inactive user, missing roles, project/scenario access errors) via `tests/test_dependencies_guards.py`.
- Added integration tests in `tests/test_authorization_integration.py` verifying anonymous 401 responses, role-based 403s, and authorized project manager flows across API and UI endpoints.
- Implemented environment-driven admin bootstrap settings, wired the `bootstrap_admin` helper into FastAPI startup, added pytest coverage for creation/idempotency/reset logic, and documented operational guidance in the RBAC plan and security concept.
- Retired the legacy authentication RBAC implementation plan document after migrating its guidance into live documentation and synchronized the contributor instructions to reflect the removal.
- Completed the Authentication & RBAC checklist by shipping the new models, migrations, repositories, guard dependencies, and integration tests.
- Documented the project/scenario import/export field mapping and file format guidelines in `calminer-docs/requirements/FR-008.md`, and introduced `schemas/imports.py` with Pydantic models that normalise incoming CSV/Excel rows for projects and scenarios.
- Added `services/importers.py` to load CSV/XLSX files into the new import schemas, pulled in `openpyxl` for Excel support, and covered the parsing behaviour with `tests/test_import_parsing.py`.
- Expanded the import ingestion workflow with staging previews, transactional persistence commits, FastAPI preview/commit endpoints under `/imports`, and new API tests (`tests/test_import_ingestion.py`, `tests/test_import_api.py`) ensuring end-to-end coverage.
- Added persistent audit logging via `ImportExportLog`, structured log emission, Prometheus metrics instrumentation, `/metrics` endpoint exposure, and updated operator/deployment documentation to guide monitoring setup.

## 2025-11-11

- Centralised ISO-4217 currency validation across scenarios, imports, and export filters (`models/scenario.py`, `routes/scenarios.py`, `schemas/scenario.py`, `schemas/imports.py`, `services/export_query.py`) so malformed codes are rejected consistently at every entry point.
- Updated scenario services and UI flows to surface friendly validation errors and added regression coverage for imports, exports, API creation, and lifecycle flows ensuring currencies are normalised end-to-end.
- Recorded the completed “Ensure currency is used consistently” work in `.github/instructions/DONE.md` and ran the full pytest suite (150 tests) to verify the refactor.
- Linked projects to their pricing settings by updating SQLAlchemy models, repositories, seeding utilities, and migrations, and added regression tests to cover the new association and default backfill.
- Bootstrapped database-stored pricing settings at application startup, aligned initial data seeding with the database-first metadata flow, and added tests covering pricing bootstrap creation, project assignment, and idempotency.
- Extended pricing configuration support to prefer persisted metadata via `dependencies.get_pricing_metadata`, added retrieval tests for project/default fallbacks, and refreshed docs (`calminer-docs/specifications/price_calculation.md`, `pricing_settings_data_model.md`) to describe the database-backed workflow and bootstrap behaviour.
- Added `services/financial.py` NPV, IRR, and payback helpers with robust cash-flow normalisation, convergence safeguards, and fractional period support, plus comprehensive pytest coverage exercising representative project scenarios and failure modes.
- Authored `calminer-docs/specifications/financial_metrics.md` capturing DCF assumptions, solver behaviours, and worked examples, and cross-linked the architecture concepts to the new reference for consistent navigation.
- Implemented `services/simulation.py` Monte Carlo engine with configurable distributions, summary aggregation, and reproducible RNG seeding, introduced regression tests in `tests/test_simulation.py`, and documented configuration/usage in `calminer-docs/specifications/monte_carlo_simulation.md` with architecture cross-links.
- Polished reporting HTML contexts by cleaning stray fragments in `routes/reports.py`, adding download action metadata for project and scenario pages, and generating scenario comparison download URLs with correctly serialised repeated `scenario_ids` parameters.
- Consolidated Alembic history into a single initial migration (`20251111_00_initial_schema.py`), removed superseded revision files, and ensured Alembic metadata still references the project metadata for clean bootstrap.
- Added `scripts/run_migrations.py` and a Docker entrypoint wrapper to run Alembic migrations before `uvicorn` starts, removed the fallback `Base.metadata.create_all` call, and updated `calminer-docs/admin/installation.md` so developers know how to apply migrations locally or via Docker.
- Configured pytest defaults to collect coverage (`--cov`) with an 80% fail-under gate, excluded entrypoint/reporting scaffolds from the calculation, updated contributor docs with the standard `pytest` command, and verified the suite now reports 83% coverage.

docker-compose.override.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=development
      - DEBUG=true
      - LOG_LEVEL=DEBUG
      # Override database to use local postgres service
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=calminer
      - DATABASE_PASSWORD=calminer_password
      - DATABASE_NAME=calminer_db
      - DATABASE_DRIVER=postgresql
      # Development-specific settings
      - CALMINER_EXPORT_MAX_ROWS=1000
      - CALMINER_IMPORT_MAX_ROWS=10000
    volumes:
      # Mount source code for live reloading (if using --reload)
      - .:/app:ro
      # Override logs volume to local for easier access
      - ./logs:/app/logs
    ports:
      - "8003:8003"
    # Override command for development with reload
    command:
      [
        "uvicorn",
        "main:app",
        "--host",
        "0.0.0.0",
        "--port",
        "8003",
        "--reload",
        "--workers",
        "1",
      ]
    depends_on:
      - postgres
    restart: unless-stopped

  postgres:
    environment:
      - POSTGRES_USER=calminer
      - POSTGRES_PASSWORD=calminer_password
      - POSTGRES_DB=calminer_db
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped

volumes:
  postgres_data:
docker-compose.prod.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=production
      - DEBUG=false
      - LOG_LEVEL=WARNING
      # Database configuration - must be provided externally
      - DATABASE_HOST=${DATABASE_HOST}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME}
      - DATABASE_DRIVER=postgresql
      # Production-specific settings
      - CALMINER_EXPORT_MAX_ROWS=100000
      - CALMINER_IMPORT_MAX_ROWS=100000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=3600
    ports:
      - "8003:8003"
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    # Production health checks
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for production
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 1G
        reservations:
          cpus: "0.5"
          memory: 512M

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Production postgres health check
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DATABASE_USER} -d ${DATABASE_NAME}"]
      interval: 60s
      timeout: 30s
      retries: 5
      start_period: 60s
    # Resource limits for postgres
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 2G
        reservations:
          cpus: "0.5"
          memory: 1G

volumes:
  postgres_data:
docker-compose.staging.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
version: "3.8"

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APT_CACHE_URL: ${APT_CACHE_URL:-}
    environment:
      - ENVIRONMENT=staging
      - DEBUG=false
      - LOG_LEVEL=INFO
      # Database configuration - can be overridden by external env
      - DATABASE_HOST=${DATABASE_HOST:-postgres}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER:-calminer}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME:-calminer_db}
      - DATABASE_DRIVER=postgresql
      # Staging-specific settings
      - CALMINER_EXPORT_MAX_ROWS=50000
      - CALMINER_IMPORT_MAX_ROWS=50000
      - CALMINER_EXPORT_METADATA=true
      - CALMINER_IMPORT_STAGING_TTL=600
    ports:
      - "8003:8003"
    depends_on:
      - postgres
    restart: unless-stopped
    # Health check for staging
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  postgres:
    environment:
      - POSTGRES_USER=${DATABASE_USER:-calminer}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME:-calminer_db}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    restart: unless-stopped
    # Health check for postgres
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "pg_isready -U ${DATABASE_USER:-calminer} -d ${DATABASE_NAME:-calminer_db}",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  postgres_data:
@@ -8,11 +8,13 @@ services:
    ports:
      - "8003:8003"
    environment:
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=calminer
      - DATABASE_PASSWORD=calminer_password
      - DATABASE_NAME=calminer_db
      # Environment-specific variables should be set in override files
      - ENVIRONMENT=${ENVIRONMENT:-production}
      - DATABASE_HOST=${DATABASE_HOST:-postgres}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD}
      - DATABASE_NAME=${DATABASE_NAME}
      - DATABASE_DRIVER=postgresql
    depends_on:
      - postgres
@@ -23,9 +25,9 @@ services:
  postgres:
    image: postgres:17
    environment:
      - POSTGRES_USER=calminer
      - POSTGRES_PASSWORD=calminer_password
      - POSTGRES_DB=calminer_db
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
      - POSTGRES_DB=${DATABASE_NAME}
    ports:
      - "5432:5432"
    volumes:

k8s/configmap.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: calminer-config
data:
  DATABASE_HOST: "calminer-db"
  DATABASE_PORT: "5432"
  DATABASE_USER: "calminer"
  DATABASE_NAME: "calminer_db"
  DATABASE_DRIVER: "postgresql"
  CALMINER_EXPORT_MAX_ROWS: "10000"
  CALMINER_EXPORT_METADATA: "true"
  CALMINER_IMPORT_STAGING_TTL: "300"
  CALMINER_IMPORT_MAX_ROWS: "50000"
k8s/deployment.yaml (new file, 54 lines)
@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calminer-app
  labels:
    app: calminer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: calminer
  template:
    metadata:
      labels:
        app: calminer
    spec:
      containers:
        - name: calminer
          image: registry.example.com/calminer:latest
          ports:
            - containerPort: 8003
          envFrom:
            - configMapRef:
                name: calminer-config
            - secretRef:
                name: calminer-secrets
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8003
            initialDelaySeconds: 5
            periodSeconds: 5
      initContainers:
        - name: wait-for-db
          image: postgres:17
          command:
            [
              "sh",
              "-c",
              "until pg_isready -h calminer-db -p 5432; do echo waiting for database; sleep 2; done;",
            ]
k8s/ingress.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: calminer-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - host: calminer.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: calminer-service
                port:
                  number: 80
k8s/postgres-service.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-db
  labels:
    app: calminer-db
spec:
  selector:
    app: calminer-db
  ports:
    - port: 5432
      targetPort: 5432
  clusterIP: None # Headless service for StatefulSet
k8s/postgres.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: calminer-db
spec:
  serviceName: calminer-db
  replicas: 1
  selector:
    matchLabels:
      app: calminer-db
  template:
    metadata:
      labels:
        app: calminer-db
    spec:
      containers:
        - name: postgres
          image: postgres:17
          ports:
            - containerPort: 5432
          env:
            - name: POSTGRES_USER
              value: "calminer"
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: calminer-secrets
                  key: DATABASE_PASSWORD
            - name: POSTGRES_DB
              value: "calminer_db"
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          volumeMounts:
            - name: postgres-storage
              mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
    - metadata:
        name: postgres-storage
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi
k8s/secret.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: calminer-secrets
type: Opaque
data:
  DATABASE_PASSWORD: Y2FsbWluZXJfcGFzc3dvcmQ= # base64 encoded 'calminer_password'
  CALMINER_SEED_ADMIN_PASSWORD: Q2hhbmdlTWUxMjMh # base64 encoded 'ChangeMe123!'
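The two base64 values in this Secret can be reproduced with the Python standard library:

import base64

print(base64.b64encode(b"calminer_password").decode())  # Y2FsbWluZXJfcGFzc3dvcmQ=
print(base64.b64encode(b"ChangeMe123!").decode())  # Q2hhbmdlTWUxMjMh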
k8s/service.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: calminer-service
  labels:
    app: calminer
spec:
  selector:
    app: calminer
  ports:
    - port: 80
      targetPort: 8003
      protocol: TCP
  type: ClusterIP
main.py (8 changed lines)
@@ -6,13 +6,8 @@ from fastapi.staticfiles import StaticFiles

from config.settings import get_settings
from middleware.auth_session import AuthSessionMiddleware
from middleware.metrics import MetricsMiddleware
from middleware.validation import validate_json
from models import (
    FinancialInput,
    Project,
    Scenario,
    SimulationParameter,
)
from routes.auth import router as auth_router
from routes.dashboard import router as dashboard_router
from routes.imports import router as imports_router
@@ -26,6 +21,7 @@ from services.bootstrap import bootstrap_admin, bootstrap_pricing_settings
app = FastAPI()

app.add_middleware(AuthSessionMiddleware)
app.add_middleware(MetricsMiddleware)

logger = logging.getLogger(__name__)

@@ -9,6 +9,7 @@ from starlette.types import ASGIApp

from config.settings import Settings, get_settings
from models import User
from monitoring.metrics import ACTIVE_CONNECTIONS
from services.exceptions import EntityNotFoundError
from services.security import (
    JWTSettings,
@@ -45,6 +46,8 @@ class _ResolutionResult:
class AuthSessionMiddleware(BaseHTTPMiddleware):
    """Resolve authenticated users from session cookies and refresh tokens."""

    _active_sessions: int = 0

    def __init__(
        self,
        app: ASGIApp,
@@ -61,9 +64,23 @@ class AuthSessionMiddleware(BaseHTTPMiddleware):

    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
        resolved = self._resolve_session(request)
        response = await call_next(request)
        self._apply_session(response, resolved)
        return response

        # Track active sessions for authenticated users
        if resolved.session.user and resolved.session.user.is_active:
            AuthSessionMiddleware._active_sessions += 1
            ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

        try:
            response = await call_next(request)
            return response
        finally:
            # Decrement on response
            if resolved.session.user and resolved.session.user.is_active:
                AuthSessionMiddleware._active_sessions = max(0, AuthSessionMiddleware._active_sessions - 1)
                ACTIVE_CONNECTIONS.set(AuthSessionMiddleware._active_sessions)

            self._apply_session(response, resolved)

    def _resolve_session(self, request: Request) -> _ResolutionResult:
        settings = self._settings_provider()

middleware/metrics.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from __future__ import annotations

import time
from typing import Callable

from fastapi import BackgroundTasks, Request, Response
from starlette.middleware.base import BaseHTTPMiddleware

from monitoring.metrics import observe_request
from services.metrics import get_metrics_service


class MetricsMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: Callable[[Request], Response]) -> Response:
        start_time = time.time()
        response = await call_next(request)
        process_time = time.time() - start_time

        observe_request(
            method=request.method,
            endpoint=request.url.path,
            status=response.status_code,
            seconds=process_time,
        )

        # Store in database asynchronously
        background_tasks = getattr(request.state, "background_tasks", None)
        if background_tasks:
            background_tasks.add_task(
                store_request_metric,
                method=request.method,
                endpoint=request.url.path,
                status_code=response.status_code,
                duration_seconds=process_time,
            )

        return response


async def store_request_metric(
    method: str, endpoint: str, status_code: int, duration_seconds: float
) -> None:
    """Store request metric in database."""
    try:
        service = get_metrics_service()
        service.store_metric(
            metric_name="http_request",
            value=duration_seconds,
            labels={"method": method, "endpoint": endpoint, "status": status_code},
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
    except Exception:
        # Log error but don't fail the request
        pass
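Note that the database persistence path above only fires when `request.state.background_tasks` has been populated, which FastAPI does not do by default. One hypothetical way to provide it (not part of this commit) is a small dependency that stashes FastAPI's `BackgroundTasks` on the request, wired into routes via `Depends(attach_background_tasks)`:

from fastapi import BackgroundTasks, Request


async def attach_background_tasks(request: Request, background_tasks: BackgroundTasks) -> None:
    # Hypothetical helper: expose BackgroundTasks to the middleware via request.state.
    request.state.background_tasks = background_tasks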
@@ -11,6 +11,7 @@ from .metadata import (
    StochasticVariable,
    StochasticVariableDescriptor,
)
from .performance_metric import PerformanceMetric
from .pricing_settings import (
    PricingImpuritySettings,
    PricingMetalSettings,
@@ -45,4 +46,5 @@ __all__ = [
    "Role",
    "UserRole",
    "password_context",
    "PerformanceMetric",
]

@@ -16,17 +16,6 @@ from sqlalchemy import (
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates

from sqlalchemy import (
    Date,
    DateTime,
    Enum as SQLEnum,
    ForeignKey,
    Integer,
    Numeric,
    String,
    Text,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func

from config.database import Base

@@ -1,6 +1,5 @@
from __future__ import annotations

from datetime import datetime

from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.sql import func

models/performance_metric.py (new file, 26 lines)
@@ -0,0 +1,26 @@
from __future__ import annotations

from datetime import datetime
from typing import Optional

from sqlalchemy import Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class PerformanceMetric(Base):
    __tablename__ = "performance_metrics"

    id = Column(Integer, primary_key=True, index=True)
    timestamp = Column(DateTime, default=datetime.utcnow, index=True)
    metric_name = Column(String, index=True)
    value = Column(Float)
    labels = Column(String)  # JSON string of labels
    endpoint = Column(String, index=True, nullable=True)
    method = Column(String, nullable=True)
    status_code = Column(Integer, nullable=True)
    duration_seconds = Column(Float, nullable=True)

    def __repr__(self) -> str:
        return f"<PerformanceMetric(id={self.id}, name={self.metric_name}, value={self.value})>"
@@ -1,7 +1,14 @@
from __future__ import annotations

from fastapi import APIRouter, Response
from datetime import datetime, timedelta
from typing import Optional

from fastapi import APIRouter, Depends, HTTPException, Query, Response
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from sqlalchemy.orm import Session

from config.database import get_db
from services.metrics import MetricsService


router = APIRouter(prefix="/metrics", tags=["monitoring"])
@@ -11,3 +18,99 @@ router = APIRouter(prefix="/metrics", tags=["monitoring"])
async def metrics_endpoint() -> Response:
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@router.get("/performance", summary="Get performance metrics")
async def get_performance_metrics(
    metric_name: Optional[str] = Query(None, description="Filter by metric name"),
    hours: int = Query(24, description="Hours back to look"),
    db: Session = Depends(get_db),
) -> dict:
    """Get aggregated performance metrics."""
    service = MetricsService(db)
    start_time = datetime.utcnow() - timedelta(hours=hours)

    if metric_name:
        metrics = service.get_metrics(metric_name=metric_name, start_time=start_time)
        aggregated = service.get_aggregated_metrics(metric_name, start_time=start_time)
        return {
            "metric_name": metric_name,
            "period_hours": hours,
            "aggregated": aggregated,
            "recent_samples": [
                {
                    "timestamp": m.timestamp.isoformat(),
                    "value": m.value,
                    "labels": m.labels,
                    "endpoint": m.endpoint,
                    "method": m.method,
                    "status_code": m.status_code,
                    "duration_seconds": m.duration_seconds,
                }
                for m in metrics[:50]  # Last 50 samples
            ],
        }

    # Return summary for all metrics
    all_metrics = service.get_metrics(start_time=start_time, limit=1000)
    metric_types = {}
    for m in all_metrics:
        if m.metric_name not in metric_types:
            metric_types[m.metric_name] = []
        metric_types[m.metric_name].append(m.value)

    summary = {}
    for name, values in metric_types.items():
        summary[name] = {
            "count": len(values),
            "avg": sum(values) / len(values) if values else 0,
            "min": min(values) if values else 0,
            "max": max(values) if values else 0,
        }

    return {
        "period_hours": hours,
        "summary": summary,
    }


@router.get("/health", summary="Detailed health check with metrics")
async def detailed_health(db: Session = Depends(get_db)) -> dict:
    """Get detailed health status with recent metrics."""
    service = MetricsService(db)
    last_hour = datetime.utcnow() - timedelta(hours=1)

    # Get request metrics from last hour
    request_metrics = service.get_metrics(metric_name="http_request", start_time=last_hour)

    if request_metrics:
        durations = []
        error_count = 0
        for m in request_metrics:
            if m.duration_seconds is not None:
                durations.append(m.duration_seconds)
            if m.status_code is not None and m.status_code >= 400:
                error_count += 1
        total_requests = len(request_metrics)

        avg_duration = sum(durations) / len(durations) if durations else 0
        error_rate = error_count / total_requests if total_requests > 0 else 0
    else:
        avg_duration = 0
        error_rate = 0
        total_requests = 0

    return {
        "status": "ok",
        "timestamp": datetime.utcnow().isoformat(),
        "metrics": {
            "requests_last_hour": total_requests,
            "avg_response_time_seconds": avg_duration,
            "error_rate": error_rate,
        },
    }

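A quick way to exercise the new endpoints is FastAPI's test client; this sketch assumes the `app` from `main.py` and that the auth middleware lets the monitoring routes through in a test environment:

from fastapi.testclient import TestClient

from main import app

client = TestClient(app)

prom = client.get("/metrics")  # Prometheus exposition payload
perf = client.get("/metrics/performance", params={"metric_name": "http_request", "hours": 1})
health = client.get("/metrics/health")  # {"status": "ok", ..., "metrics": {...}}
print(prom.status_code, perf.status_code, health.status_code)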
@@ -1,8 +1,7 @@
from __future__ import annotations

from typing import Iterable

-from prometheus_client import Counter, Histogram
+from prometheus_client import Counter, Histogram, Gauge

IMPORT_DURATION = Histogram(
    "calminer_import_duration_seconds",
@@ -28,6 +27,54 @@ EXPORT_TOTAL = Counter(
    labelnames=("dataset", "status", "format"),
)

# General performance metrics
REQUEST_DURATION = Histogram(
    "calminer_request_duration_seconds",
    "Duration of HTTP requests",
    labelnames=("method", "endpoint", "status"),
)

REQUEST_TOTAL = Counter(
    "calminer_request_total",
    "Count of HTTP requests",
    labelnames=("method", "endpoint", "status"),
)

ACTIVE_CONNECTIONS = Gauge(
    "calminer_active_connections",
    "Number of active connections",
)

DB_CONNECTIONS = Gauge(
    "calminer_db_connections",
    "Number of database connections",
)

# Business metrics
PROJECT_OPERATIONS = Counter(
    "calminer_project_operations_total",
    "Count of project operations",
    labelnames=("operation", "status"),
)

SCENARIO_OPERATIONS = Counter(
    "calminer_scenario_operations_total",
    "Count of scenario operations",
    labelnames=("operation", "status"),
)

SIMULATION_RUNS = Counter(
    "calminer_simulation_runs_total",
    "Count of Monte Carlo simulation runs",
    labelnames=("status",),
)

SIMULATION_DURATION = Histogram(
    "calminer_simulation_duration_seconds",
    "Duration of Monte Carlo simulations",
    labelnames=("status",),
)


def observe_import(action: str, dataset: str, status: str, seconds: float) -> None:
    IMPORT_TOTAL.labels(dataset=dataset, action=action, status=status).inc()
@@ -40,3 +87,22 @@ def observe_export(dataset: str, status: str, export_format: str, seconds: float
                       format=export_format).inc()
    EXPORT_DURATION.labels(dataset=dataset, status=status,
                           format=export_format).observe(seconds)


def observe_request(method: str, endpoint: str, status: int, seconds: float) -> None:
    REQUEST_TOTAL.labels(method=method, endpoint=endpoint, status=status).inc()
    REQUEST_DURATION.labels(method=method, endpoint=endpoint,
                            status=status).observe(seconds)


def observe_project_operation(operation: str, status: str = "success") -> None:
    PROJECT_OPERATIONS.labels(operation=operation, status=status).inc()


def observe_scenario_operation(operation: str, status: str = "success") -> None:
    SCENARIO_OPERATIONS.labels(operation=operation, status=status).inc()


def observe_simulation(status: str, duration_seconds: float) -> None:
    SIMULATION_RUNS.labels(status=status).inc()
    SIMULATION_DURATION.labels(status=status).observe(duration_seconds)
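This commit defines observe_request but does not show its caller; a plausible hookup is a FastAPI HTTP middleware. A sketch under that assumption (the middleware itself is not part of this commit):

# Hypothetical wiring of observe_request; only the helper is from this commit.
import time

from fastapi import FastAPI, Request

from monitoring.metrics import observe_request

app = FastAPI()


@app.middleware("http")
async def record_request_metrics(request: Request, call_next):
    # Time the downstream handler and feed the request counter/histogram.
    started = time.perf_counter()
    response = await call_next(request)
    observe_request(
        method=request.method,
        endpoint=request.url.path,
        status=response.status_code,
        seconds=time.perf_counter() - started,
    )
    return response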
@@ -18,6 +18,9 @@ exclude = '''
pythonpath = ["."]
testpaths = ["tests"]
addopts = "-ra --strict-config --strict-markers --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80"
markers = [
    "asyncio: marks tests as async (using pytest-asyncio)",
]

[tool.coverage.run]
branch = true
@@ -35,3 +38,7 @@ omit = [
skip_empty = true
show_missing = true

[tool.bandit]
exclude_dirs = ["tests", "alembic", "scripts"]
skips = ["B101", "B601"]  # B101: assert_used, B601: shell_injection (may be false positives)
@@ -1,7 +1,9 @@
pytest
pytest-asyncio
pytest-cov
pytest-httpx
python-jose
ruff
black
mypy
bandit
@@ -3,10 +3,10 @@ from __future__ import annotations
from datetime import datetime

from fastapi import APIRouter, Depends, Request
-from fastapi.responses import HTMLResponse
+from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.templating import Jinja2Templates

-from dependencies import get_unit_of_work, require_authenticated_user
+from dependencies import get_current_user, get_unit_of_work
from models import ScenarioStatus, User
from services.unit_of_work import UnitOfWork

@@ -108,12 +108,15 @@ def _load_scenario_alerts(
    return alerts


-@router.get("/", response_class=HTMLResponse, include_in_schema=False, name="dashboard.home")
+@router.get("/", include_in_schema=False, name="dashboard.home", response_model=None)
def dashboard_home(
    request: Request,
-    _: User = Depends(require_authenticated_user),
+    user: User | None = Depends(get_current_user),
    uow: UnitOfWork = Depends(get_unit_of_work),
-) -> HTMLResponse:
+) -> HTMLResponse | RedirectResponse:
+    if user is None:
+        return RedirectResponse(request.url_for("auth.login_form"), status_code=303)
+
    context = {
        "metrics": _load_metrics(uow),
        "recent_projects": _load_recent_projects(uow),
@@ -15,7 +15,7 @@ from dependencies import (
)
from models import MiningOperationType, Project, ScenarioStatus, User
from schemas.project import ProjectCreate, ProjectRead, ProjectUpdate
-from services.exceptions import EntityConflictError, EntityNotFoundError
+from services.exceptions import EntityConflictError
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork

@@ -138,7 +138,7 @@ def create_project_submit(

    try:
        op_type = MiningOperationType(operation_type)
-    except ValueError as exc:
+    except ValueError:
        return templates.TemplateResponse(
            request,
            "projects/form.html",
@@ -160,7 +160,7 @@ def create_project_submit(
    )
    try:
        created = _require_project_repo(uow).create(project)
-    except EntityConflictError as exc:
+    except EntityConflictError:
        return templates.TemplateResponse(
            request,
            "projects/form.html",
@@ -303,7 +303,7 @@ def edit_project_submit(
    if operation_type:
        try:
            project.operation_type = MiningOperationType(operation_type)
-        except ValueError as exc:
+        except ValueError:
            return templates.TemplateResponse(
                request,
                "projects/form.html",
@@ -1,6 +1,6 @@
from __future__ import annotations

-from datetime import date
+from datetime import date, datetime
from urllib.parse import urlencode

from fastapi import APIRouter, Depends, HTTPException, Query, Request, status
@@ -12,7 +12,6 @@ from dependencies import (
    get_unit_of_work,
    require_any_role,
    require_project_resource,
    require_roles,
    require_scenario_resource,
)
from models import Project, Scenario, User
@@ -30,6 +29,93 @@ from services.unit_of_work import UnitOfWork
router = APIRouter(prefix="/reports", tags=["Reports"])
templates = Jinja2Templates(directory="templates")

# Add custom Jinja2 filters


def format_datetime(value):
    """Format a datetime object for display in templates."""
    if not isinstance(value, datetime):
        return ""
    if value.tzinfo is None:
        # Assume UTC if no timezone
        from datetime import timezone
        value = value.replace(tzinfo=timezone.utc)
    # Format as readable date/time
    return value.strftime("%Y-%m-%d %H:%M UTC")


def currency_display(value, currency_code):
    """Format a numeric value with currency symbol/code."""
    if value is None:
        return "—"

    # Format the number
    if isinstance(value, (int, float)):
        formatted_value = f"{value:,.2f}"
    else:
        formatted_value = str(value)

    # Add currency code
    if currency_code:
        return f"{currency_code} {formatted_value}"
    return formatted_value


def format_metric(value, metric_name, currency_code=None):
    """Format metric values appropriately based on metric type."""
    if value is None:
        return "—"

    # For currency-related metrics, use currency formatting
    currency_metrics = {'npv', 'inflows', 'outflows',
                        'net', 'total_inflows', 'total_outflows', 'total_net'}
    if metric_name in currency_metrics and currency_code:
        return currency_display(value, currency_code)

    # For percentage metrics
    percentage_metrics = {'irr', 'payback_period'}
    if metric_name in percentage_metrics:
        if isinstance(value, (int, float)):
            return f"{value:.2f}%"
        return f"{value}%"

    # Default numeric formatting
    if isinstance(value, (int, float)):
        return f"{value:,.2f}"

    return str(value)


def percentage_display(value):
    """Format a value as a percentage."""
    if value is None:
        return "—"

    if isinstance(value, (int, float)):
        return f"{value:.2f}%"

    return f"{value}%"


def period_display(value):
    """Format a period value (like payback period)."""
    if value is None:
        return "—"

    if isinstance(value, (int, float)):
        if value == int(value):
            return f"{int(value)} years"
        return f"{value:.1f} years"

    return str(value)


templates.env.filters['format_datetime'] = format_datetime
templates.env.filters['currency_display'] = currency_display
templates.env.filters['format_metric'] = format_metric
templates.env.filters['percentage_display'] = percentage_display
templates.env.filters['period_display'] = period_display

READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
MANAGE_ROLES = ("project_manager", "admin")
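As a sanity check, the filters above behave as follows (a sketch derived from the code in this commit, not committed test code):

assert currency_display(1234.5, "USD") == "USD 1,234.50"
assert format_metric(1234.5, "npv", "USD") == "USD 1,234.50"
assert percentage_display(12.3456) == "12.35%"
assert period_display(2.0) == "2 years"
assert period_display(2.5) == "2.5 years"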
@@ -296,35 +382,9 @@ def project_summary_page(
    )

    service = ReportingService(uow)
-    report = service.project_summary(
-        project,
-        filters=scenario_filter,
-        include=include_options,
-        iterations=iterations or DEFAULT_ITERATIONS,
-        percentiles=percentile_values,
-    )
-    context = {
-        "request": request,
-        "project": report["project"],
-        "scenario_count": report["scenario_count"],
-        "aggregates": report["aggregates"],
-        "scenarios": report["scenarios"],
-        "filters": report["filters"],
-        "include_options": include_options,
-        "iterations": iterations or DEFAULT_ITERATIONS,
-        "percentiles": percentile_values,
-        "title": f"Project Summary · {project.name}",
-        "subtitle": "Aggregated financial and simulation insights across scenarios.",
-        "actions": [
-            {
-                "href": request.url_for(
-                    "reports.project_summary",
-                    project_id=project.id,
-                ),
-                "label": "Download JSON",
-            }
-        ],
-    }
+    context = service.build_project_summary_context(
+        project, scenario_filter, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
+    )
    return templates.TemplateResponse(
        request,
        "reports/project_summary.html",
@@ -399,40 +459,9 @@ def project_scenario_comparison_page(
    )

    service = ReportingService(uow)
-    report = service.scenario_comparison(
-        project,
-        scenarios,
-        include=include_options,
-        iterations=iterations or DEFAULT_ITERATIONS,
-        percentiles=percentile_values,
-    )
-    comparison_json_url = request.url_for(
-        "reports.project_scenario_comparison",
-        project_id=project.id,
-    )
-    comparison_query = urlencode(
-        [("scenario_ids", str(identifier)) for identifier in unique_ids]
-    )
-    if comparison_query:
-        comparison_json_url = f"{comparison_json_url}?{comparison_query}"
-
-    context = {
-        "request": request,
-        "project": report["project"],
-        "scenarios": report["scenarios"],
-        "comparison": report["comparison"],
-        "include_options": include_options,
-        "iterations": iterations or DEFAULT_ITERATIONS,
-        "percentiles": percentile_values,
-        "title": f"Scenario Comparison · {project.name}",
-        "subtitle": "Evaluate deterministic metrics and Monte Carlo trends side by side.",
-        "actions": [
-            {
-                "href": comparison_json_url,
-                "label": "Download JSON",
-            }
-        ],
-    }
+    context = service.build_scenario_comparison_context(
+        project, scenarios, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
+    )
    return templates.TemplateResponse(
        request,
        "reports/scenario_comparison.html",
@@ -478,33 +507,9 @@ def scenario_distribution_page(
    ) from exc

    service = ReportingService(uow)
-    report = service.scenario_distribution(
-        scenario,
-        include=include_options,
-        iterations=iterations or DEFAULT_ITERATIONS,
-        percentiles=percentile_values,
-    )
-    context = {
-        "request": request,
-        "scenario": report["scenario"],
-        "summary": report["summary"],
-        "metrics": report["metrics"],
-        "monte_carlo": report["monte_carlo"],
-        "include_options": include_options,
-        "iterations": iterations or DEFAULT_ITERATIONS,
-        "percentiles": percentile_values,
-        "title": f"Scenario Distribution · {scenario.name}",
-        "subtitle": "Deterministic and simulated distributions for a single scenario.",
-        "actions": [
-            {
-                "href": request.url_for(
-                    "reports.scenario_distribution",
-                    scenario_id=scenario.id,
-                ),
-                "label": "Download JSON",
-            }
-        ],
-    }
+    context = service.build_scenario_distribution_context(
+        scenario, include_options, iterations or DEFAULT_ITERATIONS, percentile_values, request
+    )
    return templates.TemplateResponse(
        request,
        "reports/scenario_distribution.html",
@@ -393,7 +393,7 @@ def create_scenario_submit(

    try:
        scenario_repo.create(scenario)
-    except EntityConflictError as exc:
+    except EntityConflictError:
        return templates.TemplateResponse(
            request,
            "scenarios/form.html",
services/metrics.py (new file, 96 lines)
@@ -0,0 +1,96 @@
from __future__ import annotations

import json
from datetime import datetime
from typing import Any, Dict, Optional

from sqlalchemy.orm import Session

from config.database import get_db
from models.performance_metric import PerformanceMetric


class MetricsService:
    def __init__(self, db: Session):
        self.db = db

    def store_metric(
        self,
        metric_name: str,
        value: float,
        labels: Optional[Dict[str, Any]] = None,
        endpoint: Optional[str] = None,
        method: Optional[str] = None,
        status_code: Optional[int] = None,
        duration_seconds: Optional[float] = None,
    ) -> PerformanceMetric:
        """Store a performance metric in the database."""
        metric = PerformanceMetric(
            timestamp=datetime.utcnow(),
            metric_name=metric_name,
            value=value,
            labels=json.dumps(labels) if labels else None,
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
        self.db.add(metric)
        self.db.commit()
        self.db.refresh(metric)
        return metric

    def get_metrics(
        self,
        metric_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[PerformanceMetric]:
        """Retrieve stored metrics with optional filtering."""
        query = self.db.query(PerformanceMetric)

        if metric_name:
            query = query.filter(PerformanceMetric.metric_name == metric_name)

        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)

        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)

        return query.order_by(PerformanceMetric.timestamp.desc()).limit(limit).all()

    def get_aggregated_metrics(
        self,
        metric_name: str,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> Dict[str, Any]:
        """Get aggregated statistics for a metric."""
        query = self.db.query(PerformanceMetric).filter(
            PerformanceMetric.metric_name == metric_name
        )

        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)

        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)

        metrics = query.all()

        if not metrics:
            return {"count": 0, "avg": 0, "min": 0, "max": 0}

        values = [m.value for m in metrics]
        return {
            "count": len(values),
            "avg": sum(values) / len(values),
            "min": min(values),
            "max": max(values),
        }


def get_metrics_service(db: Session) -> MetricsService:
    return MetricsService(db)
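A hypothetical usage sketch of MetricsService; `SessionLocal` is an assumed session factory name, not defined in this commit:

# Illustrative only; SessionLocal is an assumption.
from datetime import datetime, timedelta

from services.metrics import MetricsService

with SessionLocal() as db:
    service = MetricsService(db)
    # Persist one request sample.
    service.store_metric(
        metric_name="http_request",
        value=1.0,
        endpoint="/projects",
        method="GET",
        status_code=200,
        duration_seconds=0.042,
    )
    # Aggregate the last hour: {"count": ..., "avg": ..., "min": ..., "max": ...}
    stats = service.get_aggregated_metrics(
        "http_request",
        start_time=datetime.utcnow() - timedelta(hours=1),
    )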
@@ -5,7 +5,10 @@ from __future__ import annotations
from dataclasses import dataclass, field
from datetime import date
import math
-from typing import Iterable, Mapping, Sequence
+from typing import Mapping, Sequence
from urllib.parse import urlencode

from fastapi import Request

from models import FinancialCategory, Project, Scenario
from services.financial import (
@@ -177,13 +180,13 @@ class ScenarioReport:
    "project_id": self.scenario.project_id,
    "name": self.scenario.name,
    "description": self.scenario.description,
-   "status": self.scenario.status.value,
+   "status": self.scenario.status.value if hasattr(self.scenario.status, 'value') else self.scenario.status,
    "start_date": self.scenario.start_date,
    "end_date": self.scenario.end_date,
    "currency": self.scenario.currency,
-   "primary_resource": self.scenario.primary_resource.value
-   if self.scenario.primary_resource
-   else None,
+   "primary_resource": self.scenario.primary_resource.value
+   if self.scenario.primary_resource and hasattr(self.scenario.primary_resource, 'value')
+   else self.scenario.primary_resource,
    "discount_rate": _round_optional(self.deterministic.discount_rate, digits=4),
    "created_at": self.scenario.created_at,
    "updated_at": self.scenario.updated_at,
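The hasattr guard matters because scenarios reach to_dict with either a plain string or an enum member for status. A small sketch of the two cases (the enum shape here is assumed for illustration):

from enum import Enum


class ScenarioStatus(str, Enum):  # shape assumed for illustration
    DRAFT = "draft"


def status_payload(status):
    # Mirrors the guard above: unwrap enum members, pass strings through.
    return status.value if hasattr(status, "value") else status


assert status_payload(ScenarioStatus.DRAFT) == "draft"  # enum member
assert status_payload("draft") == "draft"               # raw string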
@@ -374,13 +377,12 @@ class ReportingService:
    }

    def _load_scenarios(self, project_id: int, filters: ReportFilters) -> list[Scenario]:
-        repo = self._require_scenario_repo()
-        scenarios = repo.list_for_project(project_id, with_children=True)
+        scenarios = self._uow.scenarios.list_for_project(
+            project_id, with_children=True)
        return [scenario for scenario in scenarios if filters.matches(scenario)]

    def _reload_scenario(self, scenario_id: int) -> Scenario:
-        repo = self._require_scenario_repo()
-        return repo.get(scenario_id, with_children=True)
+        return self._uow.scenarios.get(scenario_id, with_children=True)

    def _build_scenario_report(
        self,
@@ -469,10 +471,147 @@ class ReportingService:
        )
        return comparisons

-    def _require_scenario_repo(self):
-        if not self._uow.scenarios:
-            raise RuntimeError("Scenario repository not initialised")
-        return self._uow.scenarios
    def build_project_summary_context(
        self,
        project: Project,
        filters: ReportFilters,
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for project summary page."""
        scenarios = self._load_scenarios(project.id, filters)
        reports = [
            self._build_scenario_report(
                scenario,
                include_distribution=include.distribution,
                include_samples=include.samples,
                iterations=iterations,
                percentiles=percentiles,
            )
            for scenario in scenarios
        ]
        aggregates = self._aggregate_project(reports)

        return {
            "request": request,
            "project": _project_payload(project),
            "scenario_count": len(reports),
            "aggregates": aggregates.to_dict(),
            "scenarios": [report.to_dict() for report in reports],
            "filters": filters.to_dict(),
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Project Summary · {project.name}",
            "subtitle": "Aggregated financial and simulation insights across scenarios.",
            "actions": [
                {
                    "href": request.url_for(
                        "reports.project_summary",
                        project_id=project.id,
                    ),
                    "label": "Download JSON",
                }
            ],
        }

    def build_scenario_comparison_context(
        self,
        project: Project,
        scenarios: Sequence[Scenario],
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for scenario comparison page."""
        reports = [
            self._build_scenario_report(
                self._reload_scenario(scenario.id),
                include_distribution=include.distribution,
                include_samples=include.samples,
                iterations=iterations,
                percentiles=percentiles,
            )
            for scenario in scenarios
        ]
        comparison = {
            metric: data.to_dict()
            for metric, data in self._build_comparisons(reports).items()
        }

        comparison_json_url = request.url_for(
            "reports.project_scenario_comparison",
            project_id=project.id,
        )
        scenario_ids = [str(s.id) for s in scenarios]
        comparison_query = urlencode(
            [("scenario_ids", str(identifier)) for identifier in scenario_ids]
        )
        if comparison_query:
            comparison_json_url = f"{comparison_json_url}?{comparison_query}"

        return {
            "request": request,
            "project": _project_payload(project),
            "scenarios": [report.to_dict() for report in reports],
            "comparison": comparison,
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Scenario Comparison · {project.name}",
            "subtitle": "Evaluate deterministic metrics and Monte Carlo trends side by side.",
            "actions": [
                {
                    "href": comparison_json_url,
                    "label": "Download JSON",
                }
            ],
        }

    def build_scenario_distribution_context(
        self,
        scenario: Scenario,
        include: IncludeOptions,
        iterations: int,
        percentiles: tuple[float, ...],
        request: Request,
    ) -> dict[str, object]:
        """Build template context for scenario distribution page."""
        report = self._build_scenario_report(
            self._reload_scenario(scenario.id),
            include_distribution=True,
            include_samples=include.samples,
            iterations=iterations,
            percentiles=percentiles,
        )

        return {
            "request": request,
            "scenario": report.to_dict()["scenario"],
            "summary": report.totals.to_dict(),
            "metrics": report.deterministic.to_dict(),
            "monte_carlo": (
                report.monte_carlo.to_dict() if report.monte_carlo else {
                    "available": False}
            ),
            "include_options": include,
            "iterations": iterations,
            "percentiles": percentiles,
            "title": f"Scenario Distribution · {scenario.name}",
            "subtitle": "Deterministic and simulated distributions for a single scenario.",
            "actions": [
                {
                    "href": request.url_for(
                        "reports.scenario_distribution",
                        scenario_id=scenario.id,
                    ),
                    "label": "Download JSON",
                }
            ],
        }


def _build_cash_flows(scenario: Scenario) -> tuple[list[CashFlow], ScenarioFinancialTotals]:
@@ -15,7 +15,6 @@ from models import (
    PricingImpuritySettings,
    PricingMetalSettings,
    PricingSettings,
    ResourceType,
    Role,
    Scenario,
    ScenarioStatus,
@@ -88,8 +87,12 @@ class ProjectRepository:
        try:
            self.session.flush()
        except IntegrityError as exc:  # pragma: no cover - reliance on DB constraints
+            from monitoring.metrics import observe_project_operation
+            observe_project_operation("create", "error")
            raise EntityConflictError(
                "Project violates uniqueness constraints") from exc
+        from monitoring.metrics import observe_project_operation
+        observe_project_operation("create", "success")
        return project

    def find_by_names(self, names: Iterable[str]) -> Mapping[str, Project]:
@@ -251,7 +254,11 @@ class ScenarioRepository:
        try:
            self.session.flush()
        except IntegrityError as exc:  # pragma: no cover
+            from monitoring.metrics import observe_scenario_operation
+            observe_scenario_operation("create", "error")
            raise EntityConflictError("Scenario violates constraints") from exc
+        from monitoring.metrics import observe_scenario_operation
+        observe_scenario_operation("create", "success")
        return scenario

    def find_by_project_and_names(
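One way to assert these repository counters in tests is prometheus_client's registry helper; a sketch, not part of this commit:

from prometheus_client import REGISTRY

from monitoring.metrics import observe_project_operation

labels = {"operation": "create", "status": "success"}
before = REGISTRY.get_sample_value(
    "calminer_project_operations_total", labels
) or 0.0
observe_project_operation("create", "success")
after = REGISTRY.get_sample_value(
    "calminer_project_operations_total", labels
)
assert after == before + 1.0  # counter incremented exactly once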
@@ -3,7 +3,7 @@ from __future__ import annotations
"""Scenario evaluation services including pricing integration."""

from dataclasses import dataclass
-from typing import Iterable, Mapping
+from typing import Iterable

from models.scenario import Scenario
from services.pricing import (
@@ -2,7 +2,8 @@ from __future__ import annotations

from dataclasses import dataclass
from enum import Enum
-from typing import Any, Dict, Iterable, Mapping, Sequence
+from typing import Any, Dict, Mapping, Sequence
+import time

import numpy as np
from numpy.random import Generator, default_rng
@@ -15,6 +16,7 @@ from .financial import (
    net_present_value,
    payback_period,
)
+from monitoring.metrics import observe_simulation


class DistributionConfigError(ValueError):
@@ -120,60 +122,79 @@ def run_monte_carlo(
    if pct < 0.0 or pct > 100.0:
        raise ValueError("percentiles must be within [0, 100]")

+    start_time = time.time()
+    try:
        generator = rng or default_rng(config.seed)

        metric_arrays: Dict[SimulationMetric, np.ndarray] = {
            metric: np.empty(config.iterations, dtype=float)
            for metric in config.metrics
        }

        for idx in range(config.iterations):
            iteration_flows = [
                _realise_cash_flow(
                    spec,
                    generator,
                    scenario_context=scenario_context,
                    metadata=metadata,
                )
                for spec in cash_flows
            ]

            if SimulationMetric.NPV in metric_arrays:
                metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
                    config.discount_rate,
                    iteration_flows,
                    residual_value=config.residual_value,
                    residual_periods=config.residual_periods,
                    compounds_per_year=config.compounds_per_year,
                )
            if SimulationMetric.IRR in metric_arrays:
                try:
                    metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
                        iteration_flows,
                        compounds_per_year=config.compounds_per_year,
                    )
                except (ValueError, ConvergenceError):
                    metric_arrays[SimulationMetric.IRR][idx] = np.nan
            if SimulationMetric.PAYBACK in metric_arrays:
                try:
                    metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
                        iteration_flows,
                        compounds_per_year=config.compounds_per_year,
                    )
                except (ValueError, PaybackNotReachedError):
                    metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan

        summaries = {
            metric: _summarise(metric_arrays[metric], config.percentiles)
            for metric in metric_arrays
        }

        samples = metric_arrays if config.return_samples else None
        result = SimulationResult(
            iterations=config.iterations,
            summaries=summaries,
            samples=samples,
        )

+        # Record successful simulation
+        duration = time.time() - start_time
+        observe_simulation(
+            status="success",
+            duration_seconds=duration,
+        )
        return result

+    except Exception:
+        # Record failed simulation
+        duration = time.time() - start_time
+        observe_simulation(
+            status="error",
+            duration_seconds=duration,
+        )
+        raise


def _realise_cash_flow(
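The start_time/try/except pattern wrapped around run_monte_carlo could be factored into a decorator if more entry points need the same instrumentation; a hedged sketch, not part of this commit:

import time
from functools import wraps

from monitoring.metrics import observe_simulation


def timed_simulation(func):
    """Wrap a callable with the success/error timing used by run_monte_carlo."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        started = time.time()
        try:
            result = func(*args, **kwargs)
            observe_simulation(status="success",
                               duration_seconds=time.time() - started)
            return result
        except Exception:
            observe_simulation(status="error",
                               duration_seconds=time.time() - started)
            raise
    return wrapper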
@@ -260,6 +260,33 @@ body {
    line-height: 1.45;
}

h1,
h2,
h3,
h4,
h5,
h6 {
    margin: 0 0 0.5rem 0;
    font-weight: 700;
    line-height: 1.2;
}

h1 {
    font-size: var(--font-size-2xl);
}

h2 {
    font-size: var(--font-size-xl);
}

h3 {
    font-size: var(--font-size-lg);
}

p {
    margin: 0 0 1rem 0;
}

a {
    color: var(--brand);
}
@@ -296,18 +323,46 @@ a {
    gap: 1rem;
}

-.brand-logo {
-    display: inline-flex;
+.sidebar-nav-controls {
+    display: flex;
+    justify-content: center;
+    gap: 0.5rem;
+    margin: 1rem 0;
+}
+
+.nav-chevron {
+    width: 40px;
+    height: 40px;
+    border: none;
+    border-radius: 50%;
+    background: rgba(255, 255, 255, 0.1);
+    color: rgba(255, 255, 255, 0.88);
+    font-size: 1.2rem;
+    font-weight: bold;
+    cursor: pointer;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    transition: background 0.2s ease, transform 0.2s ease;
+}
+
+.nav-chevron:hover,
+.nav-chevron:focus {
+    background: rgba(255, 255, 255, 0.2);
+    transform: scale(1.05);
+}
+
+.nav-chevron:disabled {
+    opacity: 0.5;
+    cursor: not-allowed;
+    transform: none;
+}

.brand-logo {
    width: 44px;
    height: 44px;
    border-radius: 12px;
    background: linear-gradient(0deg, var(--brand-3), var(--accent));
    color: var(--color-text-invert);
    font-weight: 700;
    font-size: 1.1rem;
    letter-spacing: 1px;
    object-fit: cover;
}

.brand-text {
@@ -927,6 +982,24 @@ tbody tr:nth-child(even) {
    color: var(--danger);
}

+.alert {
+    padding: 0.75rem 1rem;
+    border-radius: var(--radius-sm);
+    margin-bottom: 1rem;
+}
+
+.alert-error {
+    background: rgba(209, 75, 75, 0.2);
+    border: 1px solid rgba(209, 75, 75, 0.4);
+    color: var(--color-text-invert);
+}
+
+.alert-info {
+    background: rgba(43, 168, 143, 0.2);
+    border: 1px solid rgba(43, 168, 143, 0.4);
+    color: var(--color-text-invert);
+}

.site-footer {
    background-color: var(--brand);
    color: var(--color-text-invert);
@@ -939,6 +1012,19 @@ tbody tr:nth-child(even) {
    justify-content: center;
    padding: 1rem 0;
    font-size: 0.9rem;
    gap: 1rem;
}

+.footer-logo {
+    display: flex;
+    align-items: center;
+}
+
+.footer-logo-img {
+    width: 32px;
+    height: 32px;
+    border-radius: 8px;
+    object-fit: cover;
+}

.sidebar-toggle {
@@ -153,18 +153,6 @@
    }
}

-.alert {
-    padding: 0.75rem 1rem;
-    border-radius: var(--radius-sm);
-    margin-bottom: 1rem;
-}
-
-.alert-error {
-    background: rgba(209, 75, 75, 0.2);
-    border: 1px solid rgba(209, 75, 75, 0.4);
-    color: var(--color-text-invert);
-}
-
.form {
    display: flex;
    flex-direction: column;
static/js/navigation.js (new file, 53 lines)
@@ -0,0 +1,53 @@
// Navigation chevron buttons logic
document.addEventListener("DOMContentLoaded", function () {
  const navPrev = document.getElementById("nav-prev");
  const navNext = document.getElementById("nav-next");

  if (!navPrev || !navNext) return;

  // Define the navigation order (main pages)
  const navPages = [
    "/",
    "/projects/ui",
    "/imports/ui",
    "/ui/simulations",
    "/ui/reporting",
    "/ui/settings",
  ];

  const currentPath = window.location.pathname;

  // Find current index (exact match for the root path; a plain prefix match
  // would match "/" against every path and always stop at index 0)
  let currentIndex = -1;
  for (let i = 0; i < navPages.length; i++) {
    const isRoot = navPages[i] === "/";
    if (isRoot ? currentPath === "/" : currentPath.startsWith(navPages[i])) {
      currentIndex = i;
      break;
    }
  }

  // If not found, disable both
  if (currentIndex === -1) {
    navPrev.disabled = true;
    navNext.disabled = true;
    return;
  }

  // Set up prev button
  if (currentIndex > 0) {
    navPrev.addEventListener("click", function () {
      window.location.href = navPages[currentIndex - 1];
    });
  } else {
    navPrev.disabled = true;
  }

  // Set up next button
  if (currentIndex < navPages.length - 1) {
    navNext.addEventListener("click", function () {
      window.location.href = navPages[currentIndex + 1];
    });
  } else {
    navNext.disabled = true;
  }
});
@@ -25,6 +25,7 @@
    <script src="/static/js/exports.js" defer></script>
    <script src="/static/js/imports.js" defer></script>
    <script src="/static/js/notifications.js" defer></script>
+   <script src="/static/js/navigation.js" defer></script>
    <script src="/static/js/theme.js"></script>
  </body>
</html>
@@ -18,7 +18,7 @@ block content %}
      <label for="email">Email:</label>
      <input type="email" id="email" name="email" required />
    </div>
-    <button type="submit">Reset Password</button>
+    <button type="submit" class="btn primary">Reset Password</button>
  </form>
  <p>Remember your password? <a href="/login">Login here</a></p>
</div>

@@ -26,7 +26,7 @@
      <label for="password">Password:</label>
      <input type="password" id="password" name="password" required />
    </div>
-    <button type="submit">Login</button>
+    <button type="submit" class="btn primary">Login</button>
  </form>
  <p>Don't have an account? <a href="/register">Register here</a></p>
  <p><a href="/forgot-password">Forgot password?</a></p>
@@ -1,5 +1,8 @@
<footer class="site-footer">
  <div class="container footer-inner">
+   <div class="footer-logo">
+     <img src="/static/img/logo_big.png" alt="CalMiner Logo" class="footer-logo-img" />
+   </div>
    <p>
      © {{ current_year }} CalMiner by
      <a href="https://allucanget.biz/">AllYouCanGET</a>. All rights reserved.

@@ -1,10 +1,14 @@
<div class="sidebar-inner">
  <a class="sidebar-brand" href="{{ request.url_for('dashboard.home') }}">
-   <span class="brand-logo" aria-hidden="true">CM</span>
+   <img src="/static/img/logo_big.png" alt="CalMiner Logo" class="brand-logo" />
    <div class="brand-text">
      <span class="brand-title">CalMiner</span>
      <span class="brand-subtitle">Mining Planner</span>
    </div>
  </a>
+  <div class="sidebar-nav-controls">
+    <button id="nav-prev" class="nav-chevron nav-chevron-prev" aria-label="Previous page">←</button>
+    <button id="nav-next" class="nav-chevron nav-chevron-next" aria-label="Next page">→</button>
+  </div>
  {% include "partials/sidebar_nav.html" %}
</div>
@@ -1,12 +1,12 @@
<div class="scenario-actions">
  <a
-    href="{{ request.url_for('reports.scenario_distribution_page', scenario_id=scenario.id) }}"
+    href="{{ request.url_for('reports.scenario_distribution_page', scenario_id=item.scenario.id) }}"
    class="button button-secondary"
  >
    View Distribution
  </a>
  <a
-    href="{{ request.url_for('reports.scenario_distribution', scenario_id=scenario.id) }}"
+    href="{{ request.url_for('reports.scenario_distribution', scenario_id=item.scenario.id) }}"
    class="button button-secondary"
  >
    Download JSON
@@ -36,7 +36,7 @@ content %}
      <label for="password">Password:</label>
      <input type="password" id="password" name="password" required />
    </div>
-    <button type="submit">Register</button>
+    <button type="submit" class="btn primary">Register</button>
  </form>
  <p>Already have an account? <a href="/login">Login here</a></p>
</div>
@@ -2,10 +2,10 @@
{% block title %}Project Summary | CalMiner{% endblock %}

{% block content %}
-{% include "partials/reports_header.html" with context %}
+{% include "partials/reports_header.html" %}

-{% include "partials/reports/options_card.html" with options=include_options iterations=iterations percentiles=percentiles %}
-{% include "partials/reports/filters_card.html" with filters=filters %}
+{% include "partials/reports/options_card.html" %}
+{% include "partials/reports/filters_card.html" %}

<section class="report-overview">
  <div class="report-grid">
@@ -105,7 +105,7 @@
    <span class="meta-label">Currency</span>
    <span class="meta-value">{{ item.scenario.currency or project.currency or "—" }}</span>
  </div>
-  {% include "partials/reports/scenario_actions.html" with scenario=item.scenario %}
+  {% include "partials/reports/scenario_actions.html" %}
</div>

<div class="scenario-grid">
@@ -183,7 +183,7 @@
  {% endfor %}
  {% endif %}
</p>
-{% include "partials/reports/monte_carlo_table.html" with metrics=item.monte_carlo.metrics currency=item.scenario.currency or project.currency percentiles=percentiles %}
+{% include "partials/reports/monte_carlo_table.html" %}
{% else %}
<p class="muted">Monte Carlo metrics are unavailable for this scenario.</p>
{% if item.monte_carlo and item.monte_carlo.notes %}
@@ -2,9 +2,9 @@
{% block title %}Scenario Comparison | CalMiner{% endblock %}

{% block content %}
-{% include "partials/reports_header.html" with context %}
+{% include "partials/reports_header.html" %}

-{% include "partials/reports/options_card.html" with options=include_options iterations=iterations percentiles=percentiles %}
+{% include "partials/reports/options_card.html" %}
<section class="report-filters">
  <div class="report-card">
    <h2>Compared Scenarios</h2>
@@ -104,7 +104,7 @@
    <span class="meta-label">Primary Resource</span>
    <span class="meta-value">{{ item.scenario.primary_resource or "—" }}</span>
  </div>
-  {% include "partials/reports/scenario_actions.html" with scenario=item.scenario %}
+  {% include "partials/reports/scenario_actions.html" %}
</div>

<div class="scenario-grid">
@@ -147,7 +147,7 @@
  {% endfor %}
  {% endif %}
</p>
-{% include "partials/reports/monte_carlo_table.html" with metrics=item.monte_carlo.metrics currency=item.scenario.currency or project.currency percentiles=percentiles %}
+{% include "partials/reports/monte_carlo_table.html" %}
{% else %}
<p class="muted">No Monte Carlo data available for this scenario.</p>
{% if item.monte_carlo and item.monte_carlo.notes %}
@@ -2,7 +2,7 @@
{% block title %}Scenario Distribution | CalMiner{% endblock %}

{% block content %}
-{% include "partials/reports_header.html" with context %}
+{% include "partials/reports_header.html" %}

<section class="report-overview">
  <div class="report-grid">
@@ -26,7 +26,7 @@ content %}
        required
      />
    </div>
-    <button type="submit">Update Password</button>
+    <button type="submit" class="btn primary">Update Password</button>
  </form>
  <p>
    Remembered your password?
@@ -3,8 +3,10 @@ from __future__ import annotations
from collections.abc import Callable, Iterator

import pytest
+import pytest_asyncio
from fastapi import FastAPI, Request
from fastapi.testclient import TestClient
+from httpx import ASGITransport, AsyncClient
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
@@ -19,6 +21,7 @@ from routes.projects import router as projects_router
from routes.scenarios import router as scenarios_router
from routes.imports import router as imports_router
from routes.exports import router as exports_router
+from routes.reports import router as reports_router
from services.importers import ImportIngestionService
from services.unit_of_work import UnitOfWork
from services.session import AuthSession, SessionTokens
@@ -56,6 +59,7 @@ def app(session_factory: sessionmaker) -> FastAPI:
    application.include_router(scenarios_router)
    application.include_router(imports_router)
    application.include_router(exports_router)
+    application.include_router(reports_router)

    def _override_uow() -> Iterator[UnitOfWork]:
        with UnitOfWork(session_factory=session_factory) as uow:
@@ -108,6 +112,13 @@ def client(app: FastAPI) -> Iterator[TestClient]:
        test_client.close()


+@pytest_asyncio.fixture()
+async def async_client(app: FastAPI) -> AsyncClient:
+    return AsyncClient(
+        transport=ASGITransport(app=app), base_url="http://testserver"
+    )


@pytest.fixture()
def unit_of_work_factory(session_factory: sessionmaker) -> Callable[[], UnitOfWork]:
    def _factory() -> UnitOfWork:
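A hypothetical async test using the new fixture; the endpoint path here is assumed, not taken from this commit:

import pytest


@pytest.mark.asyncio
async def test_detailed_health(async_client):  # fixture from the conftest above
    response = await async_client.get("/metrics/health")  # path assumed
    assert response.status_code in (200, 401)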
@@ -284,3 +284,110 @@ class TestLogoutFlow:
        set_cookie_header = response.headers.get("set-cookie") or ""
        assert "calminer_access_token=" in set_cookie_header
        assert "Max-Age=0" in set_cookie_header or "expires=" in set_cookie_header.lower()


class TestLoginFlowEndToEnd:
    def test_get_login_form_renders(self, client: TestClient) -> None:
        response = client.get("/login")
        assert response.status_code == 200
        assert "login-form" in response.text
        assert "username" in response.text

    def test_unauthenticated_root_redirects_to_login(self, client: TestClient) -> None:
        # Temporarily override to anonymous session
        app = cast(FastAPI, client.app)
        original_override = app.dependency_overrides.get(get_auth_session)
        app.dependency_overrides[get_auth_session] = lambda: AuthSession.anonymous()
        try:
            response = client.get("/", follow_redirects=False)
            assert response.status_code == 303
            assert response.headers.get("location") == "http://testserver/login"
        finally:
            if original_override is not None:
                app.dependency_overrides[get_auth_session] = original_override
            else:
                app.dependency_overrides.pop(get_auth_session, None)

    def test_login_success_redirects_to_dashboard_and_sets_session(
        self, client: TestClient, db_session: Session
    ) -> None:
        password = "TestP@ss123"
        user = User(
            email="e2e@example.com",
            username="e2euser",
            password_hash=hash_password(password),
            is_active=True,
        )
        db_session.add(user)
        db_session.commit()

        # Override to anonymous for login
        app = cast(FastAPI, client.app)
        app.dependency_overrides[get_auth_session] = lambda: AuthSession.anonymous()
        try:
            login_response = client.post(
                "/login",
                data={"username": "e2euser", "password": password},
                follow_redirects=False,
            )
            assert login_response.status_code == 303
            assert login_response.headers.get("location") == "http://testserver/"
            set_cookie_header = login_response.headers.get("set-cookie", "")
            assert "calminer_access_token=" in set_cookie_header

            # Now with cookies, GET / should show dashboard
            dashboard_response = client.get("/")
            assert dashboard_response.status_code == 200
            assert "Dashboard" in dashboard_response.text or "metrics" in dashboard_response.text
        finally:
            app.dependency_overrides.pop(get_auth_session, None)

    def test_logout_redirects_to_login_and_clears_session(self, client: TestClient) -> None:
        # Assuming authenticated from conftest
        logout_response = client.get("/logout", follow_redirects=False)
        assert logout_response.status_code == 303
        location = logout_response.headers.get("location")
        assert location and "login" in location
        set_cookie_header = logout_response.headers.get("set-cookie", "")
        assert "calminer_access_token=" in set_cookie_header
        assert "Max-Age=0" in set_cookie_header or "expires=" in set_cookie_header.lower()

        # After logout, GET / should redirect to login
        app = cast(FastAPI, client.app)
        app.dependency_overrides[get_auth_session] = lambda: AuthSession.anonymous()
        try:
            root_response = client.get("/", follow_redirects=False)
            assert root_response.status_code == 303
            assert root_response.headers.get("location") == "http://testserver/login"
        finally:
            app.dependency_overrides.pop(get_auth_session, None)

    def test_login_inactive_user_shows_error(self, client: TestClient, db_session: Session) -> None:
        user = User(
            email="inactive@example.com",
            username="inactiveuser",
            password_hash=hash_password("TestP@ss123"),
            is_active=False,
        )
        db_session.add(user)
        db_session.commit()

        app = cast(FastAPI, client.app)
        app.dependency_overrides[get_auth_session] = lambda: AuthSession.anonymous()
        try:
            response = client.post(
                "/login",
                data={"username": "inactiveuser", "password": "TestP@ss123"},
                follow_redirects=False,
            )
            assert response.status_code == 400
            assert "Account is inactive" in response.text
        finally:
            app.dependency_overrides.pop(get_auth_session, None)
@@ -8,7 +8,6 @@ from fastapi.testclient import TestClient
from sqlalchemy.orm import Session

from models import Project, Scenario, ScenarioStatus
from services.unit_of_work import UnitOfWork


def _seed_projects(session: Session) -> None:
@@ -3,7 +3,6 @@ from __future__ import annotations
from fastapi.testclient import TestClient

from models.project import MiningOperationType, Project
from models.scenario import Scenario, ScenarioStatus


def test_project_import_preview_and_commit_flow(
@@ -1,16 +1,12 @@
from __future__ import annotations

from io import BytesIO

import pandas as pd
import pytest
from fastapi.testclient import TestClient

from models import (
    MiningOperationType,
    Project,
    Scenario,
    ScenarioStatus,
)
from models.import_export_log import ImportExportLog
@@ -8,7 +8,6 @@ import pytest

from services.importers import ImportResult, load_project_imports, load_scenario_imports
from schemas.imports import ProjectImportRow, ScenarioImportRow
from models.project import MiningOperationType


def test_load_project_imports_from_csv() -> None:
tests/test_reporting.py (new file, 262 lines)
@@ -0,0 +1,262 @@
from __future__ import annotations

import pytest
from fastapi.testclient import TestClient
from unittest.mock import Mock

from models import Project, Scenario, FinancialInput
from models.metadata import CostBucket, ResourceType
from services.reporting import (
    ReportingService,
    ReportFilters,
    IncludeOptions,
    ScenarioReport,
    ScenarioFinancialTotals,
    ScenarioDeterministicMetrics,
)
from routes.reports import router as reports_router


class TestReportingService:
    def test_build_project_summary_context(self, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(project_id=project.id,
                                name="Test Scenario", status="draft")
            uow.scenarios.create(scenario)
            uow.commit()

            service = ReportingService(uow)
            request = Mock()
            request.url_for = Mock(return_value="/api/reports/projects/1")

            filters = ReportFilters()
            include = IncludeOptions()

            context = service.build_project_summary_context(
                project, filters, include, 500, (5.0, 50.0, 95.0), request
            )

            assert "project" in context
            assert context["scenario_count"] == 1
            assert "aggregates" in context
            assert "scenarios" in context
            assert context["title"] == f"Project Summary · {project.name}"

    def test_build_scenario_comparison_context(self, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario1 = Scenario(project_id=project.id,
                                 name="Scenario 1", status="draft")
            scenario2 = Scenario(project_id=project.id,
                                 name="Scenario 2", status="active")
            uow.scenarios.create(scenario1)
            uow.scenarios.create(scenario2)
            uow.commit()

            service = ReportingService(uow)
            request = Mock()
            request.url_for = Mock(
                return_value="/api/reports/projects/1/comparison")

            include = IncludeOptions()
            scenarios = [scenario1, scenario2]

            context = service.build_scenario_comparison_context(
                project, scenarios, include, 500, (5.0, 50.0, 95.0), request
            )

            assert "project" in context
            assert "scenarios" in context
            assert "comparison" in context
            assert context["title"] == f"Scenario Comparison · {project.name}"

    def test_build_scenario_distribution_context(self, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(project_id=project.id,
                                name="Test Scenario", status="draft")
            uow.scenarios.create(scenario)
            uow.commit()

            service = ReportingService(uow)
            request = Mock()
            request.url_for = Mock(
                return_value="/api/reports/scenarios/1/distribution")

            include = IncludeOptions()

            context = service.build_scenario_distribution_context(
                scenario, include, 500, (5.0, 50.0, 95.0), request
            )

            assert "scenario" in context
            assert "summary" in context
            assert "metrics" in context
            assert "monte_carlo" in context
            assert context["title"] == f"Scenario Distribution · {scenario.name}"

    def test_scenario_report_to_dict_with_enum_status(self, unit_of_work_factory):
        """Test that to_dict handles enum status values correctly."""
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(
                project_id=project.id,
                name="Test Scenario",
                status="draft",  # Stored as string
                primary_resource="diesel"  # Stored as string
            )
            uow.scenarios.create(scenario)
            uow.commit()

            # Create a mock scenario report
            totals = ScenarioFinancialTotals(
                currency="USD",
                inflows=1000.0,
                outflows=500.0,
                net=500.0,
                by_category={}
            )
            deterministic = ScenarioDeterministicMetrics(
                currency="USD",
                discount_rate=0.1,
                compounds_per_year=1,
                npv=100.0,
                irr=0.15,
                payback_period=2.5,
                notes=[]
            )

            report = ScenarioReport(
                scenario=scenario,
                totals=totals,
                deterministic=deterministic,
                monte_carlo=None
            )

            result = report.to_dict()

            assert result["scenario"]["status"] == "draft"  # type: ignore
            assert result["scenario"]["primary_resource"] == "diesel"  # type: ignore
            assert result["financials"]["net"] == 500.0  # type: ignore
            assert result["metrics"]["npv"] == 100.0  # type: ignore

    def test_project_summary_with_scenario_ids_filter(self, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario1 = Scenario(project_id=project.id,
                                 name="Scenario 1", status="active")
            scenario2 = Scenario(project_id=project.id,
                                 name="Scenario 2", status="draft")
            uow.scenarios.create(scenario1)
            uow.scenarios.create(scenario2)
            uow.commit()

            service = ReportingService(uow)

            # Test filtering by scenario IDs
            filters = ReportFilters(scenario_ids={scenario1.id})
            result = service.project_summary(
                project, filters=filters, include=IncludeOptions(),
                iterations=100, percentiles=(5.0, 50.0, 95.0)
            )

            assert result["scenario_count"] == 1  # type: ignore
            assert result["scenarios"][0]["scenario"]["name"] == "Scenario 1"  # type: ignore


class TestReportingRoutes:
    def test_project_summary_route(self, client: TestClient, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(project_id=project.id,
                                name="Test Scenario", status="draft")
            uow.scenarios.create(scenario)
            uow.commit()

        response = client.get(f"/reports/projects/{project.id}")
        assert response.status_code == 200
        data = response.json()
        assert "project" in data
        assert data["scenario_count"] == 1

    def test_project_summary_html_route(self, client: TestClient, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(project_id=project.id,
                                name="Test Scenario", status="draft")
            uow.scenarios.create(scenario)
            uow.commit()

        response = client.get(f"/reports/projects/{project.id}/ui")
        assert response.status_code == 200
        assert "text/html" in response.headers["content-type"]
        assert "Test Project" in response.text

    def test_scenario_comparison_route(self, client: TestClient, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario1 = Scenario(project_id=project.id,
                                 name="Scenario 1", status="draft")
            scenario2 = Scenario(project_id=project.id,
                                 name="Scenario 2", status="active")
            uow.scenarios.create(scenario1)
            uow.scenarios.create(scenario2)
            uow.commit()

        response = client.get(
            f"/reports/projects/{project.id}/scenarios/compare?scenario_ids={scenario1.id}&scenario_ids={scenario2.id}"
        )
        assert response.status_code == 200
        data = response.json()
        assert "project" in data
        assert "scenarios" in data
        assert "comparison" in data

    def test_scenario_distribution_route(self, client: TestClient, unit_of_work_factory):
        with unit_of_work_factory() as uow:
            project = Project(name="Test Project", location="Test Location")
            uow.projects.create(project)
            scenario = Scenario(project_id=project.id,
                                name="Test Scenario", status="draft")
            uow.scenarios.create(scenario)
            uow.commit()

        response = client.get(
            f"/reports/scenarios/{scenario.id}/distribution")
        assert response.status_code == 200
        data = response.json()
        assert "scenario" in data
        assert "summary" in data
        assert "monte_carlo" in data

    def test_unauthorized_access(self, client: TestClient):
        # Create a new client without authentication
        from fastapi import FastAPI
        from routes.reports import router as reports_router
        app = FastAPI()
        app.include_router(reports_router)
        from fastapi.testclient import TestClient
        unauth_client = TestClient(app)

        response = unauth_client.get("/reports/projects/1")
        assert response.status_code == 401

    def test_project_not_found(self, client: TestClient):
        response = client.get("/reports/projects/999")
        assert response.status_code == 404

    def test_scenario_not_found(self, client: TestClient):
        response = client.get("/reports/scenarios/999/distribution")
        assert response.status_code == 404
@@ -12,7 +12,6 @@ from services.security import (
    create_access_token,
    create_refresh_token,
    decode_access_token,
    decode_refresh_token,
    hash_password,
    verify_password,
)