11 Commits

SHA1 Message Date
44ff4d0e62 feat: Update Python version to 3.12 and use environment variable for Docker image name 2025-11-11 18:41:24 +01:00
4364927965 Refactor Docker setup and migration scripts
- Updated Dockerfile to set permissions for the entrypoint script and defined the entrypoint for the container.
- Consolidated Alembic migration history into a single initial migration file and removed obsolete revision files.
- Added a new script to run Alembic migrations before starting the application (a sketch follows this commit entry).
- Updated changelog to reflect changes in migration handling and Docker setup.
- Enhanced pytest configuration for coverage reporting and excluded specific files from coverage calculations.
2025-11-11 18:30:15 +01:00
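The migration runner itself is not shown in this diff; the following is a minimal sketch of what `scripts/run_migrations.py` plausibly does, assuming Alembic's standard command API and an `alembic.ini` at the project root.

```python
# scripts/run_migrations.py -- minimal sketch, not the committed file.
# Assumes an alembic.ini at the project root pointing at the consolidated
# migration history; applies all pending revisions before uvicorn starts.
from alembic import command
from alembic.config import Config


def main() -> None:
    cfg = Config("alembic.ini")   # load Alembic settings
    command.upgrade(cfg, "head")  # apply every pending revision


if __name__ == "__main__":
    main()
```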
795a9f99f4 feat: Enhance currency handling and validation across scenarios
- Updated form template to prefill currency input with default value and added help text for clarity.
- Modified integration tests to assert more descriptive error messages for invalid currency codes.
- Introduced new tests for currency normalization and validation in various scenarios, including imports and exports (an illustrative example follows this commit entry).
- Added comprehensive tests for pricing calculations, ensuring defaults are respected and overrides function correctly.
- Implemented unit tests for pricing settings repository, ensuring CRUD operations and default settings are handled properly.
- Enhanced scenario pricing evaluation tests to validate currency handling and metadata defaults.
- Added simulation tests to ensure Monte Carlo runs are accurate and handle various distribution scenarios.
2025-11-11 18:29:59 +01:00
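The new test modules are not part of this excerpt; below is an illustrative test in their spirit, assuming `services.currency.normalise_currency` (referenced in the model diffs further down) uppercases valid ISO-4217 codes and raises `ValueError` on malformed input.

```python
# Illustrative only: a test in the spirit of the new currency suite.
import pytest

from services.currency import normalise_currency


def test_normalise_currency_uppercases_valid_codes():
    assert normalise_currency("usd") == "USD"


def test_normalise_currency_rejects_malformed_codes():
    with pytest.raises(ValueError):
        normalise_currency("DOLLARS")
```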
032e6d2681 feat: implement persistent audit logging for import/export operations with Prometheus metrics 2025-11-10 21:37:07 +01:00
51c0fcec95 feat: add import dashboard UI and functionality for CSV and Excel uploads 2025-11-10 19:06:27 +01:00
3051f91ab0 feat: add export button for projects in the projects list view 2025-11-10 18:50:46 +01:00
e2465188c2 feat: enhance export and import workflows with improved error handling and notifications 2025-11-10 18:44:42 +01:00
43b1e53837 feat: implement export functionality for projects and scenarios with CSV and Excel support 2025-11-10 18:32:24 +01:00
4b33a5dba3 feat: add Excel export functionality with support for metadata and customizable sheets 2025-11-10 18:32:09 +01:00
5f183faa63 feat: implement CSV export functionality with customizable columns and formatters 2025-11-10 15:36:14 +01:00
1a7581cda0 feat: add export filters for projects and scenarios with filtering capabilities 2025-11-10 15:36:06 +01:00
84 changed files with 8821 additions and 739 deletions

View File

@@ -15,6 +15,7 @@ jobs:
DB_NAME: calminer_test
DB_USER: calminer
DB_PASSWORD: calminer_password
+REGISTRY_CONTAINER_NAME: calminer
runs-on: ubuntu-latest
services:
@@ -36,7 +37,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
-python-version: '3.11'
+python-version: "3.12"
- name: Get pip cache dir
id: pip-cache
@@ -85,7 +86,7 @@ jobs:
- name: Build Docker image
run: |
-docker build -t calminer .
+docker build -t ${{ env.REGISTRY_CONTAINER_NAME }} .
build:
runs-on: ubuntu-latest

View File

@@ -102,10 +102,13 @@ RUN pip install --upgrade pip \
COPY . /app
-RUN chown -R appuser:app /app
+RUN chown -R appuser:app /app \
+&& chmod +x /app/scripts/docker-entrypoint.sh
USER appuser
EXPOSE 8003
+ENTRYPOINT ["/app/scripts/docker-entrypoint.sh"]
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8003", "--workers", "4"]

View File

@@ -1,220 +0,0 @@
"""Initial domain schema"""
from __future__ import annotations
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "20251109_01"
down_revision = None
branch_labels = None
depends_on = None
mining_operation_type = sa.Enum(
"open_pit",
"underground",
"in_situ_leach",
"placer",
"quarry",
"mountaintop_removal",
"other",
name="miningoperationtype",
)
scenario_status = sa.Enum(
"draft",
"active",
"archived",
name="scenariostatus",
)
financial_category = sa.Enum(
"capex",
"opex",
"revenue",
"contingency",
"other",
name="financialcategory",
)
cost_bucket = sa.Enum(
"capital_initial",
"capital_sustaining",
"operating_fixed",
"operating_variable",
"maintenance",
"reclamation",
"royalties",
"general_admin",
name="costbucket",
)
distribution_type = sa.Enum(
"normal",
"triangular",
"uniform",
"lognormal",
"custom",
name="distributiontype",
)
stochastic_variable = sa.Enum(
"ore_grade",
"recovery_rate",
"metal_price",
"operating_cost",
"capital_cost",
"discount_rate",
"throughput",
name="stochasticvariable",
)
resource_type = sa.Enum(
"diesel",
"electricity",
"water",
"explosives",
"reagents",
"labor",
"equipment_hours",
"tailings_capacity",
name="resourcetype",
)
def upgrade() -> None:
bind = op.get_bind()
mining_operation_type.create(bind, checkfirst=True)
scenario_status.create(bind, checkfirst=True)
financial_category.create(bind, checkfirst=True)
cost_bucket.create(bind, checkfirst=True)
distribution_type.create(bind, checkfirst=True)
stochastic_variable.create(bind, checkfirst=True)
resource_type.create(bind, checkfirst=True)
op.create_table(
"projects",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("location", sa.String(length=255), nullable=True),
sa.Column("operation_type", mining_operation_type, nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_index(op.f("ix_projects_id"), "projects", ["id"], unique=False)
op.create_table(
"scenarios",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("project_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("status", scenario_status, nullable=False),
sa.Column("start_date", sa.Date(), nullable=True),
sa.Column("end_date", sa.Date(), nullable=True),
sa.Column("discount_rate", sa.Numeric(
precision=5, scale=2), nullable=True),
sa.Column("currency", sa.String(length=3), nullable=True),
sa.Column("primary_resource", resource_type, nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.ForeignKeyConstraint(
["project_id"], ["projects.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_scenarios_id"), "scenarios", ["id"], unique=False)
op.create_index(op.f("ix_scenarios_project_id"),
"scenarios", ["project_id"], unique=False)
op.create_table(
"financial_inputs",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("scenario_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("category", financial_category, nullable=False),
sa.Column("cost_bucket", cost_bucket, nullable=True),
sa.Column("amount", sa.Numeric(precision=18, scale=2), nullable=False),
sa.Column("currency", sa.String(length=3), nullable=True),
sa.Column("effective_date", sa.Date(), nullable=True),
sa.Column("notes", sa.Text(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.ForeignKeyConstraint(
["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_financial_inputs_id"),
"financial_inputs", ["id"], unique=False)
op.create_index(op.f("ix_financial_inputs_scenario_id"),
"financial_inputs", ["scenario_id"], unique=False)
op.create_table(
"simulation_parameters",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("scenario_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("distribution", distribution_type, nullable=False),
sa.Column("variable", stochastic_variable, nullable=True),
sa.Column("resource_type", resource_type, nullable=True),
sa.Column("mean_value", sa.Numeric(
precision=18, scale=4), nullable=True),
sa.Column("standard_deviation", sa.Numeric(
precision=18, scale=4), nullable=True),
sa.Column("minimum_value", sa.Numeric(
precision=18, scale=4), nullable=True),
sa.Column("maximum_value", sa.Numeric(
precision=18, scale=4), nullable=True),
sa.Column("unit", sa.String(length=32), nullable=True),
sa.Column("configuration", sa.JSON(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True),
server_default=sa.func.now(), nullable=False),
sa.ForeignKeyConstraint(
["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_simulation_parameters_id"),
"simulation_parameters", ["id"], unique=False)
op.create_index(op.f("ix_simulation_parameters_scenario_id"),
"simulation_parameters", ["scenario_id"], unique=False)
def downgrade() -> None:
op.drop_index(op.f("ix_simulation_parameters_scenario_id"),
table_name="simulation_parameters")
op.drop_index(op.f("ix_simulation_parameters_id"),
table_name="simulation_parameters")
op.drop_table("simulation_parameters")
op.drop_index(op.f("ix_financial_inputs_scenario_id"),
table_name="financial_inputs")
op.drop_index(op.f("ix_financial_inputs_id"),
table_name="financial_inputs")
op.drop_table("financial_inputs")
op.drop_index(op.f("ix_scenarios_project_id"), table_name="scenarios")
op.drop_index(op.f("ix_scenarios_id"), table_name="scenarios")
op.drop_table("scenarios")
op.drop_index(op.f("ix_projects_id"), table_name="projects")
op.drop_table("projects")
resource_type.drop(op.get_bind(), checkfirst=True)
stochastic_variable.drop(op.get_bind(), checkfirst=True)
distribution_type.drop(op.get_bind(), checkfirst=True)
cost_bucket.drop(op.get_bind(), checkfirst=True)
financial_category.drop(op.get_bind(), checkfirst=True)
scenario_status.drop(op.get_bind(), checkfirst=True)
mining_operation_type.drop(op.get_bind(), checkfirst=True)

View File

@@ -1,210 +0,0 @@
"""Add authentication and RBAC tables"""
from __future__ import annotations
from alembic import op
import sqlalchemy as sa
from passlib.context import CryptContext
from sqlalchemy.sql import column, table
# revision identifiers, used by Alembic.
revision = "20251109_02"
down_revision = "20251109_01"
branch_labels = None
depends_on = None
password_context = CryptContext(schemes=["argon2"], deprecated="auto")
def upgrade() -> None:
op.create_table(
"users",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("email", sa.String(length=255), nullable=False),
sa.Column("username", sa.String(length=128), nullable=False),
sa.Column("password_hash", sa.String(length=255), nullable=False),
sa.Column(
"is_active",
sa.Boolean(),
nullable=False,
server_default=sa.true(),
),
sa.Column(
"is_superuser",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
sa.Column("last_login_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint("email", name="uq_users_email"),
sa.UniqueConstraint("username", name="uq_users_username"),
)
op.create_index(
"ix_users_active_superuser",
"users",
["is_active", "is_superuser"],
unique=False,
)
op.create_table(
"roles",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("display_name", sa.String(length=128), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint("name", name="uq_roles_name"),
)
op.create_table(
"user_roles",
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("role_id", sa.Integer(), nullable=False),
sa.Column(
"granted_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column("granted_by", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["user_id"],
["users.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["role_id"],
["roles.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["granted_by"],
["users.id"],
ondelete="SET NULL",
),
sa.PrimaryKeyConstraint("user_id", "role_id"),
sa.UniqueConstraint("user_id", "role_id",
name="uq_user_roles_user_role"),
)
op.create_index(
"ix_user_roles_role_id",
"user_roles",
["role_id"],
unique=False,
)
# Seed default roles
roles_table = table(
"roles",
column("id", sa.Integer()),
column("name", sa.String()),
column("display_name", sa.String()),
column("description", sa.Text()),
)
op.bulk_insert(
roles_table,
[
{
"id": 1,
"name": "admin",
"display_name": "Administrator",
"description": "Full platform access with user management rights.",
},
{
"id": 2,
"name": "project_manager",
"display_name": "Project Manager",
"description": "Manage projects, scenarios, and associated data.",
},
{
"id": 3,
"name": "analyst",
"display_name": "Analyst",
"description": "Review dashboards and scenario outputs.",
},
{
"id": 4,
"name": "viewer",
"display_name": "Viewer",
"description": "Read-only access to assigned projects and reports.",
},
],
)
admin_password_hash = password_context.hash("ChangeMe123!")
users_table = table(
"users",
column("id", sa.Integer()),
column("email", sa.String()),
column("username", sa.String()),
column("password_hash", sa.String()),
column("is_active", sa.Boolean()),
column("is_superuser", sa.Boolean()),
)
op.bulk_insert(
users_table,
[
{
"id": 1,
"email": "admin@calminer.local",
"username": "admin",
"password_hash": admin_password_hash,
"is_active": True,
"is_superuser": True,
}
],
)
user_roles_table = table(
"user_roles",
column("user_id", sa.Integer()),
column("role_id", sa.Integer()),
column("granted_by", sa.Integer()),
)
op.bulk_insert(
user_roles_table,
[
{
"user_id": 1,
"role_id": 1,
"granted_by": 1,
}
],
)
def downgrade() -> None:
op.drop_index("ix_user_roles_role_id", table_name="user_roles")
op.drop_table("user_roles")
op.drop_table("roles")
op.drop_index("ix_users_active_superuser", table_name="users")
op.drop_table("users")

View File

@@ -0,0 +1,718 @@
"""Combined initial schema"""
from __future__ import annotations
from datetime import datetime, timezone
from alembic import op
import sqlalchemy as sa
from passlib.context import CryptContext
from sqlalchemy.sql import column, table
# revision identifiers, used by Alembic.
revision = "20251111_00"
down_revision = None
branch_labels = None
depends_on = None
password_context = CryptContext(schemes=["argon2"], deprecated="auto")
mining_operation_type = sa.Enum(
"open_pit",
"underground",
"in_situ_leach",
"placer",
"quarry",
"mountaintop_removal",
"other",
name="miningoperationtype",
)
scenario_status = sa.Enum(
"draft",
"active",
"archived",
name="scenariostatus",
)
financial_category = sa.Enum(
"capex",
"opex",
"revenue",
"contingency",
"other",
name="financialcategory",
)
cost_bucket = sa.Enum(
"capital_initial",
"capital_sustaining",
"operating_fixed",
"operating_variable",
"maintenance",
"reclamation",
"royalties",
"general_admin",
name="costbucket",
)
distribution_type = sa.Enum(
"normal",
"triangular",
"uniform",
"lognormal",
"custom",
name="distributiontype",
)
stochastic_variable = sa.Enum(
"ore_grade",
"recovery_rate",
"metal_price",
"operating_cost",
"capital_cost",
"discount_rate",
"throughput",
name="stochasticvariable",
)
resource_type = sa.Enum(
"diesel",
"electricity",
"water",
"explosives",
"reagents",
"labor",
"equipment_hours",
"tailings_capacity",
name="resourcetype",
)
DEFAULT_PRICING_SLUG = "default"
def _ensure_default_pricing_settings(connection) -> int:
settings_table = table(
"pricing_settings",
column("id", sa.Integer()),
column("slug", sa.String()),
column("name", sa.String()),
column("description", sa.Text()),
column("default_currency", sa.String()),
column("default_payable_pct", sa.Numeric()),
column("moisture_threshold_pct", sa.Numeric()),
column("moisture_penalty_per_pct", sa.Numeric()),
column("created_at", sa.DateTime(timezone=True)),
column("updated_at", sa.DateTime(timezone=True)),
)
existing = connection.execute(
sa.select(settings_table.c.id).where(
settings_table.c.slug == DEFAULT_PRICING_SLUG
)
).scalar_one_or_none()
if existing is not None:
return existing
now = datetime.now(timezone.utc)
insert_stmt = settings_table.insert().values(
slug=DEFAULT_PRICING_SLUG,
name="Default Pricing",
description="Automatically generated default pricing settings.",
default_currency="USD",
default_payable_pct=100.0,
moisture_threshold_pct=8.0,
moisture_penalty_per_pct=0.0,
created_at=now,
updated_at=now,
)
result = connection.execute(insert_stmt)
default_id = result.inserted_primary_key[0]
if default_id is None:
default_id = connection.execute(
sa.select(settings_table.c.id).where(
settings_table.c.slug == DEFAULT_PRICING_SLUG
)
).scalar_one()
return default_id
def upgrade() -> None:
bind = op.get_bind()
# Enumerations
mining_operation_type.create(bind, checkfirst=True)
scenario_status.create(bind, checkfirst=True)
financial_category.create(bind, checkfirst=True)
cost_bucket.create(bind, checkfirst=True)
distribution_type.create(bind, checkfirst=True)
stochastic_variable.create(bind, checkfirst=True)
resource_type.create(bind, checkfirst=True)
# Pricing settings core tables
op.create_table(
"pricing_settings",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("slug", sa.String(length=64), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("default_currency", sa.String(length=3), nullable=True),
sa.Column(
"default_payable_pct",
sa.Numeric(precision=5, scale=2),
nullable=False,
server_default=sa.text("100.00"),
),
sa.Column(
"moisture_threshold_pct",
sa.Numeric(precision=5, scale=2),
nullable=False,
server_default=sa.text("8.00"),
),
sa.Column(
"moisture_penalty_per_pct",
sa.Numeric(precision=14, scale=4),
nullable=False,
server_default=sa.text("0.0000"),
),
sa.Column("metadata", sa.JSON(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint("name", name="uq_pricing_settings_name"),
sa.UniqueConstraint("slug", name="uq_pricing_settings_slug"),
)
op.create_index(
op.f("ix_pricing_settings_id"),
"pricing_settings",
["id"],
unique=False,
)
op.create_table(
"pricing_metal_settings",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column(
"pricing_settings_id",
sa.Integer(),
sa.ForeignKey("pricing_settings.id", ondelete="CASCADE"),
nullable=False,
),
sa.Column("metal_code", sa.String(length=32), nullable=False),
sa.Column("payable_pct", sa.Numeric(
precision=5, scale=2), nullable=True),
sa.Column(
"moisture_threshold_pct",
sa.Numeric(precision=5, scale=2),
nullable=True,
),
sa.Column(
"moisture_penalty_per_pct",
sa.Numeric(precision=14, scale=4),
nullable=True,
),
sa.Column("data", sa.JSON(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint(
"pricing_settings_id",
"metal_code",
name="uq_pricing_metal_settings_code",
),
)
op.create_index(
op.f("ix_pricing_metal_settings_id"),
"pricing_metal_settings",
["id"],
unique=False,
)
op.create_index(
op.f("ix_pricing_metal_settings_pricing_settings_id"),
"pricing_metal_settings",
["pricing_settings_id"],
unique=False,
)
op.create_table(
"pricing_impurity_settings",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column(
"pricing_settings_id",
sa.Integer(),
sa.ForeignKey("pricing_settings.id", ondelete="CASCADE"),
nullable=False,
),
sa.Column("impurity_code", sa.String(length=32), nullable=False),
sa.Column(
"threshold_ppm",
sa.Numeric(precision=14, scale=4),
nullable=False,
server_default=sa.text("0.0000"),
),
sa.Column(
"penalty_per_ppm",
sa.Numeric(precision=14, scale=4),
nullable=False,
server_default=sa.text("0.0000"),
),
sa.Column("notes", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint(
"pricing_settings_id",
"impurity_code",
name="uq_pricing_impurity_settings_code",
),
)
op.create_index(
op.f("ix_pricing_impurity_settings_id"),
"pricing_impurity_settings",
["id"],
unique=False,
)
op.create_index(
op.f("ix_pricing_impurity_settings_pricing_settings_id"),
"pricing_impurity_settings",
["pricing_settings_id"],
unique=False,
)
# Core domain tables
op.create_table(
"projects",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("location", sa.String(length=255), nullable=True),
sa.Column("operation_type", mining_operation_type, nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column(
"pricing_settings_id",
sa.Integer(),
sa.ForeignKey("pricing_settings.id", ondelete="SET NULL"),
nullable=True,
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_index(op.f("ix_projects_id"), "projects", ["id"], unique=False)
op.create_index(
"ix_projects_pricing_settings_id",
"projects",
["pricing_settings_id"],
unique=False,
)
op.create_table(
"scenarios",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("project_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("status", scenario_status, nullable=False),
sa.Column("start_date", sa.Date(), nullable=True),
sa.Column("end_date", sa.Date(), nullable=True),
sa.Column("discount_rate", sa.Numeric(
precision=5, scale=2), nullable=True),
sa.Column("currency", sa.String(length=3), nullable=True),
sa.Column("primary_resource", resource_type, nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.ForeignKeyConstraint(
["project_id"], ["projects.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_scenarios_id"), "scenarios", ["id"], unique=False)
op.create_index(
op.f("ix_scenarios_project_id"),
"scenarios",
["project_id"],
unique=False,
)
op.create_table(
"financial_inputs",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("scenario_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("category", financial_category, nullable=False),
sa.Column("cost_bucket", cost_bucket, nullable=True),
sa.Column("amount", sa.Numeric(precision=18, scale=2), nullable=False),
sa.Column("currency", sa.String(length=3), nullable=True),
sa.Column("effective_date", sa.Date(), nullable=True),
sa.Column("notes", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.ForeignKeyConstraint(
["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_financial_inputs_id"),
"financial_inputs",
["id"],
unique=False,
)
op.create_index(
op.f("ix_financial_inputs_scenario_id"),
"financial_inputs",
["scenario_id"],
unique=False,
)
op.create_table(
"simulation_parameters",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("scenario_id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.Column("distribution", distribution_type, nullable=False),
sa.Column("variable", stochastic_variable, nullable=True),
sa.Column("resource_type", resource_type, nullable=True),
sa.Column("mean_value", sa.Numeric(
precision=18, scale=4), nullable=True),
sa.Column(
"standard_deviation",
sa.Numeric(precision=18, scale=4),
nullable=True,
),
sa.Column(
"minimum_value",
sa.Numeric(precision=18, scale=4),
nullable=True,
),
sa.Column(
"maximum_value",
sa.Numeric(precision=18, scale=4),
nullable=True,
),
sa.Column("unit", sa.String(length=32), nullable=True),
sa.Column("configuration", sa.JSON(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.func.now(),
nullable=False,
),
sa.ForeignKeyConstraint(
["scenario_id"], ["scenarios.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_simulation_parameters_id"),
"simulation_parameters",
["id"],
unique=False,
)
op.create_index(
op.f("ix_simulation_parameters_scenario_id"),
"simulation_parameters",
["scenario_id"],
unique=False,
)
# Authentication and RBAC tables
op.create_table(
"users",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("email", sa.String(length=255), nullable=False),
sa.Column("username", sa.String(length=128), nullable=False),
sa.Column("password_hash", sa.String(length=255), nullable=False),
sa.Column("is_active", sa.Boolean(),
nullable=False, server_default=sa.true()),
sa.Column(
"is_superuser",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
sa.Column("last_login_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint("email", name="uq_users_email"),
sa.UniqueConstraint("username", name="uq_users_username"),
)
op.create_index(
"ix_users_active_superuser",
"users",
["is_active", "is_superuser"],
unique=False,
)
op.create_table(
"roles",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("display_name", sa.String(length=128), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.UniqueConstraint("name", name="uq_roles_name"),
)
op.create_table(
"user_roles",
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("role_id", sa.Integer(), nullable=False),
sa.Column(
"granted_at",
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.func.now(),
),
sa.Column("granted_by", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["role_id"], ["roles.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(
["granted_by"], ["users.id"], ondelete="SET NULL"),
sa.PrimaryKeyConstraint("user_id", "role_id"),
sa.UniqueConstraint("user_id", "role_id",
name="uq_user_roles_user_role"),
)
op.create_index(
"ix_user_roles_role_id",
"user_roles",
["role_id"],
unique=False,
)
# Seed roles and default admin
roles_table = table(
"roles",
column("id", sa.Integer()),
column("name", sa.String()),
column("display_name", sa.String()),
column("description", sa.Text()),
)
op.bulk_insert(
roles_table,
[
{
"id": 1,
"name": "admin",
"display_name": "Administrator",
"description": "Full platform access with user management rights.",
},
{
"id": 2,
"name": "project_manager",
"display_name": "Project Manager",
"description": "Manage projects, scenarios, and associated data.",
},
{
"id": 3,
"name": "analyst",
"display_name": "Analyst",
"description": "Review dashboards and scenario outputs.",
},
{
"id": 4,
"name": "viewer",
"display_name": "Viewer",
"description": "Read-only access to assigned projects and reports.",
},
],
)
admin_password_hash = password_context.hash("ChangeMe123!")
users_table = table(
"users",
column("id", sa.Integer()),
column("email", sa.String()),
column("username", sa.String()),
column("password_hash", sa.String()),
column("is_active", sa.Boolean()),
column("is_superuser", sa.Boolean()),
)
op.bulk_insert(
users_table,
[
{
"id": 1,
"email": "admin@calminer.local",
"username": "admin",
"password_hash": admin_password_hash,
"is_active": True,
"is_superuser": True,
}
],
)
user_roles_table = table(
"user_roles",
column("user_id", sa.Integer()),
column("role_id", sa.Integer()),
column("granted_by", sa.Integer()),
)
op.bulk_insert(
user_roles_table,
[
{
"user_id": 1,
"role_id": 1,
"granted_by": 1,
}
],
)
# Ensure a default pricing settings record exists for future project linkage
_ensure_default_pricing_settings(bind)
def downgrade() -> None:
# Drop RBAC
op.drop_index("ix_user_roles_role_id", table_name="user_roles")
op.drop_table("user_roles")
op.drop_table("roles")
op.drop_index("ix_users_active_superuser", table_name="users")
op.drop_table("users")
# Drop domain tables
op.drop_index(
op.f("ix_simulation_parameters_scenario_id"),
table_name="simulation_parameters",
)
op.drop_index(op.f("ix_simulation_parameters_id"),
table_name="simulation_parameters")
op.drop_table("simulation_parameters")
op.drop_index(
op.f("ix_financial_inputs_scenario_id"), table_name="financial_inputs"
)
op.drop_index(op.f("ix_financial_inputs_id"),
table_name="financial_inputs")
op.drop_table("financial_inputs")
op.drop_index(op.f("ix_scenarios_project_id"), table_name="scenarios")
op.drop_index(op.f("ix_scenarios_id"), table_name="scenarios")
op.drop_table("scenarios")
op.drop_index("ix_projects_pricing_settings_id", table_name="projects")
op.drop_index(op.f("ix_projects_id"), table_name="projects")
op.drop_table("projects")
# Drop pricing settings ancillary tables
op.drop_index(
op.f("ix_pricing_impurity_settings_pricing_settings_id"),
table_name="pricing_impurity_settings",
)
op.drop_index(
op.f("ix_pricing_impurity_settings_id"),
table_name="pricing_impurity_settings",
)
op.drop_table("pricing_impurity_settings")
op.drop_index(
op.f("ix_pricing_metal_settings_pricing_settings_id"),
table_name="pricing_metal_settings",
)
op.drop_index(
op.f("ix_pricing_metal_settings_id"),
table_name="pricing_metal_settings",
)
op.drop_table("pricing_metal_settings")
op.drop_index(op.f("ix_pricing_settings_id"),
table_name="pricing_settings")
op.drop_table("pricing_settings")
# Drop enumerations
resource_type.drop(op.get_bind(), checkfirst=True)
stochastic_variable.drop(op.get_bind(), checkfirst=True)
distribution_type.drop(op.get_bind(), checkfirst=True)
cost_bucket.drop(op.get_bind(), checkfirst=True)
financial_category.drop(op.get_bind(), checkfirst=True)
scenario_status.drop(op.get_bind(), checkfirst=True)
mining_operation_type.drop(op.get_bind(), checkfirst=True)

View File

@@ -24,7 +24,6 @@
## 2025-11-10
- Extended authorization helper layer with project/scenario ownership lookups, integrated them into FastAPI dependencies, refreshed pytest fixtures to keep the suite authenticated, and documented the new patterns across RBAC plan and security guides.
- Added dedicated pytest coverage for guard dependencies, exercising success plus failure paths (missing session, inactive user, missing roles, project/scenario access errors) via `tests/test_dependencies_guards.py`.
- Added integration tests in `tests/test_authorization_integration.py` verifying anonymous 401 responses, role-based 403s, and authorized project manager flows across API and UI endpoints.
- Implemented environment-driven admin bootstrap settings, wired the `bootstrap_admin` helper into FastAPI startup, added pytest coverage for creation/idempotency/reset logic, and documented operational guidance in the RBAC plan and security concept.
@@ -33,3 +32,20 @@
- Documented the project/scenario import/export field mapping and file format guidelines in `calminer-docs/requirements/FR-008.md`, and introduced `schemas/imports.py` with Pydantic models that normalise incoming CSV/Excel rows for projects and scenarios.
- Added `services/importers.py` to load CSV/XLSX files into the new import schemas, pulled in `openpyxl` for Excel support, and covered the parsing behaviour with `tests/test_import_parsing.py`.
- Expanded the import ingestion workflow with staging previews, transactional persistence commits, FastAPI preview/commit endpoints under `/imports`, and new API tests (`tests/test_import_ingestion.py`, `tests/test_import_api.py`) ensuring end-to-end coverage.
- Added persistent audit logging via `ImportExportLog`, structured log emission, Prometheus metrics instrumentation, `/metrics` endpoint exposure, and updated operator/deployment documentation to guide monitoring setup.
## 2025-11-11
- Centralised ISO-4217 currency validation across scenarios, imports, and export filters (`models/scenario.py`, `routes/scenarios.py`, `schemas/scenario.py`, `schemas/imports.py`, `services/export_query.py`) so malformed codes are rejected consistently at every entry point.
- Updated scenario services and UI flows to surface friendly validation errors and added regression coverage for imports, exports, API creation, and lifecycle flows ensuring currencies are normalised end-to-end.
- Recorded the completed “Ensure currency is used consistently” work in `.github/instructions/DONE.md` and ran the full pytest suite (150 tests) to verify the refactor.
- Linked projects to their pricing settings by updating SQLAlchemy models, repositories, seeding utilities, and migrations, and added regression tests to cover the new association and default backfill.
- Bootstrapped database-stored pricing settings at application startup, aligned initial data seeding with the database-first metadata flow, and added tests covering pricing bootstrap creation, project assignment, and idempotency.
- Extended pricing configuration support to prefer persisted metadata via `dependencies.get_pricing_metadata`, added retrieval tests for project/default fallbacks, and refreshed docs (`calminer-docs/specifications/price_calculation.md`, `pricing_settings_data_model.md`) to describe the database-backed workflow and bootstrap behaviour.
- Added `services/financial.py` NPV, IRR, and payback helpers with robust cash-flow normalisation, convergence safeguards, and fractional period support, plus comprehensive pytest coverage exercising representative project scenarios and failure modes (a formula-level sketch follows this list).
- Authored `calminer-docs/specifications/financial_metrics.md` capturing DCF assumptions, solver behaviours, and worked examples, and cross-linked the architecture concepts to the new reference for consistent navigation.
- Implemented `services/simulation.py` Monte Carlo engine with configurable distributions, summary aggregation, and reproducible RNG seeding, introduced regression tests in `tests/test_simulation.py`, and documented configuration/usage in `calminer-docs/specifications/monte_carlo_simulation.md` with architecture cross-links.
- Polished reporting HTML contexts by cleaning stray fragments in `routes/reports.py`, adding download action metadata for project and scenario pages, and generating scenario comparison download URLs with correctly serialised repeated `scenario_ids` parameters.
- Consolidated Alembic history into a single initial migration (`20251111_00_initial_schema.py`), removed superseded revision files, and ensured Alembic metadata still references the project metadata for clean bootstrap.
- Added `scripts/run_migrations.py` and a Docker entrypoint wrapper to run Alembic migrations before `uvicorn` starts, removed the fallback `Base.metadata.create_all` call, and updated `calminer-docs/admin/installation.md` so developers know how to apply migrations locally or via Docker.
- Configured pytest defaults to collect coverage (`--cov`) with an 80% fail-under gate, excluded entrypoint/reporting scaffolds from the calculation, updated contributor docs with the standard `pytest` command, and verified the suite now reports 83% coverage.
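The `services/financial.py` helpers themselves are not included in this diff. Below is a formula-level sketch of the NPV/IRR core, with a bisection solver standing in for whatever root-finder the real module uses and its cash-flow normalisation omitted.

```python
# Sketch of the DCF formulas behind services/financial.py (assumed, not the
# committed module). NPV discounts period cash flows; IRR bisects on NPV's sign.
from typing import Sequence


def npv(rate: float, cash_flows: Sequence[float]) -> float:
    """NPV = sum(cf_t / (1 + rate) ** t), with t = 0 undiscounted."""
    return sum(cf / (1.0 + rate) ** t for t, cf in enumerate(cash_flows))


def irr(cash_flows: Sequence[float], lo: float = -0.99, hi: float = 10.0,
        tol: float = 1e-7) -> float:
    """Bisection solver for npv(rate) == 0; assumes one sign change in [lo, hi]."""
    f_lo, f_hi = npv(lo, cash_flows), npv(hi, cash_flows)
    if f_lo * f_hi > 0:
        raise ValueError("IRR not bracketed by the search interval")
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if npv(mid, cash_flows) * f_lo > 0:
            lo = mid  # same sign as the low end: move the low bound up
        else:
            hi = mid
    return (lo + hi) / 2.0


# Example: -1000 up front, then 400 per year for four years.
print(round(irr([-1000, 400, 400, 400, 400]), 4))  # ~0.2186
```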

View File

@@ -7,6 +7,8 @@ from functools import lru_cache
from typing import Optional
from services.pricing import PricingMetadata
from services.security import JWTSettings
@@ -56,6 +58,10 @@ class Settings:
admin_password: str = "ChangeMe123!"
admin_roles: tuple[str, ...] = ("admin",)
admin_force_reset: bool = False
+pricing_default_payable_pct: float = 100.0
+pricing_default_currency: str | None = "USD"
+pricing_moisture_threshold_pct: float = 8.0
+pricing_moisture_penalty_per_pct: float = 0.0
@classmethod
def from_environment(cls) -> "Settings":
@@ -105,6 +111,18 @@ class Settings:
admin_force_reset=cls._bool_from_env(
"CALMINER_SEED_FORCE", False
),
+pricing_default_payable_pct=cls._float_from_env(
+"CALMINER_PRICING_DEFAULT_PAYABLE_PCT", 100.0
+),
+pricing_default_currency=cls._optional_str(
+"CALMINER_PRICING_DEFAULT_CURRENCY", "USD"
+),
+pricing_moisture_threshold_pct=cls._float_from_env(
+"CALMINER_PRICING_MOISTURE_THRESHOLD_PCT", 8.0
+),
+pricing_moisture_penalty_per_pct=cls._float_from_env(
+"CALMINER_PRICING_MOISTURE_PENALTY_PER_PCT", 0.0
+),
)
@staticmethod
@@ -145,6 +163,23 @@ class Settings:
seen.add(role_name)
return tuple(ordered)
+@staticmethod
+def _float_from_env(name: str, default: float) -> float:
+raw_value = os.getenv(name)
+if raw_value is None:
+return default
+try:
+return float(raw_value)
+except ValueError:
+return default
+@staticmethod
+def _optional_str(name: str, default: str | None = None) -> str | None:
+raw_value = os.getenv(name)
+if raw_value is None or raw_value.strip() == "":
+return default
+return raw_value.strip()
def jwt_settings(self) -> JWTSettings:
"""Build runtime JWT settings compatible with token helpers."""
@@ -180,6 +215,16 @@ class Settings:
force_reset=self.admin_force_reset,
)
+def pricing_metadata(self) -> PricingMetadata:
+"""Build pricing metadata defaults."""
+return PricingMetadata(
+default_payable_pct=self.pricing_default_payable_pct,
+default_currency=self.pricing_default_currency,
+moisture_threshold_pct=self.pricing_moisture_threshold_pct,
+moisture_penalty_per_pct=self.pricing_moisture_penalty_per_pct,
+)
@lru_cache(maxsize=1)
def get_settings() -> Settings:
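A hypothetical demonstration of the new environment overrides (module path taken from main.py's `from config.settings import get_settings`; assumes the remaining settings fall back to defaults when their variables are unset): a parseable value is adopted, while an unparsable one silently keeps the coded default, matching `_float_from_env` above.

```python
# Hypothetical usage sketch, not part of the diff.
import os

from config.settings import Settings

os.environ["CALMINER_PRICING_DEFAULT_CURRENCY"] = "EUR"
os.environ["CALMINER_PRICING_MOISTURE_THRESHOLD_PCT"] = "not-a-number"

settings = Settings.from_environment()
assert settings.pricing_default_currency == "EUR"
assert settings.pricing_moisture_threshold_pct == 8.0  # fallback on parse failure
```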

View File

@@ -22,6 +22,9 @@ from services.session import (
)
from services.unit_of_work import UnitOfWork
from services.importers import ImportIngestionService
+from services.pricing import PricingMetadata
+from services.scenario_evaluation import ScenarioPricingConfig, ScenarioPricingEvaluator
+from services.repositories import pricing_settings_to_metadata
def get_unit_of_work() -> Generator[UnitOfWork, None, None]:
@@ -46,6 +49,29 @@ def get_application_settings() -> Settings:
return get_settings()
+def get_pricing_metadata(
+settings: Settings = Depends(get_application_settings),
+uow: UnitOfWork = Depends(get_unit_of_work),
+) -> PricingMetadata:
+"""Return pricing metadata defaults sourced from persisted pricing settings."""
+stored = uow.get_pricing_metadata()
+if stored is not None:
+return stored
+fallback = settings.pricing_metadata()
+seed_result = uow.ensure_default_pricing_settings(metadata=fallback)
+return pricing_settings_to_metadata(seed_result.settings)
+def get_pricing_evaluator(
+metadata: PricingMetadata = Depends(get_pricing_metadata),
+) -> ScenarioPricingEvaluator:
+"""Provide a configured scenario pricing evaluator."""
+return ScenarioPricingEvaluator(ScenarioPricingConfig(metadata=metadata))
def get_jwt_settings() -> JWTSettings:
"""Provide JWT runtime configuration derived from settings."""

main.py
View File

@@ -4,7 +4,6 @@ from typing import Awaitable, Callable
from fastapi import FastAPI, Request, Response
from fastapi.staticfiles import StaticFiles
-from config.database import Base, engine
from config.settings import get_settings
from middleware.auth_session import AuthSessionMiddleware
from middleware.validation import validate_json
@@ -17,12 +16,12 @@ from models import (
from routes.auth import router as auth_router
from routes.dashboard import router as dashboard_router
from routes.imports import router as imports_router
+from routes.exports import router as exports_router
from routes.projects import router as projects_router
from routes.reports import router as reports_router
from routes.scenarios import router as scenarios_router
-from services.bootstrap import bootstrap_admin
-# Initialize database schema (imports above ensure models are registered)
-Base.metadata.create_all(bind=engine)
+from monitoring import router as monitoring_router
+from services.bootstrap import bootstrap_admin, bootstrap_pricing_settings
app = FastAPI()
@@ -45,9 +44,12 @@ async def health() -> dict[str, str]:
@app.on_event("startup")
async def ensure_admin_bootstrap() -> None:
-settings = get_settings().admin_bootstrap_settings()
+settings = get_settings()
+admin_settings = settings.admin_bootstrap_settings()
+pricing_metadata = settings.pricing_metadata()
try:
-role_result, admin_result = bootstrap_admin(settings=settings)
+role_result, admin_result = bootstrap_admin(settings=admin_settings)
+pricing_result = bootstrap_pricing_settings(metadata=pricing_metadata)
logger.info(
"Admin bootstrap completed: roles=%s created=%s updated=%s rotated=%s assigned=%s",
role_result.ensured,
@@ -56,14 +58,26 @@ async def ensure_admin_bootstrap() -> None:
admin_result.password_rotated,
admin_result.roles_granted,
)
+logger.info(
+"Pricing settings bootstrap completed: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
+pricing_result.seed.settings.slug,
+pricing_result.seed.created,
+pricing_result.seed.updated_fields,
+pricing_result.seed.impurity_upserts,
+pricing_result.projects_assigned,
+)
except Exception: # pragma: no cover - defensive logging
-logger.exception("Failed to bootstrap administrator account")
+logger.exception(
+"Failed to bootstrap administrator or pricing settings")
app.include_router(dashboard_router)
app.include_router(auth_router)
app.include_router(imports_router)
+app.include_router(exports_router)
app.include_router(projects_router)
app.include_router(scenarios_router)
app.include_router(reports_router)
+app.include_router(monitoring_router)
app.mount("/static", StaticFiles(directory="static"), name="static")

View File

@@ -11,6 +11,11 @@ from .metadata import (
StochasticVariable,
StochasticVariableDescriptor,
)
+from .pricing_settings import (
+PricingImpuritySettings,
+PricingMetalSettings,
+PricingSettings,
+)
from .project import MiningOperationType, Project
from .scenario import Scenario, ScenarioStatus
from .simulation_parameter import DistributionType, SimulationParameter
@@ -21,6 +26,9 @@ __all__ = [
"FinancialInput",
"MiningOperationType",
"Project",
"PricingSettings",
"PricingMetalSettings",
"PricingImpuritySettings",
"Scenario",
"ScenarioStatus",
"DistributionType",

View File

@@ -31,6 +31,7 @@ from sqlalchemy.sql import func
from config.database import Base
from .metadata import CostBucket
+from services.currency import normalise_currency
if TYPE_CHECKING: # pragma: no cover
from .scenario import Scenario
@@ -73,16 +74,12 @@ class FinancialInput(Base):
DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
)
scenario: Mapped["Scenario"] = relationship("Scenario", back_populates="financial_inputs")
scenario: Mapped["Scenario"] = relationship(
"Scenario", back_populates="financial_inputs")
@validates("currency")
def _validate_currency(self, key: str, value: str | None) -> str | None:
if value is None:
return value
value = value.upper()
if len(value) != 3:
raise ValueError("Currency code must be a 3-letter ISO 4217 value")
return value
return normalise_currency(value)
def __repr__(self) -> str: # pragma: no cover
return f"FinancialInput(id={self.id!r}, scenario_id={self.scenario_id!r}, name={self.name!r})"

View File

@@ -0,0 +1,32 @@
from __future__ import annotations
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.sql import func
from config.database import Base
class ImportExportLog(Base):
"""Audit log for import and export operations."""
__tablename__ = "import_export_logs"
id = Column(Integer, primary_key=True, index=True)
action = Column(String(32), nullable=False) # preview, commit, export
dataset = Column(String(32), nullable=False) # projects, scenarios, etc.
status = Column(String(16), nullable=False) # success, failure
filename = Column(String(255), nullable=True)
row_count = Column(Integer, nullable=True)
detail = Column(Text, nullable=True)
user_id = Column(Integer, ForeignKey("users.id"), nullable=True)
created_at = Column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
def __repr__(self) -> str: # pragma: no cover
return (
f"ImportExportLog(id={self.id}, action={self.action}, "
f"dataset={self.dataset}, status={self.status})"
)

models/pricing_settings.py
View File

@@ -0,0 +1,176 @@
from __future__ import annotations
"""Database models for persisted pricing configuration settings."""
from datetime import datetime
from typing import TYPE_CHECKING
from sqlalchemy import (
JSON,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
Text,
UniqueConstraint,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func
from config.database import Base
from services.currency import normalise_currency
if TYPE_CHECKING: # pragma: no cover
from .project import Project
class PricingSettings(Base):
"""Persisted pricing defaults applied to scenario evaluations."""
__tablename__ = "pricing_settings"
id: Mapped[int] = mapped_column(Integer, primary_key=True)
name: Mapped[str] = mapped_column(String(128), nullable=False, unique=True)
slug: Mapped[str] = mapped_column(String(64), nullable=False, unique=True)
description: Mapped[str | None] = mapped_column(Text, nullable=True)
default_currency: Mapped[str | None] = mapped_column(
String(3), nullable=True)
default_payable_pct: Mapped[float] = mapped_column(
Numeric(5, 2), nullable=False, default=100.0
)
moisture_threshold_pct: Mapped[float] = mapped_column(
Numeric(5, 2), nullable=False, default=8.0
)
moisture_penalty_per_pct: Mapped[float] = mapped_column(
Numeric(14, 4), nullable=False, default=0.0
)
metadata_payload: Mapped[dict | None] = mapped_column(
"metadata", JSON, nullable=True
)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
)
metal_overrides: Mapped[list["PricingMetalSettings"]] = relationship(
"PricingMetalSettings",
back_populates="pricing_settings",
cascade="all, delete-orphan",
passive_deletes=True,
)
impurity_overrides: Mapped[list["PricingImpuritySettings"]] = relationship(
"PricingImpuritySettings",
back_populates="pricing_settings",
cascade="all, delete-orphan",
passive_deletes=True,
)
projects: Mapped[list["Project"]] = relationship(
"Project",
back_populates="pricing_settings",
cascade="all",
)
@validates("slug")
def _normalise_slug(self, key: str, value: str) -> str:
return value.strip().lower()
@validates("default_currency")
def _validate_currency(self, key: str, value: str | None) -> str | None:
return normalise_currency(value)
def __repr__(self) -> str: # pragma: no cover
return f"PricingSettings(id={self.id!r}, slug={self.slug!r})"
class PricingMetalSettings(Base):
"""Contract-specific overrides for a particular metal."""
__tablename__ = "pricing_metal_settings"
__table_args__ = (
UniqueConstraint(
"pricing_settings_id", "metal_code", name="uq_pricing_metal_settings_code"
),
)
id: Mapped[int] = mapped_column(Integer, primary_key=True)
pricing_settings_id: Mapped[int] = mapped_column(
ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
)
metal_code: Mapped[str] = mapped_column(String(32), nullable=False)
payable_pct: Mapped[float | None] = mapped_column(
Numeric(5, 2), nullable=True)
moisture_threshold_pct: Mapped[float | None] = mapped_column(
Numeric(5, 2), nullable=True)
moisture_penalty_per_pct: Mapped[float | None] = mapped_column(
Numeric(14, 4), nullable=True
)
data: Mapped[dict | None] = mapped_column(JSON, nullable=True)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
)
pricing_settings: Mapped["PricingSettings"] = relationship(
"PricingSettings", back_populates="metal_overrides"
)
@validates("metal_code")
def _normalise_metal_code(self, key: str, value: str) -> str:
return value.strip().lower()
def __repr__(self) -> str: # pragma: no cover
return (
"PricingMetalSettings(" # noqa: ISC001
f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
f"metal_code={self.metal_code!r})"
)
class PricingImpuritySettings(Base):
"""Impurity penalty thresholds associated with pricing settings."""
__tablename__ = "pricing_impurity_settings"
__table_args__ = (
UniqueConstraint(
"pricing_settings_id",
"impurity_code",
name="uq_pricing_impurity_settings_code",
),
)
id: Mapped[int] = mapped_column(Integer, primary_key=True)
pricing_settings_id: Mapped[int] = mapped_column(
ForeignKey("pricing_settings.id", ondelete="CASCADE"), nullable=False, index=True
)
impurity_code: Mapped[str] = mapped_column(String(32), nullable=False)
threshold_ppm: Mapped[float] = mapped_column(
Numeric(14, 4), nullable=False, default=0.0)
penalty_per_ppm: Mapped[float] = mapped_column(
Numeric(14, 4), nullable=False, default=0.0)
notes: Mapped[str | None] = mapped_column(Text, nullable=True)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
)
pricing_settings: Mapped["PricingSettings"] = relationship(
"PricingSettings", back_populates="impurity_overrides"
)
@validates("impurity_code")
def _normalise_impurity_code(self, key: str, value: str) -> str:
return value.strip().upper()
def __repr__(self) -> str: # pragma: no cover
return (
"PricingImpuritySettings(" # noqa: ISC001
f"id={self.id!r}, pricing_settings_id={self.pricing_settings_id!r}, "
f"impurity_code={self.impurity_code!r})"
)

View File

@@ -4,7 +4,7 @@ from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING, List
-from sqlalchemy import DateTime, Enum as SQLEnum, Integer, String, Text
+from sqlalchemy import DateTime, Enum as SQLEnum, ForeignKey, Integer, String, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
@@ -12,6 +12,7 @@ from config.database import Base
if TYPE_CHECKING: # pragma: no cover
from .scenario import Scenario
+from .pricing_settings import PricingSettings
class MiningOperationType(str, Enum):
@@ -38,6 +39,10 @@ class Project(Base):
SQLEnum(MiningOperationType), nullable=False, default=MiningOperationType.OTHER
)
description: Mapped[str | None] = mapped_column(Text, nullable=True)
+pricing_settings_id: Mapped[int | None] = mapped_column(
+ForeignKey("pricing_settings.id", ondelete="SET NULL"),
+nullable=True,
+)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
@@ -51,6 +56,10 @@ class Project(Base):
cascade="all, delete-orphan",
passive_deletes=True,
)
+pricing_settings: Mapped["PricingSettings | None"] = relationship(
+"PricingSettings",
+back_populates="projects",
+)
def __repr__(self) -> str: # pragma: no cover - helpful for debugging
return f"Project(id={self.id!r}, name={self.name!r})"

View File

@@ -14,10 +14,11 @@ from sqlalchemy import (
String,
Text,
)
-from sqlalchemy.orm import Mapped, mapped_column, relationship
+from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from sqlalchemy.sql import func
from config.database import Base
+from services.currency import normalise_currency
from .metadata import ResourceType
if TYPE_CHECKING: # pragma: no cover
@@ -50,7 +51,8 @@ class Scenario(Base):
)
start_date: Mapped[date | None] = mapped_column(Date, nullable=True)
end_date: Mapped[date | None] = mapped_column(Date, nullable=True)
-discount_rate: Mapped[float | None] = mapped_column(Numeric(5, 2), nullable=True)
+discount_rate: Mapped[float | None] = mapped_column(
+Numeric(5, 2), nullable=True)
currency: Mapped[str | None] = mapped_column(String(3), nullable=True)
primary_resource: Mapped[ResourceType | None] = mapped_column(
SQLEnum(ResourceType), nullable=True
@@ -62,7 +64,8 @@ class Scenario(Base):
DateTime(timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()
)
project: Mapped["Project"] = relationship("Project", back_populates="scenarios")
project: Mapped["Project"] = relationship(
"Project", back_populates="scenarios")
financial_inputs: Mapped[List["FinancialInput"]] = relationship(
"FinancialInput",
back_populates="scenario",
@@ -76,5 +79,10 @@ class Scenario(Base):
passive_deletes=True,
)
@validates("currency")
def _normalise_currency(self, key: str, value: str | None) -> str | None:
# Normalise to uppercase ISO-4217; raises when the code is malformed.
return normalise_currency(value)
def __repr__(self) -> str: # pragma: no cover
return f"Scenario(id={self.id!r}, name={self.name!r}, project_id={self.project_id!r})"

monitoring/__init__.py
View File

@@ -0,0 +1,13 @@
from __future__ import annotations
from fastapi import APIRouter, Response
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
router = APIRouter(prefix="/metrics", tags=["monitoring"])
@router.get("", summary="Prometheus metrics endpoint", include_in_schema=False)
async def metrics_endpoint() -> Response:
payload = generate_latest()
return Response(content=payload, media_type=CONTENT_TYPE_LATEST)

monitoring/metrics.py
View File

@@ -0,0 +1,42 @@
from __future__ import annotations
from typing import Iterable
from prometheus_client import Counter, Histogram
IMPORT_DURATION = Histogram(
"calminer_import_duration_seconds",
"Duration of import preview and commit operations",
labelnames=("dataset", "action", "status"),
)
IMPORT_TOTAL = Counter(
"calminer_import_total",
"Count of import operations",
labelnames=("dataset", "action", "status"),
)
EXPORT_DURATION = Histogram(
"calminer_export_duration_seconds",
"Duration of export operations",
labelnames=("dataset", "status", "format"),
)
EXPORT_TOTAL = Counter(
"calminer_export_total",
"Count of export operations",
labelnames=("dataset", "status", "format"),
)
def observe_import(action: str, dataset: str, status: str, seconds: float) -> None:
IMPORT_TOTAL.labels(dataset=dataset, action=action, status=status).inc()
IMPORT_DURATION.labels(dataset=dataset, action=action,
status=status).observe(seconds)
def observe_export(dataset: str, status: str, export_format: str, seconds: float) -> None:
EXPORT_TOTAL.labels(dataset=dataset, status=status,
format=export_format).inc()
EXPORT_DURATION.labels(dataset=dataset, status=status,
format=export_format).observe(seconds)
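A hypothetical call site for these helpers: time an import commit and record both the counter and histogram. `commit_rows` is a stand-in for the real ingestion service, not a CalMiner function.

```python
# Hypothetical usage sketch, not part of the diff.
import time

from monitoring.metrics import observe_import


def commit_rows(dataset: str) -> int:
    """Stand-in for the real ingestion commit."""
    return 42


start = time.perf_counter()
status = "success"
try:
    commit_rows("projects")
except Exception:
    status = "failure"
    raise
finally:
    observe_import(action="commit", dataset="projects", status=status,
                   seconds=time.perf_counter() - start)
```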

View File

@@ -16,4 +16,22 @@ exclude = '''
[tool.pytest.ini_options]
pythonpath = ["."]
testpaths = ["tests"]
addopts = "-ra --strict-config --strict-markers --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=80"
[tool.coverage.run]
branch = true
source = ["."]
omit = [
"tests/*",
"alembic/*",
"scripts/*",
"main.py",
"routes/reports.py",
"services/reporting.py",
]
[tool.coverage.report]
skip_empty = true
show_missing = true

View File

@@ -13,3 +13,4 @@ argon2-cffi
python-jose
python-multipart
openpyxl
+prometheus-client

View File

@@ -7,8 +7,7 @@ from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from dependencies import get_unit_of_work, require_authenticated_user
-from models import User
-from models import ScenarioStatus
+from models import ScenarioStatus, User
from services.unit_of_work import UnitOfWork
router = APIRouter(tags=["Dashboard"])
@@ -120,5 +119,9 @@ def dashboard_home(
"recent_projects": _load_recent_projects(uow),
"simulation_updates": _load_simulation_updates(uow),
"scenario_alerts": _load_scenario_alerts(request, uow),
"export_modals": {
"projects": request.url_for("exports.modal", dataset="projects"),
"scenarios": request.url_for("exports.modal", dataset="scenarios"),
},
}
return templates.TemplateResponse(request, "dashboard.html", context)

363
routes/exports.py Normal file

@@ -0,0 +1,363 @@
from __future__ import annotations
import logging
import time
from datetime import datetime, timezone
from typing import Annotated
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi.responses import HTMLResponse, StreamingResponse
from fastapi.templating import Jinja2Templates
from dependencies import get_unit_of_work, require_any_role
from schemas.exports import (
ExportFormat,
ProjectExportRequest,
ScenarioExportRequest,
)
from services.export_serializers import (
export_projects_to_excel,
export_scenarios_to_excel,
stream_projects_to_csv,
stream_scenarios_to_csv,
)
from services.unit_of_work import UnitOfWork
from models.import_export_log import ImportExportLog
from monitoring.metrics import observe_export
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/exports", tags=["exports"])
@router.get(
"/modal/{dataset}",
response_model=None,
response_class=HTMLResponse,
include_in_schema=False,
name="exports.modal",
)
async def export_modal(
dataset: str,
request: Request,
) -> HTMLResponse:
dataset = dataset.lower()
if dataset not in {"projects", "scenarios"}:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Unknown dataset")
submit_url = request.url_for(
"export_projects" if dataset == "projects" else "export_scenarios"
)
templates = Jinja2Templates(directory="templates")
return templates.TemplateResponse(
request,
"exports/modal.html",
{
"dataset": dataset,
"submit_url": submit_url,
},
)
def _timestamp_suffix() -> str:
return datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
def _ensure_repository(repo, name: str):
if repo is None:
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"{name} repository unavailable")
return repo
def _record_export_audit(
*,
uow: UnitOfWork,
dataset: str,
status: str,
export_format: ExportFormat,
row_count: int,
filename: str | None,
) -> None:
try:
if uow.session is None:
return
log = ImportExportLog(
action="export",
dataset=dataset,
status=status,
filename=filename,
row_count=row_count,
detail=f"format={export_format.value}",
)
uow.session.add(log)
uow.commit()
except Exception:
# best-effort auditing, do not break exports
if uow.session is not None:
uow.session.rollback()
logger.exception(
"export.audit.failed",
extra={
"event": "export.audit",
"dataset": dataset,
"status": status,
"format": export_format.value,
},
)
@router.post(
"/projects",
status_code=status.HTTP_200_OK,
response_class=StreamingResponse,
dependencies=[Depends(require_any_role(
"admin", "project_manager", "analyst"))],
)
async def export_projects(
request: ProjectExportRequest,
uow: Annotated[UnitOfWork, Depends(get_unit_of_work)],
) -> Response:
project_repo = _ensure_repository(
getattr(uow, "projects", None), "Project")
start = time.perf_counter()
try:
projects = project_repo.filtered_for_export(request.filters)
except ValueError as exc:
_record_export_audit(
uow=uow,
dataset="projects",
status="failure",
export_format=request.format,
row_count=0,
filename=None,
)
logger.warning(
"export.validation_failed",
extra={
"event": "export",
"dataset": "projects",
"status": "validation_failed",
"format": request.format.value,
"error": str(exc),
},
)
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
detail=str(exc),
) from exc
except Exception as exc:
_record_export_audit(
uow=uow,
dataset="projects",
status="failure",
export_format=request.format,
row_count=0,
filename=None,
)
logger.exception(
"export.failed",
extra={
"event": "export",
"dataset": "projects",
"status": "failure",
"format": request.format.value,
},
)
raise
filename = f"projects-{_timestamp_suffix()}"
if request.format == ExportFormat.CSV:
stream = stream_projects_to_csv(projects)
response = StreamingResponse(stream, media_type="text/csv")
response.headers["Content-Disposition"] = f"attachment; filename={filename}.csv"
_record_export_audit(
uow=uow,
dataset="projects",
status="success",
export_format=request.format,
row_count=len(projects),
filename=f"{filename}.csv",
)
logger.info(
"export",
extra={
"event": "export",
"dataset": "projects",
"status": "success",
"format": request.format.value,
"row_count": len(projects),
"filename": f"{filename}.csv",
},
)
observe_export(
dataset="projects",
status="success",
export_format=request.format.value,
seconds=time.perf_counter() - start,
)
return response
data = export_projects_to_excel(projects)
_record_export_audit(
uow=uow,
dataset="projects",
status="success",
export_format=request.format,
row_count=len(projects),
filename=f"{filename}.xlsx",
)
logger.info(
"export",
extra={
"event": "export",
"dataset": "projects",
"status": "success",
"format": request.format.value,
"row_count": len(projects),
"filename": f"{filename}.xlsx",
},
)
observe_export(
dataset="projects",
status="success",
export_format=request.format.value,
seconds=time.perf_counter() - start,
)
return StreamingResponse(
iter([data]),
media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
headers={
"Content-Disposition": f"attachment; filename={filename}.xlsx",
},
)
@router.post(
"/scenarios",
status_code=status.HTTP_200_OK,
response_class=StreamingResponse,
dependencies=[Depends(require_any_role(
"admin", "project_manager", "analyst"))],
)
async def export_scenarios(
request: ScenarioExportRequest,
uow: Annotated[UnitOfWork, Depends(get_unit_of_work)],
) -> Response:
scenario_repo = _ensure_repository(
getattr(uow, "scenarios", None), "Scenario")
start = time.perf_counter()
try:
scenarios = scenario_repo.filtered_for_export(
request.filters, include_project=True)
except ValueError as exc:
_record_export_audit(
uow=uow,
dataset="scenarios",
status="failure",
export_format=request.format,
row_count=0,
filename=None,
)
logger.warning(
"export.validation_failed",
extra={
"event": "export",
"dataset": "scenarios",
"status": "validation_failed",
"format": request.format.value,
"error": str(exc),
},
)
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_CONTENT,
detail=str(exc),
) from exc
except Exception as exc:
_record_export_audit(
uow=uow,
dataset="scenarios",
status="failure",
export_format=request.format,
row_count=0,
filename=None,
)
logger.exception(
"export.failed",
extra={
"event": "export",
"dataset": "scenarios",
"status": "failure",
"format": request.format.value,
},
)
raise
filename = f"scenarios-{_timestamp_suffix()}"
if request.format == ExportFormat.CSV:
stream = stream_scenarios_to_csv(scenarios)
response = StreamingResponse(stream, media_type="text/csv")
response.headers["Content-Disposition"] = f"attachment; filename={filename}.csv"
_record_export_audit(
uow=uow,
dataset="scenarios",
status="success",
export_format=request.format,
row_count=len(scenarios),
filename=f"{filename}.csv",
)
logger.info(
"export",
extra={
"event": "export",
"dataset": "scenarios",
"status": "success",
"format": request.format.value,
"row_count": len(scenarios),
"filename": f"{filename}.csv",
},
)
observe_export(
dataset="scenarios",
status="success",
export_format=request.format.value,
seconds=time.perf_counter() - start,
)
return response
data = export_scenarios_to_excel(scenarios)
_record_export_audit(
uow=uow,
dataset="scenarios",
status="success",
export_format=request.format,
row_count=len(scenarios),
filename=f"{filename}.xlsx",
)
logger.info(
"export",
extra={
"event": "export",
"dataset": "scenarios",
"status": "success",
"format": request.format.value,
"row_count": len(scenarios),
"filename": f"{filename}.xlsx",
},
)
observe_export(
dataset="scenarios",
status="success",
export_format=request.format.value,
seconds=time.perf_counter() - start,
)
return StreamingResponse(
iter([data]),
media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
headers={
"Content-Disposition": f"attachment; filename={filename}.xlsx",
},
)
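A hypothetical client call against this endpoint (host and port assumed; the authentication enforced by require_any_role is omitted for brevity):

import httpx

response = httpx.post(
    "http://localhost:8003/exports/projects",
    json={"format": "csv", "filters": {"name_contains": "copper"}},
)
response.raise_for_status()
csv_bytes = response.content  # attachment with a timestamped filename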

routes/imports.py

@@ -3,6 +3,9 @@ from __future__ import annotations
from io import BytesIO
from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status
from fastapi import Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from dependencies import get_import_ingestion_service, require_roles
from models import User
@@ -16,10 +19,30 @@ from schemas.imports import (
from services.importers import ImportIngestionService, UnsupportedImportFormat
router = APIRouter(prefix="/imports", tags=["Imports"])
templates = Jinja2Templates(directory="templates")
MANAGE_ROLES = ("project_manager", "admin")
@router.get(
"/ui",
response_class=HTMLResponse,
include_in_schema=False,
name="imports.ui",
)
def import_dashboard(
request: Request,
_: User = Depends(require_roles(*MANAGE_ROLES)),
) -> HTMLResponse:
return templates.TemplateResponse(
request,
"imports/ui.html",
{
"title": "Imports",
},
)
async def _read_upload_file(upload: UploadFile) -> BytesIO:
content = await upload.read()
if not content:

routes/projects.py

@@ -7,6 +7,7 @@ from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from dependencies import (
get_pricing_metadata,
get_unit_of_work,
require_any_role,
require_project_resource,
@@ -15,6 +16,7 @@ from dependencies import (
from models import MiningOperationType, Project, ScenarioStatus, User
from schemas.project import ProjectCreate, ProjectRead, ProjectUpdate
from services.exceptions import EntityConflictError, EntityNotFoundError
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork
router = APIRouter(prefix="/projects", tags=["Projects"])
@@ -54,6 +56,7 @@ def create_project(
payload: ProjectCreate,
_: User = Depends(require_roles(*MANAGE_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> ProjectRead:
project = Project(**payload.model_dump())
try:
@@ -62,6 +65,9 @@ def create_project(
raise HTTPException(
status_code=status.HTTP_409_CONFLICT, detail=str(exc)
) from exc
default_settings = uow.ensure_default_pricing_settings(
metadata=metadata).settings
uow.set_project_pricing_settings(created, default_settings)
return _to_read_model(created)
@@ -122,6 +128,7 @@ def create_project_submit(
operation_type: str = Form(...),
description: str | None = Form(None),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
):
def _normalise(value: str | None) -> str | None:
if value is None:
@@ -152,7 +159,7 @@ def create_project_submit(
description=_normalise(description),
)
try:
_require_project_repo(uow).create(project)
created = _require_project_repo(uow).create(project)
except EntityConflictError as exc:
return templates.TemplateResponse(
request,
@@ -167,6 +174,10 @@ def create_project_submit(
status_code=status.HTTP_409_CONFLICT,
)
default_settings = uow.ensure_default_pricing_settings(
metadata=metadata).settings
uow.set_project_pricing_settings(created, default_settings)
return RedirectResponse(
request.url_for("projects.project_list_page"),
status_code=status.HTTP_303_SEE_OTHER,

512
routes/reports.py Normal file

@@ -0,0 +1,512 @@
from __future__ import annotations
from datetime import date
from urllib.parse import urlencode
from fastapi import APIRouter, Depends, HTTPException, Query, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from dependencies import (
get_unit_of_work,
require_any_role,
require_project_resource,
require_roles,
require_scenario_resource,
)
from models import Project, Scenario, User
from services.exceptions import EntityNotFoundError, ScenarioValidationError
from services.reporting import (
DEFAULT_ITERATIONS,
IncludeOptions,
ReportFilters,
ReportingService,
parse_include_tokens,
validate_percentiles,
)
from services.unit_of_work import UnitOfWork
router = APIRouter(prefix="/reports", tags=["Reports"])
templates = Jinja2Templates(directory="templates")
READ_ROLES = ("viewer", "analyst", "project_manager", "admin")
MANAGE_ROLES = ("project_manager", "admin")
@router.get("/projects/{project_id}", name="reports.project_summary")
def project_summary_report(
project: Project = Depends(require_project_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
include: str | None = Query(
None,
description="Comma-separated include tokens (distribution,samples,all).",
),
scenario_ids: list[int] | None = Query(
None,
alias="scenario_ids",
description="Repeatable scenario identifier filter.",
),
start_date: date | None = Query(
None,
description="Filter scenarios starting on or after this date.",
),
end_date: date | None = Query(
None,
description="Filter scenarios ending on or before this date.",
),
fmt: str = Query(
"json",
alias="format",
description="Response format (json only for this endpoint).",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count when distribution is included.",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries when included.",
),
) -> dict[str, object]:
if fmt.lower() != "json":
raise HTTPException(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail="Only JSON responses are supported; use the HTML endpoint for templates.",
)
include_options = parse_include_tokens(include)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
scenario_filter = ReportFilters(
scenario_ids=set(scenario_ids) if scenario_ids else None,
start_date=start_date,
end_date=end_date,
)
service = ReportingService(uow)
report = service.project_summary(
project,
filters=scenario_filter,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
return jsonable_encoder(report)
@router.get(
"/projects/{project_id}/scenarios/compare",
name="reports.project_scenario_comparison",
)
def project_scenario_comparison_report(
project: Project = Depends(require_project_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
scenario_ids: list[int] = Query(
..., alias="scenario_ids", description="Repeatable scenario identifier."),
include: str | None = Query(
None,
description="Comma-separated include tokens (distribution,samples,all).",
),
fmt: str = Query(
"json",
alias="format",
description="Response format (json only for this endpoint).",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count when distribution is included.",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries when included.",
),
) -> dict[str, object]:
unique_ids = list(dict.fromkeys(scenario_ids))
if len(unique_ids) < 2:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="At least two unique scenario_ids must be provided for comparison.",
)
if fmt.lower() != "json":
raise HTTPException(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail="Only JSON responses are supported; use the HTML endpoint for templates.",
)
include_options = parse_include_tokens(include)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
try:
scenarios = uow.validate_scenarios_for_comparison(unique_ids)
except ScenarioValidationError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"code": exc.code,
"message": exc.message,
"scenario_ids": list(exc.scenario_ids or []),
},
) from exc
except EntityNotFoundError as exc:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(exc),
) from exc
if any(scenario.project_id != project.id for scenario in scenarios):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="One or more scenarios are not associated with this project.",
)
service = ReportingService(uow)
report = service.scenario_comparison(
project,
scenarios,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
return jsonable_encoder(report)
@router.get(
"/scenarios/{scenario_id}/distribution",
name="reports.scenario_distribution",
)
def scenario_distribution_report(
scenario: Scenario = Depends(require_scenario_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
include: str | None = Query(
None,
description="Comma-separated include tokens (samples,all).",
),
fmt: str = Query(
"json",
alias="format",
description="Response format (json only for this endpoint).",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count (default applies otherwise).",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries.",
),
) -> dict[str, object]:
if fmt.lower() != "json":
raise HTTPException(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail="Only JSON responses are supported; use the HTML endpoint for templates.",
)
requested = parse_include_tokens(include)
include_options = IncludeOptions(
distribution=True, samples=requested.samples)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
service = ReportingService(uow)
report = service.scenario_distribution(
scenario,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
return jsonable_encoder(report)
@router.get(
"/projects/{project_id}/ui",
response_class=HTMLResponse,
include_in_schema=False,
name="reports.project_summary_page",
)
def project_summary_page(
request: Request,
project: Project = Depends(require_project_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
include: str | None = Query(
None,
description="Comma-separated include tokens (distribution,samples,all).",
),
scenario_ids: list[int] | None = Query(
None,
alias="scenario_ids",
description="Repeatable scenario identifier filter.",
),
start_date: date | None = Query(
None,
description="Filter scenarios starting on or after this date.",
),
end_date: date | None = Query(
None,
description="Filter scenarios ending on or before this date.",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count when distribution is included.",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries when included.",
),
) -> HTMLResponse:
include_options = parse_include_tokens(include)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
scenario_filter = ReportFilters(
scenario_ids=set(scenario_ids) if scenario_ids else None,
start_date=start_date,
end_date=end_date,
)
service = ReportingService(uow)
report = service.project_summary(
project,
filters=scenario_filter,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
context = {
"request": request,
"project": report["project"],
"scenario_count": report["scenario_count"],
"aggregates": report["aggregates"],
"scenarios": report["scenarios"],
"filters": report["filters"],
"include_options": include_options,
"iterations": iterations or DEFAULT_ITERATIONS,
"percentiles": percentile_values,
"title": f"Project Summary · {project.name}",
"subtitle": "Aggregated financial and simulation insights across scenarios.",
"actions": [
{
"href": request.url_for(
"reports.project_summary",
project_id=project.id,
),
"label": "Download JSON",
}
],
}
return templates.TemplateResponse(
request,
"reports/project_summary.html",
context,
)
@router.get(
"/projects/{project_id}/scenarios/compare/ui",
response_class=HTMLResponse,
include_in_schema=False,
name="reports.project_scenario_comparison_page",
)
def project_scenario_comparison_page(
request: Request,
project: Project = Depends(require_project_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
scenario_ids: list[int] = Query(
..., alias="scenario_ids", description="Repeatable scenario identifier."),
include: str | None = Query(
None,
description="Comma-separated include tokens (distribution,samples,all).",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count when distribution is included.",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries when included.",
),
) -> HTMLResponse:
unique_ids = list(dict.fromkeys(scenario_ids))
if len(unique_ids) < 2:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="At least two unique scenario_ids must be provided for comparison.",
)
include_options = parse_include_tokens(include)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
try:
scenarios = uow.validate_scenarios_for_comparison(unique_ids)
except ScenarioValidationError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"code": exc.code,
"message": exc.message,
"scenario_ids": list(exc.scenario_ids or []),
},
) from exc
except EntityNotFoundError as exc:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(exc),
) from exc
if any(scenario.project_id != project.id for scenario in scenarios):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="One or more scenarios are not associated with this project.",
)
service = ReportingService(uow)
report = service.scenario_comparison(
project,
scenarios,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
comparison_json_url = request.url_for(
"reports.project_scenario_comparison",
project_id=project.id,
)
comparison_query = urlencode(
[("scenario_ids", str(identifier)) for identifier in unique_ids]
)
if comparison_query:
comparison_json_url = f"{comparison_json_url}?{comparison_query}"
context = {
"request": request,
"project": report["project"],
"scenarios": report["scenarios"],
"comparison": report["comparison"],
"include_options": include_options,
"iterations": iterations or DEFAULT_ITERATIONS,
"percentiles": percentile_values,
"title": f"Scenario Comparison · {project.name}",
"subtitle": "Evaluate deterministic metrics and Monte Carlo trends side by side.",
"actions": [
{
"href": comparison_json_url,
"label": "Download JSON",
}
],
}
return templates.TemplateResponse(
request,
"reports/scenario_comparison.html",
context,
)
@router.get(
"/scenarios/{scenario_id}/distribution/ui",
response_class=HTMLResponse,
include_in_schema=False,
name="reports.scenario_distribution_page",
)
def scenario_distribution_page(
request: Request,
scenario: Scenario = Depends(require_scenario_resource()),
_: User = Depends(require_any_role(*READ_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
include: str | None = Query(
None,
description="Comma-separated include tokens (samples,all).",
),
iterations: int | None = Query(
None,
gt=0,
description="Override Monte Carlo iteration count (default applies otherwise).",
),
percentiles: list[float] | None = Query(
None,
description="Percentiles (0-100) for Monte Carlo summaries.",
),
) -> HTMLResponse:
requested = parse_include_tokens(include)
include_options = IncludeOptions(
distribution=True, samples=requested.samples)
try:
percentile_values = validate_percentiles(percentiles)
except ValueError as exc:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail=str(exc),
) from exc
service = ReportingService(uow)
report = service.scenario_distribution(
scenario,
include=include_options,
iterations=iterations or DEFAULT_ITERATIONS,
percentiles=percentile_values,
)
context = {
"request": request,
"scenario": report["scenario"],
"summary": report["summary"],
"metrics": report["metrics"],
"monte_carlo": report["monte_carlo"],
"include_options": include_options,
"iterations": iterations or DEFAULT_ITERATIONS,
"percentiles": percentile_values,
"title": f"Scenario Distribution · {scenario.name}",
"subtitle": "Deterministic and simulated distributions for a single scenario.",
"actions": [
{
"href": request.url_for(
"reports.scenario_distribution",
scenario_id=scenario.id,
),
"label": "Download JSON",
}
],
}
return templates.TemplateResponse(
request,
"reports/scenario_distribution.html",
context,
)
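Since scenario_ids is a repeatable query parameter, a client passes it once per identifier; a sketch with hypothetical IDs (auth omitted):

import httpx

response = httpx.get(
    "http://localhost:8003/reports/projects/1/scenarios/compare",
    params=[("scenario_ids", 1), ("scenario_ids", 2), ("include", "distribution")],
)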

routes/scenarios.py

@@ -1,6 +1,7 @@
from __future__ import annotations
from datetime import date
from types import SimpleNamespace
from typing import List
from fastapi import APIRouter, Depends, Form, HTTPException, Request, status
@@ -8,6 +9,7 @@ from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from dependencies import (
get_pricing_metadata,
get_unit_of_work,
require_any_role,
require_roles,
@@ -21,11 +23,13 @@ from schemas.scenario import (
ScenarioRead,
ScenarioUpdate,
)
from services.currency import CurrencyValidationError, normalise_currency
from services.exceptions import (
EntityConflictError,
EntityNotFoundError,
ScenarioValidationError,
)
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork
router = APIRouter(tags=["Scenarios"])
@@ -143,6 +147,7 @@ def create_scenario_for_project(
payload: ScenarioCreate,
_: User = Depends(require_roles(*MANAGE_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> ScenarioRead:
project_repo = _require_project_repo(uow)
scenario_repo = _require_scenario_repo(uow)
@@ -152,7 +157,10 @@ def create_scenario_for_project(
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc
scenario = Scenario(project_id=project_id, **payload.model_dump())
scenario_data = payload.model_dump()
if not scenario_data.get("currency") and metadata.default_currency:
scenario_data["currency"] = metadata.default_currency
scenario = Scenario(project_id=project_id, **scenario_data)
try:
created = scenario_repo.create(scenario)
@@ -219,6 +227,33 @@ def _parse_discount_rate(value: str | None) -> float | None:
return None
def _scenario_form_state(
*,
project_id: int,
name: str,
description: str | None,
status: ScenarioStatus,
start_date: date | None,
end_date: date | None,
discount_rate: float | None,
currency: str | None,
primary_resource: ResourceType | None,
scenario_id: int | None = None,
) -> SimpleNamespace:
return SimpleNamespace(
id=scenario_id,
project_id=project_id,
name=name,
description=description,
status=status,
start_date=start_date,
end_date=end_date,
discount_rate=discount_rate,
currency=currency,
primary_resource=primary_resource,
)
@router.get(
"/projects/{project_id}/scenarios/new",
response_class=HTMLResponse,
@@ -230,6 +265,7 @@ def create_scenario_form(
request: Request,
_: User = Depends(require_roles(*MANAGE_ROLES)),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> HTMLResponse:
try:
project = _require_project_repo(uow).get(project_id)
@@ -252,6 +288,7 @@ def create_scenario_form(
"cancel_url": request.url_for(
"projects.view_project", project_id=project_id
),
"default_currency": metadata.default_currency,
},
)
@@ -274,6 +311,7 @@ def create_scenario_submit(
currency: str | None = Form(None),
primary_resource: str | None = Form(None),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
):
project_repo = _require_project_repo(uow)
scenario_repo = _require_scenario_repo(uow)
@@ -296,17 +334,59 @@ def create_scenario_submit(
except ValueError:
resource_enum = None
currency_value = _normalise(currency)
currency_value = currency_value.upper() if currency_value else None
name_value = name.strip()
description_value = _normalise(description)
start_date_value = _parse_date(start_date)
end_date_value = _parse_date(end_date)
discount_rate_value = _parse_discount_rate(discount_rate)
currency_input = _normalise(currency)
effective_currency = currency_input or metadata.default_currency
try:
currency_value = (
normalise_currency(effective_currency)
if effective_currency else None
)
except CurrencyValidationError as exc:
form_state = _scenario_form_state(
project_id=project_id,
name=name_value,
description=description_value,
status=status_enum,
start_date=start_date_value,
end_date=end_date_value,
discount_rate=discount_rate_value,
currency=currency_input or metadata.default_currency,
primary_resource=resource_enum,
)
return templates.TemplateResponse(
request,
"scenarios/form.html",
{
"project": project,
"scenario": form_state,
"scenario_statuses": _scenario_status_choices(),
"resource_types": _resource_type_choices(),
"form_action": request.url_for(
"scenarios.create_scenario_submit", project_id=project_id
),
"cancel_url": request.url_for(
"projects.view_project", project_id=project_id
),
"error": str(exc),
"default_currency": metadata.default_currency,
},
status_code=status.HTTP_400_BAD_REQUEST,
)
scenario = Scenario(
project_id=project_id,
name=name.strip(),
description=_normalise(description),
name=name_value,
description=description_value,
status=status_enum,
start_date=_parse_date(start_date),
end_date=_parse_date(end_date),
discount_rate=_parse_discount_rate(discount_rate),
start_date=start_date_value,
end_date=end_date_value,
discount_rate=discount_rate_value,
currency=currency_value,
primary_resource=resource_enum,
)
@@ -329,6 +409,7 @@ def create_scenario_submit(
"projects.view_project", project_id=project_id
),
"error": "Scenario could not be created.",
"default_currency": metadata.default_currency,
},
status_code=status.HTTP_409_CONFLICT,
)
@@ -392,6 +473,7 @@ def edit_scenario_form(
require_scenario_resource(require_manage=True)
),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
) -> HTMLResponse:
project = _require_project_repo(uow).get(scenario.project_id)
@@ -409,6 +491,7 @@ def edit_scenario_form(
"cancel_url": request.url_for(
"scenarios.view_scenario", scenario_id=scenario.id
),
"default_currency": metadata.default_currency,
},
)
@@ -432,22 +515,17 @@ def edit_scenario_submit(
currency: str | None = Form(None),
primary_resource: str | None = Form(None),
uow: UnitOfWork = Depends(get_unit_of_work),
metadata: PricingMetadata = Depends(get_pricing_metadata),
):
project = _require_project_repo(uow).get(scenario.project_id)
scenario.name = name.strip()
scenario.description = _normalise(description)
name_value = name.strip()
description_value = _normalise(description)
try:
scenario.status = ScenarioStatus(status_value)
except ValueError:
scenario.status = ScenarioStatus.DRAFT
scenario.start_date = _parse_date(start_date)
scenario.end_date = _parse_date(end_date)
scenario.discount_rate = _parse_discount_rate(discount_rate)
currency_value = _normalise(currency)
scenario.currency = currency_value.upper() if currency_value else None
status_enum = scenario.status
resource_enum = None
if primary_resource:
@@ -455,6 +533,53 @@ def edit_scenario_submit(
resource_enum = ResourceType(primary_resource)
except ValueError:
resource_enum = None
start_date_value = _parse_date(start_date)
end_date_value = _parse_date(end_date)
discount_rate_value = _parse_discount_rate(discount_rate)
currency_input = _normalise(currency)
try:
currency_value = normalise_currency(currency_input)
except CurrencyValidationError as exc:
form_state = _scenario_form_state(
scenario_id=scenario.id,
project_id=scenario.project_id,
name=name_value,
description=description_value,
status=status_enum,
start_date=start_date_value,
end_date=end_date_value,
discount_rate=discount_rate_value,
currency=currency_input,
primary_resource=resource_enum,
)
return templates.TemplateResponse(
request,
"scenarios/form.html",
{
"project": project,
"scenario": form_state,
"scenario_statuses": _scenario_status_choices(),
"resource_types": _resource_type_choices(),
"form_action": request.url_for(
"scenarios.edit_scenario_submit", scenario_id=scenario.id
),
"cancel_url": request.url_for(
"scenarios.view_scenario", scenario_id=scenario.id
),
"error": str(exc),
"default_currency": metadata.default_currency,
},
status_code=status.HTTP_400_BAD_REQUEST,
)
scenario.name = name_value
scenario.description = description_value
scenario.start_date = start_date_value
scenario.end_date = end_date_value
scenario.discount_rate = discount_rate_value
scenario.currency = currency_value
scenario.primary_resource = resource_enum
uow.flush()

69
schemas/exports.py Normal file

@@ -0,0 +1,69 @@
from __future__ import annotations
from enum import Enum
from typing import Literal
from pydantic import BaseModel, ConfigDict, field_validator
from services.export_query import ProjectExportFilters, ScenarioExportFilters
class ExportFormat(str, Enum):
CSV = "csv"
XLSX = "xlsx"
class BaseExportRequest(BaseModel):
format: ExportFormat = ExportFormat.CSV
include_metadata: bool = False
model_config = ConfigDict(extra="forbid")
class ProjectExportRequest(BaseExportRequest):
filters: ProjectExportFilters | None = None
@field_validator("filters", mode="before")
@classmethod
def validate_filters(cls, value: ProjectExportFilters | None) -> ProjectExportFilters | None:
if value is None:
return None
if isinstance(value, ProjectExportFilters):
return value
return ProjectExportFilters(**value)
class ScenarioExportRequest(BaseExportRequest):
filters: ScenarioExportFilters | None = None
@field_validator("filters", mode="before")
@classmethod
def validate_filters(cls, value: ScenarioExportFilters | None) -> ScenarioExportFilters | None:
if value is None:
return None
if isinstance(value, ScenarioExportFilters):
return value
return ScenarioExportFilters(**value)
class ExportTicket(BaseModel):
token: str
format: ExportFormat
resource: Literal["projects", "scenarios"]
model_config = ConfigDict(extra="forbid")
class ExportResponse(BaseModel):
ticket: ExportTicket
model_config = ConfigDict(extra="forbid")
__all__ = [
"ExportFormat",
"ProjectExportRequest",
"ScenarioExportRequest",
"ExportTicket",
"ExportResponse",
]
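Because the filters validators run with mode="before", request bodies can carry plain dicts; an illustrative round trip:

from schemas.exports import ExportFormat, ProjectExportRequest

req = ProjectExportRequest(
    format=ExportFormat.XLSX,
    filters={"name_contains": "mine"},  # coerced to ProjectExportFilters
)
assert req.filters is not None and req.filters.name_contains == "mine"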

schemas/imports.py

@@ -7,6 +7,7 @@ from typing import Literal
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from models import MiningOperationType, ResourceType, ScenarioStatus
from services.currency import CurrencyValidationError, normalise_currency
PreviewStateLiteral = Literal["new", "update", "skip", "error"]
@@ -142,14 +143,13 @@ class ScenarioImportRow(BaseModel):
@field_validator("currency", mode="before")
@classmethod
def normalise_currency(cls, value: Any | None) -> str | None:
if value is None:
text = _strip_or_none(value)
if text is None:
return None
text = _normalise_string(value).upper()
if not text:
return None
if len(text) != 3:
raise ValueError("Currency code must be a 3-letter ISO value")
return text
try:
return normalise_currency(text)
except CurrencyValidationError as exc:
raise ValueError(str(exc)) from exc
@field_validator("discount_rate", mode="before")
@classmethod

schemas/scenario.py

@@ -5,6 +5,7 @@ from datetime import date, datetime
from pydantic import BaseModel, ConfigDict, field_validator, model_validator
from models import ResourceType, ScenarioStatus
from services.currency import CurrencyValidationError, normalise_currency
class ScenarioBase(BaseModel):
@@ -23,11 +24,15 @@ class ScenarioBase(BaseModel):
@classmethod
def normalise_currency(cls, value: str | None) -> str | None:
if value is None:
return value
value = value.upper()
if len(value) != 3:
raise ValueError("Currency code must be a 3-letter ISO value")
return value
return None
candidate = value if isinstance(value, str) else str(value)
candidate = candidate.strip()
if not candidate:
return None
try:
return normalise_currency(candidate)
except CurrencyValidationError as exc:
raise ValueError(str(exc)) from exc
class ScenarioCreate(ScenarioBase):
@@ -50,11 +55,15 @@ class ScenarioUpdate(BaseModel):
@classmethod
def normalise_currency(cls, value: str | None) -> str | None:
if value is None:
return value
value = value.upper()
if len(value) != 3:
raise ValueError("Currency code must be a 3-letter ISO value")
return value
return None
candidate = value if isinstance(value, str) else str(value)
candidate = candidate.strip()
if not candidate:
return None
try:
return normalise_currency(candidate)
except CurrencyValidationError as exc:
raise ValueError(str(exc)) from exc
class ScenarioRead(ScenarioBase):
@@ -75,7 +84,8 @@ class ScenarioComparisonRequest(BaseModel):
def ensure_minimum_ids(self) -> "ScenarioComparisonRequest":
unique_ids: list[int] = list(dict.fromkeys(self.scenario_ids))
if len(unique_ids) < 2:
raise ValueError("At least two unique scenario identifiers are required for comparison.")
raise ValueError(
"At least two unique scenario identifiers are required for comparison.")
self.scenario_ids = unique_ids
return self

scripts/docker-entrypoint.sh

@@ -0,0 +1,9 @@
#!/usr/bin/env sh
set -e
PYTHONPATH="/app:${PYTHONPATH}"
export PYTHONPATH
python -m scripts.run_migrations
exec "$@"


@@ -7,8 +7,15 @@ from typing import Callable, Iterable
from dotenv import load_dotenv
from config.settings import Settings
from models import Role, User
from services.repositories import DEFAULT_ROLE_DEFINITIONS, RoleRepository, UserRepository
from services.repositories import (
DEFAULT_ROLE_DEFINITIONS,
PricingSettingsSeedResult,
RoleRepository,
UserRepository,
ensure_default_pricing_settings,
)
from services.unit_of_work import UnitOfWork
@@ -45,7 +52,8 @@ def parse_bool(value: str | None) -> bool:
def normalise_role_list(raw_value: str | None) -> tuple[str, ...]:
if not raw_value:
return ("admin",)
parts = [segment.strip() for segment in raw_value.split(",") if segment.strip()]
parts = [segment.strip()
for segment in raw_value.split(",") if segment.strip()]
if "admin" not in parts:
parts.insert(0, "admin")
seen: set[str] = set()
@@ -59,7 +67,8 @@ def normalise_role_list(raw_value: str | None) -> tuple[str, ...]:
def load_config() -> SeedConfig:
load_dotenv()
admin_email = os.getenv("CALMINER_SEED_ADMIN_EMAIL", "admin@calminer.local")
admin_email = os.getenv("CALMINER_SEED_ADMIN_EMAIL",
"admin@calminer.local")
admin_username = os.getenv("CALMINER_SEED_ADMIN_USERNAME", "admin")
admin_password = os.getenv("CALMINER_SEED_ADMIN_PASSWORD", "ChangeMe123!")
admin_roles = normalise_role_list(os.getenv("CALMINER_SEED_ADMIN_ROLES"))
@@ -140,12 +149,15 @@ def ensure_admin_user(
for role_name in config.admin_roles:
role = role_repo.get_by_name(role_name)
if role is None:
logging.warning("Role '%s' is not defined and will be skipped", role_name)
logging.warning(
"Role '%s' is not defined and will be skipped", role_name)
continue
already_assigned = any(assignment.role_id == role.id for assignment in user.role_assignments)
already_assigned = any(assignment.role_id ==
role.id for assignment in user.role_assignments)
if already_assigned:
continue
user_repo.assign_role(user_id=user.id, role_id=role.id, granted_by=user.id)
user_repo.assign_role(
user_id=user.id, role_id=role.id, granted_by=user.id)
roles_granted += 1
return AdminSeedResult(
@@ -164,9 +176,33 @@ def seed_initial_data(
logging.info("Starting initial data seeding")
factory = unit_of_work_factory or UnitOfWork
with factory() as uow:
assert uow.roles is not None and uow.users is not None
assert (
uow.roles is not None
and uow.users is not None
and uow.pricing_settings is not None
and uow.projects is not None
)
role_result = ensure_default_roles(uow.roles)
admin_result = ensure_admin_user(uow.users, uow.roles, config)
pricing_metadata = uow.get_pricing_metadata()
metadata_source = "database"
if pricing_metadata is None:
pricing_metadata = Settings.from_environment().pricing_metadata()
metadata_source = "environment"
pricing_result: PricingSettingsSeedResult = ensure_default_pricing_settings(
uow.pricing_settings,
metadata=pricing_metadata,
)
projects_without_pricing = [
project
for project in uow.projects.list(with_pricing=True)
if project.pricing_settings is None
]
assigned_projects = 0
for project in projects_without_pricing:
uow.set_project_pricing_settings(project, pricing_result.settings)
assigned_projects += 1
logging.info(
"Roles processed: %s total, %s created, %s updated",
role_result.total,
@@ -180,4 +216,16 @@ def seed_initial_data(
admin_result.password_rotated,
admin_result.roles_granted,
)
logging.info(
"Pricing settings ensured (source=%s): slug=%s created=%s updated_fields=%s impurity_upserts=%s",
metadata_source,
pricing_result.settings.slug,
pricing_result.created,
pricing_result.updated_fields,
pricing_result.impurity_upserts,
)
logging.info(
"Projects updated with default pricing settings: %s",
assigned_projects,
)
logging.info("Initial data seeding completed successfully")

42
scripts/run_migrations.py Normal file

@@ -0,0 +1,42 @@
"""Utility for applying Alembic migrations before application startup."""
from __future__ import annotations
import logging
from pathlib import Path
from alembic import command
from alembic.config import Config
from dotenv import load_dotenv
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _load_env() -> None:
"""Ensure environment variables from .env are available."""
load_dotenv()
def _alembic_config(project_root: Path) -> Config:
config_path = project_root / "alembic.ini"
if not config_path.exists():
raise FileNotFoundError(f"Missing alembic.ini at {config_path}")
config = Config(str(config_path))
config.set_main_option("script_location", str(project_root / "alembic"))
return config
def run_migrations(target_revision: str = "head") -> None:
"""Apply Alembic migrations up to the given revision."""
project_root = Path(__file__).resolve().parent.parent
_load_env()
config = _alembic_config(project_root)
logger.info("Applying database migrations up to %s", target_revision)
command.upgrade(config, target_revision)
logger.info("Database migrations applied successfully")
if __name__ == "__main__":
run_migrations()

services/__init__.py

@@ -1 +1,10 @@
"""Service layer utilities."""
from .pricing import calculate_pricing, PricingInput, PricingMetadata, PricingResult
__all__ = [
"calculate_pricing",
"PricingInput",
"PricingMetadata",
"PricingResult",
]

services/bootstrap.py

@@ -6,7 +6,11 @@ from typing import Callable
from config.settings import AdminBootstrapSettings
from models import User
from services.repositories import ensure_default_roles
from services.pricing import PricingMetadata
from services.repositories import (
PricingSettingsSeedResult,
ensure_default_roles,
)
from services.unit_of_work import UnitOfWork
@@ -27,6 +31,12 @@ class AdminBootstrapResult:
roles_granted: int
@dataclass(slots=True)
class PricingBootstrapResult:
seed: PricingSettingsSeedResult
projects_assigned: int
def bootstrap_admin(
*,
settings: AdminBootstrapSettings,
@@ -127,3 +137,37 @@ def _bootstrap_admin_user(
password_rotated=password_rotated,
roles_granted=roles_granted,
)
def bootstrap_pricing_settings(
*,
metadata: PricingMetadata,
unit_of_work_factory: Callable[[], UnitOfWork] = UnitOfWork,
default_slug: str = "default",
) -> PricingBootstrapResult:
"""Ensure baseline pricing settings exist and projects reference them."""
with unit_of_work_factory() as uow:
seed_result = uow.ensure_default_pricing_settings(
metadata=metadata,
slug=default_slug,
)
assigned = 0
if uow.projects:
default_settings = seed_result.settings
projects = uow.projects.list(with_pricing=True)
for project in projects:
if project.pricing_settings is None:
uow.set_project_pricing_settings(project, default_settings)
assigned += 1
logger.info(
"Pricing bootstrap result: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
seed_result.settings.slug,
seed_result.created,
seed_result.updated_fields,
seed_result.impurity_upserts,
assigned,
)
return PricingBootstrapResult(seed=seed_result, projects_assigned=assigned)
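A startup wiring sketch (module path assumed; Settings.from_environment() is used the same way in the seed script):

from config.settings import Settings
from services.bootstrap import bootstrap_pricing_settings

result = bootstrap_pricing_settings(
    metadata=Settings.from_environment().pricing_metadata(),
)
print(result.projects_assigned)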

43
services/currency.py Normal file

@@ -0,0 +1,43 @@
"""Utilities for currency normalization within pricing and financial workflows."""
from __future__ import annotations
import re
from dataclasses import dataclass
VALID_CURRENCY_PATTERN = re.compile(r"^[A-Z]{3}$")
@dataclass(frozen=True)
class CurrencyValidationError(ValueError):
"""Raised when a currency code fails validation."""
code: str
def __str__(self) -> str:  # pragma: no cover - dataclass __str__ not required in tests
return f"Invalid currency code: {self.code!r}"
def normalise_currency(code: str | None) -> str | None:
"""Normalise currency codes to uppercase ISO-4217 values."""
if code is None:
return None
candidate = code.strip().upper()
if not VALID_CURRENCY_PATTERN.match(candidate):
raise CurrencyValidationError(candidate)
return candidate
def require_currency(code: str | None, default: str | None = None) -> str:
"""Return normalised currency code, falling back to default when missing."""
normalised = normalise_currency(code)
if normalised is not None:
return normalised
if default is None:
raise CurrencyValidationError("<missing currency>")
fallback = normalise_currency(default)
if fallback is None:
raise CurrencyValidationError("<invalid default currency>")
return fallback
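Behaviour in brief (illustrative values):

from services.currency import CurrencyValidationError, normalise_currency, require_currency

assert normalise_currency(" usd ") == "USD"
assert normalise_currency(None) is None
assert require_currency(None, default="eur") == "EUR"
try:
    normalise_currency("EURO")
except CurrencyValidationError as exc:
    print(exc)  # Invalid currency code: 'EURO'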

121
services/export_query.py Normal file

@@ -0,0 +1,121 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime
from typing import Iterable
from models import MiningOperationType, ResourceType, ScenarioStatus
from services.currency import CurrencyValidationError, normalise_currency
def _normalise_lower_strings(values: Iterable[str]) -> tuple[str, ...]:
unique: set[str] = set()
for value in values:
if not value:
continue
trimmed = value.strip().lower()
if not trimmed:
continue
unique.add(trimmed)
return tuple(sorted(unique))
def _normalise_upper_strings(values: Iterable[str | None]) -> tuple[str, ...]:
unique: set[str] = set()
for value in values:
if value is None:
continue
candidate = value if isinstance(value, str) else str(value)
candidate = candidate.strip()
if not candidate:
continue
try:
normalised = normalise_currency(candidate)
except CurrencyValidationError as exc:
raise ValueError(str(exc)) from exc
if normalised is None:
continue
unique.add(normalised)
return tuple(sorted(unique))
@dataclass(slots=True, frozen=True)
class ProjectExportFilters:
"""Filter parameters for project export queries."""
ids: tuple[int, ...] = ()
names: tuple[str, ...] = ()
name_contains: str | None = None
locations: tuple[str, ...] = ()
operation_types: tuple[MiningOperationType, ...] = ()
created_from: datetime | None = None
created_to: datetime | None = None
updated_from: datetime | None = None
updated_to: datetime | None = None
def normalised_ids(self) -> tuple[int, ...]:
unique = {identifier for identifier in self.ids if identifier > 0}
return tuple(sorted(unique))
def normalised_names(self) -> tuple[str, ...]:
return _normalise_lower_strings(self.names)
def normalised_locations(self) -> tuple[str, ...]:
return _normalise_lower_strings(self.locations)
def name_search_pattern(self) -> str | None:
if not self.name_contains:
return None
pattern = self.name_contains.strip()
if not pattern:
return None
return f"%{pattern}%"
@dataclass(slots=True, frozen=True)
class ScenarioExportFilters:
"""Filter parameters for scenario export queries."""
ids: tuple[int, ...] = ()
project_ids: tuple[int, ...] = ()
project_names: tuple[str, ...] = ()
name_contains: str | None = None
statuses: tuple[ScenarioStatus, ...] = ()
start_date_from: date | None = None
start_date_to: date | None = None
end_date_from: date | None = None
end_date_to: date | None = None
created_from: datetime | None = None
created_to: datetime | None = None
updated_from: datetime | None = None
updated_to: datetime | None = None
currencies: tuple[str, ...] = ()
primary_resources: tuple[ResourceType, ...] = ()
def normalised_ids(self) -> tuple[int, ...]:
unique = {identifier for identifier in self.ids if identifier > 0}
return tuple(sorted(unique))
def normalised_project_ids(self) -> tuple[int, ...]:
unique = {identifier for identifier in self.project_ids if identifier > 0}
return tuple(sorted(unique))
def normalised_project_names(self) -> tuple[str, ...]:
return _normalise_lower_strings(self.project_names)
def name_search_pattern(self) -> str | None:
if not self.name_contains:
return None
pattern = self.name_contains.strip()
if not pattern:
return None
return f"%{pattern}%"
def normalised_currencies(self) -> tuple[str, ...]:
return _normalise_upper_strings(self.currencies)
__all__ = (
"ProjectExportFilters",
"ScenarioExportFilters",
)
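Normalisation in practice (illustrative values):

from services.export_query import ScenarioExportFilters

filters = ScenarioExportFilters(ids=(3, 1, 1, -2), currencies=("usd", " eur ", ""))
assert filters.normalised_ids() == (1, 3)
assert filters.normalised_currencies() == ("EUR", "USD")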

services/export_serializers.py

@@ -0,0 +1,351 @@
from __future__ import annotations
import csv
from dataclasses import dataclass, field
from datetime import date, datetime, timezone
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
from enum import Enum
from io import BytesIO, StringIO
from typing import Any, Callable, Iterable, Iterator, Mapping, Sequence
from openpyxl import Workbook
CSVValueFormatter = Callable[[Any], str]
Accessor = Callable[[Any], Any]
__all__ = [
"CSVExportColumn",
"CSVExporter",
"default_project_columns",
"default_scenario_columns",
"stream_projects_to_csv",
"stream_scenarios_to_csv",
"ExcelExporter",
"export_projects_to_excel",
"export_scenarios_to_excel",
"default_formatter",
"format_datetime_utc",
"format_date_iso",
"format_decimal",
]
@dataclass(slots=True)
class CSVExportColumn:
"""Declarative description of a CSV export column."""
header: str
accessor: Accessor | str
formatter: CSVValueFormatter | None = None
required: bool = False
_accessor: Accessor = field(init=False, repr=False)
def __post_init__(self) -> None:
object.__setattr__(self, "_accessor", _coerce_accessor(self.accessor))
def value_for(self, entity: Any) -> Any:
accessor = object.__getattribute__(self, "_accessor")
try:
return accessor(entity)
except Exception: # pragma: no cover - defensive safeguard
return None
class CSVExporter:
"""Stream Python objects as UTF-8 encoded CSV rows."""
def __init__(
self,
columns: Sequence[CSVExportColumn],
*,
include_header: bool = True,
line_terminator: str = "\n",
) -> None:
if not columns:
raise ValueError("At least one column is required for CSV export.")
self._columns: tuple[CSVExportColumn, ...] = tuple(columns)
self._include_header = include_header
self._line_terminator = line_terminator
@property
def columns(self) -> tuple[CSVExportColumn, ...]:
return self._columns
def headers(self) -> tuple[str, ...]:
return tuple(column.header for column in self._columns)
def iter_bytes(self, records: Iterable[Any]) -> Iterator[bytes]:
buffer = StringIO()
writer = csv.writer(buffer, lineterminator=self._line_terminator)
if self._include_header:
writer.writerow(self.headers())
yield _drain_buffer(buffer)
for record in records:
writer.writerow(self._format_row(record))
yield _drain_buffer(buffer)
def _format_row(self, record: Any) -> list[str]:
formatted: list[str] = []
for column in self._columns:
raw_value = column.value_for(record)
formatter = column.formatter or default_formatter
formatted.append(formatter(raw_value))
return formatted
def default_project_columns(
*,
include_description: bool = True,
include_timestamps: bool = True,
) -> tuple[CSVExportColumn, ...]:
columns: list[CSVExportColumn] = [
CSVExportColumn("name", "name", required=True),
CSVExportColumn("location", "location"),
CSVExportColumn("operation_type", "operation_type"),
]
if include_description:
columns.append(CSVExportColumn("description", "description"))
if include_timestamps:
columns.extend(
(
CSVExportColumn("created_at", "created_at",
formatter=format_datetime_utc),
CSVExportColumn("updated_at", "updated_at",
formatter=format_datetime_utc),
)
)
return tuple(columns)
def default_scenario_columns(
*,
include_description: bool = True,
include_timestamps: bool = True,
) -> tuple[CSVExportColumn, ...]:
columns: list[CSVExportColumn] = [
CSVExportColumn(
"project_name",
lambda scenario: getattr(
getattr(scenario, "project", None), "name", None),
required=True,
),
CSVExportColumn("name", "name", required=True),
CSVExportColumn("status", "status"),
CSVExportColumn("start_date", "start_date", formatter=format_date_iso),
CSVExportColumn("end_date", "end_date", formatter=format_date_iso),
CSVExportColumn("discount_rate", "discount_rate",
formatter=format_decimal),
CSVExportColumn("currency", "currency"),
CSVExportColumn("primary_resource", "primary_resource"),
]
if include_description:
columns.append(CSVExportColumn("description", "description"))
if include_timestamps:
columns.extend(
(
CSVExportColumn("created_at", "created_at",
formatter=format_datetime_utc),
CSVExportColumn("updated_at", "updated_at",
formatter=format_datetime_utc),
)
)
return tuple(columns)
def stream_projects_to_csv(
projects: Iterable[Any],
*,
columns: Sequence[CSVExportColumn] | None = None,
) -> Iterator[bytes]:
resolved_columns = tuple(columns or default_project_columns())
exporter = CSVExporter(resolved_columns)
yield from exporter.iter_bytes(projects)
def stream_scenarios_to_csv(
scenarios: Iterable[Any],
*,
columns: Sequence[CSVExportColumn] | None = None,
) -> Iterator[bytes]:
resolved_columns = tuple(columns or default_scenario_columns())
exporter = CSVExporter(resolved_columns)
yield from exporter.iter_bytes(scenarios)
def default_formatter(value: Any) -> str:
if value is None:
return ""
if isinstance(value, Enum):
return str(value.value)
if isinstance(value, Decimal):
return format_decimal(value)
if isinstance(value, datetime):
return format_datetime_utc(value)
if isinstance(value, date):
return format_date_iso(value)
if isinstance(value, bool):
return "true" if value else "false"
return str(value)
def format_datetime_utc(value: Any) -> str:
if not isinstance(value, datetime):
return ""
if value.tzinfo is None:
value = value.replace(tzinfo=timezone.utc)
value = value.astimezone(timezone.utc)
return value.isoformat().replace("+00:00", "Z")
def format_date_iso(value: Any) -> str:
if not isinstance(value, date):
return ""
return value.isoformat()
def format_decimal(value: Any) -> str:
if value is None:
return ""
if isinstance(value, Decimal):
try:
quantised = value.quantize(Decimal("0.01"), rounding=ROUND_HALF_UP)
except InvalidOperation: # pragma: no cover - unexpected precision issues
quantised = value
return format(quantised, "f")
if isinstance(value, (int, float)):
return f"{value:.2f}"
return default_formatter(value)
class ExcelExporter:
"""Produce Excel workbooks via write-only streaming."""
def __init__(
self,
columns: Sequence[CSVExportColumn],
*,
sheet_name: str = "Export",
workbook_title: str | None = None,
include_header: bool = True,
metadata: Mapping[str, Any] | None = None,
metadata_sheet_name: str = "Metadata",
) -> None:
if not columns:
raise ValueError(
"At least one column is required for Excel export.")
self._columns: tuple[CSVExportColumn, ...] = tuple(columns)
self._sheet_name = sheet_name or "Export"
self._include_header = include_header
self._metadata = dict(metadata) if metadata else None
self._metadata_sheet_name = metadata_sheet_name or "Metadata"
self._workbook = Workbook(write_only=True)
if workbook_title:
self._workbook.properties.title = workbook_title
def export(self, records: Iterable[Any]) -> bytes:
sheet = self._workbook.create_sheet(title=self._sheet_name)
if self._include_header:
sheet.append([column.header for column in self._columns])
for record in records:
sheet.append(self._format_row(record))
self._append_metadata_sheet()
return self._finalize()
def _format_row(self, record: Any) -> list[Any]:
row: list[Any] = []
for column in self._columns:
raw_value = column.value_for(record)
formatter = column.formatter or default_formatter
row.append(formatter(raw_value))
return row
def _append_metadata_sheet(self) -> None:
if not self._metadata:
return
sheet_name = self._metadata_sheet_name
existing = set(self._workbook.sheetnames)
if sheet_name in existing:
index = 1
while True:
candidate = f"{sheet_name}_{index}"
if candidate not in existing:
sheet_name = candidate
break
index += 1
meta_ws = self._workbook.create_sheet(title=sheet_name)
meta_ws.append(["Key", "Value"])
for key, value in self._metadata.items():
meta_ws.append([
str(key),
"" if value is None else str(value),
])
def _finalize(self) -> bytes:
buffer = BytesIO()
self._workbook.save(buffer)
buffer.seek(0)
return buffer.getvalue()
def export_projects_to_excel(
projects: Iterable[Any],
*,
columns: Sequence[CSVExportColumn] | None = None,
sheet_name: str = "Projects",
workbook_title: str | None = None,
metadata: Mapping[str, Any] | None = None,
) -> bytes:
exporter = ExcelExporter(
columns or default_project_columns(),
sheet_name=sheet_name,
workbook_title=workbook_title,
metadata=metadata,
)
return exporter.export(projects)
def export_scenarios_to_excel(
scenarios: Iterable[Any],
*,
columns: Sequence[CSVExportColumn] | None = None,
sheet_name: str = "Scenarios",
workbook_title: str | None = None,
metadata: Mapping[str, Any] | None = None,
) -> bytes:
exporter = ExcelExporter(
columns or default_scenario_columns(),
sheet_name=sheet_name,
workbook_title=workbook_title,
metadata=metadata,
)
return exporter.export(scenarios)
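# Usage sketch (hypothetical records and metadata values): the default project
# columns are applied when none are supplied, and any metadata mapping lands on
# a separate worksheet.
#     payload = export_projects_to_excel(
#         projects,
#         workbook_title="Projects export",
#         metadata={"generated_at": "2025-11-11T00:00:00Z"},
#     )
#     # `payload` is the raw bytes of an .xlsx workbook with a Metadata sheet.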
def _coerce_accessor(accessor: Accessor | str) -> Accessor:
if callable(accessor):
return accessor
path = [segment for segment in accessor.split(".") if segment]
def _resolve(entity: Any) -> Any:
current: Any = entity
for segment in path:
if current is None:
return None
current = getattr(current, segment, None)
return current
return _resolve
def _drain_buffer(buffer: StringIO) -> bytes:
data = buffer.getvalue()
buffer.seek(0)
buffer.truncate(0)
return data.encode("utf-8")

services/financial.py (new file)

@@ -0,0 +1,248 @@
"""Financial calculation helpers for project evaluation metrics."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime
from math import isclose, isfinite
from typing import Iterable, List, Sequence, Tuple
Number = float
@dataclass(frozen=True, slots=True)
class CashFlow:
"""Represents a dated cash flow in scenario currency."""
amount: Number
period_index: int | None = None
date: date | datetime | None = None
class ConvergenceError(RuntimeError):
"""Raised when an iterative solver fails to converge."""
class PaybackNotReachedError(RuntimeError):
"""Raised when cumulative cash flows never reach a non-negative total."""
def _coerce_date(value: date | datetime) -> date:
if isinstance(value, datetime):
return value.date()
return value
def normalize_cash_flows(
cash_flows: Iterable[CashFlow],
*,
compounds_per_year: int = 1,
) -> List[Tuple[Number, float]]:
"""Normalise cash flows to ``(amount, periods)`` tuples.
When explicit ``period_index`` values are provided they take precedence. If
only dates are supplied, the first dated cash flow anchors the timeline and
subsequent cash flows convert their day offsets into fractional periods
based on ``compounds_per_year``. When neither a period index nor a date is
present, cash flows are treated as sequential periods in input order.
"""
flows: Sequence[CashFlow] = list(cash_flows)
if not flows:
return []
if compounds_per_year <= 0:
raise ValueError("compounds_per_year must be a positive integer")
base_date: date | None = None
for flow in flows:
if flow.date is not None:
base_date = _coerce_date(flow.date)
break
normalised: List[Tuple[Number, float]] = []
for idx, flow in enumerate(flows):
amount = float(flow.amount)
if flow.period_index is not None:
periods = float(flow.period_index)
elif flow.date is not None and base_date is not None:
current_date = _coerce_date(flow.date)
delta_days = (current_date - base_date).days
period_length_days = 365.0 / float(compounds_per_year)
periods = delta_days / period_length_days
else:
periods = float(idx)
normalised.append((amount, periods))
return normalised
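# Example (illustrative values): with annual compounding, dated flows convert
# day offsets into fractional periods anchored on the first dated flow.
#     normalize_cash_flows([
#         CashFlow(amount=-1000.0, date=date(2025, 1, 1)),
#         CashFlow(amount=600.0, date=date(2026, 1, 1)),
#     ])
#     # -> [(-1000.0, 0.0), (600.0, 1.0)]  (365 days / 365.0-day period length)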
def discount_factor(rate: Number, periods: float, *, compounds_per_year: int = 1) -> float:
"""Return the factor used to discount a value ``periods`` steps in the future."""
if compounds_per_year <= 0:
raise ValueError("compounds_per_year must be a positive integer")
periodic_rate = rate / float(compounds_per_year)
return (1.0 + periodic_rate) ** (-periods)
def net_present_value(
rate: Number,
cash_flows: Iterable[CashFlow],
*,
residual_value: Number | None = None,
residual_periods: float | None = None,
compounds_per_year: int = 1,
) -> float:
"""Calculate Net Present Value for ``cash_flows``.
``rate`` is a decimal (``0.1`` for 10%). Cash flows are discounted using the
given compounding frequency. When ``residual_value`` is provided it is
discounted at ``residual_periods`` periods; by default the value occurs one
period after the final cash flow.
"""
normalised = normalize_cash_flows(
cash_flows,
compounds_per_year=compounds_per_year,
)
if not normalised and residual_value is None:
return 0.0
total = 0.0
for amount, periods in normalised:
factor = discount_factor(
rate, periods, compounds_per_year=compounds_per_year)
total += amount * factor
if residual_value is not None:
if residual_periods is None:
last_period = normalised[-1][1] if normalised else 0.0
residual_periods = last_period + 1.0
factor = discount_factor(
rate, residual_periods, compounds_per_year=compounds_per_year)
total += float(residual_value) * factor
return total
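# Worked check (illustrative figures): at a 10% annual rate,
#     net_present_value(0.10, [CashFlow(-1000.0), CashFlow(600.0), CashFlow(600.0)])
#     # -> -1000 + 600/1.1 + 600/1.21 ≈ 41.32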
def internal_rate_of_return(
cash_flows: Iterable[CashFlow],
*,
guess: Number = 0.1,
max_iterations: int = 100,
tolerance: float = 1e-6,
compounds_per_year: int = 1,
) -> float:
"""Return the internal rate of return for ``cash_flows``.
Uses Newton-Raphson iteration with a bracketed fallback when the derivative
becomes unstable. Raises :class:`ConvergenceError` if no root is found.
"""
flows = normalize_cash_flows(
cash_flows,
compounds_per_year=compounds_per_year,
)
if not flows:
raise ValueError("cash_flows must contain at least one item")
amounts = [amount for amount, _ in flows]
if not any(amount < 0 for amount in amounts) or not any(amount > 0 for amount in amounts):
raise ValueError("cash_flows must include both negative and positive values")
def _npv_with_flows(rate: float) -> float:
periodic_rate = rate / float(compounds_per_year)
if periodic_rate <= -1.0:
return float("inf")
total = 0.0
for amount, periods in flows:
factor = (1.0 + periodic_rate) ** (-periods)
total += amount * factor
return total
def _derivative(rate: float) -> float:
periodic_rate = rate / float(compounds_per_year)
if periodic_rate <= -1.0:
return float("inf")
derivative = 0.0
for amount, periods in flows:
factor = (1.0 + periodic_rate) ** (-periods - 1.0)
derivative += -amount * periods * factor / float(compounds_per_year)
return derivative
rate = float(guess)
for _ in range(max_iterations):
value = _npv_with_flows(rate)
if isclose(value, 0.0, abs_tol=tolerance):
return rate
derivative = _derivative(rate)
if derivative == 0.0 or not isfinite(derivative):
break
next_rate = rate - value / derivative
if abs(next_rate - rate) < tolerance:
return next_rate
rate = next_rate
# Fallback to bracketed bisection between sensible bounds.
lower_bound = -0.99 * float(compounds_per_year)
upper_bound = 10.0
lower_value = _npv_with_flows(lower_bound)
upper_value = _npv_with_flows(upper_bound)
attempts = 0
while lower_value * upper_value > 0 and attempts < 12:
upper_bound *= 2.0
upper_value = _npv_with_flows(upper_bound)
attempts += 1
if lower_value * upper_value > 0:
raise ConvergenceError("IRR could not be bracketed within default bounds")
for _ in range(max_iterations * 2):
midpoint = (lower_bound + upper_bound) / 2.0
mid_value = _npv_with_flows(midpoint)
if isclose(mid_value, 0.0, abs_tol=tolerance):
return midpoint
if lower_value * mid_value < 0:
upper_bound = midpoint
upper_value = mid_value
else:
lower_bound = midpoint
lower_value = mid_value
raise ConvergenceError("IRR solver failed to converge")
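# For the same illustrative flows, the solver finds the rate where the NPV
# above crosses zero:
#     internal_rate_of_return([CashFlow(-1000.0), CashFlow(600.0), CashFlow(600.0)])
#     # -> ≈ 0.1307 (13.07%)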
def payback_period(
cash_flows: Iterable[CashFlow],
*,
allow_fractional: bool = True,
compounds_per_year: int = 1,
) -> float:
"""Return the period index where cumulative cash flow becomes non-negative."""
flows = normalize_cash_flows(
cash_flows,
compounds_per_year=compounds_per_year,
)
if not flows:
raise ValueError("cash_flows must contain at least one item")
flows = sorted(flows, key=lambda item: item[1])
cumulative = 0.0
previous_period = flows[0][1]
for index, (amount, periods) in enumerate(flows):
next_cumulative = cumulative + amount
if next_cumulative >= 0.0:
if not allow_fractional or isclose(amount, 0.0):
return periods
prev_period = previous_period if index > 0 else periods
fraction = -cumulative / amount
return prev_period + fraction * (periods - prev_period)
cumulative = next_cumulative
previous_period = periods
raise PaybackNotReachedError("Cumulative cash flow never becomes non-negative")
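# Continuing the illustration: cumulative cash is -400 after period 1, and
# 400/600 of period 2 closes the gap, so the fractional payback lands at
#     payback_period([CashFlow(-1000.0), CashFlow(600.0), CashFlow(600.0)])
#     # -> ≈ 1.667 periods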


@@ -1,9 +1,11 @@
from __future__ import annotations
import logging
import time
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, BinaryIO, Callable, Generic, Iterable, Mapping, TypeVar, cast
from typing import Any, BinaryIO, Callable, Generic, Iterable, Mapping, Optional, TypeVar, cast
from uuid import uuid4
from types import MappingProxyType
@@ -14,6 +16,10 @@ from pydantic import BaseModel, ValidationError
from models import Project, Scenario
from schemas.imports import ProjectImportRow, ScenarioImportRow
from services.unit_of_work import UnitOfWork
from models.import_export_log import ImportExportLog
from monitoring.metrics import observe_import
logger = logging.getLogger(__name__)
TImportRow = TypeVar("TImportRow", bound=BaseModel)
@@ -164,7 +170,34 @@ class ImportIngestionService:
stream: BinaryIO,
filename: str,
) -> ImportPreview[ProjectImportRow]:
start = time.perf_counter()
result = load_project_imports(stream, filename)
status = "success" if not result.errors else "partial"
self._record_audit_log(
action="preview",
dataset="projects",
status=status,
filename=filename,
row_count=len(result.rows),
detail=f"accepted={len(result.rows)} parser_errors={len(result.errors)}",
)
observe_import(
action="preview",
dataset="projects",
status=status,
seconds=time.perf_counter() - start,
)
logger.info(
"import.preview",
extra={
"event": "import.preview",
"dataset": "projects",
"status": status,
"filename": filename,
"row_count": len(result.rows),
"error_count": len(result.errors),
},
)
parser_errors = result.errors
preview_rows: list[ImportPreviewRow[ProjectImportRow]] = []
@@ -258,7 +291,34 @@ class ImportIngestionService:
stream: BinaryIO,
filename: str,
) -> ImportPreview[ScenarioImportRow]:
start = time.perf_counter()
result = load_scenario_imports(stream, filename)
status = "success" if not result.errors else "partial"
self._record_audit_log(
action="preview",
dataset="scenarios",
status=status,
filename=filename,
row_count=len(result.rows),
detail=f"accepted={len(result.rows)} parser_errors={len(result.errors)}",
)
observe_import(
action="preview",
dataset="scenarios",
status=status,
seconds=time.perf_counter() - start,
)
logger.info(
"import.preview",
extra={
"event": "import.preview",
"dataset": "scenarios",
"status": status,
"filename": filename,
"row_count": len(result.rows),
"error_count": len(result.errors),
},
)
parser_errors = result.errors
preview_rows: list[ImportPreviewRow[ScenarioImportRow]] = []
@@ -423,6 +483,8 @@ class ImportIngestionService:
staged_view = _build_staged_view(staged)
created = updated = 0
start = time.perf_counter()
try:
with self._uow_factory() as uow:
if not uow.projects:
raise RuntimeError("Project repository is unavailable")
@@ -463,6 +525,59 @@ class ImportIngestionService:
else:
raise ValueError(
f"Unsupported staged project mode: {mode!r}")
except Exception as exc:
self._record_audit_log(
action="commit",
dataset="projects",
status="failure",
filename=None,
row_count=len(staged.rows),
detail=f"error={type(exc).__name__}: {exc}",
)
observe_import(
action="commit",
dataset="projects",
status="failure",
seconds=time.perf_counter() - start,
)
logger.exception(
"import.commit.failed",
extra={
"event": "import.commit",
"dataset": "projects",
"status": "failure",
"row_count": len(staged.rows),
"token": token,
},
)
raise
else:
self._record_audit_log(
action="commit",
dataset="projects",
status="success",
filename=None,
row_count=len(staged.rows),
detail=f"created={created} updated={updated}",
)
observe_import(
action="commit",
dataset="projects",
status="success",
seconds=time.perf_counter() - start,
)
logger.info(
"import.commit",
extra={
"event": "import.commit",
"dataset": "projects",
"status": "success",
"row_count": len(staged.rows),
"created": created,
"updated": updated,
"token": token,
},
)
self._project_stage.pop(token, None)
return ImportCommitResult(
@@ -479,6 +594,8 @@ class ImportIngestionService:
staged_view = _build_staged_view(staged)
created = updated = 0
start = time.perf_counter()
try:
with self._uow_factory() as uow:
if not uow.scenarios or not uow.projects:
raise RuntimeError("Scenario repositories are unavailable")
@@ -537,6 +654,59 @@ class ImportIngestionService:
else:
raise ValueError(
f"Unsupported staged scenario mode: {mode!r}")
except Exception as exc:
self._record_audit_log(
action="commit",
dataset="scenarios",
status="failure",
filename=None,
row_count=len(staged.rows),
detail=f"error={type(exc).__name__}: {exc}",
)
observe_import(
action="commit",
dataset="scenarios",
status="failure",
seconds=time.perf_counter() - start,
)
logger.exception(
"import.commit.failed",
extra={
"event": "import.commit",
"dataset": "scenarios",
"status": "failure",
"row_count": len(staged.rows),
"token": token,
},
)
raise
else:
self._record_audit_log(
action="commit",
dataset="scenarios",
status="success",
filename=None,
row_count=len(staged.rows),
detail=f"created={created} updated={updated}",
)
observe_import(
action="commit",
dataset="scenarios",
status="success",
seconds=time.perf_counter() - start,
)
logger.info(
"import.commit",
extra={
"event": "import.commit",
"dataset": "scenarios",
"status": "success",
"row_count": len(staged.rows),
"created": created,
"updated": updated,
"token": token,
},
)
self._scenario_stage.pop(token, None)
return ImportCommitResult(
@@ -545,6 +715,34 @@ class ImportIngestionService:
summary=ImportCommitSummary(created=created, updated=updated),
)
def _record_audit_log(
self,
*,
action: str,
dataset: str,
status: str,
row_count: int,
detail: Optional[str],
filename: Optional[str],
) -> None:
try:
with self._uow_factory() as uow:
if uow.session is None:
return
log = ImportExportLog(
action=action,
dataset=dataset,
status=status,
filename=filename,
row_count=row_count,
detail=detail,
)
uow.session.add(log)
uow.commit()
except Exception:
# Audit logging must not break core workflows
pass
def _store_project_stage(
self, rows: list[StagedRow[ProjectImportRow]]
) -> str:

services/pricing.py (new file)

@@ -0,0 +1,176 @@
"""Pricing service implementing commodity revenue calculations.
This module exposes data models and helpers for computing product pricing
according to the formulas outlined in
``calminer-docs/specifications/price_calculation.md``. It focuses on the core
calculation steps (payable metal, penalties, net revenue) and is intended to be
composed within broader scenario evaluation workflows.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Mapping
from pydantic import BaseModel, Field, PositiveFloat, field_validator
from services.currency import require_currency
class PricingInput(BaseModel):
"""Normalized inputs for pricing calculations."""
metal: str = Field(..., min_length=1)
ore_tonnage: PositiveFloat = Field(
..., description="Total ore mass processed (metric tonnes)")
head_grade_pct: PositiveFloat = Field(..., gt=0,
le=100, description="Head grade as percent")
recovery_pct: PositiveFloat = Field(..., gt=0,
le=100, description="Recovery rate percent")
payable_pct: float | None = Field(
None, gt=0, le=100, description="Contractual payable percentage")
reference_price: PositiveFloat = Field(
..., description="Reference price in base currency per unit")
treatment_charge: float = Field(0, ge=0)
smelting_charge: float = Field(0, ge=0)
moisture_pct: float = Field(0, ge=0, le=100)
moisture_threshold_pct: float | None = Field(None, ge=0, le=100)
moisture_penalty_per_pct: float | None = Field(None)
impurity_ppm: Mapping[str, float] = Field(default_factory=dict)
impurity_thresholds: Mapping[str, float] = Field(default_factory=dict)
impurity_penalty_per_ppm: Mapping[str, float] = Field(default_factory=dict)
premiums: float = Field(0)
fx_rate: PositiveFloat = Field(
1, description="Multiplier to convert to scenario currency")
currency_code: str | None = Field(
None, description="Optional explicit currency override")
@field_validator("impurity_ppm", mode="before")
@classmethod
def _validate_impurity_mapping(cls, value):
if isinstance(value, Mapping):
return {k: float(v) for k, v in value.items()}
return value
class PricingResult(BaseModel):
"""Structured output summarising pricing computation results."""
metal: str
ore_tonnage: float
head_grade_pct: float
recovery_pct: float
payable_metal_tonnes: float
reference_price: float
gross_revenue: float
moisture_penalty: float
impurity_penalty: float
treatment_smelt_charges: float
premiums: float
net_revenue: float
currency: str | None
@dataclass(frozen=True)
class PricingMetadata:
"""Metadata defaults applied when explicit inputs are omitted."""
default_payable_pct: float = 100.0
default_currency: str | None = "USD"
moisture_threshold_pct: float = 8.0
moisture_penalty_per_pct: float = 0.0
impurity_thresholds: Mapping[str, float] = field(default_factory=dict)
impurity_penalty_per_ppm: Mapping[str, float] = field(default_factory=dict)
def calculate_pricing(
pricing_input: PricingInput,
*,
metadata: PricingMetadata | None = None,
currency: str | None = None,
) -> PricingResult:
"""Calculate pricing metrics for the provided commodity input.
Parameters
----------
pricing_input:
Normalised input data including ore tonnage, grades, charges, and
optional penalties.
metadata:
Optional default metadata applied when specific values are omitted from
``pricing_input``.
currency:
Optional override for the output currency label. Falls back to
``metadata.default_currency`` when not provided.
"""
applied_metadata = metadata or PricingMetadata()
payable_pct = (
pricing_input.payable_pct
if pricing_input.payable_pct is not None
else applied_metadata.default_payable_pct
)
moisture_threshold = (
pricing_input.moisture_threshold_pct
if pricing_input.moisture_threshold_pct is not None
else applied_metadata.moisture_threshold_pct
)
moisture_penalty_factor = (
pricing_input.moisture_penalty_per_pct
if pricing_input.moisture_penalty_per_pct is not None
else applied_metadata.moisture_penalty_per_pct
)
impurity_thresholds = {
**applied_metadata.impurity_thresholds,
**pricing_input.impurity_thresholds,
}
impurity_penalty_factors = {
**applied_metadata.impurity_penalty_per_ppm,
**pricing_input.impurity_penalty_per_ppm,
}
q_metal = pricing_input.ore_tonnage * (pricing_input.head_grade_pct / 100.0) * (
pricing_input.recovery_pct / 100.0
)
payable_metal = q_metal * (payable_pct / 100.0)
gross_revenue_ref = payable_metal * pricing_input.reference_price
charges = pricing_input.treatment_charge + pricing_input.smelting_charge
moisture_excess = max(0.0, pricing_input.moisture_pct - moisture_threshold)
moisture_penalty = moisture_excess * moisture_penalty_factor
impurity_penalty_total = 0.0
for impurity, value in pricing_input.impurity_ppm.items():
threshold = impurity_thresholds.get(impurity, 0.0)
penalty_factor = impurity_penalty_factors.get(impurity, 0.0)
impurity_penalty_total += max(0.0, value - threshold) * penalty_factor
net_revenue_ref = (
gross_revenue_ref - charges - moisture_penalty - impurity_penalty_total
)
net_revenue_ref += pricing_input.premiums
net_revenue = net_revenue_ref * pricing_input.fx_rate
currency_code = require_currency(
currency or pricing_input.currency_code,
default=applied_metadata.default_currency,
)
return PricingResult(
metal=pricing_input.metal,
ore_tonnage=pricing_input.ore_tonnage,
head_grade_pct=pricing_input.head_grade_pct,
recovery_pct=pricing_input.recovery_pct,
payable_metal_tonnes=payable_metal,
reference_price=pricing_input.reference_price,
gross_revenue=gross_revenue_ref,
moisture_penalty=moisture_penalty,
impurity_penalty=impurity_penalty_total,
treatment_smelt_charges=charges,
premiums=pricing_input.premiums,
net_revenue=net_revenue,
currency=currency_code,
)
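# Worked sketch (illustrative inputs, not from the repository): 1000 t of ore
# at 2% head grade and 90% recovery yields 18 t of metal; 95% payability
# leaves 17.1 t payable.
#     result = calculate_pricing(PricingInput(
#         metal="Cu",
#         ore_tonnage=1000,
#         head_grade_pct=2.0,
#         recovery_pct=90.0,
#         payable_pct=95.0,
#         reference_price=8000.0,
#         treatment_charge=5000.0,
#         moisture_pct=10.0,
#         moisture_penalty_per_pct=100.0,
#         currency_code="USD",
#     ))
#     # gross_revenue    = 17.1 * 8000           = 136_800.0
#     # moisture_penalty = (10 - 8) * 100        = 200.0  (default 8% threshold)
#     # net_revenue      = 136_800 - 5_000 - 200 = 131_600.0  (fx_rate defaults to 1)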

services/reporting.py (new file)

@@ -0,0 +1,676 @@
"""Reporting service layer aggregating deterministic and simulation metrics."""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import date
import math
from typing import Iterable, Mapping, Sequence
from models import FinancialCategory, Project, Scenario
from services.financial import (
CashFlow,
ConvergenceError,
PaybackNotReachedError,
internal_rate_of_return,
net_present_value,
payback_period,
)
from services.simulation import (
CashFlowSpec,
SimulationConfig,
SimulationMetric,
SimulationResult,
run_monte_carlo,
)
from services.unit_of_work import UnitOfWork
DEFAULT_DISCOUNT_RATE = 0.1
DEFAULT_ITERATIONS = 500
DEFAULT_PERCENTILES: tuple[float, float, float] = (5.0, 50.0, 95.0)
_COST_CATEGORY_SIGNS: Mapping[FinancialCategory, float] = {
FinancialCategory.REVENUE: 1.0,
FinancialCategory.CAPITAL_EXPENDITURE: -1.0,
FinancialCategory.OPERATING_EXPENDITURE: -1.0,
FinancialCategory.CONTINGENCY: -1.0,
FinancialCategory.OTHER: -1.0,
}
@dataclass(frozen=True)
class IncludeOptions:
"""Flags controlling optional sections in report payloads."""
distribution: bool = False
samples: bool = False
@dataclass(slots=True)
class ReportFilters:
"""Filter parameters applied when selecting scenarios for a report."""
scenario_ids: set[int] | None = None
start_date: date | None = None
end_date: date | None = None
def matches(self, scenario: Scenario) -> bool:
if self.scenario_ids is not None and scenario.id not in self.scenario_ids:
return False
if self.start_date and scenario.start_date and scenario.start_date < self.start_date:
return False
if self.end_date and scenario.end_date and scenario.end_date > self.end_date:
return False
return True
def to_dict(self) -> dict[str, object]:
payload: dict[str, object] = {}
if self.scenario_ids is not None:
payload["scenario_ids"] = sorted(self.scenario_ids)
if self.start_date is not None:
payload["start_date"] = self.start_date
if self.end_date is not None:
payload["end_date"] = self.end_date
return payload
@dataclass(slots=True)
class ScenarioFinancialTotals:
currency: str | None
inflows: float
outflows: float
net: float
by_category: dict[str, float]
def to_dict(self) -> dict[str, object]:
return {
"currency": self.currency,
"inflows": _round_optional(self.inflows),
"outflows": _round_optional(self.outflows),
"net": _round_optional(self.net),
"by_category": {
key: _round_optional(value) for key, value in sorted(self.by_category.items())
},
}
@dataclass(slots=True)
class ScenarioDeterministicMetrics:
currency: str | None
discount_rate: float
compounds_per_year: int
npv: float | None
irr: float | None
payback_period: float | None
notes: list[str] = field(default_factory=list)
def to_dict(self) -> dict[str, object]:
return {
"currency": self.currency,
"discount_rate": _round_optional(self.discount_rate, digits=4),
"compounds_per_year": self.compounds_per_year,
"npv": _round_optional(self.npv),
"irr": _round_optional(self.irr, digits=6),
"payback_period": _round_optional(self.payback_period, digits=4),
"notes": self.notes,
}
@dataclass(slots=True)
class ScenarioMonteCarloResult:
available: bool
notes: list[str] = field(default_factory=list)
result: SimulationResult | None = None
include_samples: bool = False
def to_dict(self) -> dict[str, object]:
if not self.available or self.result is None:
return {
"available": False,
"notes": self.notes,
}
metrics: dict[str, dict[str, object]] = {}
for metric, summary in self.result.summaries.items():
metrics[metric.value] = {
"mean": _round_optional(summary.mean),
"std_dev": _round_optional(summary.std_dev),
"minimum": _round_optional(summary.minimum),
"maximum": _round_optional(summary.maximum),
"percentiles": {
f"{percentile:g}": _round_optional(value)
for percentile, value in sorted(summary.percentiles.items())
},
"sample_size": summary.sample_size,
"failed_runs": summary.failed_runs,
}
samples_payload: dict[str, list[float | None]] | None = None
if self.include_samples and self.result.samples:
samples_payload = {}
for metric, samples in self.result.samples.items():
samples_payload[metric.value] = [
_sanitize_float(sample) for sample in samples.tolist()
]
payload: dict[str, object] = {
"available": True,
"iterations": self.result.iterations,
"metrics": metrics,
"notes": self.notes,
}
if samples_payload:
payload["samples"] = samples_payload
return payload
@dataclass(slots=True)
class ScenarioReport:
scenario: Scenario
totals: ScenarioFinancialTotals
deterministic: ScenarioDeterministicMetrics
monte_carlo: ScenarioMonteCarloResult | None
def to_dict(self) -> dict[str, object]:
scenario_info = {
"id": self.scenario.id,
"project_id": self.scenario.project_id,
"name": self.scenario.name,
"description": self.scenario.description,
"status": self.scenario.status.value,
"start_date": self.scenario.start_date,
"end_date": self.scenario.end_date,
"currency": self.scenario.currency,
"primary_resource": self.scenario.primary_resource.value
if self.scenario.primary_resource
else None,
"discount_rate": _round_optional(self.deterministic.discount_rate, digits=4),
"created_at": self.scenario.created_at,
"updated_at": self.scenario.updated_at,
"simulation_parameter_count": len(self.scenario.simulation_parameters or []),
}
payload: dict[str, object] = {
"scenario": scenario_info,
"financials": self.totals.to_dict(),
"metrics": self.deterministic.to_dict(),
}
if self.monte_carlo is not None:
payload["monte_carlo"] = self.monte_carlo.to_dict()
return payload
@dataclass(slots=True)
class AggregatedMetric:
average: float | None
minimum: float | None
maximum: float | None
def to_dict(self) -> dict[str, object]:
return {
"average": _round_optional(self.average),
"minimum": _round_optional(self.minimum),
"maximum": _round_optional(self.maximum),
}
@dataclass(slots=True)
class ProjectAggregates:
total_inflows: float
total_outflows: float
total_net: float
deterministic_metrics: dict[str, AggregatedMetric]
def to_dict(self) -> dict[str, object]:
return {
"financials": {
"total_inflows": _round_optional(self.total_inflows),
"total_outflows": _round_optional(self.total_outflows),
"total_net": _round_optional(self.total_net),
},
"deterministic_metrics": {
metric: data.to_dict()
for metric, data in sorted(self.deterministic_metrics.items())
},
}
@dataclass(slots=True)
class MetricComparison:
metric: str
direction: str
best: tuple[int, str, float] | None
worst: tuple[int, str, float] | None
average: float | None
def to_dict(self) -> dict[str, object]:
return {
"metric": self.metric,
"direction": self.direction,
"best": _comparison_entry(self.best),
"worst": _comparison_entry(self.worst),
"average": _round_optional(self.average),
}
def parse_include_tokens(raw: str | None) -> IncludeOptions:
tokens: set[str] = set()
if raw:
for part in raw.split(","):
token = part.strip().lower()
if token:
tokens.add(token)
if "all" in tokens:
return IncludeOptions(distribution=True, samples=True)
return IncludeOptions(
distribution=bool({"distribution", "monte_carlo", "mc"} & tokens),
samples="samples" in tokens,
)
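# Example: token parsing is case-insensitive and accepts aliases.
#     parse_include_tokens("distribution, SAMPLES")  # both flags enabled
#     parse_include_tokens("mc")                     # distribution=True via alias
#     parse_include_tokens("all")                    # shorthand for everything
#     parse_include_tokens(None)                     # both flags disabled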
def validate_percentiles(values: Sequence[float] | None) -> tuple[float, ...]:
if not values:
return DEFAULT_PERCENTILES
seen: set[float] = set()
cleaned: list[float] = []
for value in values:
percentile = float(value)
if percentile < 0.0 or percentile > 100.0:
raise ValueError("Percentiles must be between 0 and 100.")
if percentile not in seen:
seen.add(percentile)
cleaned.append(percentile)
if not cleaned:
return DEFAULT_PERCENTILES
return tuple(cleaned)
class ReportingService:
"""Coordinates project and scenario reporting aggregation."""
def __init__(self, uow: UnitOfWork) -> None:
self._uow = uow
def project_summary(
self,
project: Project,
*,
filters: ReportFilters,
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
) -> dict[str, object]:
scenarios = self._load_scenarios(project.id, filters)
reports = [
self._build_scenario_report(
scenario,
include_distribution=include.distribution,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
for scenario in scenarios
]
aggregates = self._aggregate_project(reports)
return {
"project": _project_payload(project),
"scenario_count": len(reports),
"filters": filters.to_dict(),
"aggregates": aggregates.to_dict(),
"scenarios": [report.to_dict() for report in reports],
}
def scenario_comparison(
self,
project: Project,
scenarios: Sequence[Scenario],
*,
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
) -> dict[str, object]:
reports = [
self._build_scenario_report(
self._reload_scenario(scenario.id),
include_distribution=include.distribution,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
for scenario in scenarios
]
comparison = {
metric: data.to_dict()
for metric, data in self._build_comparisons(reports).items()
}
return {
"project": _project_payload(project),
"scenarios": [report.to_dict() for report in reports],
"comparison": comparison,
}
def scenario_distribution(
self,
scenario: Scenario,
*,
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
) -> dict[str, object]:
report = self._build_scenario_report(
self._reload_scenario(scenario.id),
include_distribution=True,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
return {
"scenario": report.to_dict()["scenario"],
"summary": report.totals.to_dict(),
"metrics": report.deterministic.to_dict(),
"monte_carlo": (
report.monte_carlo.to_dict() if report.monte_carlo else {
"available": False}
),
}
def _load_scenarios(self, project_id: int, filters: ReportFilters) -> list[Scenario]:
repo = self._require_scenario_repo()
scenarios = repo.list_for_project(project_id, with_children=True)
return [scenario for scenario in scenarios if filters.matches(scenario)]
def _reload_scenario(self, scenario_id: int) -> Scenario:
repo = self._require_scenario_repo()
return repo.get(scenario_id, with_children=True)
def _build_scenario_report(
self,
scenario: Scenario,
*,
include_distribution: bool,
include_samples: bool,
iterations: int,
percentiles: tuple[float, ...],
) -> ScenarioReport:
cash_flows, totals = _build_cash_flows(scenario)
deterministic = _calculate_deterministic_metrics(
scenario, cash_flows, totals)
monte_carlo: ScenarioMonteCarloResult | None = None
if include_distribution:
monte_carlo = _run_monte_carlo(
scenario,
cash_flows,
include_samples=include_samples,
iterations=iterations,
percentiles=percentiles,
)
return ScenarioReport(
scenario=scenario,
totals=totals,
deterministic=deterministic,
monte_carlo=monte_carlo,
)
def _aggregate_project(self, reports: Sequence[ScenarioReport]) -> ProjectAggregates:
total_inflows = sum(report.totals.inflows for report in reports)
total_outflows = sum(report.totals.outflows for report in reports)
total_net = sum(report.totals.net for report in reports)
metrics: dict[str, AggregatedMetric] = {}
for metric_name in ("npv", "irr", "payback_period"):
values = [
getattr(report.deterministic, metric_name)
for report in reports
if getattr(report.deterministic, metric_name) is not None
]
if values:
metrics[metric_name] = AggregatedMetric(
average=sum(values) / len(values),
minimum=min(values),
maximum=max(values),
)
return ProjectAggregates(
total_inflows=total_inflows,
total_outflows=total_outflows,
total_net=total_net,
deterministic_metrics=metrics,
)
def _build_comparisons(
self, reports: Sequence[ScenarioReport]
) -> Mapping[str, MetricComparison]:
comparisons: dict[str, MetricComparison] = {}
for metric_name, direction in (
("npv", "higher_is_better"),
("irr", "higher_is_better"),
("payback_period", "lower_is_better"),
):
entries: list[tuple[int, str, float]] = []
for report in reports:
value = getattr(report.deterministic, metric_name)
if value is None:
continue
entries.append(
(report.scenario.id, report.scenario.name, value))
if not entries:
continue
if direction == "higher_is_better":
best = max(entries, key=lambda item: item[2])
worst = min(entries, key=lambda item: item[2])
else:
best = min(entries, key=lambda item: item[2])
worst = max(entries, key=lambda item: item[2])
average = sum(item[2] for item in entries) / len(entries)
comparisons[metric_name] = MetricComparison(
metric=metric_name,
direction=direction,
best=best,
worst=worst,
average=average,
)
return comparisons
def _require_scenario_repo(self):
if not self._uow.scenarios:
raise RuntimeError("Scenario repository not initialised")
return self._uow.scenarios
def _build_cash_flows(scenario: Scenario) -> tuple[list[CashFlow], ScenarioFinancialTotals]:
cash_flows: list[CashFlow] = []
by_category: dict[str, float] = {}
inflows = 0.0
outflows = 0.0
net = 0.0
period_index = 0
for financial_input in scenario.financial_inputs or []:
sign = _COST_CATEGORY_SIGNS.get(financial_input.category, -1.0)
amount = float(financial_input.amount) * sign
net += amount
if amount >= 0:
inflows += amount
else:
outflows += -amount
by_category.setdefault(financial_input.category.value, 0.0)
by_category[financial_input.category.value] += amount
if financial_input.effective_date is not None:
cash_flows.append(
CashFlow(amount=amount, date=financial_input.effective_date)
)
else:
cash_flows.append(
CashFlow(amount=amount, period_index=period_index))
period_index += 1
currency = scenario.currency
if currency is None and scenario.financial_inputs:
currency = scenario.financial_inputs[0].currency
totals = ScenarioFinancialTotals(
currency=currency,
inflows=inflows,
outflows=outflows,
net=net,
by_category=by_category,
)
return cash_flows, totals
def _calculate_deterministic_metrics(
scenario: Scenario,
cash_flows: Sequence[CashFlow],
totals: ScenarioFinancialTotals,
) -> ScenarioDeterministicMetrics:
notes: list[str] = []
discount_rate = _normalise_discount_rate(scenario.discount_rate)
if scenario.discount_rate is None:
notes.append(
f"Discount rate not set; defaulted to {discount_rate:.2%}."
)
if not cash_flows:
notes.append(
"No financial inputs available for deterministic metrics.")
return ScenarioDeterministicMetrics(
currency=totals.currency,
discount_rate=discount_rate,
compounds_per_year=1,
npv=None,
irr=None,
payback_period=None,
notes=notes,
)
npv_value: float | None
try:
npv_value = net_present_value(
discount_rate,
cash_flows,
compounds_per_year=1,
)
except ValueError as exc:
npv_value = None
notes.append(f"NPV unavailable: {exc}.")
irr_value: float | None
try:
irr_value = internal_rate_of_return(
cash_flows,
compounds_per_year=1,
)
except (ValueError, ConvergenceError) as exc:
irr_value = None
notes.append(f"IRR unavailable: {exc}.")
payback_value: float | None
try:
payback_value = payback_period(
cash_flows,
compounds_per_year=1,
)
except (ValueError, PaybackNotReachedError) as exc:
payback_value = None
notes.append(f"Payback period unavailable: {exc}.")
return ScenarioDeterministicMetrics(
currency=totals.currency,
discount_rate=discount_rate,
compounds_per_year=1,
npv=npv_value,
irr=irr_value,
payback_period=payback_value,
notes=notes,
)
def _run_monte_carlo(
scenario: Scenario,
cash_flows: Sequence[CashFlow],
*,
include_samples: bool,
iterations: int,
percentiles: tuple[float, ...],
) -> ScenarioMonteCarloResult:
if not cash_flows:
return ScenarioMonteCarloResult(
available=False,
notes=["No financial inputs available for Monte Carlo simulation."],
)
discount_rate = _normalise_discount_rate(scenario.discount_rate)
specs = [CashFlowSpec(cash_flow=flow) for flow in cash_flows]
notes: list[str] = []
if not scenario.simulation_parameters:
notes.append(
"Scenario has no stochastic parameters; simulation mirrors deterministic cash flows."
)
config = SimulationConfig(
iterations=iterations,
discount_rate=discount_rate,
metrics=(
SimulationMetric.NPV,
SimulationMetric.IRR,
SimulationMetric.PAYBACK,
),
percentiles=percentiles,
return_samples=include_samples,
)
try:
result = run_monte_carlo(specs, config)
except Exception as exc: # pragma: no cover - safeguard for unexpected failures
notes.append(f"Simulation failed: {exc}.")
return ScenarioMonteCarloResult(available=False, notes=notes)
return ScenarioMonteCarloResult(
available=True,
notes=notes,
result=result,
include_samples=include_samples,
)
def _normalise_discount_rate(value: float | None) -> float:
if value is None:
return DEFAULT_DISCOUNT_RATE
rate = float(value)
if rate > 1.0:
return rate / 100.0
return rate
def _sanitize_float(value: float | None) -> float | None:
if value is None:
return None
if math.isnan(value) or math.isinf(value):
return None
return float(value)
def _round_optional(value: float | None, *, digits: int = 2) -> float | None:
clean = _sanitize_float(value)
if clean is None:
return None
return round(clean, digits)
def _comparison_entry(entry: tuple[int, str, float] | None) -> dict[str, object] | None:
if entry is None:
return None
scenario_id, name, value = entry
return {
"scenario_id": scenario_id,
"name": name,
"value": _round_optional(value),
}
def _project_payload(project: Project) -> dict[str, object]:
return {
"id": project.id,
"name": project.name,
"location": project.location,
"operation_type": project.operation_type.value,
"description": project.description,
"created_at": project.created_at,
"updated_at": project.updated_at,
}
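# Illustrative behaviour of the private helpers above:
#     _normalise_discount_rate(None)  # -> 0.1 (DEFAULT_DISCOUNT_RATE)
#     _normalise_discount_rate(8.5)   # -> 0.085; values above 1.0 read as percent
#     _round_optional(float("nan"))   # -> None; NaN/inf sanitised before rounding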


@@ -1,6 +1,7 @@
from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import datetime
from typing import Mapping, Sequence
@@ -11,6 +12,10 @@ from sqlalchemy.orm import Session, joinedload, selectinload
from models import (
FinancialInput,
Project,
PricingImpuritySettings,
PricingMetalSettings,
PricingSettings,
ResourceType,
Role,
Scenario,
ScenarioStatus,
@@ -19,6 +24,8 @@ from models import (
UserRole,
)
from services.exceptions import EntityConflictError, EntityNotFoundError
from services.export_query import ProjectExportFilters, ScenarioExportFilters
from services.pricing import PricingMetadata
class ProjectRepository:
@@ -27,10 +34,17 @@ class ProjectRepository:
def __init__(self, session: Session) -> None:
self.session = session
def list(self, *, with_children: bool = False) -> Sequence[Project]:
def list(
self,
*,
with_children: bool = False,
with_pricing: bool = False,
) -> Sequence[Project]:
stmt = select(Project).order_by(Project.created_at)
if with_children:
stmt = stmt.options(selectinload(Project.scenarios))
if with_pricing:
stmt = stmt.options(selectinload(Project.pricing_settings))
return self.session.execute(stmt).scalars().all()
def count(self) -> int:
@@ -45,10 +59,18 @@ class ProjectRepository:
)
return self.session.execute(stmt).scalars().all()
def get(self, project_id: int, *, with_children: bool = False) -> Project:
def get(
self,
project_id: int,
*,
with_children: bool = False,
with_pricing: bool = False,
) -> Project:
stmt = select(Project).where(Project.id == project_id)
if with_children:
stmt = stmt.options(joinedload(Project.scenarios))
if with_pricing:
stmt = stmt.options(joinedload(Project.pricing_settings))
result = self.session.execute(stmt)
if with_children:
result = result.unique()
@@ -79,10 +101,71 @@ class ProjectRepository:
records = self.session.execute(stmt).scalars().all()
return {project.name.lower(): project for project in records}
def filtered_for_export(
self,
filters: ProjectExportFilters | None = None,
*,
include_scenarios: bool = False,
include_pricing: bool = False,
) -> Sequence[Project]:
stmt = select(Project)
if include_scenarios:
stmt = stmt.options(selectinload(Project.scenarios))
if include_pricing:
stmt = stmt.options(selectinload(Project.pricing_settings))
if filters:
ids = filters.normalised_ids()
if ids:
stmt = stmt.where(Project.id.in_(ids))
name_matches = filters.normalised_names()
if name_matches:
stmt = stmt.where(func.lower(Project.name).in_(name_matches))
name_pattern = filters.name_search_pattern()
if name_pattern:
stmt = stmt.where(Project.name.ilike(name_pattern))
locations = filters.normalised_locations()
if locations:
stmt = stmt.where(func.lower(Project.location).in_(locations))
if filters.operation_types:
stmt = stmt.where(Project.operation_type.in_(
filters.operation_types))
if filters.created_from:
stmt = stmt.where(Project.created_at >= filters.created_from)
if filters.created_to:
stmt = stmt.where(Project.created_at <= filters.created_to)
if filters.updated_from:
stmt = stmt.where(Project.updated_at >= filters.updated_from)
if filters.updated_to:
stmt = stmt.where(Project.updated_at <= filters.updated_to)
stmt = stmt.order_by(Project.name, Project.id)
return self.session.execute(stmt).scalars().all()
def delete(self, project_id: int) -> None:
project = self.get(project_id)
self.session.delete(project)
def set_pricing_settings(
self,
project: Project,
pricing_settings: PricingSettings | None,
) -> Project:
project.pricing_settings = pricing_settings
project.pricing_settings_id = (
pricing_settings.id if pricing_settings is not None else None
)
self.session.flush()
return project
class ScenarioRepository:
"""Persistence operations for Scenario entities."""
@@ -90,13 +173,26 @@ class ScenarioRepository:
def __init__(self, session: Session) -> None:
self.session = session
def list_for_project(self, project_id: int) -> Sequence[Scenario]:
def list_for_project(
self,
project_id: int,
*,
with_children: bool = False,
) -> Sequence[Scenario]:
stmt = (
select(Scenario)
.where(Scenario.project_id == project_id)
.order_by(Scenario.created_at)
)
return self.session.execute(stmt).scalars().all()
if with_children:
stmt = stmt.options(
selectinload(Scenario.financial_inputs),
selectinload(Scenario.simulation_parameters),
)
result = self.session.execute(stmt)
if with_children:
result = result.unique()
return result.scalars().all()
def count(self) -> int:
stmt = select(func.count(Scenario.id))
@@ -177,6 +273,76 @@ class ScenarioRepository:
records = self.session.execute(stmt).scalars().all()
return {scenario.name.lower(): scenario for scenario in records}
def filtered_for_export(
self,
filters: ScenarioExportFilters | None = None,
*,
include_project: bool = True,
) -> Sequence[Scenario]:
stmt = select(Scenario)
if include_project:
stmt = stmt.options(joinedload(Scenario.project))
if filters:
scenario_ids = filters.normalised_ids()
if scenario_ids:
stmt = stmt.where(Scenario.id.in_(scenario_ids))
project_ids = filters.normalised_project_ids()
if project_ids:
stmt = stmt.where(Scenario.project_id.in_(project_ids))
project_names = filters.normalised_project_names()
if project_names:
project_id_select = select(Project.id).where(
func.lower(Project.name).in_(project_names)
)
stmt = stmt.where(Scenario.project_id.in_(project_id_select))
name_pattern = filters.name_search_pattern()
if name_pattern:
stmt = stmt.where(Scenario.name.ilike(name_pattern))
if filters.statuses:
stmt = stmt.where(Scenario.status.in_(filters.statuses))
if filters.start_date_from:
stmt = stmt.where(Scenario.start_date >=
filters.start_date_from)
if filters.start_date_to:
stmt = stmt.where(Scenario.start_date <= filters.start_date_to)
if filters.end_date_from:
stmt = stmt.where(Scenario.end_date >= filters.end_date_from)
if filters.end_date_to:
stmt = stmt.where(Scenario.end_date <= filters.end_date_to)
if filters.created_from:
stmt = stmt.where(Scenario.created_at >= filters.created_from)
if filters.created_to:
stmt = stmt.where(Scenario.created_at <= filters.created_to)
if filters.updated_from:
stmt = stmt.where(Scenario.updated_at >= filters.updated_from)
if filters.updated_to:
stmt = stmt.where(Scenario.updated_at <= filters.updated_to)
currencies = filters.normalised_currencies()
if currencies:
stmt = stmt.where(func.upper(
Scenario.currency).in_(currencies))
if filters.primary_resources:
stmt = stmt.where(Scenario.primary_resource.in_(
filters.primary_resources))
stmt = stmt.order_by(Scenario.name, Scenario.id)
return self.session.execute(stmt).scalars().all()
def delete(self, scenario_id: int) -> None:
scenario = self.get(scenario_id)
self.session.delete(scenario)
@@ -258,6 +424,101 @@ class SimulationParameterRepository:
self.session.delete(entity)
class PricingSettingsRepository:
"""Persistence operations for pricing configuration entities."""
def __init__(self, session: Session) -> None:
self.session = session
def list(self, *, include_children: bool = False) -> Sequence[PricingSettings]:
stmt = select(PricingSettings).order_by(PricingSettings.created_at)
if include_children:
stmt = stmt.options(
selectinload(PricingSettings.metal_overrides),
selectinload(PricingSettings.impurity_overrides),
)
result = self.session.execute(stmt)
if include_children:
result = result.unique()
return result.scalars().all()
def get(self, settings_id: int, *, include_children: bool = False) -> PricingSettings:
stmt = select(PricingSettings).where(PricingSettings.id == settings_id)
if include_children:
stmt = stmt.options(
selectinload(PricingSettings.metal_overrides),
selectinload(PricingSettings.impurity_overrides),
)
result = self.session.execute(stmt)
if include_children:
result = result.unique()
settings = result.scalar_one_or_none()
if settings is None:
raise EntityNotFoundError(
f"Pricing settings {settings_id} not found")
return settings
def find_by_slug(
self,
slug: str,
*,
include_children: bool = False,
) -> PricingSettings | None:
normalised = slug.strip().lower()
stmt = select(PricingSettings).where(
PricingSettings.slug == normalised)
if include_children:
stmt = stmt.options(
selectinload(PricingSettings.metal_overrides),
selectinload(PricingSettings.impurity_overrides),
)
result = self.session.execute(stmt)
if include_children:
result = result.unique()
return result.scalar_one_or_none()
def get_by_slug(self, slug: str, *, include_children: bool = False) -> PricingSettings:
settings = self.find_by_slug(slug, include_children=include_children)
if settings is None:
raise EntityNotFoundError(
f"Pricing settings slug '{slug}' not found"
)
return settings
def create(self, settings: PricingSettings) -> PricingSettings:
self.session.add(settings)
try:
self.session.flush()
except IntegrityError as exc: # pragma: no cover - relies on DB constraints
raise EntityConflictError(
"Pricing settings violates constraints") from exc
return settings
def delete(self, settings_id: int) -> None:
settings = self.get(settings_id, include_children=True)
self.session.delete(settings)
def attach_metal_override(
self,
settings: PricingSettings,
override: PricingMetalSettings,
) -> PricingMetalSettings:
settings.metal_overrides.append(override)
self.session.add(override)
self.session.flush()
return override
def attach_impurity_override(
self,
settings: PricingSettings,
override: PricingImpuritySettings,
) -> PricingImpuritySettings:
settings.impurity_overrides.append(override)
self.session.add(override)
self.session.flush()
return override
class RoleRepository:
"""Persistence operations for Role entities."""
@@ -389,6 +650,159 @@ class UserRepository:
self.session.flush()
DEFAULT_PRICING_SETTINGS_NAME = "Default Pricing Settings"
DEFAULT_PRICING_SETTINGS_DESCRIPTION = (
"Default pricing configuration generated from environment metadata."
)
@dataclass(slots=True)
class PricingSettingsSeedResult:
settings: PricingSettings
created: bool
updated_fields: int
impurity_upserts: int
def ensure_default_pricing_settings(
repo: PricingSettingsRepository,
*,
metadata: PricingMetadata,
slug: str = "default",
name: str | None = None,
description: str | None = None,
) -> PricingSettingsSeedResult:
"""Ensure a baseline pricing settings record exists and matches metadata defaults."""
normalised_slug = (slug or "default").strip().lower() or "default"
target_name = name or DEFAULT_PRICING_SETTINGS_NAME
target_description = description or DEFAULT_PRICING_SETTINGS_DESCRIPTION
updated_fields = 0
impurity_upserts = 0
try:
settings = repo.get_by_slug(normalised_slug, include_children=True)
created = False
except EntityNotFoundError:
settings = PricingSettings(
name=target_name,
slug=normalised_slug,
description=target_description,
default_currency=metadata.default_currency,
default_payable_pct=metadata.default_payable_pct,
moisture_threshold_pct=metadata.moisture_threshold_pct,
moisture_penalty_per_pct=metadata.moisture_penalty_per_pct,
)
settings.metadata_payload = None
settings = repo.create(settings)
created = True
else:
if settings.name != target_name:
settings.name = target_name
updated_fields += 1
if target_description and settings.description != target_description:
settings.description = target_description
updated_fields += 1
if settings.default_currency != metadata.default_currency:
settings.default_currency = metadata.default_currency
updated_fields += 1
if float(settings.default_payable_pct) != float(metadata.default_payable_pct):
settings.default_payable_pct = metadata.default_payable_pct
updated_fields += 1
if float(settings.moisture_threshold_pct) != float(metadata.moisture_threshold_pct):
settings.moisture_threshold_pct = metadata.moisture_threshold_pct
updated_fields += 1
if float(settings.moisture_penalty_per_pct) != float(metadata.moisture_penalty_per_pct):
settings.moisture_penalty_per_pct = metadata.moisture_penalty_per_pct
updated_fields += 1
impurity_thresholds = {
code.strip().upper(): float(value)
for code, value in (metadata.impurity_thresholds or {}).items()
if code.strip()
}
impurity_penalties = {
code.strip().upper(): float(value)
for code, value in (metadata.impurity_penalty_per_ppm or {}).items()
if code.strip()
}
if impurity_thresholds or impurity_penalties:
existing_map = {
override.impurity_code: override
for override in settings.impurity_overrides
}
target_codes = set(impurity_thresholds) | set(impurity_penalties)
for code in sorted(target_codes):
threshold_value = impurity_thresholds.get(code, 0.0)
penalty_value = impurity_penalties.get(code, 0.0)
existing = existing_map.get(code)
if existing is None:
repo.attach_impurity_override(
settings,
PricingImpuritySettings(
impurity_code=code,
threshold_ppm=threshold_value,
penalty_per_ppm=penalty_value,
),
)
impurity_upserts += 1
continue
changed = False
if float(existing.threshold_ppm) != float(threshold_value):
existing.threshold_ppm = threshold_value
changed = True
if float(existing.penalty_per_ppm) != float(penalty_value):
existing.penalty_per_ppm = penalty_value
changed = True
if changed:
updated_fields += 1
if updated_fields > 0 or impurity_upserts > 0:
repo.session.flush()
return PricingSettingsSeedResult(
settings=settings,
created=created,
updated_fields=updated_fields,
impurity_upserts=impurity_upserts,
)
def pricing_settings_to_metadata(settings: PricingSettings) -> PricingMetadata:
"""Convert a persisted pricing settings record into metadata defaults."""
payload = settings.metadata_payload or {}
payload_thresholds = payload.get("impurity_thresholds") or {}
payload_penalties = payload.get("impurity_penalty_per_ppm") or {}
thresholds: dict[str, float] = {
code.strip().upper(): float(value)
for code, value in payload_thresholds.items()
if isinstance(code, str) and code.strip()
}
penalties: dict[str, float] = {
code.strip().upper(): float(value)
for code, value in payload_penalties.items()
if isinstance(code, str) and code.strip()
}
for override in settings.impurity_overrides:
code = override.impurity_code.strip().upper()
thresholds[code] = float(override.threshold_ppm)
penalties[code] = float(override.penalty_per_ppm)
return PricingMetadata(
default_payable_pct=float(settings.default_payable_pct),
default_currency=settings.default_currency,
moisture_threshold_pct=float(settings.moisture_threshold_pct),
moisture_penalty_per_pct=float(settings.moisture_penalty_per_pct),
impurity_thresholds=thresholds,
impurity_penalty_per_ppm=penalties,
)
DEFAULT_ROLE_DEFINITIONS: tuple[dict[str, str], ...] = (
{
"name": "admin",


@@ -0,0 +1,54 @@
"""Scenario evaluation services including pricing integration."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Mapping
from models.scenario import Scenario
from services.pricing import (
PricingInput,
PricingMetadata,
PricingResult,
calculate_pricing,
)
@dataclass(slots=True)
class ScenarioPricingConfig:
"""Configuration for pricing evaluation within a scenario."""
metadata: PricingMetadata | None = None
@dataclass(slots=True)
class ScenarioPricingSnapshot:
"""Captured pricing results for a scenario."""
scenario_id: int
results: list[PricingResult]
class ScenarioPricingEvaluator:
"""Evaluate scenario profitability inputs using pricing services."""
def __init__(self, config: ScenarioPricingConfig | None = None) -> None:
self._config = config or ScenarioPricingConfig()
def evaluate(
self,
scenario: Scenario,
*,
inputs: Iterable[PricingInput],
metadata_override: PricingMetadata | None = None,
) -> ScenarioPricingSnapshot:
metadata = metadata_override or self._config.metadata
results: list[PricingResult] = []
for pricing_input in inputs:
result = calculate_pricing(
pricing_input,
metadata=metadata,
currency=scenario.currency,
)
results.append(result)
return ScenarioPricingSnapshot(scenario_id=scenario.id, results=results)
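# Usage sketch (hypothetical wiring): `scenario` is a persisted Scenario and
# `inputs` an iterable of PricingInput instances from services/pricing.py.
#     evaluator = ScenarioPricingEvaluator(
#         ScenarioPricingConfig(metadata=PricingMetadata(default_currency="USD"))
#     )
#     snapshot = evaluator.evaluate(scenario, inputs=inputs)
#     snapshot.results[0].net_revenue  # one PricingResult per input, in scenario currency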

services/simulation.py (new file)

@@ -0,0 +1,352 @@
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Iterable, Mapping, Sequence
import numpy as np
from numpy.random import Generator, default_rng
from .financial import (
CashFlow,
ConvergenceError,
PaybackNotReachedError,
internal_rate_of_return,
net_present_value,
payback_period,
)
class DistributionConfigError(ValueError):
"""Raised when a distribution specification is invalid."""
class SimulationMetric(Enum):
"""Supported Monte Carlo summary metrics."""
NPV = "npv"
IRR = "irr"
PAYBACK = "payback"
class DistributionType(Enum):
"""Supported probability distribution families."""
NORMAL = "normal"
LOGNORMAL = "lognormal"
TRIANGULAR = "triangular"
DISCRETE = "discrete"
class DistributionSource(Enum):
"""Origins for parameter values when sourcing dynamically."""
STATIC = "static"
SCENARIO_FIELD = "scenario_field"
METADATA_KEY = "metadata_key"
@dataclass(frozen=True, slots=True)
class DistributionSpec:
"""Defines the stochastic behaviour for a single cash flow."""
type: DistributionType
parameters: Mapping[str, Any]
source: DistributionSource = DistributionSource.STATIC
source_key: str | None = None
@dataclass(frozen=True, slots=True)
class CashFlowSpec:
"""Pairs a baseline cash flow with an optional distribution."""
cash_flow: CashFlow
distribution: DistributionSpec | None = None
@dataclass(frozen=True, slots=True)
class SimulationConfig:
"""Controls Monte Carlo simulation behaviour."""
iterations: int
discount_rate: float
seed: int | None = None
metrics: Sequence[SimulationMetric] = (
SimulationMetric.NPV, SimulationMetric.IRR, SimulationMetric.PAYBACK)
percentiles: Sequence[float] = (5.0, 50.0, 95.0)
compounds_per_year: int = 1
return_samples: bool = False
residual_value: float | None = None
residual_periods: float | None = None
@dataclass(frozen=True, slots=True)
class MetricSummary:
"""Aggregated statistics for a simulated metric."""
mean: float
std_dev: float
minimum: float
maximum: float
percentiles: Mapping[float, float]
sample_size: int
failed_runs: int
@dataclass(frozen=True, slots=True)
class SimulationResult:
"""Monte Carlo output including per-metric summaries."""
iterations: int
summaries: Mapping[SimulationMetric, MetricSummary]
samples: Mapping[SimulationMetric, np.ndarray] | None = None
def run_monte_carlo(
cash_flows: Sequence[CashFlowSpec],
config: SimulationConfig,
*,
scenario_context: Mapping[str, Any] | None = None,
metadata: Mapping[str, Any] | None = None,
rng: Generator | None = None,
) -> SimulationResult:
"""Execute Monte Carlo simulation for the provided cash flows."""
if config.iterations <= 0:
raise ValueError("iterations must be greater than zero")
if config.compounds_per_year <= 0:
raise ValueError("compounds_per_year must be greater than zero")
for pct in config.percentiles:
if pct < 0.0 or pct > 100.0:
raise ValueError("percentiles must be within [0, 100]")
generator = rng or default_rng(config.seed)
metric_arrays: Dict[SimulationMetric, np.ndarray] = {
metric: np.empty(config.iterations, dtype=float)
for metric in config.metrics
}
for idx in range(config.iterations):
iteration_flows = [
_realise_cash_flow(
spec,
generator,
scenario_context=scenario_context,
metadata=metadata,
)
for spec in cash_flows
]
if SimulationMetric.NPV in metric_arrays:
metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
config.discount_rate,
iteration_flows,
residual_value=config.residual_value,
residual_periods=config.residual_periods,
compounds_per_year=config.compounds_per_year,
)
if SimulationMetric.IRR in metric_arrays:
try:
metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
iteration_flows,
compounds_per_year=config.compounds_per_year,
)
except (ValueError, ConvergenceError):
metric_arrays[SimulationMetric.IRR][idx] = np.nan
if SimulationMetric.PAYBACK in metric_arrays:
try:
metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
iteration_flows,
compounds_per_year=config.compounds_per_year,
)
except (ValueError, PaybackNotReachedError):
metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan
summaries = {
metric: _summarise(metric_arrays[metric], config.percentiles)
for metric in metric_arrays
}
samples = metric_arrays if config.return_samples else None
return SimulationResult(
iterations=config.iterations,
summaries=summaries,
samples=samples,
)
def _realise_cash_flow(
spec: CashFlowSpec,
generator: Generator,
*,
scenario_context: Mapping[str, Any] | None,
metadata: Mapping[str, Any] | None,
) -> CashFlow:
if spec.distribution is None:
return spec.cash_flow
distribution = spec.distribution
base_amount = spec.cash_flow.amount
params = _resolve_parameters(
distribution,
base_amount,
scenario_context=scenario_context,
metadata=metadata,
)
sample = _sample_distribution(
distribution.type,
params,
generator,
)
return CashFlow(
amount=float(sample),
period_index=spec.cash_flow.period_index,
date=spec.cash_flow.date,
)
def _resolve_parameters(
distribution: DistributionSpec,
base_amount: float,
*,
scenario_context: Mapping[str, Any] | None,
metadata: Mapping[str, Any] | None,
) -> Dict[str, Any]:
params = dict(distribution.parameters)
if distribution.source == DistributionSource.SCENARIO_FIELD:
if distribution.source_key is None:
raise DistributionConfigError(
"source_key is required for scenario_field sourcing")
if not scenario_context or distribution.source_key not in scenario_context:
raise DistributionConfigError(
f"scenario field '{distribution.source_key}' not found for distribution"
)
params.setdefault("mean", float(
scenario_context[distribution.source_key]))
elif distribution.source == DistributionSource.METADATA_KEY:
if distribution.source_key is None:
raise DistributionConfigError(
"source_key is required for metadata_key sourcing")
if not metadata or distribution.source_key not in metadata:
raise DistributionConfigError(
f"metadata key '{distribution.source_key}' not found for distribution"
)
params.setdefault("mean", float(metadata[distribution.source_key]))
else:
params.setdefault("mean", float(base_amount))
return params
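For example, a spec sourced from a scenario field defers its mean until sampling time. A small sketch using the classes above (the field name and context values are illustrative only):
spec = DistributionSpec(
    type=DistributionType.NORMAL,
    parameters={"std_dev": 10.0},
    source=DistributionSource.SCENARIO_FIELD,
    source_key="expected_revenue",  # hypothetical scenario field
)
# With scenario_context={"expected_revenue": 250.0}, _resolve_parameters
# returns {"std_dev": 10.0, "mean": 250.0}; a missing key raises
# DistributionConfigError rather than silently falling back to the baseline.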
def _sample_distribution(
distribution_type: DistributionType,
params: Mapping[str, Any],
generator: Generator,
) -> float:
if distribution_type is DistributionType.NORMAL:
return _sample_normal(params, generator)
if distribution_type is DistributionType.LOGNORMAL:
return _sample_lognormal(params, generator)
if distribution_type is DistributionType.TRIANGULAR:
return _sample_triangular(params, generator)
if distribution_type is DistributionType.DISCRETE:
return _sample_discrete(params, generator)
raise DistributionConfigError(
f"Unsupported distribution type: {distribution_type}")
def _sample_normal(params: Mapping[str, Any], generator: Generator) -> float:
if "std_dev" not in params:
raise DistributionConfigError("normal distribution requires 'std_dev'")
std_dev = float(params["std_dev"])
if std_dev < 0:
raise DistributionConfigError("std_dev must be non-negative")
mean = float(params.get("mean", 0.0))
if std_dev == 0:
return mean
return float(generator.normal(loc=mean, scale=std_dev))
def _sample_lognormal(params: Mapping[str, Any], generator: Generator) -> float:
if "sigma" not in params:
raise DistributionConfigError(
"lognormal distribution requires 'sigma'")
sigma = float(params["sigma"])
if sigma < 0:
raise DistributionConfigError("sigma must be non-negative")
if "mean" not in params:
raise DistributionConfigError(
"lognormal distribution requires 'mean' (mu in log space)")
mean = float(params["mean"])
return float(generator.lognormal(mean=mean, sigma=sigma))
def _sample_triangular(params: Mapping[str, Any], generator: Generator) -> float:
required = {"min", "mode", "max"}
if not required.issubset(params):
missing = ", ".join(sorted(required - params.keys()))
raise DistributionConfigError(
f"triangular distribution missing parameters: {missing}")
left = float(params["min"])
mode = float(params["mode"])
right = float(params["max"])
if not (left <= mode <= right):
raise DistributionConfigError(
"triangular distribution requires min <= mode <= max")
if left == right:
return mode
return float(generator.triangular(left=left, mode=mode, right=right))
def _sample_discrete(params: Mapping[str, Any], generator: Generator) -> float:
values = params.get("values")
probabilities = params.get("probabilities")
if not isinstance(values, Sequence) or not isinstance(probabilities, Sequence):
raise DistributionConfigError(
"discrete distribution requires 'values' and 'probabilities' sequences")
if len(values) != len(probabilities) or not values:
raise DistributionConfigError(
"values and probabilities must be non-empty and of equal length")
probs = np.array(probabilities, dtype=float)
if np.any(probs < 0):
raise DistributionConfigError("probabilities must be non-negative")
total = probs.sum()
if not np.isclose(total, 1.0):
raise DistributionConfigError("probabilities must sum to 1.0")
probs = probs / total
choices = np.array(values, dtype=float)
return float(generator.choice(choices, p=probs))
def _summarise(values: np.ndarray, percentiles: Sequence[float]) -> MetricSummary:
clean = values[~np.isnan(values)]
sample_size = clean.size
failed_runs = values.size - sample_size
if sample_size == 0:
percentile_map: Dict[float, float] = {
pct: float("nan") for pct in percentiles}
return MetricSummary(
mean=float("nan"),
std_dev=float("nan"),
minimum=float("nan"),
maximum=float("nan"),
percentiles=percentile_map,
sample_size=0,
failed_runs=failed_runs,
)
percentile_map = {
pct: float(np.percentile(clean, pct)) for pct in percentiles
}
return MetricSummary(
mean=float(np.mean(clean)),
std_dev=float(np.std(clean, ddof=1)) if sample_size > 1 else 0.0,
minimum=float(np.min(clean)),
maximum=float(np.max(clean)),
percentiles=percentile_map,
sample_size=sample_size,
failed_runs=failed_runs,
)
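End to end, the module is driven through run_monte_carlo. A minimal usage sketch, assuming CashFlow accepts amount/period_index keywords (as _realise_cash_flow implies) and that these names are importable together; the import path is not shown in this diff:
# Sketch only: CashFlow's import path and full constructor are assumptions.
flows = [
    CashFlowSpec(cash_flow=CashFlow(amount=-1_000.0, period_index=0)),
    CashFlowSpec(
        cash_flow=CashFlow(amount=400.0, period_index=1),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"std_dev": 50.0},  # mean defaults to the baseline 400.0
        ),
    ),
    CashFlowSpec(cash_flow=CashFlow(amount=400.0, period_index=2)),
]
config = SimulationConfig(iterations=10_000, discount_rate=0.08, seed=42)
result = run_monte_carlo(flows, config)
npv = result.summaries[SimulationMetric.NPV]
print(npv.mean, npv.percentiles[50.0], npv.failed_runs)
Seeding the config keeps runs reproducible, and IRR or payback iterations that fail to converge are recorded as NaN and surface in failed_runs instead of aborting the batch.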

View File

@@ -6,16 +6,21 @@ from typing import Callable, Sequence
from sqlalchemy.orm import Session
from config.database import SessionLocal
from models import Role, Scenario
from models import PricingSettings, Project, Role, Scenario
from services.pricing import PricingMetadata
from services.repositories import (
FinancialInputRepository,
PricingSettingsRepository,
PricingSettingsSeedResult,
ProjectRepository,
RoleRepository,
ScenarioRepository,
SimulationParameterRepository,
UserRepository,
ensure_admin_user as ensure_admin_user_record,
ensure_default_pricing_settings,
ensure_default_roles,
pricing_settings_to_metadata,
)
from services.scenario_validation import ScenarioComparisonValidator
@@ -33,6 +38,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
self.simulation_parameters: SimulationParameterRepository | None = None
self.users: UserRepository | None = None
self.roles: RoleRepository | None = None
self.pricing_settings: PricingSettingsRepository | None = None
def __enter__(self) -> "UnitOfWork":
self.session = self._session_factory()
@@ -43,6 +49,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
self.session)
self.users = UserRepository(self.session)
self.roles = RoleRepository(self.session)
self.pricing_settings = PricingSettingsRepository(self.session)
self._scenario_validator = ScenarioComparisonValidator()
return self
@@ -60,6 +67,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
self.simulation_parameters = None
self.users = None
self.roles = None
self.pricing_settings = None
def flush(self) -> None:
if not self.session:
@@ -116,3 +124,45 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
username=username,
password=password,
)
def ensure_default_pricing_settings(
self,
*,
metadata: PricingMetadata,
slug: str = "default",
name: str | None = None,
description: str | None = None,
) -> PricingSettingsSeedResult:
if not self.pricing_settings:
raise RuntimeError("UnitOfWork session is not initialised")
return ensure_default_pricing_settings(
self.pricing_settings,
metadata=metadata,
slug=slug,
name=name,
description=description,
)
def get_pricing_metadata(
self,
*,
slug: str = "default",
) -> PricingMetadata | None:
if not self.pricing_settings:
raise RuntimeError("UnitOfWork session is not initialised")
settings = self.pricing_settings.find_by_slug(
slug,
include_children=True,
)
if settings is None:
return None
return pricing_settings_to_metadata(settings)
def set_project_pricing_settings(
self,
project: Project,
pricing_settings: PricingSettings | None,
) -> Project:
if not self.projects:
raise RuntimeError("UnitOfWork session is not initialised")
return self.projects.set_pricing_settings(project, pricing_settings)
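A sketch of how the new helpers compose at startup, assuming UnitOfWork falls back to the default SessionLocal factory (the metadata values are illustrative):
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork

metadata = PricingMetadata(
    default_payable_pct=95.0,
    default_currency="CAD",
    moisture_threshold_pct=3.0,
    moisture_penalty_per_pct=1.25,
)

with UnitOfWork() as uow:
    seed = uow.ensure_default_pricing_settings(metadata=metadata)
    # seed.created is True on the first run and False on subsequent runs.
    resolved = uow.get_pricing_metadata()  # slug defaults to "default"
    assert resolved is not None and resolved.default_currency == "CAD"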

static/css/imports.css Normal file
View File

@@ -0,0 +1,86 @@
.import-upload {
background-color: var(--surface-color);
border: 1px dashed var(--border-color);
border-radius: var(--radius-md);
padding: 1.5rem;
margin-bottom: 1.5rem;
}
.import-upload__header {
margin-bottom: 1rem;
}
.import-upload__dropzone {
border: 2px dashed var(--border-color);
border-radius: var(--radius-sm);
padding: 2rem;
text-align: center;
transition: border-color 0.2s ease, background-color 0.2s ease;
}
.import-upload__dropzone.dragover {
border-color: var(--primary-color);
background-color: rgba(0, 123, 255, 0.05);
}
.import-upload__actions {
display: flex;
gap: 0.75rem;
margin-top: 1rem;
}
.table-cell-actions {
display: flex;
align-items: center;
gap: 0.5rem;
}
.btn-ghost {
background: transparent;
border: none;
cursor: pointer;
padding: 0.25rem 0.5rem;
color: var(--text-muted);
}
.btn-ghost:hover {
color: var(--primary-color);
}
.toast {
position: fixed;
right: 1rem;
bottom: 1rem;
display: flex;
align-items: center;
gap: 0.75rem;
padding: 1rem 1.25rem;
border-radius: var(--radius-md);
color: #fff;
box-shadow: var(--shadow-lg);
z-index: 1000;
}
.toast.hidden {
display: none;
}
.toast--success {
background-color: #198754;
}
.toast--error {
background-color: #dc3545;
}
.toast--info {
background-color: #0d6efd;
}
.toast__close {
background: none;
border: none;
color: inherit;
cursor: pointer;
font-size: 1.1rem;
}

View File

@@ -1,3 +1,204 @@
.report-overview {
margin-bottom: 2.5rem;
}
.report-grid {
display: grid;
gap: 1.5rem;
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
}
.report-card {
background: var(--card);
border-radius: var(--radius);
padding: 1.5rem;
border: 1px solid var(--color-border);
box-shadow: 0 12px 30px rgba(4, 7, 14, 0.35);
}
.report-card h2 {
margin-top: 0;
margin-bottom: 1rem;
}
.report-section + .report-section {
margin-top: 3rem;
}
.section-header {
margin-bottom: 1.25rem;
}
.section-header h2 {
margin: 0;
}
.section-subtitle {
margin: 0.35rem 0 0;
color: var(--muted);
}
.metric-list {
list-style: none;
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
gap: 0.75rem;
}
.metric-list.compact {
gap: 0.35rem;
}
.metric-list li {
display: flex;
justify-content: space-between;
align-items: baseline;
font-size: 0.95rem;
color: var(--muted);
}
.metric-list strong {
font-size: 1.05rem;
color: var(--text);
}
.metrics-table {
width: 100%;
border-collapse: collapse;
background: rgba(21, 27, 35, 0.6);
border-radius: var(--radius-sm);
overflow: hidden;
}
.metrics-table th,
.metrics-table td {
padding: 0.65rem 0.9rem;
text-align: left;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
}
.metrics-table th {
font-weight: 600;
color: var(--text);
}
.metrics-table tr:last-child td,
.metrics-table tr:last-child th {
border-bottom: none;
}
.definition-list {
margin: 0;
display: grid;
gap: 0.75rem;
}
.definition-list div {
display: grid;
grid-template-columns: 140px 1fr;
gap: 0.5rem;
align-items: baseline;
}
.definition-list dt {
color: var(--muted);
font-weight: 600;
}
.definition-list dd {
margin: 0;
}
.scenario-card {
background: var(--card);
border-radius: var(--radius);
padding: 1.5rem;
border: 1px solid var(--color-border);
box-shadow: 0 16px 32px rgba(4, 7, 14, 0.42);
display: flex;
flex-direction: column;
gap: 1.25rem;
}
.scenario-card + .scenario-card {
margin-top: 1.75rem;
}
.scenario-card-header {
display: flex;
justify-content: space-between;
gap: 1rem;
align-items: flex-start;
}
.scenario-card h3 {
margin: 0;
}
.scenario-meta {
text-align: right;
}
.meta-label {
display: block;
color: var(--muted);
font-size: 0.8rem;
text-transform: uppercase;
letter-spacing: 0.08em;
}
.meta-value {
font-weight: 600;
}
.scenario-grid {
display: grid;
gap: 1.25rem;
grid-template-columns: repeat(auto-fit, minmax(260px, 1fr));
}
.scenario-panel {
background: rgba(15, 20, 27, 0.8);
border-radius: var(--radius-sm);
padding: 1.1rem;
border: 1px solid rgba(255, 255, 255, 0.05);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.06);
}
.scenario-panel h4,
.scenario-panel h5 {
margin-top: 0;
margin-bottom: 0.75rem;
}
.note-list {
padding-left: 1.1rem;
color: var(--muted);
font-size: 0.9rem;
}
.muted {
color: var(--muted);
}
.page-actions .button {
text-decoration: none;
background: transparent;
border: 1px solid var(--color-border);
padding: 0.6rem 1rem;
border-radius: var(--radius-sm);
color: var(--text);
font-weight: 600;
transition: background 0.2s ease, border-color 0.2s ease;
}
.page-actions .button:hover,
.page-actions .button:focus {
background: rgba(241, 178, 26, 0.14);
border-color: var(--brand);
}
:root {
--bg: #0b0f14;
--bg-2: #0f141b;

static/js/alerts.js Normal file
View File

@@ -0,0 +1,11 @@
document.addEventListener("DOMContentLoaded", () => {
document.querySelectorAll("[data-toast-close]").forEach((button) => {
button.addEventListener("click", () => {
const toast = button.closest(".toast");
if (toast) {
toast.classList.add("hidden");
setTimeout(() => toast.remove(), 200);
}
});
});
});

static/js/exports.js Normal file
View File

@@ -0,0 +1,155 @@
document.addEventListener("DOMContentLoaded", () => {
const modalContainer = document.createElement("div");
modalContainer.id = "export-modal-container";
document.body.appendChild(modalContainer);
async function loadModal(dataset) {
const response = await fetch(`/exports/modal/${dataset}`);
if (!response.ok) {
throw new Error(`Failed to load export modal (${response.status})`);
}
const html = await response.text();
modalContainer.innerHTML = html;
const modal = modalContainer.querySelector(".modal");
if (!modal) return;
modal.classList.add("is-active");
const closeButtons = modal.querySelectorAll("[data-dismiss='modal']");
closeButtons.forEach((btn) =>
btn.addEventListener("click", () => closeModal(modal))
);
const form = modal.querySelector("[data-export-form]");
if (form) {
form.addEventListener("submit", handleSubmit);
}
}
function closeModal(modal) {
modal.classList.remove("is-active");
setTimeout(() => {
modalContainer.innerHTML = "";
}, 200);
}
async function handleSubmit(event) {
event.preventDefault();
const form = event.currentTarget;
const submitUrl = form.action;
const formData = new FormData(form);
const format = formData.get("format") || "csv";
const submitBtn = form.querySelector("button[type='submit']");
if (submitBtn) {
submitBtn.disabled = true;
submitBtn.classList.add("loading");
}
let response;
try {
response = await fetch(submitUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
format,
include_metadata: formData.get("include_metadata") === "true",
filters: null,
}),
});
} catch (error) {
console.error(error);
NotificationCenter.show({
message: "Network error during export.",
level: "error",
});
const errorContainer = form.querySelector("[data-export-error]");
if (errorContainer) {
errorContainer.textContent = "Network error during export.";
errorContainer.classList.remove("hidden");
}
submitBtn?.classList.remove("loading");
submitBtn?.removeAttribute("disabled");
return;
}
if (!response.ok) {
let detail = "Export failed. Please try again.";
try {
const payload = await response.json();
if (payload?.detail) {
detail = Array.isArray(payload.detail)
? payload.detail.map((item) => item.msg || item).join("; ")
: payload.detail;
}
} catch (error) {
// ignore JSON parse issues
}
NotificationCenter.show({
message: detail,
level: "error",
});
const errorContainer = form.querySelector("[data-export-error]");
if (errorContainer) {
errorContainer.textContent = detail;
errorContainer.classList.remove("hidden");
}
submitBtn?.classList.remove("loading");
submitBtn?.removeAttribute("disabled");
return;
}
const blob = await response.blob();
const disposition = response.headers.get("Content-Disposition");
let filename = "export";
if (disposition) {
const match = disposition.match(/filename=([^;]+)/i);
if (match) {
filename = match[1].replace(/"/g, "");
}
}
const url = window.URL.createObjectURL(blob);
const link = document.createElement("a");
link.href = url;
link.download = filename;
document.body.appendChild(link);
link.click();
link.remove();
window.URL.revokeObjectURL(url);
const modal = modalContainer.querySelector(".modal");
if (modal) {
closeModal(modal);
}
NotificationCenter.show({
message: `Export ready: ${filename}`,
level: "success",
});
submitBtn?.classList.remove("loading");
submitBtn?.removeAttribute("disabled");
}
document.querySelectorAll("[data-export-trigger]").forEach((button) => {
button.addEventListener("click", async (event) => {
event.preventDefault();
const dataset = button.getAttribute("data-export-target");
if (!dataset) return;
try {
await loadModal(dataset);
} catch (error) {
console.error(error);
NotificationCenter.show({
message: "Unable to open export dialog.",
level: "error",
});
}
});
});
});

static/js/imports.js Normal file
View File

@@ -0,0 +1,240 @@
document.addEventListener("DOMContentLoaded", () => {
const moduleEl = document.querySelector("[data-import-module]");
if (!moduleEl) return;
const dropzone = moduleEl.querySelector("[data-import-dropzone]");
const input = dropzone?.querySelector("input[type='file']");
const uploadButton = moduleEl.querySelector("[data-import-upload-trigger]");
const resetButton = moduleEl.querySelector("[data-import-reset]");
const feedbackEl = moduleEl.querySelector("#import-upload-feedback");
const previewBody = moduleEl.querySelector("[data-import-preview-body]");
const previewContainer = moduleEl.querySelector("#import-preview-container");
const actionsEl = moduleEl.querySelector("[data-import-actions]");
const commitButton = moduleEl.querySelector("[data-import-commit]");
const cancelButton = moduleEl.querySelector("[data-import-cancel]");
let stageToken = null;
function showFeedback(message, type = "info") {
if (!feedbackEl) return;
feedbackEl.textContent = message;
feedbackEl.classList.remove("hidden", "success", "error", "info");
feedbackEl.classList.add(type);
}
function hideFeedback() {
if (!feedbackEl) return;
feedbackEl.textContent = "";
feedbackEl.classList.add("hidden");
}
function clearPreview() {
if (previewBody) {
previewBody.innerHTML = "";
}
previewContainer?.classList.add("hidden");
actionsEl?.classList.add("hidden");
commitButton?.setAttribute("disabled", "disabled");
stageToken = null;
}
function enableUpload() {
uploadButton?.removeAttribute("disabled");
resetButton?.classList.remove("hidden");
}
function disableUpload() {
uploadButton?.setAttribute("disabled", "disabled");
uploadButton?.classList.remove("loading");
resetButton?.classList.add("hidden");
}
dropzone?.addEventListener("dragover", (event) => {
event.preventDefault();
dropzone.classList.add("dragover");
});
dropzone?.addEventListener("dragleave", () => {
dropzone.classList.remove("dragover");
});
dropzone?.addEventListener("drop", (event) => {
event.preventDefault();
dropzone.classList.remove("dragover");
if (!event.dataTransfer?.files?.length || !input) {
return;
}
input.files = event.dataTransfer.files;
enableUpload();
hideFeedback();
});
input?.addEventListener("change", () => {
if (input.files?.length) {
enableUpload();
hideFeedback();
} else {
disableUpload();
}
});
resetButton?.addEventListener("click", () => {
if (input) {
input.value = "";
}
disableUpload();
hideFeedback();
clearPreview();
});
async function uploadAndPreview() {
if (!input?.files?.length) {
showFeedback(
"Please select a CSV or XLSX file before uploading.",
"error"
);
return;
}
const file = input.files[0];
showFeedback("Uploading…", "info");
uploadButton?.classList.add("loading");
uploadButton?.setAttribute("disabled", "disabled");
const formData = new FormData();
formData.append("file", file);
let response;
try {
response = await fetch("/imports/projects/preview", {
method: "POST",
body: formData,
});
} catch (error) {
console.error(error);
NotificationCenter?.show({
message: "Network error during upload.",
level: "error",
});
showFeedback("Network error during upload.", "error");
uploadButton?.classList.remove("loading");
uploadButton?.removeAttribute("disabled");
return;
}
if (!response.ok) {
const detail = await response.json().catch(() => ({}));
const message = detail?.detail || "Upload failed. Please check the file.";
NotificationCenter?.show({ message, level: "error" });
showFeedback(message, "error");
uploadButton?.classList.remove("loading");
uploadButton?.removeAttribute("disabled");
return;
}
const payload = await response.json();
hideFeedback();
renderPreview(payload);
uploadButton?.classList.remove("loading");
uploadButton?.removeAttribute("disabled");
NotificationCenter?.show({
message: `Preview ready: ${payload.summary.accepted} row(s) accepted`,
level: "success",
});
}
function renderPreview(payload) {
const rows = payload.rows || [];
const issues = payload.row_issues || [];
stageToken = payload.stage_token || null;
if (!previewBody) return;
previewBody.innerHTML = "";
const issueMap = new Map();
issues.forEach((issue) => {
issueMap.set(issue.row_number, issue.issues);
});
rows.forEach((row) => {
const tr = document.createElement("tr");
const rowIssues = issueMap.get(row.row_number) || [];
const issuesText = [
...row.issues,
...rowIssues.map((i) => i.message),
].join(", ");
tr.innerHTML = `
<td>${row.row_number}</td>
<td><span class="badge badge--${row.state}">${row.state}</span></td>
<td>${issuesText || "—"}</td>
${Object.values(row.data)
.map((value) => `<td>${value ?? ""}</td>`)
.join("")}
`;
previewBody.appendChild(tr);
});
previewContainer?.classList.remove("hidden");
if (stageToken && payload.summary.accepted > 0) {
actionsEl?.classList.remove("hidden");
commitButton?.removeAttribute("disabled");
} else {
actionsEl?.classList.add("hidden");
commitButton?.setAttribute("disabled", "disabled");
}
}
uploadButton?.addEventListener("click", uploadAndPreview);
commitButton?.addEventListener("click", async () => {
if (!stageToken) return;
commitButton.classList.add("loading");
commitButton.setAttribute("disabled", "disabled");
let response;
try {
response = await fetch("/imports/projects/commit", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ token: stageToken }),
});
} catch (error) {
console.error(error);
NotificationCenter?.show({
message: "Network error during commit.",
level: "error",
});
commitButton.classList.remove("loading");
commitButton.removeAttribute("disabled");
return;
}
if (!response.ok) {
const detail = await response.json().catch(() => ({}));
const message =
detail?.detail || "Commit failed. Please review the import data.";
NotificationCenter?.show({ message, level: "error" });
commitButton.classList.remove("loading");
commitButton.removeAttribute("disabled");
return;
}
const result = await response.json();
NotificationCenter?.show({
message: `Import committed. Created: ${result.summary.created}, Updated: ${result.summary.updated}`,
level: "success",
});
clearPreview();
if (input) {
input.value = "";
}
disableUpload();
});
cancelButton?.addEventListener("click", () => {
clearPreview();
NotificationCenter?.show({ message: "Import canceled.", level: "info" });
});
});
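The same preview/commit contract can be exercised server-side. A sketch with FastAPI's TestClient against the endpoints used above (fixture wiring assumed):
from fastapi.testclient import TestClient

def drive_import(client: TestClient, csv_bytes: bytes) -> dict:
    # Step 1: stage the file; the response carries rows, issues, and a token.
    preview = client.post(
        "/imports/projects/preview",
        files={"file": ("projects.csv", csv_bytes, "text/csv")},
    ).json()
    assert preview["summary"]["accepted"] > 0
    # Step 2: commit the staged rows using the stage token from the preview.
    commit = client.post(
        "/imports/projects/commit",
        json={"token": preview["stage_token"]},
    )
    return commit.json()["summary"]  # e.g. {"created": 3, "updated": 1}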

View File

@@ -0,0 +1,38 @@
(() => {
let container;
function ensureContainer() {
if (!container) {
container = document.createElement("div");
container.className = "toast-container";
document.body.appendChild(container);
}
return container;
}
function show({ message, level = "info", timeout = 5000 } = {}) {
const root = ensureContainer();
const toast = document.createElement("div");
toast.className = `toast toast--${level}`;
toast.setAttribute("role", "alert");
toast.innerHTML = `
<span class="toast__icon" aria-hidden="true"></span>
<p class="toast__message">${message}</p>
<button type="button" class="toast__close" aria-label="Dismiss">×</button>
`;
root.appendChild(toast);
const close = () => {
toast.classList.add("hidden");
setTimeout(() => toast.remove(), 200);
};
toast.querySelector(".toast__close").addEventListener("click", close);
if (timeout > 0) {
setTimeout(close, timeout);
}
}
window.NotificationCenter = { show };
})();

View File

@@ -1,18 +1,18 @@
{% extends "base.html" %}
{% block title %}Dashboard · CalMiner{% endblock %}
{% block head_extra %}
{% extends "base.html" %} {% block title %}Dashboard · CalMiner{% endblock %} {%
block head_extra %}
<link rel="stylesheet" href="/static/css/dashboard.css" />
{% endblock %}
{% block content %}
{% endblock %} {% block content %}
<section class="page-header dashboard-header">
<div>
<h1>Welcome back</h1>
<p class="page-subtitle">Monitor project progress and scenario insights at a glance.</p>
<p class="page-subtitle">
Monitor project progress and scenario insights at a glance.
</p>
</div>
<div class="header-actions">
<a class="btn primary" href="{{ url_for('projects.create_project_form') }}">New Project</a>
<a class="btn primary" href="{{ url_for('projects.create_project_form') }}"
>New Project</a
>
<a class="btn" href="#">Import Data</a>
</div>
</section>
@@ -45,7 +45,11 @@
<div class="card">
<header class="card-header">
<h2>Recent Projects</h2>
<a class="btn btn-link" href="{{ url_for('projects.project_list_page') }}">View all</a>
<a
class="btn btn-link"
href="{{ url_for('projects.project_list_page') }}"
>View all</a
>
</header>
{% if recent_projects %}
<table class="table">
@@ -59,17 +63,40 @@
<tbody>
{% for project in recent_projects %}
<tr>
<td>
<a class="table-link" href="{{ url_for('projects.view_project', project_id=project.id) }}">{{ project.name }}</a>
<td class="table-cell-actions">
<a
class="table-link"
href="{{ url_for('projects.view_project', project_id=project.id) }}"
>{{ project.name }}</a
>
<button
class="btn btn-ghost"
data-export-trigger
data-export-target="projects"
title="Export projects dataset"
>
<span aria-hidden="true"></span>
<span class="sr-only">Export</span>
</button>
</td>
<td>
{{ project.operation_type.value.replace('_', ' ') | title }}
</td>
<td>
{{ project.updated_at.strftime('%Y-%m-%d') if project.updated_at
else '—' }}
</td>
<td>{{ project.operation_type.value.replace('_', ' ') | title }}</td>
<td>{{ project.updated_at.strftime('%Y-%m-%d') if project.updated_at else '—' }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p class="empty-state">No recent projects. <a href="{{ url_for('projects.create_project_form') }}">Create one now.</a></p>
<p class="empty-state">
No recent projects.
<a href="{{ url_for('projects.create_project_form') }}"
>Create one now.</a
>
</p>
{% endif %}
</div>
@@ -81,7 +108,9 @@
<ul class="timeline">
{% for update in simulation_updates %}
<li>
<span class="timeline-label">{{ update.timestamp_label or '—' }}</span>
<span class="timeline-label"
>{{ update.timestamp_label or '—' }}</span
>
<div>
<strong>{{ update.title }}</strong>
<p>{{ update.description }}</p>
@@ -90,7 +119,9 @@
{% endfor %}
</ul>
{% else %}
<p class="empty-state">No simulation runs yet. Configure a scenario to start simulations.</p>
<p class="empty-state">
No simulation runs yet. Configure a scenario to start simulations.
</p>
{% endif %}
</div>
</div>
@@ -109,11 +140,22 @@
{% if alert.link %}
<a class="btn btn-link" href="{{ alert.link }}">Review</a>
{% endif %}
<button
class="btn btn-ghost"
data-export-trigger
data-export-target="scenarios"
title="Export scenarios dataset"
>
<span aria-hidden="true"></span>
<span class="sr-only">Export</span>
</button>
</li>
{% endfor %}
</ul>
{% else %}
<p class="empty-state">All scenarios look good. We'll highlight issues here.</p>
<p class="empty-state">
All scenarios look good. We'll highlight issues here.
</p>
{% endif %}
</div>
@@ -122,8 +164,12 @@
<h2>Resources</h2>
</header>
<ul class="links-list">
<li><a href="https://github.com/" target="_blank">CalMiner Repository</a></li>
<li><a href="https://example.com/docs" target="_blank">Documentation</a></li>
<li>
<a href="https://github.com/" target="_blank">CalMiner Repository</a>
</li>
<li>
<a href="https://example.com/docs" target="_blank">Documentation</a>
</li>
<li><a href="mailto:support@example.com">Contact Support</a></li>
</ul>
</div>

View File

@@ -5,6 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>{% block title %}CalMiner{% endblock %}</title>
<link rel="stylesheet" href="/static/css/main.css" />
<link rel="stylesheet" href="/static/css/imports.css" />
{% block head_extra %}{% endblock %}
</head>
<body>
@@ -21,6 +22,9 @@
</div>
{% block scripts %}{% endblock %}
<script src="/static/js/projects.js" defer></script>
<script src="/static/js/exports.js" defer></script>
<script src="/static/js/imports.js" defer></script>
<script src="/static/js/notifications.js" defer></script>
<script src="/static/js/theme.js"></script>
</body>
</html>

View File

@@ -0,0 +1,52 @@
<div
class="modal"
id="export-modal-{{ dataset }}"
data-export-dataset="{{ dataset }}"
>
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title">Export {{ dataset|capitalize }}</h5>
<button
type="button"
class="btn-close"
data-dismiss="modal"
aria-label="Close"
></button>
</div>
<form method="post" action="{{ submit_url }}" data-export-form>
<div class="modal-body">
<div class="mb-3">
<label class="form-label" for="export-format">Format</label>
<select class="form-select" id="export-format" name="format">
<option value="csv">CSV</option>
<option value="xlsx">Excel (.xlsx)</option>
</select>
</div>
<div class="form-check">
<input
class="form-check-input"
type="checkbox"
value="true"
id="include-metadata"
name="include_metadata"
/>
<label class="form-check-label" for="include-metadata">
Include metadata sheet (Excel only)
</label>
</div>
<small class="form-text text-muted"
>Filters can be adjusted in the advanced export section.</small
>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">
Cancel
</button>
<button type="submit" class="btn btn-primary">Download</button>
</div>
<p class="form-error hidden" data-export-error></p>
</form>
</div>
</div>
</div>

templates/imports/ui.html Normal file
View File

@@ -0,0 +1,34 @@
{% extends "base.html" %}
{% from "partials/alerts.html" import toast %}
{% block title %}Imports · CalMiner{% endblock %}
{% block head_extra %}
<link rel="stylesheet" href="/static/css/imports.css" />
{% endblock %}
{% block content %}
<section class="page-header">
<div>
<h1>Data Imports</h1>
<p class="text-muted">Upload CSV or Excel files to preview and commit bulk updates.</p>
</div>
</section>
<section class="card" data-import-module>
<header class="card-header">
<h2>Upload Projects or Scenarios</h2>
</header>
<div class="card-body">
{% include "partials/import_upload.html" %}
{% include "partials/import_preview_table.html" %}
<div class="import-actions hidden" data-import-actions>
<button class="btn primary" data-import-commit disabled>Commit Import</button>
<button class="btn" data-import-cancel>Cancel</button>
</div>
</div>
</section>
{{ toast("import-toast", hidden=True) }}
{% endblock %}

View File

@@ -0,0 +1,10 @@
{% macro toast(id, hidden=True, level="info", message="") %}
<div id="{{ id }}" class="toast toast--{{ level }}{% if hidden %} hidden{% endif %}" role="alert">
<span class="toast__icon" aria-hidden="true"></span>
<p class="toast__message">{{ message }}</p>
<button type="button" class="toast__close" data-toast-close>
<span aria-hidden="true">×</span>
<span class="sr-only">Dismiss</span>
</button>
</div>
{% endmacro %}

View File

@@ -0,0 +1,17 @@
{% from "partials/components.html" import table_container %}
{% call table_container("import-preview-container", hidden=True, aria_label="Import preview table", heading=table_heading or "Preview Rows") %}
<thead>
<tr>
<th scope="col">Row</th>
<th scope="col">Status</th>
<th scope="col">Issues</th>
{% for column in columns %}
<th scope="col">{{ column }}</th>
{% endfor %}
</tr>
</thead>
<tbody data-import-preview-body>
<!-- Rows injected via JavaScript -->
</tbody>
{% endcall %}

View File

@@ -0,0 +1,25 @@
{% from "partials/components.html" import feedback %}
<section class="import-upload" data-import-upload>
<header class="import-upload__header">
<h3>{{ title or "Bulk Import" }}</h3>
{% if description %}<p class="import-upload__description">{{ description }}</p>{% endif %}
</header>
<div class="import-upload__dropzone" data-import-dropzone>
<span class="icon-upload" aria-hidden="true"></span>
<p>Drag & drop CSV/XLSX files here or</p>
<label class="btn secondary">
Browse
<input type="file" name="import-file" accept=".csv,.xlsx" hidden />
</label>
<p class="import-upload__hint">Maximum size {{ max_size_hint or "10 MB" }}. UTF-8 encoding required.</p>
</div>
<div class="import-upload__actions">
<button type="button" class="btn primary" data-import-upload-trigger disabled>Upload & Preview</button>
<button type="button" class="btn" data-import-reset hidden>Reset</button>
</div>
{{ feedback("import-upload-feedback", hidden=True, role="alert") }}
</section>

View File

@@ -0,0 +1,33 @@
{% if filters %}
<section class="report-filters">
<div class="report-card">
<h2>Active Filters</h2>
<dl class="definition-list">
{% if filters.scenario_ids %}
<div>
<dt>Scenario IDs</dt>
<dd>{{ filters.scenario_ids | join(', ') }}</dd>
</div>
{% endif %}
{% if filters.start_date %}
<div>
<dt>Start Date</dt>
<dd>{{ filters.start_date }}</dd>
</div>
{% endif %}
{% if filters.end_date %}
<div>
<dt>End Date</dt>
<dd>{{ filters.end_date }}</dd>
</div>
{% endif %}
{% if not (filters.scenario_ids or filters.start_date or filters.end_date) %}
<div>
<dt>Status</dt>
<dd>No filters applied</dd>
</div>
{% endif %}
</dl>
</div>
</section>
{% endif %}

View File

@@ -0,0 +1,46 @@
{% set sorted_metrics = metrics | dictsort %}
{% set ns = namespace(percentile_keys=[]) %}
{% if percentiles %}
{% set ns.percentile_keys = percentiles %}
{% elif sorted_metrics %}
{% set reference_percentiles = sorted_metrics[0][1].percentiles.keys() | list %}
{% set ns.percentile_keys = reference_percentiles %}
{% endif %}
{% if sorted_metrics %}
<table class="metrics-table">
<thead>
<tr>
<th scope="col">Metric</th>
<th scope="col">Mean</th>
{% for percentile in ns.percentile_keys %}
{% set percentile_label = '%g' % percentile %}
<th scope="col">P{{ percentile_label }}</th>
{% endfor %}
<th scope="col">Std Dev</th>
</tr>
</thead>
<tbody>
{% for metric_name, summary in sorted_metrics %}
<tr>
<th scope="row">{{ metric_name | replace('_', ' ') | title }}</th>
<td>{{ summary.mean | format_metric(metric_name, currency) }}</td>
{% for percentile in ns.percentile_keys %}
{% set percentile_key = '%g' % percentile %}
{% set percentile_value = summary.percentiles.get(percentile_key) %}
<td>
{% if percentile_value is not none %}
{{ percentile_value | format_metric(metric_name, currency) }}
{% else %}
—
{% endif %}
</td>
{% endfor %}
<td>{{ summary.std_dev | format_metric(metric_name, currency) }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p class="muted">Monte Carlo metrics are unavailable.</p>
{% endif %}

View File

@@ -0,0 +1,56 @@
{% if options %}
{% set distribution_enabled = options.distribution %}
{% set samples_enabled = options.samples and options.distribution %}
{% endif %}
<section class="report-options">
<div class="report-card">
<h2>Data Options</h2>
<ul class="metric-list compact">
<li>
<span>Monte Carlo Distribution</span>
<strong>
{% if options %}
{{ distribution_enabled and "Enabled" or "Disabled" }}
{% else %}
Not requested
{% endif %}
</strong>
</li>
<li>
<span>Sample Storage</span>
<strong>
{% if options %}
{% if options.samples %}
{% if samples_enabled %}
Enabled
{% else %}
Requires distribution
{% endif %}
{% else %}
Disabled
{% endif %}
{% else %}
Not requested
{% endif %}
</strong>
</li>
<li>
<span>Iterations</span>
<strong>{{ iterations }}</strong>
</li>
<li>
<span>Percentiles</span>
<strong>
{% if percentiles %}
{% for percentile in percentiles %}
{{ '%g' % percentile }}{% if not loop.last %}, {% endif %}
{% endfor %}
{% else %}
Defaults
{% endif %}
</strong>
</li>
</ul>
</div>
</section>

View File

@@ -0,0 +1,14 @@
<div class="scenario-actions">
<a
href="{{ request.url_for('reports.scenario_distribution_page', scenario_id=scenario.id) }}"
class="button button-secondary"
>
View Distribution
</a>
<a
href="{{ request.url_for('reports.scenario_distribution', scenario_id=scenario.id) }}"
class="button button-secondary"
>
Download JSON
</a>
</div>

View File

@@ -0,0 +1,24 @@
<header class="page-header">
<div>
<h1 class="page-title">{{ title }}</h1>
{% if subtitle %}
<p class="page-subtitle">{{ subtitle }}</p>
{% endif %}
</div>
{% if actions %}
<div class="page-actions">
{% for action in actions %}
{% set classes = action.classes or 'button button-secondary' %}
<a
href="{{ action.href }}"
class="{{ classes }}"
{% if action.target %}target="{{ action.target }}"{% endif %}
{% if action.rel %}rel="{{ action.rel }}"{% endif %}
{% if action.download %}download="{{ action.download }}"{% endif %}
>
{{ action.label }}
</a>
{% endfor %}
</div>
{% endif %}
</header>

View File

@@ -25,7 +25,8 @@
"links": [
{"href": dashboard_href, "label": "Dashboard", "match_prefix": "/"},
{"href": projects_href, "label": "Projects", "match_prefix": "/projects"},
{"href": project_create_href, "label": "New Project", "match_prefix": "/projects/create"}
{"href": project_create_href, "label": "New Project", "match_prefix": "/projects/create"},
{"href": "/imports/ui", "label": "Imports", "match_prefix": "/imports"}
]
},
{

View File

@@ -36,7 +36,18 @@
<tbody>
{% for project in projects %}
<tr>
<td>{{ project.name }}</td>
<td class="table-cell-actions">
{{ project.name }}
<button
class="btn btn-ghost"
data-export-trigger
data-export-target="projects"
title="Export projects dataset"
>
<span aria-hidden="true"></span>
<span class="sr-only">Export</span>
</button>
</td>
<td>{{ project.location or '—' }}</td>
<td>{{ project.operation_type.value.replace('_', ' ') | title }}</td>
<td>{{ project.scenario_count }}</td>

View File

@@ -0,0 +1,205 @@
{% extends "base.html" %}
{% block title %}Project Summary | CalMiner{% endblock %}
{% block content %}
{% include "partials/reports_header.html" with context %}
{% include "partials/reports/options_card.html" with options=include_options iterations=iterations percentiles=percentiles %}
{% include "partials/reports/filters_card.html" with filters=filters %}
<section class="report-overview">
<div class="report-grid">
<article class="report-card">
<h2>Project Details</h2>
<dl class="definition-list">
<div>
<dt>Name</dt>
<dd>{{ project.name }}</dd>
</div>
<div>
<dt>Location</dt>
<dd>{{ project.location or "—" }}</dd>
</div>
<div>
<dt>Operation Type</dt>
<dd>{{ project.operation_type | replace("_", " ") | title }}</dd>
</div>
<div>
<dt>Scenarios</dt>
<dd>{{ scenario_count }}</dd>
</div>
<div>
<dt>Created</dt>
<dd>{{ project.created_at | format_datetime }}</dd>
</div>
<div>
<dt>Updated</dt>
<dd>{{ project.updated_at | format_datetime }}</dd>
</div>
</dl>
</article>
<article class="report-card">
<h2>Financial Summary</h2>
<ul class="metric-list">
<li>
<span>Total Inflows</span>
<strong>{{ aggregates.financials.total_inflows | currency_display(project.currency) }}</strong>
</li>
<li>
<span>Total Outflows</span>
<strong>{{ aggregates.financials.total_outflows | currency_display(project.currency) }}</strong>
</li>
<li>
<span>Net Cash Flow</span>
<strong>{{ aggregates.financials.total_net | currency_display(project.currency) }}</strong>
</li>
</ul>
</article>
<article class="report-card">
<h2>Deterministic Metrics</h2>
{% if aggregates.deterministic_metrics %}
<table class="metrics-table">
<thead>
<tr>
<th scope="col">Metric</th>
<th scope="col">Average</th>
<th scope="col">Best</th>
<th scope="col">Worst</th>
</tr>
</thead>
<tbody>
{% for key, metric in aggregates.deterministic_metrics.items() %}
<tr>
<th scope="row">{{ key | replace("_", " ") | title }}</th>
<td>{{ metric.average | format_metric(key, project.currency) }}</td>
<td>{{ metric.maximum | format_metric(key, project.currency) }}</td>
<td>{{ metric.minimum | format_metric(key, project.currency) }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p class="muted">Deterministic metrics are unavailable for the current filters.</p>
{% endif %}
</article>
</div>
</section>
<section class="report-section">
<header class="section-header">
<h2>Scenario Breakdown</h2>
<p class="section-subtitle">Deterministic metrics and Monte Carlo summaries for each scenario.</p>
</header>
{% if scenarios %}
{% for item in scenarios %}
<article class="scenario-card">
<div class="scenario-card-header">
<div>
<h3>{{ item.scenario.name }}</h3>
<p class="muted">{{ item.scenario.status | title }} · {{ item.scenario.primary_resource or "No primary resource" }}</p>
</div>
<div class="scenario-meta">
<span class="meta-label">Currency</span>
<span class="meta-value">{{ item.scenario.currency or project.currency or "—" }}</span>
</div>
{% include "partials/reports/scenario_actions.html" with scenario=item.scenario %}
</div>
<div class="scenario-grid">
<section class="scenario-panel">
<h4>Financial Totals</h4>
<ul class="metric-list compact">
<li>
<span>Inflows</span>
<strong>{{ item.financials.inflows | currency_display(item.scenario.currency or project.currency) }}</strong>
</li>
<li>
<span>Outflows</span>
<strong>{{ item.financials.outflows | currency_display(item.scenario.currency or project.currency) }}</strong>
</li>
<li>
<span>Net</span>
<strong>{{ item.financials.net | currency_display(item.scenario.currency or project.currency) }}</strong>
</li>
</ul>
<h5>By Category</h5>
{% if item.financials.by_category %}
<ul class="metric-list compact">
{% for label, value in item.financials.by_category.items() %}
<li>
<span>{{ label | replace("_", " ") | title }}</span>
<strong>{{ value | currency_display(item.scenario.currency or project.currency) }}</strong>
</li>
{% endfor %}
</ul>
{% else %}
<p class="muted">No financial inputs recorded.</p>
{% endif %}
</section>
<section class="scenario-panel">
<h4>Deterministic Metrics</h4>
<table class="metrics-table">
<tbody>
<tr>
<th scope="row">Discount Rate</th>
<td>{{ item.metrics.discount_rate | percentage_display }}</td>
</tr>
<tr>
<th scope="row">NPV</th>
<td>{{ item.metrics.npv | currency_display(item.scenario.currency or project.currency) }}</td>
</tr>
<tr>
<th scope="row">IRR</th>
<td>{{ item.metrics.irr | percentage_display }}</td>
</tr>
<tr>
<th scope="row">Payback Period</th>
<td>{{ item.metrics.payback_period | period_display }}</td>
</tr>
</tbody>
</table>
{% if item.metrics.notes %}
<ul class="note-list">
{% for note in item.metrics.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
</section>
<section class="scenario-panel">
<h4>Monte Carlo Summary</h4>
{% if item.monte_carlo and item.monte_carlo.available %}
<p class="muted">
Iterations: {{ item.monte_carlo.iterations }}
{% if percentiles %}
· Percentiles:
{% for percentile in percentiles %}
{{ '%g' % percentile }}{% if not loop.last %}, {% endif %}
{% endfor %}
{% endif %}
</p>
{% include "partials/reports/monte_carlo_table.html" with metrics=item.monte_carlo.metrics currency=item.scenario.currency or project.currency percentiles=percentiles %}
{% else %}
<p class="muted">Monte Carlo metrics are unavailable for this scenario.</p>
{% if item.monte_carlo and item.monte_carlo.notes %}
<ul class="note-list">
{% for note in item.monte_carlo.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
{% endif %}
</section>
</div>
</article>
{% endfor %}
{% else %}
<p class="muted">No scenarios match the current filters.</p>
{% endif %}
</section>
{% endblock %}

View File

@@ -0,0 +1,166 @@
{% extends "base.html" %}
{% block title %}Scenario Comparison | CalMiner{% endblock %}
{% block content %}
{% include "partials/reports_header.html" with context %}
{% include "partials/reports/options_card.html" with options=include_options iterations=iterations percentiles=percentiles %}
<section class="report-filters">
<div class="report-card">
<h2>Compared Scenarios</h2>
<ul class="metric-list compact">
{% for item in scenarios %}
<li>
<span>{{ item.scenario.name }}</span>
<strong>#{{ item.scenario.id }}</strong>
</li>
{% endfor %}
</ul>
</div>
</section>
<section class="report-overview">
<article class="report-card">
<h2>Project Details</h2>
<dl class="definition-list">
<div>
<dt>Name</dt>
<dd>{{ project.name }}</dd>
</div>
<div>
<dt>Location</dt>
<dd>{{ project.location or "—" }}</dd>
</div>
<div>
<dt>Operation Type</dt>
<dd>{{ project.operation_type | replace("_", " ") | title }}</dd>
</div>
<div>
<dt>Scenarios Compared</dt>
<dd>{{ scenarios | length }}</dd>
</div>
</dl>
</article>
<article class="report-card">
<h2>Comparison Summary</h2>
{% if comparison %}
<table class="metrics-table">
<thead>
<tr>
<th scope="col">Metric</th>
<th scope="col">Direction</th>
<th scope="col">Best Performer</th>
<th scope="col">Worst Performer</th>
<th scope="col">Average</th>
</tr>
</thead>
<tbody>
{% for key, metric in comparison.items() %}
<tr>
<th scope="row">{{ key | replace("_", " ") | title }}</th>
<td>{{ metric.direction | replace("_", " ") | title }}</td>
<td>
{% if metric.best %}
<strong>{{ metric.best.name }}</strong>
<span class="muted">({{ metric.best.value | format_metric(key, project.currency) }})</span>
{% else %}
—
{% endif %}
</td>
<td>
{% if metric.worst %}
<strong>{{ metric.worst.name }}</strong>
<span class="muted">({{ metric.worst.value | format_metric(key, project.currency) }})</span>
{% else %}
—
{% endif %}
</td>
<td>{{ metric.average | format_metric(key, project.currency) }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p class="muted">No deterministic metrics available for comparison.</p>
{% endif %}
</article>
</section>
<section class="report-section">
<header class="section-header">
<h2>Scenario Details</h2>
<p class="section-subtitle">Each scenario includes deterministic metrics and Monte Carlo summaries.</p>
</header>
{% for item in scenarios %}
<article class="scenario-card">
<div class="scenario-card-header">
<div>
<h3>{{ item.scenario.name }}</h3>
<p class="muted">{{ item.scenario.status | title }} · Currency: {{ item.scenario.currency or project.currency }}</p>
</div>
<div class="scenario-meta">
<span class="meta-label">Primary Resource</span>
<span class="meta-value">{{ item.scenario.primary_resource or "—" }}</span>
</div>
{% include "partials/reports/scenario_actions.html" with scenario=item.scenario %}
</div>
<div class="scenario-grid">
<section class="scenario-panel">
<h4>Deterministic Metrics</h4>
<table class="metrics-table">
<tbody>
<tr>
<th scope="row">NPV</th>
<td>{{ item.metrics.npv | currency_display(item.scenario.currency or project.currency) }}</td>
</tr>
<tr>
<th scope="row">IRR</th>
<td>{{ item.metrics.irr | percentage_display }}</td>
</tr>
<tr>
<th scope="row">Payback Period</th>
<td>{{ item.metrics.payback_period | period_display }}</td>
</tr>
</tbody>
</table>
{% if item.metrics.notes %}
<ul class="note-list">
{% for note in item.metrics.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
</section>
<section class="scenario-panel">
<h4>Monte Carlo Summary</h4>
{% if item.monte_carlo and item.monte_carlo.available %}
<p class="muted">
Iterations: {{ item.monte_carlo.iterations }}
{% if percentiles %}
· Percentiles:
{% for percentile in percentiles %}
{{ '%g' % percentile }}{% if not loop.last %}, {% endif %}
{% endfor %}
{% endif %}
</p>
{% include "partials/reports/monte_carlo_table.html" with metrics=item.monte_carlo.metrics currency=item.scenario.currency or project.currency percentiles=percentiles %}
{% else %}
<p class="muted">No Monte Carlo data available for this scenario.</p>
{% if item.monte_carlo and item.monte_carlo.notes %}
<ul class="note-list">
{% for note in item.monte_carlo.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
{% endif %}
</section>
</div>
</article>
{% endfor %}
</section>
{% endblock %}

View File

@@ -0,0 +1,149 @@
{% extends "base.html" %}
{% block title %}Scenario Distribution | CalMiner{% endblock %}
{% block content %}
{% include "partials/reports_header.html" with context %}
<section class="report-overview">
<div class="report-grid">
<article class="report-card">
<h2>Scenario Details</h2>
<dl class="definition-list">
<div>
<dt>Name</dt>
<dd>{{ scenario.name }}</dd>
</div>
<div>
<dt>Project ID</dt>
<dd>{{ scenario.project_id }}</dd>
</div>
<div>
<dt>Status</dt>
<dd>{{ scenario.status | title }}</dd>
</div>
<div>
<dt>Currency</dt>
<dd>{{ scenario.currency or "—" }}</dd>
</div>
<div>
<dt>Discount Rate</dt>
<dd>{{ metrics.discount_rate | percentage_display }}</dd>
</div>
<div>
<dt>Updated</dt>
<dd>{{ scenario.updated_at | format_datetime }}</dd>
</div>
</dl>
</article>
<article class="report-card">
<h2>Financial Totals</h2>
<ul class="metric-list">
<li>
<span>Inflows</span>
<strong>{{ summary.inflows | currency_display(scenario.currency) }}</strong>
</li>
<li>
<span>Outflows</span>
<strong>{{ summary.outflows | currency_display(scenario.currency) }}</strong>
</li>
<li>
<span>Net Cash Flow</span>
<strong>{{ summary.net | currency_display(scenario.currency) }}</strong>
</li>
</ul>
{% if summary.by_category %}
<h3>By Category</h3>
<ul class="metric-list compact">
{% for label, value in summary.by_category.items() %}
<li>
<span>{{ label | replace("_", " ") | title }}</span>
<strong>{{ value | currency_display(scenario.currency) }}</strong>
</li>
{% endfor %}
</ul>
{% endif %}
</article>
</div>
</section>
<section class="report-section">
<header class="section-header">
<h2>Deterministic Metrics</h2>
<p class="section-subtitle">Key financial indicators calculated from deterministic cash flows.</p>
</header>
<table class="metrics-table">
<tbody>
<tr>
<th scope="row">NPV</th>
<td>{{ metrics.npv | currency_display(scenario.currency) }}</td>
</tr>
<tr>
<th scope="row">IRR</th>
<td>{{ metrics.irr | percentage_display }}</td>
</tr>
<tr>
<th scope="row">Payback Period</th>
<td>{{ metrics.payback_period | period_display }}</td>
</tr>
</tbody>
</table>
{% if metrics.notes %}
<ul class="note-list">
{% for note in metrics.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
</section>
<section class="report-section">
<header class="section-header">
<h2>Monte Carlo Distribution</h2>
<p class="section-subtitle">Simulation-driven distributions contextualize stochastic variability.</p>
</header>
{% if monte_carlo and monte_carlo.available %}
<div class="simulation-summary">
<p>Iterations: {{ monte_carlo.iterations }} · Percentiles: {{ percentiles | join(", ") }}</p>
<table class="metrics-table">
<thead>
<tr>
<th scope="col">Metric</th>
<th scope="col">Mean</th>
<th scope="col">P5</th>
<th scope="col">Median</th>
<th scope="col">P95</th>
</tr>
</thead>
<tbody>
{% for metric, summary in monte_carlo.metrics.items() %}
<tr>
<th scope="row">{{ metric | replace("_", " ") | title }}</th>
<td>{{ summary.mean | format_metric(metric, scenario.currency) }}</td>
<td>{{ summary.percentiles['5'] | format_metric(metric, scenario.currency) }}</td>
<td>{{ summary.percentiles['50'] | format_metric(metric, scenario.currency) }}</td>
<td>{{ summary.percentiles['95'] | format_metric(metric, scenario.currency) }}</td>
</tr>
{% endfor %}
</tbody>
</table>
{% if monte_carlo.notes %}
<ul class="note-list">
{% for note in monte_carlo.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
</div>
{% else %}
<p class="muted">Monte Carlo output is unavailable for this scenario.</p>
{% if monte_carlo and monte_carlo.notes %}
<ul class="note-list">
{% for note in monte_carlo.notes %}
<li>{{ note }}</li>
{% endfor %}
</ul>
{% endif %}
{% endif %}
</section>
{% endblock %}

View File

@@ -49,7 +49,11 @@
<div class="form-group">
<label for="currency">Currency</label>
<input id="currency" name="currency" type="text" maxlength="3" value="{{ scenario.currency if scenario else '' }}" />
{% set currency_prefill = scenario.currency if scenario and scenario.currency else default_currency %}
<input id="currency" name="currency" type="text" maxlength="3" value="{{ currency_prefill or '' }}" placeholder="{{ default_currency or '' }}" />
{% if default_currency %}
<p class="field-help">Defaults to {{ default_currency }} when left blank.</p>
{% endif %}
</div>
<div class="form-group">

View File

@@ -18,6 +18,7 @@ from routes.dashboard import router as dashboard_router
from routes.projects import router as projects_router
from routes.scenarios import router as scenarios_router
from routes.imports import router as imports_router
from routes.exports import router as exports_router
from services.importers import ImportIngestionService
from services.unit_of_work import UnitOfWork
from services.session import AuthSession, SessionTokens
@@ -54,6 +55,7 @@ def app(session_factory: sessionmaker) -> FastAPI:
application.include_router(projects_router)
application.include_router(scenarios_router)
application.include_router(imports_router)
application.include_router(exports_router)
def _override_uow() -> Iterator[UnitOfWork]:
with UnitOfWork(session_factory=session_factory) as uow:

View File

@@ -78,10 +78,8 @@ class TestScenarioLifecycle:
json={"currency": "ca"},
)
assert invalid_update.status_code == 422
assert (
invalid_update.json()["detail"][0]["msg"]
== "Value error, Currency code must be a 3-letter ISO value"
)
assert "Invalid currency code" in invalid_update.json()[
"detail"][0]["msg"]
# Scenario detail should still show the previous (valid) currency
scenario_detail = client.get(f"/scenarios/{scenario_id}/view")

View File

@@ -10,7 +10,15 @@ from sqlalchemy.orm import Session, sessionmaker
from config.database import Base
from config.settings import AdminBootstrapSettings
from services.bootstrap import AdminBootstrapResult, RoleBootstrapResult, bootstrap_admin
from models import MiningOperationType, Project
from services.bootstrap import (
AdminBootstrapResult,
PricingBootstrapResult,
RoleBootstrapResult,
bootstrap_admin,
bootstrap_pricing_settings,
)
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork
@@ -114,3 +122,86 @@ def test_bootstrap_respects_force_reset(unit_of_work_factory: Callable[[], UnitO
user = users_repo.get_by_email(rotated_settings.email)
assert user is not None
assert user.verify_password("rotated")
def test_bootstrap_pricing_creates_defaults(unit_of_work_factory: Callable[[], UnitOfWork]) -> None:
metadata = PricingMetadata(
default_payable_pct=95.0,
default_currency="CAD",
moisture_threshold_pct=3.0,
moisture_penalty_per_pct=1.25,
)
result = bootstrap_pricing_settings(
metadata=metadata,
unit_of_work_factory=unit_of_work_factory,
)
assert isinstance(result, PricingBootstrapResult)
assert result.seed.created is True
assert result.projects_assigned == 0
with unit_of_work_factory() as uow:
settings_repo = uow.pricing_settings
assert settings_repo is not None
stored = settings_repo.get_by_slug("default")
assert stored.default_currency == "CAD"
assert float(stored.default_payable_pct) == pytest.approx(95.0)
assert float(stored.moisture_threshold_pct) == pytest.approx(3.0)
assert float(stored.moisture_penalty_per_pct) == pytest.approx(1.25)
def test_bootstrap_pricing_assigns_projects(unit_of_work_factory: Callable[[], UnitOfWork]) -> None:
metadata = PricingMetadata(
default_payable_pct=90.0,
default_currency="USD",
moisture_threshold_pct=5.0,
moisture_penalty_per_pct=0.5,
)
with unit_of_work_factory() as uow:
projects_repo = uow.projects
assert projects_repo is not None
project = Project(
name="Project Alpha",
operation_type=MiningOperationType.OPEN_PIT,
)
created = projects_repo.create(project)
project_id = created.id
result = bootstrap_pricing_settings(
metadata=metadata,
unit_of_work_factory=unit_of_work_factory,
)
assert result.projects_assigned == 1
assert result.seed.created is True
with unit_of_work_factory() as uow:
projects_repo = uow.projects
assert projects_repo is not None
stored = projects_repo.get(project_id, with_pricing=True)
assert stored.pricing_settings is not None
assert stored.pricing_settings.default_currency == "USD"
def test_bootstrap_pricing_is_idempotent(unit_of_work_factory: Callable[[], UnitOfWork]) -> None:
metadata = PricingMetadata(
default_payable_pct=92.5,
default_currency="EUR",
moisture_threshold_pct=4.5,
moisture_penalty_per_pct=0.75,
)
first = bootstrap_pricing_settings(
metadata=metadata,
unit_of_work_factory=unit_of_work_factory,
)
second = bootstrap_pricing_settings(
metadata=metadata,
unit_of_work_factory=unit_of_work_factory,
)
assert first.seed.created is True
assert second.seed.created is False
assert second.projects_assigned == 0
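
Taken together, these three cases pin down the bootstrap contract: seed a "default" pricing settings row when none exists, attach it to every project that has no pricing settings yet, and report created=False with zero new assignments on a repeat run. A minimal sketch of that idempotent flow, reusing the unit-of-work helpers the tests themselves exercise (the PricingBootstrapResult construction is inferred from the assertions, not copied from services.bootstrap):

def bootstrap_pricing_settings(metadata, unit_of_work_factory):
    # Seed-or-skip the default settings, then backfill unassigned projects.
    with unit_of_work_factory() as uow:
        seed = uow.ensure_default_pricing_settings(
            metadata=metadata, slug="default", name="Default Pricing"
        )
        assigned = 0
        if seed.created:
            for project in uow.projects.list(with_pricing=True):
                if project.pricing_settings is None:
                    uow.set_project_pricing_settings(project, seed.settings)
                    assigned += 1
        return PricingBootstrapResult(seed=seed, projects_assigned=assigned)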

42
tests/test_currency.py Normal file
View File

@@ -0,0 +1,42 @@
from __future__ import annotations
import pytest
from services.currency import CurrencyValidationError, normalise_currency, require_currency
@pytest.mark.parametrize(
"raw,expected",
[
("usd", "USD"),
(" Eur ", "EUR"),
("JPY", "JPY"),
(None, None),
],
)
def test_normalise_currency_valid_inputs(raw: str | None, expected: str | None) -> None:
assert normalise_currency(raw) == expected
@pytest.mark.parametrize("raw", ["usd1", "us", "", "12", "X Y Z"])
def test_normalise_currency_invalid_inputs(raw: str) -> None:
with pytest.raises(CurrencyValidationError):
normalise_currency(raw)
def test_require_currency_with_value() -> None:
assert require_currency("gbp", default="usd") == "GBP"
def test_require_currency_with_default() -> None:
assert require_currency(None, default="cad") == "CAD"
def test_require_currency_missing_default() -> None:
with pytest.raises(CurrencyValidationError):
require_currency(None)
def test_require_currency_invalid_default() -> None:
with pytest.raises(CurrencyValidationError):
require_currency(None, default="invalid")
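
These parametrised cases fully specify the helpers: trim whitespace, accept exactly three ASCII letters, upper-case the result, let None pass through normalise_currency, and make require_currency fall back to a default that is itself validated. A minimal sketch that satisfies them; the real services.currency module may differ in details such as message wording:

import re

class CurrencyValidationError(ValueError):
    """Raised for anything that is not a 3-letter ISO-style code."""

_CODE = re.compile(r"^[A-Za-z]{3}$")

def normalise_currency(raw: str | None) -> str | None:
    if raw is None:
        return None
    candidate = raw.strip()
    if not _CODE.match(candidate):
        raise CurrencyValidationError(f"Invalid currency code: {raw!r}")
    return candidate.upper()

def require_currency(raw: str | None, default: str | None = None) -> str:
    value = normalise_currency(raw)
    if value is not None:
        return value
    if default is None:
        raise CurrencyValidationError("A currency code is required")
    return require_currency(default)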

144
tests/test_export_routes.py Normal file
View File

@@ -0,0 +1,144 @@
from __future__ import annotations
import csv
from io import BytesIO, StringIO
from zipfile import ZipFile
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from models import Project, Scenario, ScenarioStatus
from services.unit_of_work import UnitOfWork
def _seed_projects(session: Session) -> None:
project = Project(name="Alpha", operation_type="open_pit")
project.updated_at = project.created_at
session.add(project)
session.commit()
def _seed_scenarios(session: Session, project: Project) -> Scenario:
scenario = Scenario(
name="Scenario A",
project_id=project.id,
status=ScenarioStatus.ACTIVE,
)
session.add(scenario)
session.commit()
session.refresh(scenario)
return scenario
def test_projects_export_modal(client: TestClient) -> None:
response = client.get("/exports/modal/projects")
assert response.status_code == 200
assert "Export Projects" in response.text
def test_scenarios_export_modal(client: TestClient) -> None:
response = client.get("/exports/modal/scenarios")
assert response.status_code == 200
assert "Export Scenarios" in response.text
def test_project_export_csv(client: TestClient, unit_of_work_factory) -> None:
with unit_of_work_factory() as uow:
project = Project(name="CSV Project", operation_type="open_pit")
uow.projects.create(project)
response = client.post(
"/exports/projects",
json={"format": "csv"},
)
assert response.status_code == 200
assert response.headers["Content-Type"].startswith("text/csv")
assert "attachment; filename=" in response.headers["Content-Disposition"]
content = response.content.decode("utf-8")
reader = csv.reader(StringIO(content))
rows = list(reader)
assert rows[0][:3] == ["name", "location", "operation_type"]
assert any(row[0] == "CSV Project" for row in rows[1:])
def test_project_export_excel(client: TestClient, unit_of_work_factory) -> None:
with unit_of_work_factory() as uow:
project = Project(name="XLSX Project", operation_type="open_pit")
uow.projects.create(project)
response = client.post(
"/exports/projects",
json={"format": "xlsx"},
)
assert response.status_code == 200
assert response.headers["Content-Type"].startswith(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
with ZipFile(BytesIO(response.content)) as archive:
assert "xl/workbook.xml" in archive.namelist()
def test_scenario_export_csv(client: TestClient, unit_of_work_factory) -> None:
with unit_of_work_factory() as uow:
project = Project(name="Scenario Project", operation_type="open_pit")
uow.projects.create(project)
scenario = Scenario(
name="Scenario CSV",
project_id=project.id,
status=ScenarioStatus.ACTIVE,
)
uow.scenarios.create(scenario)
response = client.post(
"/exports/scenarios",
json={"format": "csv"},
)
assert response.status_code == 200
reader = csv.reader(StringIO(response.content.decode("utf-8")))
rows = list(reader)
assert rows[0][0] == "project_name"
assert any(row[0] == "Scenario Project" for row in rows[1:])
def test_scenario_export_excel(client: TestClient, unit_of_work_factory) -> None:
with unit_of_work_factory() as uow:
project = Project(name="Scenario Excel", operation_type="open_pit")
uow.projects.create(project)
scenario = Scenario(
name="Scenario XLSX",
project_id=project.id,
status=ScenarioStatus.ACTIVE,
)
uow.scenarios.create(scenario)
response = client.post(
"/exports/scenarios",
json={"format": "xlsx"},
)
assert response.status_code == 200
assert response.headers["Content-Type"].startswith(
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
)
with ZipFile(BytesIO(response.content)) as archive:
assert "xl/workbook.xml" in archive.namelist()
def test_scenario_export_rejects_invalid_currency_filter(client: TestClient) -> None:
response = client.post(
"/exports/scenarios",
json={
"format": "csv",
"filters": {"currencies": ["USD", "XX"]},
},
)
assert response.status_code == 422
detail = response.json()["detail"]
assert "Invalid currency code" in detail

View File

@@ -0,0 +1,234 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import date, datetime, timezone
from decimal import Decimal
from io import BytesIO
from typing import Any, Iterable
import pytest
from services.export_serializers import (
CSVExportColumn,
CSVExporter,
ExcelExporter,
default_formatter,
default_project_columns,
default_scenario_columns,
export_projects_to_excel,
export_scenarios_to_excel,
format_date_iso,
format_datetime_utc,
format_decimal,
stream_projects_to_csv,
stream_scenarios_to_csv,
)
from openpyxl import load_workbook
@dataclass(slots=True)
class DummyProject:
name: str
location: str | None = None
operation_type: str = "open_pit"
description: str | None = None
created_at: datetime | None = None
updated_at: datetime | None = None
@dataclass(slots=True)
class DummyScenario:
project: DummyProject | None
name: str
status: str = "draft"
start_date: date | None = None
end_date: date | None = None
discount_rate: Decimal | None = None
currency: str | None = None
primary_resource: str | None = None
description: str | None = None
created_at: datetime | None = None
updated_at: datetime | None = None
def collect_csv_bytes(chunks: Iterable[bytes]) -> list[str]:
return [chunk.decode("utf-8") for chunk in chunks]
def load_workbook_bytes(data: bytes):
buffer = BytesIO(data)
return load_workbook(buffer, read_only=True, data_only=True)
def test_csv_exporter_writes_header_and_rows() -> None:
exporter = CSVExporter(
[
CSVExportColumn("Name", "name"),
CSVExportColumn("Location", "location"),
]
)
project = DummyProject(name="Alpha", location="Nevada")
chunks = collect_csv_bytes(exporter.iter_bytes([project]))
assert chunks[0] == "Name,Location\n"
assert chunks[1] == "Alpha,Nevada\n"
def test_excel_exporter_basic_workbook() -> None:
exporter = ExcelExporter(default_project_columns(), sheet_name="Projects")
project = DummyProject(name="Alpha", location="Nevada")
data = exporter.export([project])
workbook = load_workbook_bytes(data)
sheet = workbook["Projects"]
rows = list(sheet.rows)
assert [cell.value for cell in rows[0]] == [
"name",
"location",
"operation_type",
"description",
"created_at",
"updated_at",
]
assert rows[1][0].value == "Alpha"
def test_excel_export_projects_helper_with_metadata() -> None:
project = DummyProject(name="Alpha", location="Nevada")
data = export_projects_to_excel(
[project], metadata={"rows": 1}, workbook_title="Project Export")
workbook = load_workbook_bytes(data)
assert workbook.properties.title == "Project Export"
assert "Projects" in workbook.sheetnames
assert any(sheet.title.startswith("Metadata")
for sheet in workbook.worksheets)
def test_excel_export_scenarios_helper_projects_resolved() -> None:
project = DummyProject(name="Alpha")
scenario = DummyScenario(project=project, name="Scenario 1")
data = export_scenarios_to_excel([scenario])
workbook = load_workbook_bytes(data)
sheet = workbook["Scenarios"]
rows = list(sheet.rows)
assert rows[1][0].value == "Alpha"
assert rows[1][1].value == "Scenario 1"
def test_csv_exporter_handles_optional_values_and_default_formatter() -> None:
exporter = CSVExporter(
[
CSVExportColumn("Name", "name"),
CSVExportColumn("Description", "description"),
]
)
project = DummyProject(name="Bravo")
chunks = collect_csv_bytes(exporter.iter_bytes([project]))
assert chunks[-1] == "Bravo,\n"
def test_stream_projects_uses_default_columns() -> None:
projects = [
DummyProject(
name="Alpha",
location="Nevada",
operation_type="open_pit",
description="Primary",
created_at=datetime(2025, 1, 1, tzinfo=timezone.utc),
updated_at=datetime(2025, 1, 2, tzinfo=timezone.utc),
)
]
chunks = collect_csv_bytes(stream_projects_to_csv(projects))
assert chunks[0].startswith("name,location,operation_type")
assert any("Alpha" in chunk for chunk in chunks)
def test_stream_scenarios_resolves_project_name_accessor() -> None:
project = DummyProject(name="Project X")
scenario = DummyScenario(project=project, name="Scenario A")
chunks = collect_csv_bytes(stream_scenarios_to_csv([scenario]))
assert "Project X" in chunks[-1]
assert "Scenario A" in chunks[-1]
def test_custom_formatter_applies() -> None:
def uppercase(value: Any) -> str:
return str(value).upper() if value is not None else ""
exporter = CSVExporter([
CSVExportColumn("Name", "name", formatter=uppercase),
])
chunks = collect_csv_bytes(
exporter.iter_bytes([DummyProject(name="alpha")]))
assert chunks[-1] == "ALPHA\n"
def test_default_formatter_handles_multiple_types() -> None:
assert default_formatter(None) == ""
assert default_formatter(True) == "true"
assert default_formatter(False) == "false"
assert default_formatter(Decimal("1.234")) == "1.23"
assert default_formatter(
datetime(2025, 1, 1, tzinfo=timezone.utc)).endswith("Z")
assert default_formatter(date(2025, 1, 1)) == "2025-01-01"
def test_format_helpers() -> None:
assert format_date_iso(date(2025, 5, 1)) == "2025-05-01"
assert format_date_iso("not-a-date") == ""
ts = datetime(2025, 5, 1, 12, 0, tzinfo=timezone.utc)
assert format_datetime_utc(ts) == "2025-05-01T12:00:00Z"
assert format_datetime_utc("nope") == ""
assert format_decimal(None) == ""
assert format_decimal(Decimal("12.345")) == "12.35"
assert format_decimal(10) == "10.00"
def test_default_project_columns_includes_required_fields() -> None:
columns = default_project_columns()
headers = [column.header for column in columns]
assert headers[:3] == ["name", "location", "operation_type"]
def test_default_scenario_columns_handles_missing_project() -> None:
scenario = DummyScenario(project=None, name="Orphan Scenario")
exporter = CSVExporter(default_scenario_columns())
chunks = collect_csv_bytes(exporter.iter_bytes([scenario]))
assert chunks[-1].startswith(",Orphan Scenario")
def test_csv_exporter_requires_columns() -> None:
with pytest.raises(ValueError):
CSVExporter([])
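
The formatter tests double as an executable spec: None becomes an empty cell, booleans serialise as lowercase true/false, decimals are fixed to two places (12.345 landing on 12.35 points at half-up rather than banker's rounding), datetimes render as UTC with a Z suffix, and dates as plain ISO. A plausible reconstruction under those assumptions; the real services.export_serializers may differ:

from datetime import date, datetime
from decimal import ROUND_HALF_UP, Decimal

def format_decimal(value) -> str:
    # Two decimal places, half-up; missing values collapse to an empty cell.
    if value is None:
        return ""
    return str(Decimal(value).quantize(Decimal("0.01"), rounding=ROUND_HALF_UP))

def default_formatter(value) -> str:
    if value is None:
        return ""
    if isinstance(value, bool):      # before any numeric check: bool subclasses int
        return "true" if value else "false"
    if isinstance(value, Decimal):
        return format_decimal(value)
    if isinstance(value, datetime):  # before date: datetime subclasses date
        return value.strftime("%Y-%m-%dT%H:%M:%SZ")  # tz-aware UTC values assumed
    if isinstance(value, date):
        return value.isoformat()
    return str(value)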

162
tests/test_financial.py Normal file
View File

@@ -0,0 +1,162 @@
from __future__ import annotations
from datetime import date
import pytest
from services.financial import (
CashFlow,
PaybackNotReachedError,
internal_rate_of_return,
net_present_value,
normalize_cash_flows,
payback_period,
)
def test_normalize_cash_flows_with_dates() -> None:
base = date(2025, 1, 1)
period_length = 365.0 / 4
flows = [
CashFlow(amount=-1_000_000, date=base),
CashFlow(amount=350_000, date=date(2025, 4, 1)),
CashFlow(amount=420_000, date=date(2025, 7, 1)),
]
normalised = normalize_cash_flows(flows, compounds_per_year=4)
assert normalised[0] == (-1_000_000.0, 0.0)
expected_second = (date(2025, 4, 1) - base).days / period_length
expected_third = (date(2025, 7, 1) - base).days / period_length
assert normalised[1][1] == pytest.approx(expected_second, rel=1e-6)
assert normalised[2][1] == pytest.approx(expected_third, rel=1e-6)
def test_net_present_value_with_period_indices() -> None:
rate = 0.10
flows = [
CashFlow(amount=-1_000, period_index=0),
CashFlow(amount=500, period_index=1),
CashFlow(amount=500, period_index=2),
CashFlow(amount=500, period_index=3),
]
expected = -1_000 + sum(500 / (1 + rate) **
period for period in range(1, 4))
result = net_present_value(rate, flows)
assert result == pytest.approx(expected, rel=1e-9)
def test_net_present_value_with_residual_value() -> None:
rate = 0.08
flows = [
CashFlow(amount=-100_000, period_index=0),
CashFlow(amount=30_000, period_index=1),
CashFlow(amount=35_000, period_index=2),
]
expected = (
-100_000
+ 30_000 / (1 + rate)
+ 35_000 / (1 + rate) ** 2
+ 25_000 / (1 + rate) ** 3
)
result = net_present_value(rate, flows, residual_value=25_000)
assert result == pytest.approx(expected, rel=1e-9)
def test_internal_rate_of_return_simple_case() -> None:
flows = [
CashFlow(amount=-1_000, period_index=0),
CashFlow(amount=1_210, period_index=1),
]
irr = internal_rate_of_return(flows, guess=0.05)
assert irr == pytest.approx(0.21, rel=1e-9)
def test_internal_rate_of_return_multiple_sign_changes() -> None:
flows = [
CashFlow(amount=-500_000, period_index=0),
CashFlow(amount=250_000, period_index=1),
CashFlow(amount=-100_000, period_index=2),
CashFlow(amount=425_000, period_index=3),
]
irr = internal_rate_of_return(flows, guess=0.2)
npv = net_present_value(irr, flows)
assert npv == pytest.approx(0.0, abs=1e-6)
def test_internal_rate_of_return_requires_mixed_signs() -> None:
flows = [
CashFlow(amount=100_000, period_index=0),
CashFlow(amount=150_000, period_index=1),
]
with pytest.raises(ValueError):
internal_rate_of_return(flows)
def test_payback_period_exact_period() -> None:
flows = [
CashFlow(amount=-120_000, period_index=0),
CashFlow(amount=40_000, period_index=1),
CashFlow(amount=40_000, period_index=2),
CashFlow(amount=40_000, period_index=3),
]
period = payback_period(flows, allow_fractional=False)
assert period == pytest.approx(3.0)
def test_payback_period_fractional_period() -> None:
flows = [
CashFlow(amount=-100_000, period_index=0),
CashFlow(amount=80_000, period_index=1),
CashFlow(amount=30_000, period_index=2),
]
fractional = payback_period(flows)
whole = payback_period(flows, allow_fractional=False)
assert fractional == pytest.approx(1 + 20_000 / 30_000, rel=1e-9)
assert whole == pytest.approx(2.0)
def test_payback_period_raises_when_never_recovered() -> None:
flows = [
CashFlow(amount=-250_000, period_index=0),
CashFlow(amount=50_000, period_index=1),
CashFlow(amount=60_000, period_index=2),
CashFlow(amount=70_000, period_index=3),
]
with pytest.raises(PaybackNotReachedError):
payback_period(flows)
def test_payback_period_with_quarterly_compounding() -> None:
base = date(2025, 1, 1)
flows = [
CashFlow(amount=-120_000, date=base),
CashFlow(amount=35_000, date=date(2025, 4, 1)),
CashFlow(amount=35_000, date=date(2025, 7, 1)),
CashFlow(amount=50_000, date=date(2025, 10, 1)),
]
period = payback_period(flows, compounds_per_year=4)
period_length = 365.0 / 4
expected_period = (date(2025, 10, 1) - base).days / period_length
assert period == pytest.approx(expected_period, abs=1e-6)
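
Three conventions are fixed here: dated flows map to fractional periods of length 365 / compounds_per_year days measured from the earliest flow, residual_value is discounted one period beyond the final flow, and fractional payback interpolates linearly inside the period where cumulative cash turns non-negative. A compact sketch of the NPV and payback cores over already-normalised (amount, period) pairs, a simplification of the CashFlow-based API these tests call:

class PaybackNotReachedError(RuntimeError):
    """The initial outlay is never recovered."""

def npv(rate, flows, residual_value=None):
    total = sum(amount / (1 + rate) ** period for amount, period in flows)
    if residual_value is not None:
        # One period past the final flow, matching the 8% residual test above.
        horizon = max(period for _, period in flows) + 1
        total += residual_value / (1 + rate) ** horizon
    return total

def payback(flows, allow_fractional=True):
    balance, previous = 0.0, 0.0
    for amount, period in sorted(flows, key=lambda item: item[1]):
        if balance < 0 <= balance + amount:
            if allow_fractional and amount:
                # Linear interpolation inside the recovering period.
                return previous + (-balance / amount) * (period - previous)
            return period
        balance += amount
        previous = period
    raise PaybackNotReachedError("cumulative cash flow never turns non-negative")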

View File

@@ -68,3 +68,35 @@ def test_scenario_import_commit_invalid_token_returns_404(
)
assert response.status_code == 404
assert "Unknown scenario import token" in response.json()["detail"]
def test_scenario_import_preview_rejects_invalid_currency(
client: TestClient,
unit_of_work_factory,
) -> None:
with unit_of_work_factory() as uow:
assert uow.projects is not None
project = Project(
name="Import Currency Project",
operation_type=MiningOperationType.OPEN_PIT,
)
uow.projects.create(project)
csv_content = (
"project_name,name,currency\n"
"Import Currency Project,Invalid Currency,US\n"
)
response = client.post(
"/imports/scenarios/preview",
files={"file": ("scenarios.csv", csv_content, "text/csv")},
)
assert response.status_code == 200
payload = response.json()
assert payload["summary"]["accepted"] == 0
assert payload["summary"]["errored"] == 1
assert payload["parser_errors"]
parser_error = payload["parser_errors"][0]
assert parser_error["field"] == "currency"
assert "Invalid currency code" in parser_error["message"]

View File

@@ -0,0 +1,124 @@
from __future__ import annotations
from io import BytesIO
import pandas as pd
import pytest
from fastapi.testclient import TestClient
from models import (
MiningOperationType,
Project,
Scenario,
ScenarioStatus,
)
from models.import_export_log import ImportExportLog
@pytest.fixture()
def project_seed(unit_of_work_factory):
with unit_of_work_factory() as uow:
assert uow.projects is not None
project = Project(name="Seed Project", operation_type=MiningOperationType.OPEN_PIT)
uow.projects.create(project)
yield project
def test_project_import_preview_and_commit(client: TestClient, unit_of_work_factory) -> None:
csv_content = (
"name,location,operation_type\n"
"Project Import A,Chile,open pit\n"
"Project Import B,Canada,underground\n"
)
files = {"file": ("projects.csv", csv_content, "text/csv")}
preview_response = client.post("/imports/projects/preview", files=files)
assert preview_response.status_code == 200
preview_payload = preview_response.json()
assert preview_payload["summary"]["accepted"] == 2
assert preview_payload["stage_token"]
token = preview_payload["stage_token"]
commit_response = client.post("/imports/projects/commit", json={"token": token})
assert commit_response.status_code == 200
commit_payload = commit_response.json()
assert commit_payload["summary"]["created"] == 2
with unit_of_work_factory() as uow:
assert uow.projects is not None
names = {project.name for project in uow.projects.list()}
assert {"Project Import A", "Project Import B"}.issubset(names)
# ensure audit logs recorded preview and commit events
assert uow.session is not None
logs = (
uow.session.query(ImportExportLog)
.filter(ImportExportLog.dataset == "projects")
.order_by(ImportExportLog.created_at)
.all()
)
actions = [log.action for log in logs]
assert "preview" in actions
assert "commit" in actions
def test_scenario_import_preview_and_commit(client: TestClient, unit_of_work_factory, project_seed) -> None:
csv_content = (
"project_name,name,status\n"
"Seed Project,Scenario Import A,Draft\n"
"Seed Project,Scenario Import B,Active\n"
)
files = {"file": ("scenarios.csv", csv_content, "text/csv")}
preview_response = client.post("/imports/scenarios/preview", files=files)
assert preview_response.status_code == 200
preview_payload = preview_response.json()
assert preview_payload["summary"]["accepted"] == 2
token = preview_payload["stage_token"]
commit_response = client.post("/imports/scenarios/commit", json={"token": token})
assert commit_response.status_code == 200
commit_payload = commit_response.json()
assert commit_payload["summary"]["created"] == 2
with unit_of_work_factory() as uow:
assert uow.projects is not None and uow.scenarios is not None
project = uow.projects.list()[0]
scenarios = uow.scenarios.list_for_project(project.id)
names = {scenario.name for scenario in scenarios}
assert {"Scenario Import A", "Scenario Import B"}.issubset(names)
assert uow.session is not None
logs = (
uow.session.query(ImportExportLog)
.filter(ImportExportLog.dataset == "scenarios")
.order_by(ImportExportLog.created_at)
.all()
)
actions = [log.action for log in logs]
assert "preview" in actions
assert "commit" in actions
def test_project_export_endpoint(client: TestClient, unit_of_work_factory) -> None:
with unit_of_work_factory() as uow:
assert uow.projects is not None
uow.projects.create(Project(name="Export Project", operation_type=MiningOperationType.OPEN_PIT))
response = client.post("/exports/projects", json={"format": "csv"})
assert response.status_code == 200
assert response.headers["Content-Type"].startswith("text/csv")
assert "attachment; filename=" in response.headers["Content-Disposition"]
body = response.content.decode("utf-8")
assert "Export Project" in body
with unit_of_work_factory() as uow:
assert uow.session is not None
logs = (
uow.session.query(ImportExportLog)
.filter(ImportExportLog.dataset == "projects", ImportExportLog.action == "export")
.order_by(ImportExportLog.created_at.desc())
.first()
)
assert logs is not None
assert logs.status == "success"
assert logs.row_count >= 1
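
The preview/commit pairing implies a two-phase import: preview parses the upload, stages the accepted rows behind an opaque token, and writes a "preview" audit row; commit redeems the token exactly once, persists the rows, and writes "commit". A minimal in-memory sketch of the staging half; the real service may persist staged rows and expire tokens, which this deliberately ignores:

import uuid

_STAGED: dict[str, list[dict]] = {}

def stage_rows(rows: list[dict]) -> str:
    # Hand back an opaque token the commit endpoint can redeem once.
    token = uuid.uuid4().hex
    _STAGED[token] = rows
    return token

def redeem_token(token: str) -> list[dict]:
    try:
        return _STAGED.pop(token)  # pop: a token cannot be committed twice
    except KeyError:
        raise LookupError("Unknown scenario import token") from None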

View File

@@ -1,12 +1,14 @@
from __future__ import annotations
from io import BytesIO
from textwrap import dedent
import pandas as pd
import pytest
from services.importers import ImportResult, load_project_imports, load_scenario_imports
from schemas.imports import ProjectImportRow, ScenarioImportRow
from models.project import MiningOperationType
def test_load_project_imports_from_csv() -> None:
@@ -76,3 +78,84 @@ def test_import_errors_include_row_numbers() -> None:
assert error.row_number == 2
assert error.field == "name"
assert "required" in error.message
def test_project_import_handles_missing_columns() -> None:
csv_content = "name\nProject Only\n"
stream = BytesIO(csv_content.encode("utf-8"))
result = load_project_imports(stream, "projects.csv")
assert result.rows == []
assert len(result.errors) == 1
error = result.errors[0]
assert error.row_number == 2
assert error.field == "operation_type"
def test_project_import_rejects_invalid_operation_type() -> None:
csv_content = "name,operation_type\nProject X,unknown\n"
stream = BytesIO(csv_content.encode("utf-8"))
result = load_project_imports(stream, "projects.csv")
assert len(result.rows) == 0
assert len(result.errors) == 1
error = result.errors[0]
assert error.row_number == 2
assert error.field == "operation_type"
def test_scenario_import_flags_invalid_dates() -> None:
csv_content = dedent(
"""
project_name,name,status,start_date,end_date
Project A,Scenario Reverse,Draft,2025-12-31,2025-01-01
"""
).strip()
stream = BytesIO(csv_content.encode("utf-8"))
result = load_scenario_imports(stream, "scenarios.csv")
assert len(result.rows) == 0
assert len(result.errors) == 1
error = result.errors[0]
assert error.row_number == 2
assert error.field is None
def test_scenario_import_handles_large_dataset() -> None:
buffer = BytesIO()
df = pd.DataFrame(
{
"project_name": ["Project"] * 500,
"name": [f"Scenario {i}" for i in range(500)],
"status": ["draft"] * 500,
}
)
df.to_csv(buffer, index=False)
buffer.seek(0)
result = load_scenario_imports(buffer, "bulk.csv")
assert len(result.rows) == 500
def test_scenario_import_rejects_invalid_currency() -> None:
csv_content = dedent(
"""
project_name,name,currency
Project A,Scenario Invalid,US
"""
).strip()
stream = BytesIO(csv_content.encode("utf-8"))
result = load_scenario_imports(stream, "scenarios.csv")
assert not result.rows
assert result.errors
error = result.errors[0]
assert error.row_number == 2
assert error.field == "currency"
assert "Invalid currency code" in error.message

155
tests/test_pricing.py Normal file
View File

@@ -0,0 +1,155 @@
from __future__ import annotations
import math
import pytest
from services.pricing import (
PricingInput,
PricingMetadata,
PricingResult,
calculate_pricing,
)
def test_calculate_pricing_with_explicit_penalties() -> None:
pricing_input = PricingInput(
metal="copper",
ore_tonnage=100_000,
head_grade_pct=1.2,
recovery_pct=90,
payable_pct=96,
reference_price=8_500,
treatment_charge=100_000,
smelting_charge=0,
moisture_pct=10,
moisture_threshold_pct=8,
moisture_penalty_per_pct=3_000,
impurity_ppm={"As": 100},
impurity_thresholds={"As": 0},
impurity_penalty_per_ppm={"As": 2},
premiums=50_000,
fx_rate=1.0,
currency_code="usd",
)
result = calculate_pricing(pricing_input)
assert isinstance(result, PricingResult)
assert math.isclose(result.payable_metal_tonnes, 1_036.8, rel_tol=1e-6)
assert math.isclose(result.gross_revenue, 1_036.8 * 8_500, rel_tol=1e-6)
assert math.isclose(result.moisture_penalty, 6_000, rel_tol=1e-6)
assert math.isclose(result.impurity_penalty, 200, rel_tol=1e-6)
assert math.isclose(result.net_revenue, 8_756_600, rel_tol=1e-6)
assert result.treatment_smelt_charges == pytest.approx(100_000)
assert result.currency == "USD"
def test_calculate_pricing_with_metadata_defaults() -> None:
metadata = PricingMetadata(
default_payable_pct=95,
default_currency="EUR",
moisture_threshold_pct=7,
moisture_penalty_per_pct=2_000,
impurity_thresholds={"Pb": 50},
impurity_penalty_per_ppm={"Pb": 1.5},
)
pricing_input = PricingInput(
metal="lead",
ore_tonnage=50_000,
head_grade_pct=5,
recovery_pct=85,
payable_pct=None,
reference_price=2_000,
treatment_charge=30_000,
smelting_charge=20_000,
moisture_pct=9,
moisture_threshold_pct=None,
moisture_penalty_per_pct=None,
impurity_ppm={"Pb": 120},
premiums=12_000,
fx_rate=1.2,
currency_code=None,
)
result = calculate_pricing(pricing_input, metadata=metadata)
expected_payable = 50_000 * 0.05 * 0.85 * 0.95
assert math.isclose(result.payable_metal_tonnes,
expected_payable, rel_tol=1e-6)
assert result.moisture_penalty == pytest.approx((9 - 7) * 2_000)
assert result.impurity_penalty == pytest.approx((120 - 50) * 1.5)
assert result.treatment_smelt_charges == pytest.approx(50_000)
assert result.currency == "EUR"
assert result.net_revenue > 0
def test_calculate_pricing_currency_override() -> None:
pricing_input = PricingInput(
metal="gold",
ore_tonnage=10_000,
head_grade_pct=2.5,
recovery_pct=92,
payable_pct=98,
reference_price=60_000,
treatment_charge=40_000,
smelting_charge=10_000,
moisture_pct=5,
moisture_threshold_pct=7,
moisture_penalty_per_pct=1_000,
premiums=25_000,
fx_rate=1.0,
currency_code="cad",
)
metadata = PricingMetadata(default_currency="USD")
result = calculate_pricing(
pricing_input, metadata=metadata, currency="CAD")
assert result.currency == "CAD"
assert result.net_revenue > 0
def test_calculate_pricing_multiple_inputs_aggregate() -> None:
metadata = PricingMetadata(default_currency="USD")
inputs = [
PricingInput(
metal="copper",
ore_tonnage=10_000,
head_grade_pct=1.5,
recovery_pct=88,
payable_pct=95,
reference_price=8_000,
treatment_charge=20_000,
smelting_charge=5_000,
moisture_pct=7,
moisture_threshold_pct=8,
moisture_penalty_per_pct=1_000,
premiums=0,
fx_rate=1.0,
currency_code=None,
),
PricingInput(
metal="copper",
ore_tonnage=8_000,
head_grade_pct=1.1,
recovery_pct=90,
payable_pct=96,
reference_price=8_000,
treatment_charge=18_000,
smelting_charge=4_000,
moisture_pct=9,
moisture_threshold_pct=8,
moisture_penalty_per_pct=1_000,
premiums=0,
fx_rate=1.0,
currency_code="usd",
),
]
results = [calculate_pricing(i, metadata=metadata) for i in inputs]
assert all(result.currency == "USD" for result in results)
assert sum(result.net_revenue for result in results) > 0
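
Working the first case by hand shows the revenue model the suite encodes: payable metal is 100,000 t x 1.2% x 90% x 96% = 1,036.8 t; gross revenue is 1,036.8 x 8,500 = 8,812,800; penalties come to (10 - 8) x 3,000 + (100 - 0) x 2 = 6,200; net revenue is 8,812,800 - 100,000 - 6,200 + 50,000 = 8,756,600, as asserted. A condensed sketch of that arithmetic with the metadata fallbacks the second case relies on (FX handling is unexercised, fx_rate is 1.0 throughout, so it is omitted):

def net_revenue(inp, meta):
    # inp/meta mirror PricingInput/PricingMetadata; percentages arrive as
    # whole numbers, hence the /100 factors.
    payable_pct = inp.payable_pct if inp.payable_pct is not None else meta.default_payable_pct
    payable_t = (inp.ore_tonnage * inp.head_grade_pct / 100
                 * inp.recovery_pct / 100 * payable_pct / 100)
    gross = payable_t * inp.reference_price
    threshold = (inp.moisture_threshold_pct if inp.moisture_threshold_pct is not None
                 else meta.moisture_threshold_pct)
    per_pct = (inp.moisture_penalty_per_pct if inp.moisture_penalty_per_pct is not None
               else meta.moisture_penalty_per_pct)
    moisture = max(0.0, inp.moisture_pct - threshold) * per_pct
    thresholds = getattr(inp, "impurity_thresholds", None) or meta.impurity_thresholds or {}
    penalties = getattr(inp, "impurity_penalty_per_ppm", None) or meta.impurity_penalty_per_ppm or {}
    impurity = sum(
        max(0.0, ppm - thresholds.get(code, 0.0)) * penalties.get(code, 0.0)
        for code, ppm in (inp.impurity_ppm or {}).items()
    )
    return (gross - inp.treatment_charge - inp.smelting_charge
            - moisture - impurity + inp.premiums)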

View File

@@ -0,0 +1,209 @@
from __future__ import annotations
from collections.abc import Iterator
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
from config.database import Base
from models import PricingImpuritySettings, PricingMetalSettings, PricingSettings
from services.pricing import PricingMetadata
from services.repositories import (
PricingSettingsRepository,
ensure_default_pricing_settings,
)
from services.unit_of_work import UnitOfWork
@pytest.fixture()
def engine() -> Iterator:
engine = create_engine("sqlite:///:memory:", future=True)
Base.metadata.create_all(bind=engine)
try:
yield engine
finally:
Base.metadata.drop_all(bind=engine)
@pytest.fixture()
def session(engine) -> Iterator[Session]:
TestingSession = sessionmaker(
bind=engine, expire_on_commit=False, future=True)
db = TestingSession()
try:
yield db
finally:
db.close()
def test_pricing_settings_repository_crud(session: Session) -> None:
repo = PricingSettingsRepository(session)
settings = PricingSettings(
name="Contract A",
slug="Contract-A",
default_currency="usd",
default_payable_pct=95.0,
moisture_threshold_pct=7.5,
moisture_penalty_per_pct=1500.0,
)
repo.create(settings)
metal_override = PricingMetalSettings(
metal_code="Copper",
payable_pct=96.0,
moisture_threshold_pct=None,
moisture_penalty_per_pct=None,
)
repo.attach_metal_override(settings, metal_override)
impurity_override = PricingImpuritySettings(
impurity_code="as",
threshold_ppm=100.0,
penalty_per_ppm=3.5,
)
repo.attach_impurity_override(settings, impurity_override)
retrieved = repo.get_by_slug("CONTRACT-A", include_children=True)
assert retrieved.slug == "contract-a"
assert retrieved.default_currency == "USD"
assert len(retrieved.metal_overrides) == 1
assert retrieved.metal_overrides[0].metal_code == "copper"
assert len(retrieved.impurity_overrides) == 1
assert retrieved.impurity_overrides[0].impurity_code == "AS"
listed = repo.list(include_children=True)
assert len(listed) == 1
assert listed[0].id == settings.id
def test_ensure_default_pricing_settings_creates_and_updates(session: Session) -> None:
repo = PricingSettingsRepository(session)
metadata_initial = PricingMetadata(
default_payable_pct=100.0,
default_currency="USD",
moisture_threshold_pct=8.0,
moisture_penalty_per_pct=0.0,
impurity_thresholds={"As": 50.0},
impurity_penalty_per_ppm={"As": 2.0},
)
result_create = ensure_default_pricing_settings(
repo,
metadata=metadata_initial,
name="Seeded Pricing",
description="Seeded from defaults",
)
assert result_create.created is True
assert result_create.settings.slug == "default"
assert result_create.settings.default_currency == "USD"
assert len(result_create.settings.impurity_overrides) == 1
assert result_create.settings.impurity_overrides[0].penalty_per_ppm == 2.0
metadata_update = PricingMetadata(
default_payable_pct=97.0,
default_currency="EUR",
moisture_threshold_pct=6.5,
moisture_penalty_per_pct=250.0,
impurity_thresholds={"As": 45.0, "Pb": 12.0},
impurity_penalty_per_ppm={"As": 3.0, "Pb": 1.25},
)
result_update = ensure_default_pricing_settings(
repo,
metadata=metadata_update,
name="Seeded Pricing",
description="Seeded from defaults",
)
assert result_update.created is False
assert result_update.updated_fields > 0
assert result_update.impurity_upserts >= 1
updated = repo.get_by_slug("default", include_children=True)
assert updated.default_currency == "EUR"
as_override = {
item.impurity_code: item for item in updated.impurity_overrides}["AS"]
assert float(as_override.threshold_ppm) == 45.0
assert float(as_override.penalty_per_ppm) == 3.0
pb_override = {
item.impurity_code: item for item in updated.impurity_overrides}["PB"]
assert float(pb_override.threshold_ppm) == 12.0
def test_unit_of_work_exposes_pricing_settings(engine) -> None:
TestingSession = sessionmaker(
bind=engine, expire_on_commit=False, future=True)
metadata = PricingMetadata(
default_payable_pct=99.0,
default_currency="USD",
moisture_threshold_pct=7.0,
moisture_penalty_per_pct=125.0,
impurity_thresholds={"Zn": 80.0},
impurity_penalty_per_ppm={"Zn": 0.5},
)
with UnitOfWork(session_factory=TestingSession) as uow:
assert uow.pricing_settings is not None
result = uow.ensure_default_pricing_settings(
metadata=metadata,
slug="contract-core",
name="Contract Core",
)
assert result.settings.slug == "contract-core"
assert result.created is True
with UnitOfWork(session_factory=TestingSession) as uow:
assert uow.pricing_settings is not None
stored = uow.pricing_settings.get_by_slug(
"contract-core", include_children=True)
assert stored.default_payable_pct == 99.0
assert stored.impurity_overrides[0].impurity_code == "ZN"
def test_unit_of_work_get_pricing_metadata_returns_defaults(engine) -> None:
TestingSession = sessionmaker(
bind=engine, expire_on_commit=False, future=True)
seeded_metadata = PricingMetadata(
default_payable_pct=96.5,
default_currency="aud",
moisture_threshold_pct=6.25,
moisture_penalty_per_pct=210.0,
impurity_thresholds={"As": 45.0, "Pb": 15.0},
impurity_penalty_per_ppm={"As": 1.75, "Pb": 0.9},
)
with UnitOfWork(session_factory=TestingSession) as uow:
result = uow.ensure_default_pricing_settings(
metadata=seeded_metadata,
slug="default",
name="Default Contract",
description="Primary contract defaults",
)
assert result.created is True
with UnitOfWork(session_factory=TestingSession) as uow:
retrieved = uow.get_pricing_metadata()
assert retrieved is not None
assert retrieved.default_currency == "AUD"
assert retrieved.default_payable_pct == 96.5
assert retrieved.moisture_threshold_pct == 6.25
assert retrieved.moisture_penalty_per_pct == 210.0
assert retrieved.impurity_thresholds["AS"] == 45.0
assert retrieved.impurity_thresholds["PB"] == 15.0
assert retrieved.impurity_penalty_per_ppm["AS"] == 1.75
assert retrieved.impurity_penalty_per_ppm["PB"] == 0.9
def test_unit_of_work_get_pricing_metadata_returns_none_when_missing(engine) -> None:
TestingSession = sessionmaker(
bind=engine, expire_on_commit=False, future=True)
with UnitOfWork(session_factory=TestingSession) as uow:
missing = uow.get_pricing_metadata(slug="non-existent")
assert missing is None
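
Note the folding conventions these tests lock in, rather than straight echoes of input: slugs and metal codes are stored lower-case, currency and impurity codes upper-case, and get_by_slug("CONTRACT-A") still finds a row created as "Contract-A". One way to keep the lookup case-insensitive regardless of what was stored, sketched in SQLAlchemy 2.0 idioms (the real repository may simply fold the argument, since slugs are normalised on write):

from sqlalchemy import func, select

def get_by_slug(session, slug: str):
    stmt = select(PricingSettings).where(
        func.lower(PricingSettings.slug) == slug.strip().lower()
    )
    return session.execute(stmt).scalar_one_or_none()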

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
from collections.abc import Iterator
from datetime import datetime, timezone
from datetime import date, datetime, timezone
import pytest
from sqlalchemy import create_engine
@@ -13,9 +13,11 @@ from models import (
FinancialCategory,
FinancialInput,
MiningOperationType,
PricingSettings,
Project,
Scenario,
ScenarioStatus,
ResourceType,
SimulationParameter,
StochasticVariable,
)
@@ -25,6 +27,7 @@ from services.repositories import (
ScenarioRepository,
SimulationParameterRepository,
)
from services.export_query import ProjectExportFilters, ScenarioExportFilters
from services.unit_of_work import UnitOfWork
@@ -136,6 +139,7 @@ def test_unit_of_work_commit_and_rollback(engine) -> None:
# Commit path
with UnitOfWork(session_factory=TestingSession) as uow:
assert uow.projects is not None
uow.projects.create(
Project(name="Project Delta", operation_type=MiningOperationType.PLACER)
)
@@ -144,9 +148,34 @@ def test_unit_of_work_commit_and_rollback(engine) -> None:
projects = ProjectRepository(session).list()
assert len(projects) == 1
def test_unit_of_work_set_project_pricing_settings(engine) -> None:
TestingSession = sessionmaker(bind=engine, expire_on_commit=False, future=True)
with UnitOfWork(session_factory=TestingSession) as uow:
assert uow.projects is not None and uow.pricing_settings is not None
project = Project(name="Project Pricing", operation_type=MiningOperationType.OTHER)
uow.projects.create(project)
pricing_settings = PricingSettings(
name="Default Pricing",
slug="default",
default_currency="usd",
default_payable_pct=100.0,
moisture_threshold_pct=8.0,
moisture_penalty_per_pct=0.0,
)
uow.pricing_settings.create(pricing_settings)
uow.set_project_pricing_settings(project, pricing_settings)
with TestingSession() as session:
repo = ProjectRepository(session)
stored = repo.get(1, with_pricing=True)
assert stored.pricing_settings is not None
assert stored.pricing_settings.slug == "default"
# Rollback path
with pytest.raises(RuntimeError):
with UnitOfWork(session_factory=TestingSession) as uow:
assert uow.projects is not None
uow.projects.create(
Project(name="Project Epsilon", operation_type=MiningOperationType.OTHER)
)
@@ -242,3 +271,159 @@ def test_financial_input_repository_latest_created_at(session: Session) -> None:
latest = repo.latest_created_at()
assert latest == new_timestamp
def test_project_repository_filtered_for_export(session: Session) -> None:
repo = ProjectRepository(session)
alpha_created = datetime(2025, 1, 1, 9, 30, tzinfo=timezone.utc)
alpha_updated = datetime(2025, 1, 2, 12, 0, tzinfo=timezone.utc)
bravo_created = datetime(2025, 2, 1, 9, 30, tzinfo=timezone.utc)
bravo_updated = datetime(2025, 2, 2, 12, 0, tzinfo=timezone.utc)
project_alpha = Project(
name="Alpha",
location="Nevada",
operation_type=MiningOperationType.OPEN_PIT,
description="Primary export candidate",
)
project_alpha.created_at = alpha_created
project_alpha.updated_at = alpha_updated
project_bravo = Project(
name="Bravo",
location="Ontario",
operation_type=MiningOperationType.UNDERGROUND,
description="Excluded project",
)
project_bravo.created_at = bravo_created
project_bravo.updated_at = bravo_updated
scenario_alpha = Scenario(
name="Alpha Scenario",
project=project_alpha,
status=ScenarioStatus.ACTIVE,
)
session.add_all([project_alpha, project_bravo, scenario_alpha])
session.flush()
filters = ProjectExportFilters(
ids=(project_alpha.id, project_alpha.id, -5),
names=("Alpha", " alpha ", ""),
name_contains="alp",
locations=(" nevada ", ""),
operation_types=(MiningOperationType.OPEN_PIT,),
created_from=alpha_created,
created_to=alpha_created,
updated_from=alpha_updated,
updated_to=alpha_updated,
)
results = repo.filtered_for_export(filters, include_scenarios=True)
assert [project.name for project in results] == ["Alpha"]
assert len(results[0].scenarios) == 1
assert results[0].scenarios[0].name == "Alpha Scenario"
def test_project_repository_with_pricing_settings(session: Session) -> None:
repo = ProjectRepository(session)
settings = PricingSettings(
name="Contract Core",
slug="contract-core",
default_currency="usd",
default_payable_pct=95.0,
moisture_threshold_pct=7.5,
moisture_penalty_per_pct=100.0,
)
project = Project(
name="Project Pricing",
operation_type=MiningOperationType.OPEN_PIT,
pricing_settings=settings,
)
session.add(project)
session.flush()
fetched = repo.get(project.id, with_pricing=True)
assert fetched.pricing_settings is not None
assert fetched.pricing_settings.slug == "contract-core"
assert fetched.pricing_settings.default_currency == "USD"
listed = repo.list(with_pricing=True)
assert listed[0].pricing_settings is not None
repo.set_pricing_settings(project, None)
session.refresh(project)
assert project.pricing_settings is None
repo.set_pricing_settings(project, settings)
session.refresh(project)
assert project.pricing_settings is settings
export_results = repo.filtered_for_export(None, include_pricing=True)
assert export_results[0].pricing_settings is not None
def test_scenario_repository_filtered_for_export(session: Session) -> None:
repo = ScenarioRepository(session)
project_export = Project(
name="Export Project",
operation_type=MiningOperationType.PLACER,
)
project_other = Project(
name="Other Project",
operation_type=MiningOperationType.OTHER,
)
scenario_match = Scenario(
name="Case Alpha",
project=project_export,
status=ScenarioStatus.ACTIVE,
start_date=date(2025, 1, 5),
end_date=date(2025, 2, 1),
discount_rate=7.5,
currency="usd",
primary_resource=ResourceType.EXPLOSIVES,
)
scenario_match.created_at = datetime(2025, 1, 6, tzinfo=timezone.utc)
scenario_match.updated_at = datetime(2025, 1, 16, tzinfo=timezone.utc)
scenario_other = Scenario(
name="Case Beta",
project=project_other,
status=ScenarioStatus.DRAFT,
start_date=date(2024, 12, 20),
end_date=date(2025, 3, 1),
currency="cad",
primary_resource=ResourceType.WATER,
)
scenario_other.created_at = datetime(2024, 12, 25, tzinfo=timezone.utc)
scenario_other.updated_at = datetime(2025, 3, 5, tzinfo=timezone.utc)
session.add_all([project_export, project_other, scenario_match, scenario_other])
session.flush()
filters = ScenarioExportFilters(
ids=(scenario_match.id, scenario_match.id, -1),
project_ids=(project_export.id, 0),
project_names=(" Export Project ", "EXPORT PROJECT"),
name_contains="case",
statuses=(ScenarioStatus.ACTIVE,),
start_date_from=date(2025, 1, 1),
start_date_to=date(2025, 1, 31),
end_date_from=date(2025, 1, 31),
end_date_to=date(2025, 2, 28),
created_from=datetime(2025, 1, 1, tzinfo=timezone.utc),
created_to=datetime(2025, 1, 31, tzinfo=timezone.utc),
updated_from=datetime(2025, 1, 10, tzinfo=timezone.utc),
updated_to=datetime(2025, 1, 31, tzinfo=timezone.utc),
currencies=(" usd ", "USD"),
primary_resources=(ResourceType.EXPLOSIVES,),
)
results = repo.filtered_for_export(filters, include_project=True)
assert [scenario.name for scenario in results] == ["Case Alpha"]
assert results[0].project.name == "Export Project"

View File

@@ -0,0 +1,120 @@
from __future__ import annotations
import pytest
from models import MiningOperationType, Project, Scenario, ScenarioStatus
from services.pricing import PricingInput, PricingMetadata
from services.scenario_evaluation import ScenarioPricingConfig, ScenarioPricingEvaluator
def build_scenario() -> Scenario:
project = Project(name="Test Project",
operation_type=MiningOperationType.OPEN_PIT)
scenario = Scenario(
project=project,
project_id=1,
name="Scenario A",
status=ScenarioStatus.ACTIVE,
currency="USD",
)
scenario.id = 1 # simulate persisted entity
return scenario
def test_scenario_pricing_evaluator_uses_metadata_defaults() -> None:
scenario = build_scenario()
evaluator = ScenarioPricingEvaluator(
ScenarioPricingConfig(
metadata=PricingMetadata(
default_currency="USD", default_payable_pct=95)
)
)
inputs = [
PricingInput(
metal="copper",
ore_tonnage=50_000,
head_grade_pct=1.0,
recovery_pct=90,
payable_pct=None,
reference_price=9_000,
treatment_charge=50_000,
smelting_charge=10_000,
moisture_pct=9,
moisture_threshold_pct=None,
moisture_penalty_per_pct=None,
impurity_ppm={"As": 120},
premiums=10_000,
fx_rate=1.0,
currency_code=None,
)
]
snapshot = evaluator.evaluate(scenario, inputs=inputs)
assert snapshot.scenario_id == scenario.id
assert len(snapshot.results) == 1
result = snapshot.results[0]
assert result.currency == "USD"
assert result.net_revenue > 0
def test_scenario_pricing_evaluator_override_metadata() -> None:
scenario = build_scenario()
evaluator = ScenarioPricingEvaluator(ScenarioPricingConfig())
metadata_override = PricingMetadata(
default_currency="CAD",
default_payable_pct=90,
moisture_threshold_pct=5,
moisture_penalty_per_pct=500,
)
inputs = [
PricingInput(
metal="copper",
ore_tonnage=20_000,
head_grade_pct=1.2,
recovery_pct=88,
payable_pct=None,
reference_price=8_200,
treatment_charge=15_000,
smelting_charge=6_000,
moisture_pct=6,
moisture_threshold_pct=None,
moisture_penalty_per_pct=None,
premiums=5_000,
fx_rate=1.0,
currency_code="cad",
),
PricingInput(
metal="gold",
ore_tonnage=5_000,
head_grade_pct=2.0,
recovery_pct=90,
payable_pct=None,
reference_price=60_000,
treatment_charge=10_000,
smelting_charge=5_000,
moisture_pct=4,
moisture_threshold_pct=None,
moisture_penalty_per_pct=None,
premiums=15_000,
fx_rate=1.0,
currency_code="cad",
),
]
snapshot = evaluator.evaluate(
scenario,
inputs=inputs,
metadata_override=metadata_override,
)
assert len(snapshot.results) == 2
assert all(result.currency ==
scenario.currency for result in snapshot.results)
copper_result = snapshot.results[0]
expected_payable = 20_000 * 0.012 * 0.88 * 0.90
assert copper_result.payable_metal_tonnes == pytest.approx(
expected_payable)
assert sum(result.net_revenue for result in snapshot.results) > 0
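
One subtlety worth calling out: in the override test the inputs carry currency_code="cad" and the override metadata defaults to CAD, yet every result reports the scenario's own currency (USD). A set scenario.currency therefore trumps both, which fits the explicit currency argument to calculate_pricing seen in the pricing tests. An evaluate() consistent with that behaviour, sketched (ScenarioPricingSnapshot's shape and the config attribute are inferred from the attribute accesses, not from the real module):

def evaluate(self, scenario, inputs, metadata_override=None):
    metadata = metadata_override or self.config.metadata
    results = [
        calculate_pricing(item, metadata=metadata, currency=scenario.currency)
        for item in inputs
    ]
    return ScenarioPricingSnapshot(scenario_id=scenario.id, results=results)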

View File

@@ -278,3 +278,70 @@ class TestScenarioComparisonEndpoint:
detail = response.json()["detail"]
assert detail["code"] == "SCENARIO_PROJECT_MISMATCH"
assert project_a_id != project_b_id
class TestScenarioApiCurrencyValidation:
def test_create_api_rejects_invalid_currency(
self,
api_client: TestClient,
session_factory: sessionmaker,
) -> None:
with UnitOfWork(session_factory=session_factory) as uow:
assert uow.projects is not None
assert uow.scenarios is not None
project = Project(
name="Currency Validation Project",
operation_type=MiningOperationType.OPEN_PIT,
)
uow.projects.create(project)
project_id = project.id
response = api_client.post(
f"/projects/{project_id}/scenarios",
json={
"name": "Invalid Currency Scenario",
"currency": "US",
},
)
assert response.status_code == 422
detail = response.json().get("detail", [])
assert any(
"Invalid currency code" in item.get("msg", "") for item in detail
), detail
with UnitOfWork(session_factory=session_factory) as uow:
assert uow.scenarios is not None
scenarios = uow.scenarios.list_for_project(project_id)
assert scenarios == []
def test_create_api_normalises_currency(
self,
api_client: TestClient,
session_factory: sessionmaker,
) -> None:
with UnitOfWork(session_factory=session_factory) as uow:
assert uow.projects is not None
assert uow.scenarios is not None
project = Project(
name="Currency Normalisation Project",
operation_type=MiningOperationType.OPEN_PIT,
)
uow.projects.create(project)
project_id = project.id
response = api_client.post(
f"/projects/{project_id}/scenarios",
json={
"name": "Normalised Currency Scenario",
"currency": "cad",
},
)
assert response.status_code == 201
with UnitOfWork(session_factory=session_factory) as uow:
assert uow.scenarios is not None
scenarios = uow.scenarios.list_for_project(project_id)
assert len(scenarios) == 1
assert scenarios[0].currency == "CAD"

158
tests/test_simulation.py Normal file
View File

@@ -0,0 +1,158 @@
from __future__ import annotations
import math
import numpy as np
import pytest
from services.financial import CashFlow, net_present_value
from services.simulation import (
CashFlowSpec,
DistributionConfigError,
DistributionSource,
DistributionSpec,
DistributionType,
SimulationConfig,
SimulationMetric,
run_monte_carlo,
)
def test_run_monte_carlo_deterministic_matches_financial_helpers() -> None:
base_flows = [
CashFlow(amount=-1000.0, period_index=0),
CashFlow(amount=600.0, period_index=1),
CashFlow(amount=600.0, period_index=2),
]
specs = [CashFlowSpec(cash_flow=flow) for flow in base_flows]
config = SimulationConfig(
iterations=10,
discount_rate=0.1,
percentiles=(50,),
seed=123,
)
result = run_monte_carlo(specs, config)
summary = result.summaries[SimulationMetric.NPV]
expected = net_present_value(0.1, base_flows)
assert summary.sample_size == config.iterations
assert summary.failed_runs == 0
assert summary.mean == pytest.approx(expected, rel=1e-6)
assert summary.std_dev == 0.0
assert summary.percentiles[50] == pytest.approx(expected, rel=1e-6)
def test_run_monte_carlo_normal_distribution_uses_seed_for_reproducibility() -> None:
base_flows = [
CashFlow(amount=-100.0, period_index=0),
CashFlow(amount=0.0, period_index=1),
CashFlow(amount=0.0, period_index=2),
]
revenue_flow = CashFlowSpec(
cash_flow=CashFlow(amount=120.0, period_index=1),
distribution=DistributionSpec(
type=DistributionType.NORMAL,
parameters={"mean": 120.0, "std_dev": 10.0},
),
)
specs = [CashFlowSpec(cash_flow=base_flows[0]), revenue_flow]
config = SimulationConfig(
iterations=1000,
discount_rate=0.0,
percentiles=(5.0, 50.0, 95.0),
seed=42,
)
result = run_monte_carlo(specs, config)
summary = result.summaries[SimulationMetric.NPV]
assert summary.sample_size == config.iterations
assert summary.failed_runs == 0
# With zero discount rate the expected mean NPV equals mean sampled value minus investment.
assert summary.mean == pytest.approx(20.0, abs=1.0)
assert summary.std_dev == pytest.approx(10.0, abs=1.0)
assert summary.percentiles[50.0] == pytest.approx(summary.mean, abs=1.0)
def test_run_monte_carlo_supports_scenario_field_source() -> None:
base_flow = CashFlow(amount=0.0, period_index=1)
spec = CashFlowSpec(
cash_flow=base_flow,
distribution=DistributionSpec(
type=DistributionType.NORMAL,
parameters={"std_dev": 0.0},
source=DistributionSource.SCENARIO_FIELD,
source_key="salvage_mean",
),
)
config = SimulationConfig(iterations=1, discount_rate=0.0, seed=7)
result = run_monte_carlo(
[CashFlowSpec(cash_flow=CashFlow(
amount=-100.0, period_index=0)), spec],
config,
scenario_context={"salvage_mean": 150.0},
)
summary = result.summaries[SimulationMetric.NPV]
assert summary.sample_size == 1
assert summary.mean == pytest.approx(50.0)
def test_run_monte_carlo_records_failed_metrics_when_not_defined() -> None:
base_flows = [CashFlow(amount=100.0, period_index=0)]
specs = [CashFlowSpec(cash_flow=flow) for flow in base_flows]
config = SimulationConfig(
iterations=5,
discount_rate=0.1,
metrics=(SimulationMetric.IRR,),
seed=5,
)
result = run_monte_carlo(specs, config)
summary = result.summaries[SimulationMetric.IRR]
assert summary.sample_size == 0
assert summary.failed_runs == config.iterations
assert math.isnan(summary.mean)
def test_run_monte_carlo_distribution_missing_context_raises() -> None:
spec = DistributionSpec(
type=DistributionType.NORMAL,
parameters={"std_dev": 1.0},
source=DistributionSource.SCENARIO_FIELD,
source_key="unknown",
)
cash_flow_spec = CashFlowSpec(
cash_flow=CashFlow(amount=0.0, period_index=0),
distribution=spec,
)
config = SimulationConfig(iterations=1, discount_rate=0.0)
with pytest.raises(DistributionConfigError):
run_monte_carlo([cash_flow_spec], config, scenario_context={})
def test_run_monte_carlo_can_return_samples() -> None:
base_flow = CashFlow(amount=50.0, period_index=1)
specs = [
CashFlowSpec(cash_flow=CashFlow(amount=-40.0, period_index=0)),
CashFlowSpec(cash_flow=base_flow),
]
config = SimulationConfig(
iterations=3,
discount_rate=0.0,
metrics=(SimulationMetric.NPV,),
return_samples=True,
seed=11,
)
result = run_monte_carlo(specs, config)
assert result.samples is not None
assert SimulationMetric.NPV in result.samples
samples = result.samples[SimulationMetric.NPV]
assert isinstance(samples, np.ndarray)
assert samples.shape == (config.iterations,)
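
Read as a whole, the suite fixes the loop's shape: one numpy Generator seeded from the config drives every draw (hence reproducible percentiles), each iteration resamples the distributed flows and reruns the financial helpers, a metric that cannot be computed for a draw increments failed_runs instead of raising, scenario-sourced parameters resolve from scenario_context with DistributionConfigError on a missing key, and raw samples are kept only when return_samples is set. A stripped-down sketch of the NPV path under those assumptions (NORMAL is the only distribution shown, for brevity):

import numpy as np

def sample_amount(spec, rng, context):
    dist = spec.distribution
    if dist is None:
        return spec.cash_flow.amount  # deterministic flow
    params = dict(dist.parameters)
    if dist.source == DistributionSource.SCENARIO_FIELD:
        if not context or dist.source_key not in context:
            raise DistributionConfigError(f"missing scenario field {dist.source_key!r}")
        params["mean"] = context[dist.source_key]
    return rng.normal(params["mean"], params["std_dev"])

def npv_samples(specs, config, context=None):
    rng = np.random.default_rng(config.seed)
    values, failed = [], 0
    for _ in range(config.iterations):
        flows = [
            CashFlow(amount=sample_amount(spec, rng, context),
                     period_index=spec.cash_flow.period_index)
            for spec in specs
        ]
        try:
            values.append(net_present_value(config.discount_rate, flows))
        except ValueError:
            failed += 1  # undefined for this draw; IRR hits this, NPV never does
    return np.asarray(values), failed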