feat: Enhance currency handling and validation across scenarios
- Updated form template to prefill currency input with default value and added help text for clarity.
- Modified integration tests to assert more descriptive error messages for invalid currency codes.
- Introduced new tests for currency normalization and validation in various scenarios, including imports and exports.
- Added comprehensive tests for pricing calculations, ensuring defaults are respected and overrides function correctly.
- Implemented unit tests for pricing settings repository, ensuring CRUD operations and default settings are handled properly.
- Enhanced scenario pricing evaluation tests to validate currency handling and metadata defaults.
- Added simulation tests to ensure Monte Carlo runs are accurate and handle various distribution scenarios.
@@ -1 +1,10 @@
 """Service layer utilities."""
+
+from .pricing import calculate_pricing, PricingInput, PricingMetadata, PricingResult
+
+__all__ = [
+    "calculate_pricing",
+    "PricingInput",
+    "PricingMetadata",
+    "PricingResult",
+]
@@ -6,7 +6,11 @@ from typing import Callable

 from config.settings import AdminBootstrapSettings
 from models import User
-from services.repositories import ensure_default_roles
+from services.pricing import PricingMetadata
+from services.repositories import (
+    PricingSettingsSeedResult,
+    ensure_default_roles,
+)
 from services.unit_of_work import UnitOfWork


@@ -27,6 +31,12 @@ class AdminBootstrapResult:
     roles_granted: int


+@dataclass(slots=True)
+class PricingBootstrapResult:
+    seed: PricingSettingsSeedResult
+    projects_assigned: int
+
+
 def bootstrap_admin(
     *,
     settings: AdminBootstrapSettings,
@@ -127,3 +137,37 @@ def _bootstrap_admin_user(
         password_rotated=password_rotated,
         roles_granted=roles_granted,
     )
+
+
+def bootstrap_pricing_settings(
+    *,
+    metadata: PricingMetadata,
+    unit_of_work_factory: Callable[[], UnitOfWork] = UnitOfWork,
+    default_slug: str = "default",
+) -> PricingBootstrapResult:
+    """Ensure baseline pricing settings exist and projects reference them."""
+
+    with unit_of_work_factory() as uow:
+        seed_result = uow.ensure_default_pricing_settings(
+            metadata=metadata,
+            slug=default_slug,
+        )
+
+        assigned = 0
+        if uow.projects:
+            default_settings = seed_result.settings
+            projects = uow.projects.list(with_pricing=True)
+            for project in projects:
+                if project.pricing_settings is None:
+                    uow.set_project_pricing_settings(project, default_settings)
+                    assigned += 1
+
+        logger.info(
+            "Pricing bootstrap result: slug=%s created=%s updated_fields=%s impurity_upserts=%s projects_assigned=%s",
+            seed_result.settings.slug,
+            seed_result.created,
+            seed_result.updated_fields,
+            seed_result.impurity_upserts,
+            assigned,
+        )
+        return PricingBootstrapResult(seed=seed_result, projects_assigned=assigned)
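A minimal sketch of how the new hook could be invoked at start-up. The module path `services.bootstrap` and the metadata values below are illustrative assumptions; neither is named in this diff.

# Hypothetical call site: the import path and the settings values are
# assumptions for illustration, not part of this commit.
from services.bootstrap import bootstrap_pricing_settings
from services.pricing import PricingMetadata

metadata = PricingMetadata(
    default_currency="USD",
    default_payable_pct=96.5,
    moisture_threshold_pct=8.0,
    moisture_penalty_per_pct=1.25,
)

# Seeds the "default" pricing settings row and points unassigned projects at it.
result = bootstrap_pricing_settings(metadata=metadata, default_slug="default")
print(result.seed.created, result.projects_assigned)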
services/currency.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from __future__ import annotations

"""Utilities for currency normalization within pricing and financial workflows."""

import re
from dataclasses import dataclass

VALID_CURRENCY_PATTERN = re.compile(r"^[A-Z]{3}$")


@dataclass(frozen=True)
class CurrencyValidationError(ValueError):
    """Raised when a currency code fails validation."""

    code: str

    def __str__(self) -> str:  # pragma: no cover - dataclass repr not required in tests
        return f"Invalid currency code: {self.code!r}"


def normalise_currency(code: str | None) -> str | None:
    """Normalise currency codes to uppercase ISO-4217 values."""

    if code is None:
        return None
    candidate = code.strip().upper()
    if not VALID_CURRENCY_PATTERN.match(candidate):
        raise CurrencyValidationError(candidate)
    return candidate


def require_currency(code: str | None, default: str | None = None) -> str:
    """Return normalised currency code, falling back to default when missing."""

    normalised = normalise_currency(code)
    if normalised is not None:
        return normalised
    if default is None:
        raise CurrencyValidationError("<missing currency>")
    fallback = normalise_currency(default)
    if fallback is None:
        raise CurrencyValidationError("<invalid default currency>")
    return fallback
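A quick usage sketch of the helpers above; everything shown follows directly from the module as added.

from services.currency import (
    CurrencyValidationError,
    normalise_currency,
    require_currency,
)

# Whitespace and case are normalised to the three-letter ISO-4217 form.
assert normalise_currency(" usd ") == "USD"
# None passes through untouched so optional fields stay optional.
assert normalise_currency(None) is None
# require_currency falls back to the default when the value is missing.
assert require_currency(None, default="eur") == "EUR"

# Anything that is not three ASCII letters raises the descriptive error.
try:
    normalise_currency("us$")
except CurrencyValidationError as exc:
    assert str(exc) == "Invalid currency code: 'US$'"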
@@ -5,6 +5,7 @@ from datetime import date, datetime
|
||||
from typing import Iterable
|
||||
|
||||
from models import MiningOperationType, ResourceType, ScenarioStatus
|
||||
from services.currency import CurrencyValidationError, normalise_currency
|
||||
|
||||
|
||||
def _normalise_lower_strings(values: Iterable[str]) -> tuple[str, ...]:
|
||||
@@ -19,15 +20,22 @@ def _normalise_lower_strings(values: Iterable[str]) -> tuple[str, ...]:
|
||||
return tuple(sorted(unique))
|
||||
|
||||
|
||||
def _normalise_upper_strings(values: Iterable[str]) -> tuple[str, ...]:
|
||||
def _normalise_upper_strings(values: Iterable[str | None]) -> tuple[str, ...]:
|
||||
unique: set[str] = set()
|
||||
for value in values:
|
||||
if not value:
|
||||
if value is None:
|
||||
continue
|
||||
trimmed = value.strip().upper()
|
||||
if not trimmed:
|
||||
candidate = value if isinstance(value, str) else str(value)
|
||||
candidate = candidate.strip()
|
||||
if not candidate:
|
||||
continue
|
||||
unique.add(trimmed)
|
||||
try:
|
||||
normalised = normalise_currency(candidate)
|
||||
except CurrencyValidationError as exc:
|
||||
raise ValueError(str(exc)) from exc
|
||||
if normalised is None:
|
||||
continue
|
||||
unique.add(normalised)
|
||||
return tuple(sorted(unique))
|
||||
|
||||
|
||||
|
||||
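For illustration, the reworked helper now accepts optional values and rejects malformed codes with the descriptive message from `CurrencyValidationError`. A small sketch follows; the module containing `_normalise_upper_strings` is not named in this hunk, so the import path is an assumption.

# Assumed import path for the private helper shown in the hunk above.
from services.scenario_filters import _normalise_upper_strings

# Mixed case and padding are normalised; None and empty strings are skipped.
assert _normalise_upper_strings(["usd", " EUR ", None, ""]) == ("EUR", "USD")

# Invalid codes surface as a plain ValueError carrying the descriptive message.
try:
    _normalise_upper_strings(["dollars"])
except ValueError as exc:
    assert "Invalid currency code" in str(exc)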
services/financial.py (new file, 248 lines)
@@ -0,0 +1,248 @@
from __future__ import annotations

"""Financial calculation helpers for project evaluation metrics."""

from dataclasses import dataclass
from datetime import date, datetime
from math import isclose, isfinite
from typing import Iterable, List, Sequence, Tuple

Number = float


@dataclass(frozen=True, slots=True)
class CashFlow:
    """Represents a dated cash flow in scenario currency."""

    amount: Number
    period_index: int | None = None
    date: date | datetime | None = None


class ConvergenceError(RuntimeError):
    """Raised when an iterative solver fails to converge."""


class PaybackNotReachedError(RuntimeError):
    """Raised when cumulative cash flows never reach a non-negative total."""


def _coerce_date(value: date | datetime) -> date:
    if isinstance(value, datetime):
        return value.date()
    return value


def normalize_cash_flows(
    cash_flows: Iterable[CashFlow],
    *,
    compounds_per_year: int = 1,
) -> List[Tuple[Number, float]]:
    """Normalise cash flows to ``(amount, periods)`` tuples.

    When explicit ``period_index`` values are provided they take precedence. If
    only dates are supplied, the first dated cash flow anchors the timeline and
    subsequent cash flows convert their day offsets into fractional periods
    based on ``compounds_per_year``. When neither a period index nor a date is
    present, cash flows are treated as sequential periods in input order.
    """

    flows: Sequence[CashFlow] = list(cash_flows)
    if not flows:
        return []

    if compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be a positive integer")

    base_date: date | None = None
    for flow in flows:
        if flow.date is not None:
            base_date = _coerce_date(flow.date)
            break

    normalised: List[Tuple[Number, float]] = []
    for idx, flow in enumerate(flows):
        amount = float(flow.amount)
        if flow.period_index is not None:
            periods = float(flow.period_index)
        elif flow.date is not None and base_date is not None:
            current_date = _coerce_date(flow.date)
            delta_days = (current_date - base_date).days
            period_length_days = 365.0 / float(compounds_per_year)
            periods = delta_days / period_length_days
        else:
            periods = float(idx)
        normalised.append((amount, periods))

    return normalised


def discount_factor(rate: Number, periods: float, *, compounds_per_year: int = 1) -> float:
    """Return the factor used to discount a value ``periods`` steps in the future."""

    if compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be a positive integer")

    periodic_rate = rate / float(compounds_per_year)
    return (1.0 + periodic_rate) ** (-periods)


def net_present_value(
    rate: Number,
    cash_flows: Iterable[CashFlow],
    *,
    residual_value: Number | None = None,
    residual_periods: float | None = None,
    compounds_per_year: int = 1,
) -> float:
    """Calculate Net Present Value for ``cash_flows``.

    ``rate`` is a decimal (``0.1`` for 10%). Cash flows are discounted using the
    given compounding frequency. When ``residual_value`` is provided it is
    discounted at ``residual_periods`` periods; by default the value occurs one
    period after the final cash flow.
    """

    normalised = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )

    if not normalised and residual_value is None:
        return 0.0

    total = 0.0
    for amount, periods in normalised:
        factor = discount_factor(
            rate, periods, compounds_per_year=compounds_per_year)
        total += amount * factor

    if residual_value is not None:
        if residual_periods is None:
            last_period = normalised[-1][1] if normalised else 0.0
            residual_periods = last_period + 1.0
        factor = discount_factor(
            rate, residual_periods, compounds_per_year=compounds_per_year)
        total += float(residual_value) * factor

    return total
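A worked example of the NPV helper with annual compounding; the cash-flow numbers are made up for illustration.

from services.financial import CashFlow, net_present_value

# Year 0 outlay followed by three equal annual inflows.
flows = [
    CashFlow(amount=-1_000.0, period_index=0),
    CashFlow(amount=400.0, period_index=1),
    CashFlow(amount=400.0, period_index=2),
    CashFlow(amount=400.0, period_index=3),
]

npv = net_present_value(0.1, flows)
# -1000 + 400/1.1 + 400/1.1**2 + 400/1.1**3 ≈ -5.26
assert round(npv, 2) == -5.26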

def internal_rate_of_return(
    cash_flows: Iterable[CashFlow],
    *,
    guess: Number = 0.1,
    max_iterations: int = 100,
    tolerance: float = 1e-6,
    compounds_per_year: int = 1,
) -> float:
    """Return the internal rate of return for ``cash_flows``.

    Uses Newton-Raphson iteration with a bracketed fallback when the derivative
    becomes unstable. Raises :class:`ConvergenceError` if no root is found.
    """

    flows = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )
    if not flows:
        raise ValueError("cash_flows must contain at least one item")

    amounts = [amount for amount, _ in flows]
    if not any(amount < 0 for amount in amounts) or not any(amount > 0 for amount in amounts):
        raise ValueError("cash_flows must include both negative and positive values")

    def _npv_with_flows(rate: float) -> float:
        periodic_rate = rate / float(compounds_per_year)
        if periodic_rate <= -1.0:
            return float("inf")
        total = 0.0
        for amount, periods in flows:
            factor = (1.0 + periodic_rate) ** (-periods)
            total += amount * factor
        return total

    def _derivative(rate: float) -> float:
        periodic_rate = rate / float(compounds_per_year)
        if periodic_rate <= -1.0:
            return float("inf")
        derivative = 0.0
        for amount, periods in flows:
            factor = (1.0 + periodic_rate) ** (-periods - 1.0)
            derivative += -amount * periods * factor / float(compounds_per_year)
        return derivative

    rate = float(guess)
    for _ in range(max_iterations):
        value = _npv_with_flows(rate)
        if isclose(value, 0.0, abs_tol=tolerance):
            return rate
        derivative = _derivative(rate)
        if derivative == 0.0 or not isfinite(derivative):
            break
        next_rate = rate - value / derivative
        if abs(next_rate - rate) < tolerance:
            return next_rate
        rate = next_rate

    # Fallback to bracketed bisection between sensible bounds.
    lower_bound = -0.99 * float(compounds_per_year)
    upper_bound = 10.0
    lower_value = _npv_with_flows(lower_bound)
    upper_value = _npv_with_flows(upper_bound)

    attempts = 0
    while lower_value * upper_value > 0 and attempts < 12:
        upper_bound *= 2.0
        upper_value = _npv_with_flows(upper_bound)
        attempts += 1

    if lower_value * upper_value > 0:
        raise ConvergenceError("IRR could not be bracketed within default bounds")

    for _ in range(max_iterations * 2):
        midpoint = (lower_bound + upper_bound) / 2.0
        mid_value = _npv_with_flows(midpoint)
        if isclose(mid_value, 0.0, abs_tol=tolerance):
            return midpoint
        if lower_value * mid_value < 0:
            upper_bound = midpoint
            upper_value = mid_value
        else:
            lower_bound = midpoint
            lower_value = mid_value
    raise ConvergenceError("IRR solver failed to converge")


def payback_period(
    cash_flows: Iterable[CashFlow],
    *,
    allow_fractional: bool = True,
    compounds_per_year: int = 1,
) -> float:
    """Return the period index where cumulative cash flow becomes non-negative."""

    flows = normalize_cash_flows(
        cash_flows,
        compounds_per_year=compounds_per_year,
    )
    if not flows:
        raise ValueError("cash_flows must contain at least one item")

    flows = sorted(flows, key=lambda item: item[1])
    cumulative = 0.0
    previous_period = flows[0][1]

    for index, (amount, periods) in enumerate(flows):
        next_cumulative = cumulative + amount
        if next_cumulative >= 0.0:
            if not allow_fractional or isclose(amount, 0.0):
                return periods
            prev_period = previous_period if index > 0 else periods
            fraction = -cumulative / amount
            return prev_period + fraction * (periods - prev_period)
        cumulative = next_cumulative
        previous_period = periods

    raise PaybackNotReachedError("Cumulative cash flow never becomes non-negative")
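A companion sketch for the iterative solvers, again with illustrative figures.

from services.financial import (
    CashFlow,
    PaybackNotReachedError,
    internal_rate_of_return,
    payback_period,
)

flows = [
    CashFlow(amount=-1_000.0, period_index=0),
    CashFlow(amount=600.0, period_index=1),
    CashFlow(amount=600.0, period_index=2),
]

# Newton-Raphson converges quickly for a well-behaved profile:
# -1000 + 600/(1+r) + 600/(1+r)**2 = 0  =>  r ≈ 13.07 %
irr = internal_rate_of_return(flows)
assert abs(irr - 0.1307) < 1e-3

# Cumulative cash flow crosses zero two thirds of the way through period 2.
assert abs(payback_period(flows) - 1.6667) < 1e-3

# A profile that never recovers its outlay raises instead of returning a value.
try:
    payback_period([CashFlow(amount=-100.0, period_index=0)])
except PaybackNotReachedError:
    pass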
services/pricing.py (new file, 176 lines)
@@ -0,0 +1,176 @@
from __future__ import annotations

"""Pricing service implementing commodity revenue calculations.

This module exposes data models and helpers for computing product pricing
according to the formulas outlined in
``calminer-docs/specifications/price_calculation.md``. It focuses on the core
calculation steps (payable metal, penalties, net revenue) and is intended to be
composed within broader scenario evaluation workflows.
"""

from dataclasses import dataclass, field
from typing import Mapping

from pydantic import BaseModel, Field, PositiveFloat, field_validator
from services.currency import require_currency


class PricingInput(BaseModel):
    """Normalized inputs for pricing calculations."""

    metal: str = Field(..., min_length=1)
    ore_tonnage: PositiveFloat = Field(
        ..., description="Total ore mass processed (metric tonnes)")
    head_grade_pct: PositiveFloat = Field(..., gt=0,
                                          le=100, description="Head grade as percent")
    recovery_pct: PositiveFloat = Field(..., gt=0,
                                        le=100, description="Recovery rate percent")
    payable_pct: float | None = Field(
        None, gt=0, le=100, description="Contractual payable percentage")
    reference_price: PositiveFloat = Field(
        ..., description="Reference price in base currency per unit")
    treatment_charge: float = Field(0, ge=0)
    smelting_charge: float = Field(0, ge=0)
    moisture_pct: float = Field(0, ge=0, le=100)
    moisture_threshold_pct: float | None = Field(None, ge=0, le=100)
    moisture_penalty_per_pct: float | None = Field(None)
    impurity_ppm: Mapping[str, float] = Field(default_factory=dict)
    impurity_thresholds: Mapping[str, float] = Field(default_factory=dict)
    impurity_penalty_per_ppm: Mapping[str, float] = Field(default_factory=dict)
    premiums: float = Field(0)
    fx_rate: PositiveFloat = Field(
        1, description="Multiplier to convert to scenario currency")
    currency_code: str | None = Field(
        None, description="Optional explicit currency override")

    @field_validator("impurity_ppm", mode="before")
    @classmethod
    def _validate_impurity_mapping(cls, value):
        if isinstance(value, Mapping):
            return {k: float(v) for k, v in value.items()}
        return value


class PricingResult(BaseModel):
    """Structured output summarising pricing computation results."""

    metal: str
    ore_tonnage: float
    head_grade_pct: float
    recovery_pct: float
    payable_metal_tonnes: float
    reference_price: float
    gross_revenue: float
    moisture_penalty: float
    impurity_penalty: float
    treatment_smelt_charges: float
    premiums: float
    net_revenue: float
    currency: str | None


@dataclass(frozen=True)
class PricingMetadata:
    """Metadata defaults applied when explicit inputs are omitted."""

    default_payable_pct: float = 100.0
    default_currency: str | None = "USD"
    moisture_threshold_pct: float = 8.0
    moisture_penalty_per_pct: float = 0.0
    impurity_thresholds: Mapping[str, float] = field(default_factory=dict)
    impurity_penalty_per_ppm: Mapping[str, float] = field(default_factory=dict)


def calculate_pricing(
    pricing_input: PricingInput,
    *,
    metadata: PricingMetadata | None = None,
    currency: str | None = None,
) -> PricingResult:
    """Calculate pricing metrics for the provided commodity input.

    Parameters
    ----------
    pricing_input:
        Normalised input data including ore tonnage, grades, charges, and
        optional penalties.
    metadata:
        Optional default metadata applied when specific values are omitted from
        ``pricing_input``.
    currency:
        Optional override for the output currency label. Falls back to
        ``metadata.default_currency`` when not provided.
    """

    applied_metadata = metadata or PricingMetadata()

    payable_pct = (
        pricing_input.payable_pct
        if pricing_input.payable_pct is not None
        else applied_metadata.default_payable_pct
    )
    moisture_threshold = (
        pricing_input.moisture_threshold_pct
        if pricing_input.moisture_threshold_pct is not None
        else applied_metadata.moisture_threshold_pct
    )
    moisture_penalty_factor = (
        pricing_input.moisture_penalty_per_pct
        if pricing_input.moisture_penalty_per_pct is not None
        else applied_metadata.moisture_penalty_per_pct
    )

    impurity_thresholds = {
        **applied_metadata.impurity_thresholds,
        **pricing_input.impurity_thresholds,
    }
    impurity_penalty_factors = {
        **applied_metadata.impurity_penalty_per_ppm,
        **pricing_input.impurity_penalty_per_ppm,
    }

    q_metal = pricing_input.ore_tonnage * (pricing_input.head_grade_pct / 100.0) * (
        pricing_input.recovery_pct / 100.0
    )
    payable_metal = q_metal * (payable_pct / 100.0)

    gross_revenue_ref = payable_metal * pricing_input.reference_price
    charges = pricing_input.treatment_charge + pricing_input.smelting_charge

    moisture_excess = max(0.0, pricing_input.moisture_pct - moisture_threshold)
    moisture_penalty = moisture_excess * moisture_penalty_factor

    impurity_penalty_total = 0.0
    for impurity, value in pricing_input.impurity_ppm.items():
        threshold = impurity_thresholds.get(impurity, 0.0)
        penalty_factor = impurity_penalty_factors.get(impurity, 0.0)
        impurity_penalty_total += max(0.0, value - threshold) * penalty_factor

    net_revenue_ref = (
        gross_revenue_ref - charges - moisture_penalty - impurity_penalty_total
    )
    net_revenue_ref += pricing_input.premiums

    net_revenue = net_revenue_ref * pricing_input.fx_rate

    currency_code = require_currency(
        currency or pricing_input.currency_code,
        default=applied_metadata.default_currency,
    )

    return PricingResult(
        metal=pricing_input.metal,
        ore_tonnage=pricing_input.ore_tonnage,
        head_grade_pct=pricing_input.head_grade_pct,
        recovery_pct=pricing_input.recovery_pct,
        payable_metal_tonnes=payable_metal,
        reference_price=pricing_input.reference_price,
        gross_revenue=gross_revenue_ref,
        moisture_penalty=moisture_penalty,
        impurity_penalty=impurity_penalty_total,
        treatment_smelt_charges=charges,
        premiums=pricing_input.premiums,
        net_revenue=net_revenue,
        currency=currency_code,
    )
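A minimal end-to-end sketch of `calculate_pricing` with made-up copper numbers, showing how metadata defaults and the currency fallback interact.

from services.pricing import PricingInput, PricingMetadata, calculate_pricing

metadata = PricingMetadata(
    default_payable_pct=96.5,
    default_currency="USD",
    moisture_threshold_pct=8.0,
    moisture_penalty_per_pct=1.5,
    impurity_thresholds={"As": 200.0},
    impurity_penalty_per_ppm={"As": 0.02},
)

pricing_input = PricingInput(
    metal="copper",
    ore_tonnage=10_000,          # tonnes of ore
    head_grade_pct=1.2,          # % Cu in ore
    recovery_pct=90.0,           # plant recovery
    reference_price=8_500.0,     # price per tonne of payable metal
    treatment_charge=5_000.0,
    smelting_charge=2_500.0,
    moisture_pct=9.0,            # 1 % over the 8 % threshold
    impurity_ppm={"As": 250.0},  # 50 ppm over the arsenic threshold
)

result = calculate_pricing(pricing_input, metadata=metadata)

# payable metal = 10_000 * 1.2 % * 90 % * 96.5 % = 104.22 t
assert round(result.payable_metal_tonnes, 2) == 104.22
# currency falls back to the metadata default because no override was given
assert result.currency == "USD"
print(round(result.net_revenue, 2))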
services/reporting.py (new file, 676 lines)
@@ -0,0 +1,676 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""Reporting service layer aggregating deterministic and simulation metrics."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import date
|
||||
import math
|
||||
from typing import Iterable, Mapping, Sequence
|
||||
|
||||
from models import FinancialCategory, Project, Scenario
|
||||
from services.financial import (
|
||||
CashFlow,
|
||||
ConvergenceError,
|
||||
PaybackNotReachedError,
|
||||
internal_rate_of_return,
|
||||
net_present_value,
|
||||
payback_period,
|
||||
)
|
||||
from services.simulation import (
|
||||
CashFlowSpec,
|
||||
SimulationConfig,
|
||||
SimulationMetric,
|
||||
SimulationResult,
|
||||
run_monte_carlo,
|
||||
)
|
||||
from services.unit_of_work import UnitOfWork
|
||||
|
||||
DEFAULT_DISCOUNT_RATE = 0.1
|
||||
DEFAULT_ITERATIONS = 500
|
||||
DEFAULT_PERCENTILES: tuple[float, float, float] = (5.0, 50.0, 95.0)
|
||||
|
||||
_COST_CATEGORY_SIGNS: Mapping[FinancialCategory, float] = {
|
||||
FinancialCategory.REVENUE: 1.0,
|
||||
FinancialCategory.CAPITAL_EXPENDITURE: -1.0,
|
||||
FinancialCategory.OPERATING_EXPENDITURE: -1.0,
|
||||
FinancialCategory.CONTINGENCY: -1.0,
|
||||
FinancialCategory.OTHER: -1.0,
|
||||
}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class IncludeOptions:
|
||||
"""Flags controlling optional sections in report payloads."""
|
||||
|
||||
distribution: bool = False
|
||||
samples: bool = False
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ReportFilters:
|
||||
"""Filter parameters applied when selecting scenarios for a report."""
|
||||
|
||||
scenario_ids: set[int] | None = None
|
||||
start_date: date | None = None
|
||||
end_date: date | None = None
|
||||
|
||||
def matches(self, scenario: Scenario) -> bool:
|
||||
if self.scenario_ids is not None and scenario.id not in self.scenario_ids:
|
||||
return False
|
||||
if self.start_date and scenario.start_date and scenario.start_date < self.start_date:
|
||||
return False
|
||||
if self.end_date and scenario.end_date and scenario.end_date > self.end_date:
|
||||
return False
|
||||
return True
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
payload: dict[str, object] = {}
|
||||
if self.scenario_ids is not None:
|
||||
payload["scenario_ids"] = sorted(self.scenario_ids)
|
||||
if self.start_date is not None:
|
||||
payload["start_date"] = self.start_date
|
||||
if self.end_date is not None:
|
||||
payload["end_date"] = self.end_date
|
||||
return payload
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ScenarioFinancialTotals:
|
||||
currency: str | None
|
||||
inflows: float
|
||||
outflows: float
|
||||
net: float
|
||||
by_category: dict[str, float]
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
return {
|
||||
"currency": self.currency,
|
||||
"inflows": _round_optional(self.inflows),
|
||||
"outflows": _round_optional(self.outflows),
|
||||
"net": _round_optional(self.net),
|
||||
"by_category": {
|
||||
key: _round_optional(value) for key, value in sorted(self.by_category.items())
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ScenarioDeterministicMetrics:
|
||||
currency: str | None
|
||||
discount_rate: float
|
||||
compounds_per_year: int
|
||||
npv: float | None
|
||||
irr: float | None
|
||||
payback_period: float | None
|
||||
notes: list[str] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
return {
|
||||
"currency": self.currency,
|
||||
"discount_rate": _round_optional(self.discount_rate, digits=4),
|
||||
"compounds_per_year": self.compounds_per_year,
|
||||
"npv": _round_optional(self.npv),
|
||||
"irr": _round_optional(self.irr, digits=6),
|
||||
"payback_period": _round_optional(self.payback_period, digits=4),
|
||||
"notes": self.notes,
|
||||
}
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ScenarioMonteCarloResult:
|
||||
available: bool
|
||||
notes: list[str] = field(default_factory=list)
|
||||
result: SimulationResult | None = None
|
||||
include_samples: bool = False
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
if not self.available or self.result is None:
|
||||
return {
|
||||
"available": False,
|
||||
"notes": self.notes,
|
||||
}
|
||||
|
||||
metrics: dict[str, dict[str, object]] = {}
|
||||
for metric, summary in self.result.summaries.items():
|
||||
metrics[metric.value] = {
|
||||
"mean": _round_optional(summary.mean),
|
||||
"std_dev": _round_optional(summary.std_dev),
|
||||
"minimum": _round_optional(summary.minimum),
|
||||
"maximum": _round_optional(summary.maximum),
|
||||
"percentiles": {
|
||||
f"{percentile:g}": _round_optional(value)
|
||||
for percentile, value in sorted(summary.percentiles.items())
|
||||
},
|
||||
"sample_size": summary.sample_size,
|
||||
"failed_runs": summary.failed_runs,
|
||||
}
|
||||
|
||||
samples_payload: dict[str, list[float | None]] | None = None
|
||||
if self.include_samples and self.result.samples:
|
||||
samples_payload = {}
|
||||
for metric, samples in self.result.samples.items():
|
||||
samples_payload[metric.value] = [
|
||||
_sanitize_float(sample) for sample in samples.tolist()
|
||||
]
|
||||
|
||||
payload: dict[str, object] = {
|
||||
"available": True,
|
||||
"iterations": self.result.iterations,
|
||||
"metrics": metrics,
|
||||
"notes": self.notes,
|
||||
}
|
||||
if samples_payload:
|
||||
payload["samples"] = samples_payload
|
||||
return payload
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ScenarioReport:
|
||||
scenario: Scenario
|
||||
totals: ScenarioFinancialTotals
|
||||
deterministic: ScenarioDeterministicMetrics
|
||||
monte_carlo: ScenarioMonteCarloResult | None
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
scenario_info = {
|
||||
"id": self.scenario.id,
|
||||
"project_id": self.scenario.project_id,
|
||||
"name": self.scenario.name,
|
||||
"description": self.scenario.description,
|
||||
"status": self.scenario.status.value,
|
||||
"start_date": self.scenario.start_date,
|
||||
"end_date": self.scenario.end_date,
|
||||
"currency": self.scenario.currency,
|
||||
"primary_resource": self.scenario.primary_resource.value
|
||||
if self.scenario.primary_resource
|
||||
else None,
|
||||
"discount_rate": _round_optional(self.deterministic.discount_rate, digits=4),
|
||||
"created_at": self.scenario.created_at,
|
||||
"updated_at": self.scenario.updated_at,
|
||||
"simulation_parameter_count": len(self.scenario.simulation_parameters or []),
|
||||
}
|
||||
payload: dict[str, object] = {
|
||||
"scenario": scenario_info,
|
||||
"financials": self.totals.to_dict(),
|
||||
"metrics": self.deterministic.to_dict(),
|
||||
}
|
||||
if self.monte_carlo is not None:
|
||||
payload["monte_carlo"] = self.monte_carlo.to_dict()
|
||||
return payload
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class AggregatedMetric:
|
||||
average: float | None
|
||||
minimum: float | None
|
||||
maximum: float | None
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
return {
|
||||
"average": _round_optional(self.average),
|
||||
"minimum": _round_optional(self.minimum),
|
||||
"maximum": _round_optional(self.maximum),
|
||||
}
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class ProjectAggregates:
|
||||
total_inflows: float
|
||||
total_outflows: float
|
||||
total_net: float
|
||||
deterministic_metrics: dict[str, AggregatedMetric]
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
return {
|
||||
"financials": {
|
||||
"total_inflows": _round_optional(self.total_inflows),
|
||||
"total_outflows": _round_optional(self.total_outflows),
|
||||
"total_net": _round_optional(self.total_net),
|
||||
},
|
||||
"deterministic_metrics": {
|
||||
metric: data.to_dict()
|
||||
for metric, data in sorted(self.deterministic_metrics.items())
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class MetricComparison:
|
||||
metric: str
|
||||
direction: str
|
||||
best: tuple[int, str, float] | None
|
||||
worst: tuple[int, str, float] | None
|
||||
average: float | None
|
||||
|
||||
def to_dict(self) -> dict[str, object]:
|
||||
return {
|
||||
"metric": self.metric,
|
||||
"direction": self.direction,
|
||||
"best": _comparison_entry(self.best),
|
||||
"worst": _comparison_entry(self.worst),
|
||||
"average": _round_optional(self.average),
|
||||
}
|
||||
|
||||
|
||||
def parse_include_tokens(raw: str | None) -> IncludeOptions:
|
||||
tokens: set[str] = set()
|
||||
if raw:
|
||||
for part in raw.split(","):
|
||||
token = part.strip().lower()
|
||||
if token:
|
||||
tokens.add(token)
|
||||
if "all" in tokens:
|
||||
return IncludeOptions(distribution=True, samples=True)
|
||||
return IncludeOptions(
|
||||
distribution=bool({"distribution", "monte_carlo", "mc"} & tokens),
|
||||
samples="samples" in tokens,
|
||||
)
|
||||
|
||||
|
||||
def validate_percentiles(values: Sequence[float] | None) -> tuple[float, ...]:
|
||||
if not values:
|
||||
return DEFAULT_PERCENTILES
|
||||
seen: set[float] = set()
|
||||
cleaned: list[float] = []
|
||||
for value in values:
|
||||
percentile = float(value)
|
||||
if percentile < 0.0 or percentile > 100.0:
|
||||
raise ValueError("Percentiles must be between 0 and 100.")
|
||||
if percentile not in seen:
|
||||
seen.add(percentile)
|
||||
cleaned.append(percentile)
|
||||
if not cleaned:
|
||||
return DEFAULT_PERCENTILES
|
||||
return tuple(cleaned)
|
||||
|
||||
|
||||
class ReportingService:
|
||||
"""Coordinates project and scenario reporting aggregation."""
|
||||
|
||||
def __init__(self, uow: UnitOfWork) -> None:
|
||||
self._uow = uow
|
||||
|
||||
def project_summary(
|
||||
self,
|
||||
project: Project,
|
||||
*,
|
||||
filters: ReportFilters,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
scenarios = self._load_scenarios(project.id, filters)
|
||||
reports = [
|
||||
self._build_scenario_report(
|
||||
scenario,
|
||||
include_distribution=include.distribution,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
for scenario in scenarios
|
||||
]
|
||||
aggregates = self._aggregate_project(reports)
|
||||
return {
|
||||
"project": _project_payload(project),
|
||||
"scenario_count": len(reports),
|
||||
"filters": filters.to_dict(),
|
||||
"aggregates": aggregates.to_dict(),
|
||||
"scenarios": [report.to_dict() for report in reports],
|
||||
}
|
||||
|
||||
def scenario_comparison(
|
||||
self,
|
||||
project: Project,
|
||||
scenarios: Sequence[Scenario],
|
||||
*,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
reports = [
|
||||
self._build_scenario_report(
|
||||
self._reload_scenario(scenario.id),
|
||||
include_distribution=include.distribution,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
for scenario in scenarios
|
||||
]
|
||||
comparison = {
|
||||
metric: data.to_dict()
|
||||
for metric, data in self._build_comparisons(reports).items()
|
||||
}
|
||||
return {
|
||||
"project": _project_payload(project),
|
||||
"scenarios": [report.to_dict() for report in reports],
|
||||
"comparison": comparison,
|
||||
}
|
||||
|
||||
def scenario_distribution(
|
||||
self,
|
||||
scenario: Scenario,
|
||||
*,
|
||||
include: IncludeOptions,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> dict[str, object]:
|
||||
report = self._build_scenario_report(
|
||||
self._reload_scenario(scenario.id),
|
||||
include_distribution=True,
|
||||
include_samples=include.samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
return {
|
||||
"scenario": report.to_dict()["scenario"],
|
||||
"summary": report.totals.to_dict(),
|
||||
"metrics": report.deterministic.to_dict(),
|
||||
"monte_carlo": (
|
||||
report.monte_carlo.to_dict() if report.monte_carlo else {
|
||||
"available": False}
|
||||
),
|
||||
}
|
||||
|
||||
def _load_scenarios(self, project_id: int, filters: ReportFilters) -> list[Scenario]:
|
||||
repo = self._require_scenario_repo()
|
||||
scenarios = repo.list_for_project(project_id, with_children=True)
|
||||
return [scenario for scenario in scenarios if filters.matches(scenario)]
|
||||
|
||||
def _reload_scenario(self, scenario_id: int) -> Scenario:
|
||||
repo = self._require_scenario_repo()
|
||||
return repo.get(scenario_id, with_children=True)
|
||||
|
||||
def _build_scenario_report(
|
||||
self,
|
||||
scenario: Scenario,
|
||||
*,
|
||||
include_distribution: bool,
|
||||
include_samples: bool,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> ScenarioReport:
|
||||
cash_flows, totals = _build_cash_flows(scenario)
|
||||
deterministic = _calculate_deterministic_metrics(
|
||||
scenario, cash_flows, totals)
|
||||
monte_carlo: ScenarioMonteCarloResult | None = None
|
||||
if include_distribution:
|
||||
monte_carlo = _run_monte_carlo(
|
||||
scenario,
|
||||
cash_flows,
|
||||
include_samples=include_samples,
|
||||
iterations=iterations,
|
||||
percentiles=percentiles,
|
||||
)
|
||||
return ScenarioReport(
|
||||
scenario=scenario,
|
||||
totals=totals,
|
||||
deterministic=deterministic,
|
||||
monte_carlo=monte_carlo,
|
||||
)
|
||||
|
||||
def _aggregate_project(self, reports: Sequence[ScenarioReport]) -> ProjectAggregates:
|
||||
total_inflows = sum(report.totals.inflows for report in reports)
|
||||
total_outflows = sum(report.totals.outflows for report in reports)
|
||||
total_net = sum(report.totals.net for report in reports)
|
||||
|
||||
metrics: dict[str, AggregatedMetric] = {}
|
||||
for metric_name in ("npv", "irr", "payback_period"):
|
||||
values = [
|
||||
getattr(report.deterministic, metric_name)
|
||||
for report in reports
|
||||
if getattr(report.deterministic, metric_name) is not None
|
||||
]
|
||||
if values:
|
||||
metrics[metric_name] = AggregatedMetric(
|
||||
average=sum(values) / len(values),
|
||||
minimum=min(values),
|
||||
maximum=max(values),
|
||||
)
|
||||
return ProjectAggregates(
|
||||
total_inflows=total_inflows,
|
||||
total_outflows=total_outflows,
|
||||
total_net=total_net,
|
||||
deterministic_metrics=metrics,
|
||||
)
|
||||
|
||||
def _build_comparisons(
|
||||
self, reports: Sequence[ScenarioReport]
|
||||
) -> Mapping[str, MetricComparison]:
|
||||
comparisons: dict[str, MetricComparison] = {}
|
||||
for metric_name, direction in (
|
||||
("npv", "higher_is_better"),
|
||||
("irr", "higher_is_better"),
|
||||
("payback_period", "lower_is_better"),
|
||||
):
|
||||
entries: list[tuple[int, str, float]] = []
|
||||
for report in reports:
|
||||
value = getattr(report.deterministic, metric_name)
|
||||
if value is None:
|
||||
continue
|
||||
entries.append(
|
||||
(report.scenario.id, report.scenario.name, value))
|
||||
if not entries:
|
||||
continue
|
||||
if direction == "higher_is_better":
|
||||
best = max(entries, key=lambda item: item[2])
|
||||
worst = min(entries, key=lambda item: item[2])
|
||||
else:
|
||||
best = min(entries, key=lambda item: item[2])
|
||||
worst = max(entries, key=lambda item: item[2])
|
||||
average = sum(item[2] for item in entries) / len(entries)
|
||||
comparisons[metric_name] = MetricComparison(
|
||||
metric=metric_name,
|
||||
direction=direction,
|
||||
best=best,
|
||||
worst=worst,
|
||||
average=average,
|
||||
)
|
||||
return comparisons
|
||||
|
||||
def _require_scenario_repo(self):
|
||||
if not self._uow.scenarios:
|
||||
raise RuntimeError("Scenario repository not initialised")
|
||||
return self._uow.scenarios
|
||||
|
||||
|
||||
def _build_cash_flows(scenario: Scenario) -> tuple[list[CashFlow], ScenarioFinancialTotals]:
|
||||
cash_flows: list[CashFlow] = []
|
||||
by_category: dict[str, float] = {}
|
||||
inflows = 0.0
|
||||
outflows = 0.0
|
||||
net = 0.0
|
||||
period_index = 0
|
||||
|
||||
for financial_input in scenario.financial_inputs or []:
|
||||
sign = _COST_CATEGORY_SIGNS.get(financial_input.category, -1.0)
|
||||
amount = float(financial_input.amount) * sign
|
||||
net += amount
|
||||
if amount >= 0:
|
||||
inflows += amount
|
||||
else:
|
||||
outflows += -amount
|
||||
by_category.setdefault(financial_input.category.value, 0.0)
|
||||
by_category[financial_input.category.value] += amount
|
||||
|
||||
if financial_input.effective_date is not None:
|
||||
cash_flows.append(
|
||||
CashFlow(amount=amount, date=financial_input.effective_date)
|
||||
)
|
||||
else:
|
||||
cash_flows.append(
|
||||
CashFlow(amount=amount, period_index=period_index))
|
||||
period_index += 1
|
||||
|
||||
currency = scenario.currency
|
||||
if currency is None and scenario.financial_inputs:
|
||||
currency = scenario.financial_inputs[0].currency
|
||||
|
||||
totals = ScenarioFinancialTotals(
|
||||
currency=currency,
|
||||
inflows=inflows,
|
||||
outflows=outflows,
|
||||
net=net,
|
||||
by_category=by_category,
|
||||
)
|
||||
return cash_flows, totals
|
||||
|
||||
|
||||
def _calculate_deterministic_metrics(
|
||||
scenario: Scenario,
|
||||
cash_flows: Sequence[CashFlow],
|
||||
totals: ScenarioFinancialTotals,
|
||||
) -> ScenarioDeterministicMetrics:
|
||||
notes: list[str] = []
|
||||
discount_rate = _normalise_discount_rate(scenario.discount_rate)
|
||||
if scenario.discount_rate is None:
|
||||
notes.append(
|
||||
f"Discount rate not set; defaulted to {discount_rate:.2%}."
|
||||
)
|
||||
|
||||
if not cash_flows:
|
||||
notes.append(
|
||||
"No financial inputs available for deterministic metrics.")
|
||||
return ScenarioDeterministicMetrics(
|
||||
currency=totals.currency,
|
||||
discount_rate=discount_rate,
|
||||
compounds_per_year=1,
|
||||
npv=None,
|
||||
irr=None,
|
||||
payback_period=None,
|
||||
notes=notes,
|
||||
)
|
||||
|
||||
npv_value: float | None
|
||||
try:
|
||||
npv_value = net_present_value(
|
||||
discount_rate,
|
||||
cash_flows,
|
||||
compounds_per_year=1,
|
||||
)
|
||||
except ValueError as exc:
|
||||
npv_value = None
|
||||
notes.append(f"NPV unavailable: {exc}.")
|
||||
|
||||
irr_value: float | None
|
||||
try:
|
||||
irr_value = internal_rate_of_return(
|
||||
cash_flows,
|
||||
compounds_per_year=1,
|
||||
)
|
||||
except (ValueError, ConvergenceError) as exc:
|
||||
irr_value = None
|
||||
notes.append(f"IRR unavailable: {exc}.")
|
||||
|
||||
payback_value: float | None
|
||||
try:
|
||||
payback_value = payback_period(
|
||||
cash_flows,
|
||||
compounds_per_year=1,
|
||||
)
|
||||
except (ValueError, PaybackNotReachedError) as exc:
|
||||
payback_value = None
|
||||
notes.append(f"Payback period unavailable: {exc}.")
|
||||
|
||||
return ScenarioDeterministicMetrics(
|
||||
currency=totals.currency,
|
||||
discount_rate=discount_rate,
|
||||
compounds_per_year=1,
|
||||
npv=npv_value,
|
||||
irr=irr_value,
|
||||
payback_period=payback_value,
|
||||
notes=notes,
|
||||
)
|
||||
|
||||
|
||||
def _run_monte_carlo(
|
||||
scenario: Scenario,
|
||||
cash_flows: Sequence[CashFlow],
|
||||
*,
|
||||
include_samples: bool,
|
||||
iterations: int,
|
||||
percentiles: tuple[float, ...],
|
||||
) -> ScenarioMonteCarloResult:
|
||||
if not cash_flows:
|
||||
return ScenarioMonteCarloResult(
|
||||
available=False,
|
||||
notes=["No financial inputs available for Monte Carlo simulation."],
|
||||
)
|
||||
|
||||
discount_rate = _normalise_discount_rate(scenario.discount_rate)
|
||||
specs = [CashFlowSpec(cash_flow=flow) for flow in cash_flows]
|
||||
notes: list[str] = []
|
||||
if not scenario.simulation_parameters:
|
||||
notes.append(
|
||||
"Scenario has no stochastic parameters; simulation mirrors deterministic cash flows."
|
||||
)
|
||||
config = SimulationConfig(
|
||||
iterations=iterations,
|
||||
discount_rate=discount_rate,
|
||||
metrics=(
|
||||
SimulationMetric.NPV,
|
||||
SimulationMetric.IRR,
|
||||
SimulationMetric.PAYBACK,
|
||||
),
|
||||
percentiles=percentiles,
|
||||
return_samples=include_samples,
|
||||
)
|
||||
try:
|
||||
result = run_monte_carlo(specs, config)
|
||||
except Exception as exc: # pragma: no cover - safeguard for unexpected failures
|
||||
notes.append(f"Simulation failed: {exc}.")
|
||||
return ScenarioMonteCarloResult(available=False, notes=notes)
|
||||
return ScenarioMonteCarloResult(
|
||||
available=True,
|
||||
notes=notes,
|
||||
result=result,
|
||||
include_samples=include_samples,
|
||||
)
|
||||
|
||||
|
||||
def _normalise_discount_rate(value: float | None) -> float:
|
||||
if value is None:
|
||||
return DEFAULT_DISCOUNT_RATE
|
||||
rate = float(value)
|
||||
if rate > 1.0:
|
||||
return rate / 100.0
|
||||
return rate
|
||||
|
||||
|
||||
def _sanitize_float(value: float | None) -> float | None:
|
||||
if value is None:
|
||||
return None
|
||||
if math.isnan(value) or math.isinf(value):
|
||||
return None
|
||||
return float(value)
|
||||
|
||||
|
||||
def _round_optional(value: float | None, *, digits: int = 2) -> float | None:
|
||||
clean = _sanitize_float(value)
|
||||
if clean is None:
|
||||
return None
|
||||
return round(clean, digits)
|
||||
|
||||
|
||||
def _comparison_entry(entry: tuple[int, str, float] | None) -> dict[str, object] | None:
|
||||
if entry is None:
|
||||
return None
|
||||
scenario_id, name, value = entry
|
||||
return {
|
||||
"scenario_id": scenario_id,
|
||||
"name": name,
|
||||
"value": _round_optional(value),
|
||||
}
|
||||
|
||||
|
||||
def _project_payload(project: Project) -> dict[str, object]:
|
||||
return {
|
||||
"id": project.id,
|
||||
"name": project.name,
|
||||
"location": project.location,
|
||||
"operation_type": project.operation_type.value,
|
||||
"description": project.description,
|
||||
"created_at": project.created_at,
|
||||
"updated_at": project.updated_at,
|
||||
}
|
||||
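A short sketch of the request-parsing helpers defined above in services/reporting.py; the values are illustrative.

from datetime import date

from services.reporting import (
    IncludeOptions,
    ReportFilters,
    parse_include_tokens,
    validate_percentiles,
)

# "all" switches on both optional sections of the payload.
assert parse_include_tokens("all") == IncludeOptions(distribution=True, samples=True)
# Aliases such as "mc" also enable the Monte Carlo distribution block.
assert parse_include_tokens("mc") == IncludeOptions(distribution=True, samples=False)

# An empty input keeps the default percentiles; duplicates are dropped.
assert validate_percentiles(None) == (5.0, 50.0, 95.0)
assert validate_percentiles([10, 50, 90]) == (10.0, 50.0, 90.0)

# Filters narrow the scenarios included in a project summary.
filters = ReportFilters(scenario_ids={1, 2}, start_date=date(2024, 1, 1))
assert filters.to_dict() == {"scenario_ids": [1, 2], "start_date": date(2024, 1, 1)}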
@@ -1,6 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from typing import Mapping, Sequence
|
||||
|
||||
@@ -11,6 +12,9 @@ from sqlalchemy.orm import Session, joinedload, selectinload
|
||||
from models import (
|
||||
FinancialInput,
|
||||
Project,
|
||||
PricingImpuritySettings,
|
||||
PricingMetalSettings,
|
||||
PricingSettings,
|
||||
ResourceType,
|
||||
Role,
|
||||
Scenario,
|
||||
@@ -21,6 +25,7 @@ from models import (
|
||||
)
|
||||
from services.exceptions import EntityConflictError, EntityNotFoundError
|
||||
from services.export_query import ProjectExportFilters, ScenarioExportFilters
|
||||
from services.pricing import PricingMetadata
|
||||
|
||||
|
||||
class ProjectRepository:
|
||||
@@ -29,10 +34,17 @@ class ProjectRepository:
|
||||
def __init__(self, session: Session) -> None:
|
||||
self.session = session
|
||||
|
||||
def list(self, *, with_children: bool = False) -> Sequence[Project]:
|
||||
def list(
|
||||
self,
|
||||
*,
|
||||
with_children: bool = False,
|
||||
with_pricing: bool = False,
|
||||
) -> Sequence[Project]:
|
||||
stmt = select(Project).order_by(Project.created_at)
|
||||
if with_children:
|
||||
stmt = stmt.options(selectinload(Project.scenarios))
|
||||
if with_pricing:
|
||||
stmt = stmt.options(selectinload(Project.pricing_settings))
|
||||
return self.session.execute(stmt).scalars().all()
|
||||
|
||||
def count(self) -> int:
|
||||
@@ -47,10 +59,18 @@ class ProjectRepository:
|
||||
)
|
||||
return self.session.execute(stmt).scalars().all()
|
||||
|
||||
def get(self, project_id: int, *, with_children: bool = False) -> Project:
|
||||
def get(
|
||||
self,
|
||||
project_id: int,
|
||||
*,
|
||||
with_children: bool = False,
|
||||
with_pricing: bool = False,
|
||||
) -> Project:
|
||||
stmt = select(Project).where(Project.id == project_id)
|
||||
if with_children:
|
||||
stmt = stmt.options(joinedload(Project.scenarios))
|
||||
if with_pricing:
|
||||
stmt = stmt.options(joinedload(Project.pricing_settings))
|
||||
result = self.session.execute(stmt)
|
||||
if with_children:
|
||||
result = result.unique()
|
||||
@@ -86,10 +106,13 @@ class ProjectRepository:
|
||||
filters: ProjectExportFilters | None = None,
|
||||
*,
|
||||
include_scenarios: bool = False,
|
||||
include_pricing: bool = False,
|
||||
) -> Sequence[Project]:
|
||||
stmt = select(Project)
|
||||
if include_scenarios:
|
||||
stmt = stmt.options(selectinload(Project.scenarios))
|
||||
if include_pricing:
|
||||
stmt = stmt.options(selectinload(Project.pricing_settings))
|
||||
|
||||
if filters:
|
||||
ids = filters.normalised_ids()
|
||||
@@ -131,6 +154,18 @@ class ProjectRepository:
|
||||
project = self.get(project_id)
|
||||
self.session.delete(project)
|
||||
|
||||
def set_pricing_settings(
|
||||
self,
|
||||
project: Project,
|
||||
pricing_settings: PricingSettings | None,
|
||||
) -> Project:
|
||||
project.pricing_settings = pricing_settings
|
||||
project.pricing_settings_id = (
|
||||
pricing_settings.id if pricing_settings is not None else None
|
||||
)
|
||||
self.session.flush()
|
||||
return project
|
||||
|
||||
|
||||
class ScenarioRepository:
|
||||
"""Persistence operations for Scenario entities."""
|
||||
@@ -138,13 +173,26 @@ class ScenarioRepository:
|
||||
def __init__(self, session: Session) -> None:
|
||||
self.session = session
|
||||
|
||||
def list_for_project(self, project_id: int) -> Sequence[Scenario]:
|
||||
def list_for_project(
|
||||
self,
|
||||
project_id: int,
|
||||
*,
|
||||
with_children: bool = False,
|
||||
) -> Sequence[Scenario]:
|
||||
stmt = (
|
||||
select(Scenario)
|
||||
.where(Scenario.project_id == project_id)
|
||||
.order_by(Scenario.created_at)
|
||||
)
|
||||
return self.session.execute(stmt).scalars().all()
|
||||
if with_children:
|
||||
stmt = stmt.options(
|
||||
selectinload(Scenario.financial_inputs),
|
||||
selectinload(Scenario.simulation_parameters),
|
||||
)
|
||||
result = self.session.execute(stmt)
|
||||
if with_children:
|
||||
result = result.unique()
|
||||
return result.scalars().all()
|
||||
|
||||
def count(self) -> int:
|
||||
stmt = select(func.count(Scenario.id))
|
||||
@@ -376,6 +424,101 @@ class SimulationParameterRepository:
|
||||
self.session.delete(entity)
|
||||
|
||||
|
||||
class PricingSettingsRepository:
|
||||
"""Persistence operations for pricing configuration entities."""
|
||||
|
||||
def __init__(self, session: Session) -> None:
|
||||
self.session = session
|
||||
|
||||
def list(self, *, include_children: bool = False) -> Sequence[PricingSettings]:
|
||||
stmt = select(PricingSettings).order_by(PricingSettings.created_at)
|
||||
if include_children:
|
||||
stmt = stmt.options(
|
||||
selectinload(PricingSettings.metal_overrides),
|
||||
selectinload(PricingSettings.impurity_overrides),
|
||||
)
|
||||
result = self.session.execute(stmt)
|
||||
if include_children:
|
||||
result = result.unique()
|
||||
return result.scalars().all()
|
||||
|
||||
def get(self, settings_id: int, *, include_children: bool = False) -> PricingSettings:
|
||||
stmt = select(PricingSettings).where(PricingSettings.id == settings_id)
|
||||
if include_children:
|
||||
stmt = stmt.options(
|
||||
selectinload(PricingSettings.metal_overrides),
|
||||
selectinload(PricingSettings.impurity_overrides),
|
||||
)
|
||||
result = self.session.execute(stmt)
|
||||
if include_children:
|
||||
result = result.unique()
|
||||
settings = result.scalar_one_or_none()
|
||||
if settings is None:
|
||||
raise EntityNotFoundError(
|
||||
f"Pricing settings {settings_id} not found")
|
||||
return settings
|
||||
|
||||
def find_by_slug(
|
||||
self,
|
||||
slug: str,
|
||||
*,
|
||||
include_children: bool = False,
|
||||
) -> PricingSettings | None:
|
||||
normalised = slug.strip().lower()
|
||||
stmt = select(PricingSettings).where(
|
||||
PricingSettings.slug == normalised)
|
||||
if include_children:
|
||||
stmt = stmt.options(
|
||||
selectinload(PricingSettings.metal_overrides),
|
||||
selectinload(PricingSettings.impurity_overrides),
|
||||
)
|
||||
result = self.session.execute(stmt)
|
||||
if include_children:
|
||||
result = result.unique()
|
||||
return result.scalar_one_or_none()
|
||||
|
||||
def get_by_slug(self, slug: str, *, include_children: bool = False) -> PricingSettings:
|
||||
settings = self.find_by_slug(slug, include_children=include_children)
|
||||
if settings is None:
|
||||
raise EntityNotFoundError(
|
||||
f"Pricing settings slug '{slug}' not found"
|
||||
)
|
||||
return settings
|
||||
|
||||
def create(self, settings: PricingSettings) -> PricingSettings:
|
||||
self.session.add(settings)
|
||||
try:
|
||||
self.session.flush()
|
||||
except IntegrityError as exc: # pragma: no cover - relies on DB constraints
|
||||
raise EntityConflictError(
|
||||
"Pricing settings violates constraints") from exc
|
||||
return settings
|
||||
|
||||
def delete(self, settings_id: int) -> None:
|
||||
settings = self.get(settings_id, include_children=True)
|
||||
self.session.delete(settings)
|
||||
|
||||
def attach_metal_override(
|
||||
self,
|
||||
settings: PricingSettings,
|
||||
override: PricingMetalSettings,
|
||||
) -> PricingMetalSettings:
|
||||
settings.metal_overrides.append(override)
|
||||
self.session.add(override)
|
||||
self.session.flush()
|
||||
return override
|
||||
|
||||
def attach_impurity_override(
|
||||
self,
|
||||
settings: PricingSettings,
|
||||
override: PricingImpuritySettings,
|
||||
) -> PricingImpuritySettings:
|
||||
settings.impurity_overrides.append(override)
|
||||
self.session.add(override)
|
||||
self.session.flush()
|
||||
return override
|
||||
|
||||
|
||||
class RoleRepository:
|
||||
"""Persistence operations for Role entities."""
|
||||
|
||||
@@ -507,6 +650,159 @@ class UserRepository:
|
||||
self.session.flush()
|
||||
|
||||
|
||||
DEFAULT_PRICING_SETTINGS_NAME = "Default Pricing Settings"
|
||||
DEFAULT_PRICING_SETTINGS_DESCRIPTION = (
|
||||
"Default pricing configuration generated from environment metadata."
|
||||
)
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class PricingSettingsSeedResult:
|
||||
settings: PricingSettings
|
||||
created: bool
|
||||
updated_fields: int
|
||||
impurity_upserts: int
|
||||
|
||||
|
||||
def ensure_default_pricing_settings(
|
||||
repo: PricingSettingsRepository,
|
||||
*,
|
||||
metadata: PricingMetadata,
|
||||
slug: str = "default",
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
) -> PricingSettingsSeedResult:
|
||||
"""Ensure a baseline pricing settings record exists and matches metadata defaults."""
|
||||
|
||||
normalised_slug = (slug or "default").strip().lower() or "default"
|
||||
target_name = name or DEFAULT_PRICING_SETTINGS_NAME
|
||||
target_description = description or DEFAULT_PRICING_SETTINGS_DESCRIPTION
|
||||
|
||||
updated_fields = 0
|
||||
impurity_upserts = 0
|
||||
|
||||
try:
|
||||
settings = repo.get_by_slug(normalised_slug, include_children=True)
|
||||
created = False
|
||||
except EntityNotFoundError:
|
||||
settings = PricingSettings(
|
||||
name=target_name,
|
||||
slug=normalised_slug,
|
||||
description=target_description,
|
||||
default_currency=metadata.default_currency,
|
||||
default_payable_pct=metadata.default_payable_pct,
|
||||
            moisture_threshold_pct=metadata.moisture_threshold_pct,
            moisture_penalty_per_pct=metadata.moisture_penalty_per_pct,
        )
        settings.metadata_payload = None
        settings = repo.create(settings)
        created = True
    else:
        if settings.name != target_name:
            settings.name = target_name
            updated_fields += 1
        if target_description and settings.description != target_description:
            settings.description = target_description
            updated_fields += 1
        if settings.default_currency != metadata.default_currency:
            settings.default_currency = metadata.default_currency
            updated_fields += 1
        if float(settings.default_payable_pct) != float(metadata.default_payable_pct):
            settings.default_payable_pct = metadata.default_payable_pct
            updated_fields += 1
        if float(settings.moisture_threshold_pct) != float(metadata.moisture_threshold_pct):
            settings.moisture_threshold_pct = metadata.moisture_threshold_pct
            updated_fields += 1
        if float(settings.moisture_penalty_per_pct) != float(metadata.moisture_penalty_per_pct):
            settings.moisture_penalty_per_pct = metadata.moisture_penalty_per_pct
            updated_fields += 1

    impurity_thresholds = {
        code.strip().upper(): float(value)
        for code, value in (metadata.impurity_thresholds or {}).items()
        if code.strip()
    }
    impurity_penalties = {
        code.strip().upper(): float(value)
        for code, value in (metadata.impurity_penalty_per_ppm or {}).items()
        if code.strip()
    }

    if impurity_thresholds or impurity_penalties:
        existing_map = {
            override.impurity_code: override
            for override in settings.impurity_overrides
        }
        target_codes = set(impurity_thresholds) | set(impurity_penalties)
        for code in sorted(target_codes):
            threshold_value = impurity_thresholds.get(code, 0.0)
            penalty_value = impurity_penalties.get(code, 0.0)
            existing = existing_map.get(code)
            if existing is None:
                repo.attach_impurity_override(
                    settings,
                    PricingImpuritySettings(
                        impurity_code=code,
                        threshold_ppm=threshold_value,
                        penalty_per_ppm=penalty_value,
                    ),
                )
                impurity_upserts += 1
                continue
            changed = False
            if float(existing.threshold_ppm) != float(threshold_value):
                existing.threshold_ppm = threshold_value
                changed = True
            if float(existing.penalty_per_ppm) != float(penalty_value):
                existing.penalty_per_ppm = penalty_value
                changed = True
            if changed:
                updated_fields += 1

    if updated_fields > 0 or impurity_upserts > 0:
        repo.session.flush()

    return PricingSettingsSeedResult(
        settings=settings,
        created=created,
        updated_fields=updated_fields,
        impurity_upserts=impurity_upserts,
    )


def pricing_settings_to_metadata(settings: PricingSettings) -> PricingMetadata:
    """Convert a persisted pricing settings record into metadata defaults."""

    payload = settings.metadata_payload or {}
    payload_thresholds = payload.get("impurity_thresholds") or {}
    payload_penalties = payload.get("impurity_penalty_per_ppm") or {}

    thresholds: dict[str, float] = {
        code.strip().upper(): float(value)
        for code, value in payload_thresholds.items()
        if isinstance(code, str) and code.strip()
    }
    penalties: dict[str, float] = {
        code.strip().upper(): float(value)
        for code, value in payload_penalties.items()
        if isinstance(code, str) and code.strip()
    }

    for override in settings.impurity_overrides:
        code = override.impurity_code.strip().upper()
        thresholds[code] = float(override.threshold_ppm)
        penalties[code] = float(override.penalty_per_ppm)

    return PricingMetadata(
        default_payable_pct=float(settings.default_payable_pct),
        default_currency=settings.default_currency,
        moisture_threshold_pct=float(settings.moisture_threshold_pct),
        moisture_penalty_per_pct=float(settings.moisture_penalty_per_pct),
        impurity_thresholds=thresholds,
        impurity_penalty_per_ppm=penalties,
    )


DEFAULT_ROLE_DEFINITIONS: tuple[dict[str, str], ...] = (
    {
        "name": "admin",
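
For reference, a minimal usage sketch of the seeding helper and the metadata converter added above; the numeric values and the session wiring are illustrative assumptions, not values taken from the repository:

# Sketch only: seed the default pricing settings row and read it back as
# metadata defaults. Numbers below are placeholders.
from sqlalchemy.orm import Session

from services.pricing import PricingMetadata
from services.repositories import (
    PricingSettingsRepository,
    ensure_default_pricing_settings,
    pricing_settings_to_metadata,
)


def seed_and_reload(session: Session) -> PricingMetadata:
    repo = PricingSettingsRepository(session)
    metadata = PricingMetadata(
        default_payable_pct=96.5,               # illustrative value
        default_currency="USD",
        moisture_threshold_pct=8.0,
        moisture_penalty_per_pct=1.5,
        impurity_thresholds={"as": 200.0},      # codes are upper-cased by the seeder
        impurity_penalty_per_ppm={"as": 0.05},
    )
    result = ensure_default_pricing_settings(repo, metadata=metadata, slug="default")
    return pricing_settings_to_metadata(result.settings)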
54
services/scenario_evaluation.py
Normal file
@@ -0,0 +1,54 @@
from __future__ import annotations

"""Scenario evaluation services including pricing integration."""

from dataclasses import dataclass
from typing import Iterable, Mapping

from models.scenario import Scenario
from services.pricing import (
    PricingInput,
    PricingMetadata,
    PricingResult,
    calculate_pricing,
)


@dataclass(slots=True)
class ScenarioPricingConfig:
    """Configuration for pricing evaluation within a scenario."""

    metadata: PricingMetadata | None = None


@dataclass(slots=True)
class ScenarioPricingSnapshot:
    """Captured pricing results for a scenario."""

    scenario_id: int
    results: list[PricingResult]


class ScenarioPricingEvaluator:
    """Evaluate scenario profitability inputs using pricing services."""

    def __init__(self, config: ScenarioPricingConfig | None = None) -> None:
        self._config = config or ScenarioPricingConfig()

    def evaluate(
        self,
        scenario: Scenario,
        *,
        inputs: Iterable[PricingInput],
        metadata_override: PricingMetadata | None = None,
    ) -> ScenarioPricingSnapshot:
        metadata = metadata_override or self._config.metadata
        results: list[PricingResult] = []
        for pricing_input in inputs:
            result = calculate_pricing(
                pricing_input,
                metadata=metadata,
                currency=scenario.currency,
            )
            results.append(result)
        return ScenarioPricingSnapshot(scenario_id=scenario.id, results=results)
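
A brief usage sketch for the evaluator above; the helper name and the assumption that PricingInput records are built elsewhere are illustrative:

# Sketch only: price a prepared batch of inputs against one scenario.
from typing import Sequence

from models.scenario import Scenario
from services.pricing import PricingInput, PricingMetadata
from services.scenario_evaluation import (
    ScenarioPricingConfig,
    ScenarioPricingEvaluator,
)


def price_scenario(
    scenario: Scenario,
    pricing_inputs: Sequence[PricingInput],
    metadata: PricingMetadata | None = None,
):
    # A None metadata mirrors ScenarioPricingConfig's default and lets
    # calculate_pricing fall back to its own defaults.
    evaluator = ScenarioPricingEvaluator(ScenarioPricingConfig(metadata=metadata))
    snapshot = evaluator.evaluate(scenario, inputs=pricing_inputs)
    return snapshot  # snapshot.results holds one PricingResult per input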
352
services/simulation.py
Normal file
@@ -0,0 +1,352 @@
from __future__ import annotations

from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Iterable, Mapping, Sequence

import numpy as np
from numpy.random import Generator, default_rng

from .financial import (
    CashFlow,
    ConvergenceError,
    PaybackNotReachedError,
    internal_rate_of_return,
    net_present_value,
    payback_period,
)


class DistributionConfigError(ValueError):
    """Raised when a distribution specification is invalid."""


class SimulationMetric(Enum):
    """Supported Monte Carlo summary metrics."""

    NPV = "npv"
    IRR = "irr"
    PAYBACK = "payback"


class DistributionType(Enum):
    """Supported probability distribution families."""

    NORMAL = "normal"
    LOGNORMAL = "lognormal"
    TRIANGULAR = "triangular"
    DISCRETE = "discrete"


class DistributionSource(Enum):
    """Origins for parameter values when sourcing dynamically."""

    STATIC = "static"
    SCENARIO_FIELD = "scenario_field"
    METADATA_KEY = "metadata_key"


@dataclass(frozen=True, slots=True)
class DistributionSpec:
    """Defines the stochastic behaviour for a single cash flow."""

    type: DistributionType
    parameters: Mapping[str, Any]
    source: DistributionSource = DistributionSource.STATIC
    source_key: str | None = None


@dataclass(frozen=True, slots=True)
class CashFlowSpec:
    """Pairs a baseline cash flow with an optional distribution."""

    cash_flow: CashFlow
    distribution: DistributionSpec | None = None


@dataclass(frozen=True, slots=True)
class SimulationConfig:
    """Controls Monte Carlo simulation behaviour."""

    iterations: int
    discount_rate: float
    seed: int | None = None
    metrics: Sequence[SimulationMetric] = (
        SimulationMetric.NPV, SimulationMetric.IRR, SimulationMetric.PAYBACK)
    percentiles: Sequence[float] = (5.0, 50.0, 95.0)
    compounds_per_year: int = 1
    return_samples: bool = False
    residual_value: float | None = None
    residual_periods: float | None = None


@dataclass(frozen=True, slots=True)
class MetricSummary:
    """Aggregated statistics for a simulated metric."""

    mean: float
    std_dev: float
    minimum: float
    maximum: float
    percentiles: Mapping[float, float]
    sample_size: int
    failed_runs: int


@dataclass(frozen=True, slots=True)
class SimulationResult:
    """Monte Carlo output including per-metric summaries."""

    iterations: int
    summaries: Mapping[SimulationMetric, MetricSummary]
    samples: Mapping[SimulationMetric, np.ndarray] | None = None


def run_monte_carlo(
    cash_flows: Sequence[CashFlowSpec],
    config: SimulationConfig,
    *,
    scenario_context: Mapping[str, Any] | None = None,
    metadata: Mapping[str, Any] | None = None,
    rng: Generator | None = None,
) -> SimulationResult:
    """Execute Monte Carlo simulation for the provided cash flows."""

    if config.iterations <= 0:
        raise ValueError("iterations must be greater than zero")
    if config.compounds_per_year <= 0:
        raise ValueError("compounds_per_year must be greater than zero")
    for pct in config.percentiles:
        if pct < 0.0 or pct > 100.0:
            raise ValueError("percentiles must be within [0, 100]")

    generator = rng or default_rng(config.seed)

    metric_arrays: Dict[SimulationMetric, np.ndarray] = {
        metric: np.empty(config.iterations, dtype=float)
        for metric in config.metrics
    }

    for idx in range(config.iterations):
        iteration_flows = [
            _realise_cash_flow(
                spec,
                generator,
                scenario_context=scenario_context,
                metadata=metadata,
            )
            for spec in cash_flows
        ]

        if SimulationMetric.NPV in metric_arrays:
            metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
                config.discount_rate,
                iteration_flows,
                residual_value=config.residual_value,
                residual_periods=config.residual_periods,
                compounds_per_year=config.compounds_per_year,
            )
        if SimulationMetric.IRR in metric_arrays:
            try:
                metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
                    iteration_flows,
                    compounds_per_year=config.compounds_per_year,
                )
            except (ValueError, ConvergenceError):
                metric_arrays[SimulationMetric.IRR][idx] = np.nan
        if SimulationMetric.PAYBACK in metric_arrays:
            try:
                metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
                    iteration_flows,
                    compounds_per_year=config.compounds_per_year,
                )
            except (ValueError, PaybackNotReachedError):
                metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan

    summaries = {
        metric: _summarise(metric_arrays[metric], config.percentiles)
        for metric in metric_arrays
    }

    samples = metric_arrays if config.return_samples else None
    return SimulationResult(
        iterations=config.iterations,
        summaries=summaries,
        samples=samples,
    )


def _realise_cash_flow(
    spec: CashFlowSpec,
    generator: Generator,
    *,
    scenario_context: Mapping[str, Any] | None,
    metadata: Mapping[str, Any] | None,
) -> CashFlow:
    if spec.distribution is None:
        return spec.cash_flow

    distribution = spec.distribution
    base_amount = spec.cash_flow.amount
    params = _resolve_parameters(
        distribution,
        base_amount,
        scenario_context=scenario_context,
        metadata=metadata,
    )
    sample = _sample_distribution(
        distribution.type,
        params,
        generator,
    )
    return CashFlow(
        amount=float(sample),
        period_index=spec.cash_flow.period_index,
        date=spec.cash_flow.date,
    )


def _resolve_parameters(
    distribution: DistributionSpec,
    base_amount: float,
    *,
    scenario_context: Mapping[str, Any] | None,
    metadata: Mapping[str, Any] | None,
) -> Dict[str, Any]:
    params = dict(distribution.parameters)

    if distribution.source == DistributionSource.SCENARIO_FIELD:
        if distribution.source_key is None:
            raise DistributionConfigError(
                "source_key is required for scenario_field sourcing")
        if not scenario_context or distribution.source_key not in scenario_context:
            raise DistributionConfigError(
                f"scenario field '{distribution.source_key}' not found for distribution"
            )
        params.setdefault("mean", float(
            scenario_context[distribution.source_key]))
    elif distribution.source == DistributionSource.METADATA_KEY:
        if distribution.source_key is None:
            raise DistributionConfigError(
                "source_key is required for metadata_key sourcing")
        if not metadata or distribution.source_key not in metadata:
            raise DistributionConfigError(
                f"metadata key '{distribution.source_key}' not found for distribution"
            )
        params.setdefault("mean", float(metadata[distribution.source_key]))
    else:
        params.setdefault("mean", float(base_amount))

    return params


def _sample_distribution(
    distribution_type: DistributionType,
    params: Mapping[str, Any],
    generator: Generator,
) -> float:
    if distribution_type is DistributionType.NORMAL:
        return _sample_normal(params, generator)
    if distribution_type is DistributionType.LOGNORMAL:
        return _sample_lognormal(params, generator)
    if distribution_type is DistributionType.TRIANGULAR:
        return _sample_triangular(params, generator)
    if distribution_type is DistributionType.DISCRETE:
        return _sample_discrete(params, generator)
    raise DistributionConfigError(
        f"Unsupported distribution type: {distribution_type}")


def _sample_normal(params: Mapping[str, Any], generator: Generator) -> float:
    if "std_dev" not in params:
        raise DistributionConfigError("normal distribution requires 'std_dev'")
    std_dev = float(params["std_dev"])
    if std_dev < 0:
        raise DistributionConfigError("std_dev must be non-negative")
    mean = float(params.get("mean", 0.0))
    if std_dev == 0:
        return mean
    return float(generator.normal(loc=mean, scale=std_dev))


def _sample_lognormal(params: Mapping[str, Any], generator: Generator) -> float:
    if "sigma" not in params:
        raise DistributionConfigError(
            "lognormal distribution requires 'sigma'")
    sigma = float(params["sigma"])
    if sigma < 0:
        raise DistributionConfigError("sigma must be non-negative")
    if "mean" not in params:
        raise DistributionConfigError(
            "lognormal distribution requires 'mean' (mu in log space)")
    mean = float(params["mean"])
    return float(generator.lognormal(mean=mean, sigma=sigma))


def _sample_triangular(params: Mapping[str, Any], generator: Generator) -> float:
    required = {"min", "mode", "max"}
    if not required.issubset(params):
        missing = ", ".join(sorted(required - params.keys()))
        raise DistributionConfigError(
            f"triangular distribution missing parameters: {missing}")
    left = float(params["min"])
    mode = float(params["mode"])
    right = float(params["max"])
    if not (left <= mode <= right):
        raise DistributionConfigError(
            "triangular distribution requires min <= mode <= max")
    if left == right:
        return mode
    return float(generator.triangular(left=left, mode=mode, right=right))


def _sample_discrete(params: Mapping[str, Any], generator: Generator) -> float:
    values = params.get("values")
    probabilities = params.get("probabilities")
    if not isinstance(values, Sequence) or not isinstance(probabilities, Sequence):
        raise DistributionConfigError(
            "discrete distribution requires 'values' and 'probabilities' sequences")
    if len(values) != len(probabilities) or not values:
        raise DistributionConfigError(
            "values and probabilities must be non-empty and of equal length")
    probs = np.array(probabilities, dtype=float)
    if np.any(probs < 0):
        raise DistributionConfigError("probabilities must be non-negative")
    total = probs.sum()
    if not np.isclose(total, 1.0):
        raise DistributionConfigError("probabilities must sum to 1.0")
    probs = probs / total
    choices = np.array(values, dtype=float)
    return float(generator.choice(choices, p=probs))


def _summarise(values: np.ndarray, percentiles: Sequence[float]) -> MetricSummary:
    clean = values[~np.isnan(values)]
    sample_size = clean.size
    failed_runs = values.size - sample_size

    if sample_size == 0:
        percentile_map: Dict[float, float] = {
            pct: float("nan") for pct in percentiles}
        return MetricSummary(
            mean=float("nan"),
            std_dev=float("nan"),
            minimum=float("nan"),
            maximum=float("nan"),
            percentiles=percentile_map,
            sample_size=0,
            failed_runs=failed_runs,
        )

    percentile_map = {
        pct: float(np.percentile(clean, pct)) for pct in percentiles
    }
    return MetricSummary(
        mean=float(np.mean(clean)),
        std_dev=float(np.std(clean, ddof=1)) if sample_size > 1 else 0.0,
        minimum=float(np.min(clean)),
        maximum=float(np.max(clean)),
        percentiles=percentile_map,
        sample_size=sample_size,
        failed_runs=failed_runs,
    )
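
A small usage sketch for run_monte_carlo; the cash flow amounts, the seed, and passing date=None to CashFlow are assumptions made for illustration:

# Sketch only: three-period project with one normally distributed revenue line.
from services.financial import CashFlow
from services.simulation import (
    CashFlowSpec,
    DistributionSpec,
    DistributionType,
    SimulationConfig,
    SimulationMetric,
    run_monte_carlo,
)

specs = [
    CashFlowSpec(cash_flow=CashFlow(amount=-1_000.0, period_index=0, date=None)),
    CashFlowSpec(
        cash_flow=CashFlow(amount=600.0, period_index=1, date=None),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"std_dev": 50.0},  # "mean" defaults to the baseline amount
        ),
    ),
    CashFlowSpec(cash_flow=CashFlow(amount=600.0, period_index=2, date=None)),
]
config = SimulationConfig(iterations=5_000, discount_rate=0.10, seed=42)
result = run_monte_carlo(specs, config)
npv = result.summaries[SimulationMetric.NPV]
print(npv.mean, npv.percentiles[5.0], npv.failed_runs)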
@@ -6,16 +6,21 @@ from typing import Callable, Sequence
from sqlalchemy.orm import Session

from config.database import SessionLocal
from models import Role, Scenario
from models import PricingSettings, Project, Role, Scenario
from services.pricing import PricingMetadata
from services.repositories import (
    FinancialInputRepository,
    PricingSettingsRepository,
    PricingSettingsSeedResult,
    ProjectRepository,
    RoleRepository,
    ScenarioRepository,
    SimulationParameterRepository,
    UserRepository,
    ensure_admin_user as ensure_admin_user_record,
    ensure_default_pricing_settings,
    ensure_default_roles,
    pricing_settings_to_metadata,
)
from services.scenario_validation import ScenarioComparisonValidator

@@ -33,6 +38,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
        self.simulation_parameters: SimulationParameterRepository | None = None
        self.users: UserRepository | None = None
        self.roles: RoleRepository | None = None
        self.pricing_settings: PricingSettingsRepository | None = None

    def __enter__(self) -> "UnitOfWork":
        self.session = self._session_factory()

@@ -43,6 +49,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
            self.session)
        self.users = UserRepository(self.session)
        self.roles = RoleRepository(self.session)
        self.pricing_settings = PricingSettingsRepository(self.session)
        self._scenario_validator = ScenarioComparisonValidator()
        return self

@@ -60,6 +67,7 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
        self.simulation_parameters = None
        self.users = None
        self.roles = None
        self.pricing_settings = None

    def flush(self) -> None:
        if not self.session:

@@ -116,3 +124,45 @@ class UnitOfWork(AbstractContextManager["UnitOfWork"]):
            username=username,
            password=password,
        )

    def ensure_default_pricing_settings(
        self,
        *,
        metadata: PricingMetadata,
        slug: str = "default",
        name: str | None = None,
        description: str | None = None,
    ) -> PricingSettingsSeedResult:
        if not self.pricing_settings:
            raise RuntimeError("UnitOfWork session is not initialised")
        return ensure_default_pricing_settings(
            self.pricing_settings,
            metadata=metadata,
            slug=slug,
            name=name,
            description=description,
        )

    def get_pricing_metadata(
        self,
        *,
        slug: str = "default",
    ) -> PricingMetadata | None:
        if not self.pricing_settings:
            raise RuntimeError("UnitOfWork session is not initialised")
        settings = self.pricing_settings.find_by_slug(
            slug,
            include_children=True,
        )
        if settings is None:
            return None
        return pricing_settings_to_metadata(settings)

    def set_project_pricing_settings(
        self,
        project: Project,
        pricing_settings: PricingSettings | None,
    ) -> Project:
        if not self.projects:
            raise RuntimeError("UnitOfWork session is not initialised")
        return self.projects.set_pricing_settings(project, pricing_settings)
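
As a usage sketch, the new UnitOfWork helpers can be consumed like this; only calls shown in the diff are used, and the helper name is illustrative:

# Sketch only: load the seeded pricing defaults, or None when no "default"
# settings row exists yet.
from services.pricing import PricingMetadata
from services.unit_of_work import UnitOfWork


def load_default_pricing_metadata() -> PricingMetadata | None:
    with UnitOfWork() as uow:
        return uow.get_pricing_metadata(slug="default")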