feat: Enhance project and scenario creation with monitoring metrics
Some checks failed
CI / lint (push) Failing after 1m14s
CI / test (push) Has been skipped
CI / build (push) Has been skipped

- Added monitoring metrics for project creation success and error handling in `ProjectRepository`.
- Implemented similar monitoring for scenario creation in `ScenarioRepository`.
- Refactored `run_monte_carlo` function in `simulation.py` to include timing and success/error metrics.
- Introduced new CSS styles for headers, alerts, and navigation buttons in `main.css` and `projects.css`.
- Created a new JavaScript file for navigation logic to handle chevron buttons.
- Updated HTML templates to include the new navigation buttons and improve button styling.
- Added tests for reporting service and routes to ensure proper functionality and access control.
- Removed unused imports and optimized existing test files for better clarity and performance.
2025-11-12 10:36:24 +01:00
parent f68321cd04
commit ce9c174b53
61 changed files with 2124 additions and 308 deletions

services/metrics.py Normal file

@@ -0,0 +1,96 @@
from __future__ import annotations

import json
from datetime import datetime
from typing import Any, Dict, Optional

from sqlalchemy.orm import Session

from config.database import get_db
from models.performance_metric import PerformanceMetric


class MetricsService:
    def __init__(self, db: Session):
        self.db = db

    def store_metric(
        self,
        metric_name: str,
        value: float,
        labels: Optional[Dict[str, Any]] = None,
        endpoint: Optional[str] = None,
        method: Optional[str] = None,
        status_code: Optional[int] = None,
        duration_seconds: Optional[float] = None,
    ) -> PerformanceMetric:
        """Store a performance metric in the database."""
        metric = PerformanceMetric(
            timestamp=datetime.utcnow(),
            metric_name=metric_name,
            value=value,
            labels=json.dumps(labels) if labels else None,
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
        self.db.add(metric)
        self.db.commit()
        self.db.refresh(metric)
        return metric

    def get_metrics(
        self,
        metric_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[PerformanceMetric]:
        """Retrieve stored metrics with optional filtering."""
        query = self.db.query(PerformanceMetric)
        if metric_name:
            query = query.filter(PerformanceMetric.metric_name == metric_name)
        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)
        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)
        return query.order_by(PerformanceMetric.timestamp.desc()).limit(limit).all()

    def get_aggregated_metrics(
        self,
        metric_name: str,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> Dict[str, Any]:
        """Get aggregated statistics for a metric."""
        query = self.db.query(PerformanceMetric).filter(
            PerformanceMetric.metric_name == metric_name
        )
        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)
        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)
        metrics = query.all()
        if not metrics:
            return {"count": 0, "avg": 0, "min": 0, "max": 0}
        values = [m.value for m in metrics]
        return {
            "count": len(values),
            "avg": sum(values) / len(values),
            "min": min(values),
            "max": max(values),
        }


def get_metrics_service(db: Session) -> MetricsService:
    return MetricsService(db)
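For orientation, a minimal usage sketch of the new service. This is hypothetical, not part of this commit; `SessionLocal` is an assumed session factory name, and only `MetricsService` itself comes from the file above.

# Hypothetical usage sketch -- SessionLocal is an assumed factory name.
from datetime import datetime, timedelta

from config.database import SessionLocal  # assumed session factory
from services.metrics import MetricsService

db = SessionLocal()
try:
    service = MetricsService(db)

    # Persist a single request-level measurement.
    service.store_metric(
        metric_name="http_request_duration_seconds",
        value=0.142,
        labels={"route": "/projects"},
        endpoint="/projects",
        method="GET",
        status_code=200,
        duration_seconds=0.142,
    )

    # Aggregate the last hour of the same metric.
    stats = service.get_aggregated_metrics(
        "http_request_duration_seconds",
        start_time=datetime.utcnow() - timedelta(hours=1),
    )
    # -> {"count": 1, "avg": 0.142, "min": 0.142, "max": 0.142}
finally:
    db.close()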

View File

@@ -5,7 +5,10 @@ from __future__ import annotations
from dataclasses import dataclass, field
from datetime import date
import math
-from typing import Iterable, Mapping, Sequence
+from typing import Mapping, Sequence
+from urllib.parse import urlencode
+from fastapi import Request
from models import FinancialCategory, Project, Scenario
from services.financial import (
@@ -177,13 +180,13 @@ class ScenarioReport:
"project_id": self.scenario.project_id,
"name": self.scenario.name,
"description": self.scenario.description,
"status": self.scenario.status.value,
"status": self.scenario.status.value if hasattr(self.scenario.status, 'value') else self.scenario.status,
"start_date": self.scenario.start_date,
"end_date": self.scenario.end_date,
"currency": self.scenario.currency,
"primary_resource": self.scenario.primary_resource.value
-            if self.scenario.primary_resource
-            else None,
+            if self.scenario.primary_resource and hasattr(self.scenario.primary_resource, 'value')
+            else self.scenario.primary_resource,
"discount_rate": _round_optional(self.deterministic.discount_rate, digits=4),
"created_at": self.scenario.created_at,
"updated_at": self.scenario.updated_at,
@@ -374,13 +377,12 @@ class ReportingService:
}
def _load_scenarios(self, project_id: int, filters: ReportFilters) -> list[Scenario]:
-        repo = self._require_scenario_repo()
-        scenarios = repo.list_for_project(project_id, with_children=True)
+        scenarios = self._uow.scenarios.list_for_project(
+            project_id, with_children=True)
return [scenario for scenario in scenarios if filters.matches(scenario)]
def _reload_scenario(self, scenario_id: int) -> Scenario:
-        repo = self._require_scenario_repo()
-        return repo.get(scenario_id, with_children=True)
+        return self._uow.scenarios.get(scenario_id, with_children=True)
def _build_scenario_report(
self,
@@ -469,10 +471,147 @@ class ReportingService:
)
return comparisons
-    def _require_scenario_repo(self):
-        if not self._uow.scenarios:
-            raise RuntimeError("Scenario repository not initialised")
-        return self._uow.scenarios
def build_project_summary_context(
self,
project: Project,
filters: ReportFilters,
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
request: Request,
) -> dict[str, object]:
"""Build template context for project summary page."""
scenarios = self._load_scenarios(project.id, filters)
reports = [
self._build_scenario_report(
scenario,
include_distribution=include.distribution,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
for scenario in scenarios
]
aggregates = self._aggregate_project(reports)
return {
"request": request,
"project": _project_payload(project),
"scenario_count": len(reports),
"aggregates": aggregates.to_dict(),
"scenarios": [report.to_dict() for report in reports],
"filters": filters.to_dict(),
"include_options": include,
"iterations": iterations,
"percentiles": percentiles,
"title": f"Project Summary · {project.name}",
"subtitle": "Aggregated financial and simulation insights across scenarios.",
"actions": [
{
"href": request.url_for(
"reports.project_summary",
project_id=project.id,
),
"label": "Download JSON",
}
],
}
def build_scenario_comparison_context(
self,
project: Project,
scenarios: Sequence[Scenario],
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
request: Request,
) -> dict[str, object]:
"""Build template context for scenario comparison page."""
reports = [
self._build_scenario_report(
self._reload_scenario(scenario.id),
include_distribution=include.distribution,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
for scenario in scenarios
]
comparison = {
metric: data.to_dict()
for metric, data in self._build_comparisons(reports).items()
}
comparison_json_url = request.url_for(
"reports.project_scenario_comparison",
project_id=project.id,
)
scenario_ids = [str(s.id) for s in scenarios]
comparison_query = urlencode(
[("scenario_ids", str(identifier)) for identifier in scenario_ids]
)
if comparison_query:
comparison_json_url = f"{comparison_json_url}?{comparison_query}"
return {
"request": request,
"project": _project_payload(project),
"scenarios": [report.to_dict() for report in reports],
"comparison": comparison,
"include_options": include,
"iterations": iterations,
"percentiles": percentiles,
"title": f"Scenario Comparison · {project.name}",
"subtitle": "Evaluate deterministic metrics and Monte Carlo trends side by side.",
"actions": [
{
"href": comparison_json_url,
"label": "Download JSON",
}
],
}
def build_scenario_distribution_context(
self,
scenario: Scenario,
include: IncludeOptions,
iterations: int,
percentiles: tuple[float, ...],
request: Request,
) -> dict[str, object]:
"""Build template context for scenario distribution page."""
report = self._build_scenario_report(
self._reload_scenario(scenario.id),
include_distribution=True,
include_samples=include.samples,
iterations=iterations,
percentiles=percentiles,
)
return {
"request": request,
"scenario": report.to_dict()["scenario"],
"summary": report.totals.to_dict(),
"metrics": report.deterministic.to_dict(),
"monte_carlo": (
report.monte_carlo.to_dict() if report.monte_carlo else {
"available": False}
),
"include_options": include,
"iterations": iterations,
"percentiles": percentiles,
"title": f"Scenario Distribution · {scenario.name}",
"subtitle": "Deterministic and simulated distributions for a single scenario.",
"actions": [
{
"href": request.url_for(
"reports.scenario_distribution",
scenario_id=scenario.id,
),
"label": "Download JSON",
}
],
}
def _build_cash_flows(scenario: Scenario) -> tuple[list[CashFlow], ScenarioFinancialTotals]:
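A route consuming these context builders might look roughly like the sketch below. The route path, template name, and wiring helpers (`get_reporting_service`, `load_project`, the default `ReportFilters`/`IncludeOptions`) are assumptions for illustration; only `build_project_summary_context` and its signature appear in this diff.

# Hypothetical route sketch -- wiring names are assumed, not from this commit.
from fastapi import APIRouter, Depends, Request
from fastapi.templating import Jinja2Templates

from services.reporting import (  # module path assumed
    IncludeOptions,
    ReportFilters,
    ReportingService,
)

router = APIRouter()
templates = Jinja2Templates(directory="templates")  # assumed template root

@router.get("/projects/{project_id}/summary")
def project_summary_page(
    project_id: int,
    request: Request,
    service: ReportingService = Depends(get_reporting_service),  # assumed dependency
):
    project = load_project(project_id)  # assumed lookup helper
    context = service.build_project_summary_context(
        project=project,
        filters=ReportFilters(),   # assumed default: no scenario filtering
        include=IncludeOptions(),  # assumed defaults
        iterations=1000,
        percentiles=(5.0, 50.0, 95.0),
        request=request,
    )
    # The context already carries "request", which TemplateResponse requires.
    return templates.TemplateResponse("reports/project_summary.html", context)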

View File

@@ -15,7 +15,6 @@ from models import (
PricingImpuritySettings,
PricingMetalSettings,
PricingSettings,
-    ResourceType,
Role,
Scenario,
ScenarioStatus,
@@ -88,8 +87,12 @@ class ProjectRepository:
try:
self.session.flush()
except IntegrityError as exc: # pragma: no cover - reliance on DB constraints
+            from monitoring.metrics import observe_project_operation
+            observe_project_operation("create", "error")
raise EntityConflictError(
"Project violates uniqueness constraints") from exc
+        from monitoring.metrics import observe_project_operation
+        observe_project_operation("create", "success")
return project
def find_by_names(self, names: Iterable[str]) -> Mapping[str, Project]:
@@ -251,7 +254,11 @@ class ScenarioRepository:
try:
self.session.flush()
except IntegrityError as exc: # pragma: no cover
+            from monitoring.metrics import observe_scenario_operation
+            observe_scenario_operation("create", "error")
raise EntityConflictError("Scenario violates constraints") from exc
+        from monitoring.metrics import observe_scenario_operation
+        observe_scenario_operation("create", "success")
return scenario
def find_by_project_and_names(
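The `monitoring.metrics` helpers are imported above but not shown in this diff; the imports are deferred inside the repository methods, presumably to avoid a circular import with the monitoring module. A plausible shape for the two counters, assuming the project uses `prometheus_client` (the library choice and metric names are assumptions), would be:

# Hypothetical sketch of monitoring/metrics.py -- the real module is not in
# this diff; prometheus_client and the metric names are assumptions.
from prometheus_client import Counter

PROJECT_OPERATIONS = Counter(
    "project_operations_total",
    "Project repository operations by type and outcome.",
    ["operation", "status"],
)
SCENARIO_OPERATIONS = Counter(
    "scenario_operations_total",
    "Scenario repository operations by type and outcome.",
    ["operation", "status"],
)


def observe_project_operation(operation: str, status: str) -> None:
    PROJECT_OPERATIONS.labels(operation=operation, status=status).inc()


def observe_scenario_operation(operation: str, status: str) -> None:
    SCENARIO_OPERATIONS.labels(operation=operation, status=status).inc()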

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
"""Scenario evaluation services including pricing integration."""
from dataclasses import dataclass
-from typing import Iterable, Mapping
+from typing import Iterable
from models.scenario import Scenario
from services.pricing import (

View File

@@ -2,7 +2,8 @@ from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
-from typing import Any, Dict, Iterable, Mapping, Sequence
+from typing import Any, Dict, Mapping, Sequence
+import time
import numpy as np
from numpy.random import Generator, default_rng
@@ -15,6 +16,7 @@ from .financial import (
net_present_value,
payback_period,
)
+from monitoring.metrics import observe_simulation
class DistributionConfigError(ValueError):
@@ -120,60 +122,79 @@ def run_monte_carlo(
if pct < 0.0 or pct > 100.0:
raise ValueError("percentiles must be within [0, 100]")
-    generator = rng or default_rng(config.seed)
-
-    metric_arrays: Dict[SimulationMetric, np.ndarray] = {
-        metric: np.empty(config.iterations, dtype=float)
-        for metric in config.metrics
-    }
-
-    for idx in range(config.iterations):
-        iteration_flows = [
-            _realise_cash_flow(
-                spec,
-                generator,
-                scenario_context=scenario_context,
-                metadata=metadata,
-            )
-            for spec in cash_flows
-        ]
-
-        if SimulationMetric.NPV in metric_arrays:
-            metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
-                config.discount_rate,
-                iteration_flows,
-                residual_value=config.residual_value,
-                residual_periods=config.residual_periods,
-                compounds_per_year=config.compounds_per_year,
-            )
-        if SimulationMetric.IRR in metric_arrays:
-            try:
-                metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
-                    iteration_flows,
-                    compounds_per_year=config.compounds_per_year,
-                )
-            except (ValueError, ConvergenceError):
-                metric_arrays[SimulationMetric.IRR][idx] = np.nan
-        if SimulationMetric.PAYBACK in metric_arrays:
-            try:
-                metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
-                    iteration_flows,
-                    compounds_per_year=config.compounds_per_year,
-                )
-            except (ValueError, PaybackNotReachedError):
-                metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan
-
-    summaries = {
-        metric: _summarise(metric_arrays[metric], config.percentiles)
-        for metric in metric_arrays
-    }
-
-    samples = metric_arrays if config.return_samples else None
-    return SimulationResult(
-        iterations=config.iterations,
-        summaries=summaries,
-        samples=samples,
-    )
+    start_time = time.time()
+    try:
+        generator = rng or default_rng(config.seed)
+
+        metric_arrays: Dict[SimulationMetric, np.ndarray] = {
+            metric: np.empty(config.iterations, dtype=float)
+            for metric in config.metrics
+        }
+
+        for idx in range(config.iterations):
+            iteration_flows = [
+                _realise_cash_flow(
+                    spec,
+                    generator,
+                    scenario_context=scenario_context,
+                    metadata=metadata,
+                )
+                for spec in cash_flows
+            ]
+
+            if SimulationMetric.NPV in metric_arrays:
+                metric_arrays[SimulationMetric.NPV][idx] = net_present_value(
+                    config.discount_rate,
+                    iteration_flows,
+                    residual_value=config.residual_value,
+                    residual_periods=config.residual_periods,
+                    compounds_per_year=config.compounds_per_year,
+                )
+            if SimulationMetric.IRR in metric_arrays:
+                try:
+                    metric_arrays[SimulationMetric.IRR][idx] = internal_rate_of_return(
+                        iteration_flows,
+                        compounds_per_year=config.compounds_per_year,
+                    )
+                except (ValueError, ConvergenceError):
+                    metric_arrays[SimulationMetric.IRR][idx] = np.nan
+            if SimulationMetric.PAYBACK in metric_arrays:
+                try:
+                    metric_arrays[SimulationMetric.PAYBACK][idx] = payback_period(
+                        iteration_flows,
+                        compounds_per_year=config.compounds_per_year,
+                    )
+                except (ValueError, PaybackNotReachedError):
+                    metric_arrays[SimulationMetric.PAYBACK][idx] = np.nan
+
+        summaries = {
+            metric: _summarise(metric_arrays[metric], config.percentiles)
+            for metric in metric_arrays
+        }
+
+        samples = metric_arrays if config.return_samples else None
+        result = SimulationResult(
+            iterations=config.iterations,
+            summaries=summaries,
+            samples=samples,
+        )
+        # Record successful simulation
+        duration = time.time() - start_time
+        observe_simulation(
+            status="success",
+            duration_seconds=duration,
+        )
+        return result
+    except Exception:
+        # Record failed simulation before re-raising
+        duration = time.time() - start_time
+        observe_simulation(
+            status="error",
+            duration_seconds=duration,
+        )
+        raise
def _realise_cash_flow(
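`observe_simulation` is likewise imported from `monitoring.metrics` without its implementation appearing in this diff. Under the same `prometheus_client` assumption as above, it might pair a counter with a duration histogram; a sketch, not the module shipped with this commit:

# Hypothetical sketch -- the real observe_simulation is not in this diff.
from prometheus_client import Counter, Histogram

SIMULATION_RUNS = Counter(
    "monte_carlo_runs_total",
    "Monte Carlo simulation runs by outcome.",
    ["status"],
)
SIMULATION_DURATION = Histogram(
    "monte_carlo_duration_seconds",
    "Wall-clock duration of Monte Carlo runs.",
    ["status"],
)


def observe_simulation(status: str, duration_seconds: float) -> None:
    SIMULATION_RUNS.labels(status=status).inc()
    SIMULATION_DURATION.labels(status=status).observe(duration_seconds)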