feat: Add Processing Opex functionality

- Introduced OpexValidationError for handling validation errors in processing opex calculations.
- Implemented ProjectProcessingOpexRepository and ScenarioProcessingOpexRepository for managing project and scenario-level processing opex snapshots.
- Enhanced UnitOfWork to include repositories for processing opex.
- Updated sidebar navigation and scenario detail templates to include links to the new Processing Opex Planner.
- Created a new template for the Processing Opex Planner with form handling for input components and parameters.
- Developed integration tests for processing opex calculations, covering HTML and JSON flows, including validation for currency mismatches and unsupported frequencies.
- Added unit tests for the calculation logic, ensuring correct handling of various scenarios and edge cases.
This commit is contained in:
2025-11-13 09:26:57 +01:00
parent 1240b08740
commit 1feae7ff85
16 changed files with 1931 additions and 11 deletions

View File

@@ -0,0 +1,310 @@
from __future__ import annotations
from collections.abc import Callable
import pytest
from fastapi.testclient import TestClient
from services.unit_of_work import UnitOfWork
def _create_project(client: TestClient, name: str) -> int:
    """Create a project through the API and return its new id."""
    payload = {
        "name": name,
        "location": "Nevada",
        "operation_type": "open_pit",
        "description": "Project for processing opex testing",
    }
    response = client.post("/projects", json=payload)
    assert response.status_code == 201
    return response.json()["id"]
def _create_scenario(client: TestClient, project_id: int, name: str) -> int:
    """Create a scenario under *project_id* through the API and return its id."""
    body = {
        "name": name,
        "description": "Processing opex scenario",
        "status": "draft",
        "currency": "usd",
        "primary_resource": "diesel",
    }
    response = client.post(f"/projects/{project_id}/scenarios", json=body)
    assert response.status_code == 201
    return response.json()["id"]
def test_processing_opex_calculation_html_flow(
    client: TestClient,
    unit_of_work_factory: Callable[[], UnitOfWork],
) -> None:
    """HTML flow: render the planner form, submit bracket-indexed form data,
    and verify both the rendered result page and the persisted snapshots."""
    project_id = _create_project(client, "Opex HTML Project")
    scenario_id = _create_scenario(client, project_id, "Opex HTML Scenario")
    # GET should render the planner form for the chosen project/scenario.
    form_page = client.get(
        f"/calculations/processing-opex?project_id={project_id}&scenario_id={scenario_id}"
    )
    assert form_page.status_code == 200
    assert "Processing Opex Planner" in form_page.text
    # Submit two components: Power (1000 monthly -> 12,000/yr, periods 1-3)
    # and Maintenance (2500 quarterly -> 10,000/yr, periods 1-2), with 5%
    # escalation over a 3-year horizon, persisting a snapshot.
    response = client.post(
        f"/calculations/processing-opex?project_id={project_id}&scenario_id={scenario_id}",
        data={
            "components[0][name]": "Power",
            "components[0][category]": "energy",
            "components[0][unit_cost]": "1000",
            "components[0][quantity]": "1",
            "components[0][frequency]": "monthly",
            "components[0][currency]": "USD",
            "components[0][period_start]": "1",
            "components[0][period_end]": "3",
            "components[1][name]": "Maintenance",
            "components[1][category]": "maintenance",
            "components[1][unit_cost]": "2500",
            "components[1][quantity]": "1",
            "components[1][frequency]": "quarterly",
            "components[1][currency]": "USD",
            "components[1][period_start]": "1",
            "components[1][period_end]": "2",
            "parameters[currency_code]": "USD",
            "parameters[escalation_pct]": "5",
            "parameters[discount_rate_pct]": "3",
            "parameters[evaluation_horizon_years]": "3",
            "parameters[apply_escalation]": "1",
            "options[persist]": "1",
            "options[snapshot_notes]": "Processing opex HTML flow",
        },
    )
    assert response.status_code == 200
    assert "Processing opex calculation completed successfully." in response.text
    assert "Opex Summary" in response.text
    # First-year annual total: 12,000 + 10,000 = 22,000 (either formatting).
    assert "$22,000.00" in response.text or "22,000" in response.text
    # persist=1 should have written exactly one snapshot at each level.
    with unit_of_work_factory() as uow:
        assert uow.project_processing_opex is not None
        assert uow.scenario_processing_opex is not None
        project_snapshots = uow.project_processing_opex.list_for_project(
            project_id)
        scenario_snapshots = uow.scenario_processing_opex.list_for_scenario(
            scenario_id)
        assert len(project_snapshots) == 1
        assert len(scenario_snapshots) == 1
        project_snapshot = project_snapshots[0]
        scenario_snapshot = scenario_snapshots[0]
        assert project_snapshot.overall_annual is not None
        assert float(
            project_snapshot.overall_annual) == pytest.approx(22_000.0)
        assert project_snapshot.escalated_total is not None
        # Escalated at 5%/yr: 22,000 + 23,100 + 13,230 = 58,330
        # (Maintenance ends after period 2, so year 3 is Power only).
        assert float(
            project_snapshot.escalated_total) == pytest.approx(58_330.0)
        assert project_snapshot.apply_escalation is True
        assert project_snapshot.component_count == 2
        assert project_snapshot.currency_code == "USD"
        # Scenario-level snapshot mirrors the project-level figures.
        assert scenario_snapshot.overall_annual is not None
        assert float(
            scenario_snapshot.overall_annual) == pytest.approx(22_000.0)
        assert scenario_snapshot.escalated_total is not None
        assert float(
            scenario_snapshot.escalated_total) == pytest.approx(58_330.0)
        assert scenario_snapshot.apply_escalation is True
        assert scenario_snapshot.component_count == 2
        assert scenario_snapshot.currency_code == "USD"
def test_processing_opex_calculation_json_flow(
    client: TestClient,
    unit_of_work_factory: Callable[[], UnitOfWork],
) -> None:
    """JSON flow: post a structured payload and verify the computed totals,
    per-category breakdown, timeline, metrics, and persisted snapshots.

    Fixes over the earlier draft: ``enumerate(..., start=0)`` was redundant
    (0 is the default) and forced an ``index + 1`` comparison — enumerate
    from 1 so the loop variable IS the expected period number; the
    ``by_category`` dict comprehension also had a stray space before ':'.
    """
    project_id = _create_project(client, "Opex JSON Project")
    scenario_id = _create_scenario(client, project_id, "Opex JSON Scenario")
    # Reagents: 400*10 monthly -> 48,000/yr; Labor: 1500*4 weekly -> 312,000/yr;
    # Maintenance: 12,000 annually. All span periods 1-3.
    payload = {
        "components": [
            {
                "name": "Reagents",
                "category": "materials",
                "unit_cost": 400,
                "quantity": 10,
                "frequency": "monthly",
                "currency": "USD",
                "period_start": 1,
                "period_end": 3,
            },
            {
                "name": "Labor",
                "category": "labor",
                "unit_cost": 1500,
                "quantity": 4,
                "frequency": "weekly",
                "currency": "USD",
                "period_start": 1,
                "period_end": 3,
            },
            {
                "name": "Maintenance",
                "category": "maintenance",
                "unit_cost": 12000,
                "quantity": 1,
                "frequency": "annually",
                "currency": "USD",
                "period_start": 1,
                "period_end": 3,
            },
        ],
        "parameters": {
            "currency_code": "USD",
            "escalation_pct": 4,
            "discount_rate_pct": 2,
            "evaluation_horizon_years": 3,
            "apply_escalation": True,
        },
        "options": {"persist": True, "snapshot_notes": "Processing opex JSON flow"},
    }
    response = client.post(
        f"/calculations/processing-opex?project_id={project_id}&scenario_id={scenario_id}",
        json=payload,
    )
    assert response.status_code == 200
    data = response.json()
    assert data["currency"] == "USD"
    # 48,000 + 312,000 + 12,000 = 372,000 base annual cost.
    expected_overall = 372_000.0
    escalation_factor = 1 + (payload["parameters"]["escalation_pct"] / 100.0)
    expected_timeline = [
        expected_overall * (escalation_factor ** i)
        for i in range(payload["parameters"]["evaluation_horizon_years"])
    ]
    expected_escalated_total = sum(expected_timeline)
    expected_average = expected_escalated_total / len(expected_timeline)
    assert data["totals"]["overall_annual"] == pytest.approx(expected_overall)
    assert data["totals"]["escalated_total"] == pytest.approx(
        expected_escalated_total)
    assert data["totals"]["escalation_pct"] == pytest.approx(4.0)
    by_category = {entry["category"]: entry for entry in data["totals"]["by_category"]}
    assert by_category["materials"]["annual_cost"] == pytest.approx(48_000.0)
    assert by_category["labor"]["annual_cost"] == pytest.approx(312_000.0)
    assert by_category["maintenance"]["annual_cost"] == pytest.approx(12_000.0)
    assert len(data["timeline"]) == 3
    # Periods are 1-based; escalation compounds from period 1 (factor**0).
    for period, entry in enumerate(data["timeline"], start=1):
        assert entry["period"] == period
        assert entry["base_cost"] == pytest.approx(expected_overall)
        assert entry["escalated_cost"] == pytest.approx(
            expected_timeline[period - 1])
    assert data["metrics"]["annual_average"] == pytest.approx(expected_average)
    # persist=True should have written the latest snapshot at both levels.
    with unit_of_work_factory() as uow:
        assert uow.project_processing_opex is not None
        assert uow.scenario_processing_opex is not None
        project_snapshot = uow.project_processing_opex.latest_for_project(
            project_id)
        scenario_snapshot = uow.scenario_processing_opex.latest_for_scenario(
            scenario_id)
        assert project_snapshot is not None
        assert scenario_snapshot is not None
        assert project_snapshot.overall_annual is not None
        assert float(project_snapshot.overall_annual) == pytest.approx(
            expected_overall)
        assert project_snapshot.escalated_total is not None
        assert float(project_snapshot.escalated_total) == pytest.approx(
            expected_escalated_total)
        assert project_snapshot.apply_escalation is True
        assert scenario_snapshot.annual_average is not None
        assert float(scenario_snapshot.annual_average) == pytest.approx(
            expected_average)
        assert scenario_snapshot.apply_escalation is True
@pytest.mark.parametrize("content_type", ["form", "json"])
def test_processing_opex_calculation_currency_mismatch(
    client: TestClient,
    unit_of_work_factory: Callable[[], UnitOfWork],
    content_type: str,
) -> None:
    """A component currency (USD) that differs from the parameter currency
    (CAD) must produce a 422 in both JSON and form flows, with a field error
    targeting ``components[0].currency``, and nothing persisted."""
    project_id = _create_project(
        client, f"Opex {content_type.title()} Error Project")
    scenario_id = _create_scenario(
        client, project_id, f"Opex {content_type.title()} Error Scenario")
    if content_type == "json":
        payload = {
            "components": [
                {
                    "name": "Power",
                    "category": "energy",
                    "unit_cost": 500,
                    "quantity": 1,
                    "frequency": "monthly",
                    "currency": "USD",
                    "period_start": 1,
                    "period_end": 2,
                }
            ],
            "parameters": {"currency_code": "CAD"},
            "options": {"persist": True},
        }
        response = client.post(
            f"/calculations/processing-opex?project_id={project_id}&scenario_id={scenario_id}",
            json=payload,
        )
        assert response.status_code == 422
        body = response.json()
        # JSON errors surface in the response body's message/errors fields.
        assert "Component currency does not match" in body.get("message", "")
        assert any(
            "components[0].currency" in entry for entry in body.get("errors", []))
    else:
        response = client.post(
            f"/calculations/processing-opex?project_id={project_id}&scenario_id={scenario_id}",
            data={
                "components[0][name]": "Power",
                "components[0][category]": "energy",
                "components[0][unit_cost]": "500",
                "components[0][quantity]": "1",
                "components[0][frequency]": "monthly",
                "components[0][currency]": "USD",
                "components[0][period_start]": "1",
                "components[0][period_end]": "2",
                "parameters[currency_code]": "CAD",
                "options[persist]": "1",
            },
        )
        assert response.status_code == 422
        # Form errors are re-rendered into the template context rather than
        # a JSON body; inspect the context attached to the test response.
        assert hasattr(response, "context")
        context = getattr(response, "context", {}) or {}
        combined_errors = [
            str(entry)
            for entry in (
                (context.get("errors") or [])
                + (context.get("component_errors") or [])
            )
        ]
        assert any(
            "components[0].currency" in entry for entry in combined_errors)
    # Validation failure must prevent snapshot persistence at both levels.
    with unit_of_work_factory() as uow:
        assert uow.project_processing_opex is not None
        assert uow.scenario_processing_opex is not None
        project_snapshots = uow.project_processing_opex.list_for_project(
            project_id)
        scenario_snapshots = uow.scenario_processing_opex.list_for_scenario(
            scenario_id)
        assert project_snapshots == []
        assert scenario_snapshots == []

View File

@@ -0,0 +1,159 @@
import pytest
from schemas.calculations import (
ProcessingOpexCalculationRequest,
ProcessingOpexComponentInput,
ProcessingOpexOptions,
ProcessingOpexParameters,
)
from services.calculations import calculate_processing_opex
from services.exceptions import OpexValidationError
def _component(**overrides) -> ProcessingOpexComponentInput:
    """Build a component input with sensible defaults, overridable per test."""
    base = dict(
        id=None,
        name="Component",
        category="energy",
        unit_cost=1000.0,
        quantity=1.0,
        frequency="monthly",
        currency="USD",
        period_start=1,
        period_end=1,
        notes=None,
    )
    return ProcessingOpexComponentInput(**{**base, **overrides})
def test_calculate_processing_opex_success():
    """Happy path: two components with 5% escalation across their periods."""
    power = _component(
        name="Power",
        category="energy",
        unit_cost=1000.0,
        quantity=1,
        frequency="monthly",
        period_start=1,
        period_end=3,
    )
    maintenance = _component(
        name="Maintenance",
        category="maintenance",
        unit_cost=2500.0,
        quantity=1,
        frequency="quarterly",
        period_start=1,
        period_end=2,
    )
    request = ProcessingOpexCalculationRequest(
        components=[power, maintenance],
        parameters=ProcessingOpexParameters(
            currency_code="USD",
            escalation_pct=5,
            discount_rate_pct=None,
            evaluation_horizon_years=2,
            apply_escalation=True,
        ),
        options=ProcessingOpexOptions(persist=True, snapshot_notes=None),
    )

    result = calculate_processing_opex(request)

    assert result.currency == "USD"
    assert result.options.persist is True
    assert result.totals.overall_annual == pytest.approx(22_000.0)
    assert result.totals.escalated_total == pytest.approx(58_330.0, rel=1e-4)
    assert result.totals.escalation_pct == pytest.approx(5.0)

    by_category = {entry.category: entry for entry in result.totals.by_category}
    assert by_category["energy"].annual_cost == pytest.approx(12_000.0)
    assert by_category["maintenance"].annual_cost == pytest.approx(10_000.0)

    assert len(result.timeline) == 3
    periods = {entry.period: entry for entry in result.timeline}
    # Year 3 drops Maintenance (its period_end is 2), leaving Power only.
    for period, base_cost in {1: 22_000.0, 2: 22_000.0, 3: 12_000.0}.items():
        assert periods[period].base_cost == pytest.approx(base_cost)
    assert periods[1].escalated_cost == pytest.approx(22_000.0)
    assert periods[2].escalated_cost == pytest.approx(23_100.0, rel=1e-4)
    assert periods[3].escalated_cost == pytest.approx(13_230.0, rel=1e-4)

    assert result.metrics.annual_average == pytest.approx(
        19_443.3333, rel=1e-4)
    assert len(result.components) == 2
    assert result.components[0].frequency == "monthly"
    assert result.components[1].frequency == "quarterly"
def test_calculate_processing_opex_currency_mismatch():
    """A component currency that differs from the parameter currency raises."""
    params = ProcessingOpexParameters(
        currency_code="CAD",
        escalation_pct=None,
        discount_rate_pct=None,
        evaluation_horizon_years=10,
    )
    request = ProcessingOpexCalculationRequest(
        components=[_component(currency="USD")],
        parameters=params,
    )

    with pytest.raises(OpexValidationError) as exc_info:
        calculate_processing_opex(request)

    error = exc_info.value
    assert "Component currency does not match" in error.message
    assert error.field_errors and "components[0].currency" in error.field_errors[0]
def test_calculate_processing_opex_unsupported_frequency():
    """A frequency outside the supported set raises a validation error."""
    params = ProcessingOpexParameters(
        currency_code="USD",
        escalation_pct=None,
        discount_rate_pct=None,
        evaluation_horizon_years=2,
    )
    request = ProcessingOpexCalculationRequest(
        components=[_component(frequency="biweekly")],
        parameters=params,
    )

    with pytest.raises(OpexValidationError) as exc_info:
        calculate_processing_opex(request)

    error = exc_info.value
    assert "Unsupported frequency" in error.message
    assert error.field_errors and "components[0].frequency" in error.field_errors[0]
def test_calculate_processing_opex_requires_components():
    """An empty component list is rejected with a field-level error."""
    empty_request = ProcessingOpexCalculationRequest(components=[])

    with pytest.raises(OpexValidationError) as exc_info:
        calculate_processing_opex(empty_request)

    error = exc_info.value
    assert "At least one processing opex component" in error.message
    assert error.field_errors and "components" in error.field_errors[0]
def test_calculate_processing_opex_extends_evaluation_horizon():
    """A component ending after the configured horizon stretches the timeline."""
    request = ProcessingOpexCalculationRequest(
        components=[_component(period_start=1, period_end=4)],
        parameters=ProcessingOpexParameters(
            currency_code="USD",
            discount_rate_pct=0,
            escalation_pct=0,
            evaluation_horizon_years=2,
            apply_escalation=False,
        ),
    )

    result = calculate_processing_opex(request)
    timeline = result.timeline

    # Horizon of 2 years is extended to cover the component's 4 periods.
    assert len(timeline) == 4
    assert timeline[-1].period == 4
    # Escalation disabled -> no escalated figures anywhere in the timeline.
    assert all(entry.escalated_cost is None for entry in timeline)
    assert timeline[-1].base_cost == pytest.approx(12_000.0)
    assert result.metrics.annual_average == pytest.approx(
        12_000.0, rel=1e-4)