feat: Enhance currency handling and validation across scenarios

- Updated form template to prefill currency input with default value and added help text for clarity.
- Modified integration tests to assert more descriptive error messages for invalid currency codes.
- Introduced new tests for currency normalization and validation in various scenarios, including imports and exports.
- Added comprehensive tests for pricing calculations, ensuring defaults are respected and overrides function correctly.
- Implemented unit tests for pricing settings repository, ensuring CRUD operations and default settings are handled properly.
- Enhanced scenario pricing evaluation tests to validate currency handling and metadata defaults.
- Added simulation tests to ensure Monte Carlo runs are accurate and handle various distribution scenarios.
This commit is contained in:
2025-11-11 18:29:59 +01:00
parent 032e6d2681
commit 795a9f99f4
50 changed files with 5110 additions and 81 deletions

158
tests/test_simulation.py Normal file
View File

@@ -0,0 +1,158 @@
from __future__ import annotations
import math
import numpy as np
import pytest
from services.financial import CashFlow, net_present_value
from services.simulation import (
CashFlowSpec,
DistributionConfigError,
DistributionSource,
DistributionSpec,
DistributionType,
SimulationConfig,
SimulationMetric,
run_monte_carlo,
)
def test_run_monte_carlo_deterministic_matches_financial_helpers() -> None:
    """Without any distributions, every iteration reproduces the analytic NPV."""
    flows = [
        CashFlow(amount=-1000.0, period_index=0),
        CashFlow(amount=600.0, period_index=1),
        CashFlow(amount=600.0, period_index=2),
    ]
    settings = SimulationConfig(
        iterations=10,
        discount_rate=0.1,
        percentiles=(50,),
        seed=123,
    )
    outcome = run_monte_carlo([CashFlowSpec(cash_flow=f) for f in flows], settings)
    npv_stats = outcome.summaries[SimulationMetric.NPV]
    analytic_npv = net_present_value(0.1, flows)
    assert npv_stats.sample_size == settings.iterations
    assert npv_stats.failed_runs == 0
    # Deterministic inputs: zero spread, and both mean and median equal the analytic value.
    assert npv_stats.mean == pytest.approx(analytic_npv, rel=1e-6)
    assert npv_stats.std_dev == 0.0
    assert npv_stats.percentiles[50] == pytest.approx(analytic_npv, rel=1e-6)
def test_run_monte_carlo_normal_distribution_uses_seed_for_reproducibility() -> None:
    """A seeded normal revenue flow yields stable, predictable summary statistics."""
    investment = CashFlowSpec(cash_flow=CashFlow(amount=-100.0, period_index=0))
    revenue = CashFlowSpec(
        cash_flow=CashFlow(amount=120.0, period_index=1),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"mean": 120.0, "std_dev": 10.0},
        ),
    )
    settings = SimulationConfig(
        iterations=1000,
        discount_rate=0.0,
        percentiles=(5.0, 50.0, 95.0),
        seed=42,
    )
    outcome = run_monte_carlo([investment, revenue], settings)
    npv_stats = outcome.summaries[SimulationMetric.NPV]
    assert npv_stats.sample_size == settings.iterations
    assert npv_stats.failed_runs == 0
    # With zero discount rate the expected mean NPV equals mean sampled value minus investment.
    assert npv_stats.mean == pytest.approx(20.0, abs=1.0)
    assert npv_stats.std_dev == pytest.approx(10.0, abs=1.0)
    # For a symmetric distribution the median should sit near the mean.
    assert npv_stats.percentiles[50.0] == pytest.approx(npv_stats.mean, abs=1.0)
def test_run_monte_carlo_supports_scenario_field_source() -> None:
    """A distribution can pull its mean from the scenario context via a source key."""
    salvage = CashFlowSpec(
        cash_flow=CashFlow(amount=0.0, period_index=1),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"std_dev": 0.0},
            source=DistributionSource.SCENARIO_FIELD,
            source_key="salvage_mean",
        ),
    )
    upfront = CashFlowSpec(cash_flow=CashFlow(amount=-100.0, period_index=0))
    settings = SimulationConfig(iterations=1, discount_rate=0.0, seed=7)
    outcome = run_monte_carlo(
        [upfront, salvage],
        settings,
        scenario_context={"salvage_mean": 150.0},
    )
    npv_stats = outcome.summaries[SimulationMetric.NPV]
    assert npv_stats.sample_size == 1
    # Zero std_dev makes the draw exact: 150 (from scenario) - 100 upfront = 50.
    assert npv_stats.mean == pytest.approx(50.0)
def test_run_monte_carlo_records_failed_metrics_when_not_defined() -> None:
    """When a metric is undefined for the flows, runs are counted as failures."""
    # A single positive flow has no sign change, so IRR cannot be computed.
    specs = [CashFlowSpec(cash_flow=CashFlow(amount=100.0, period_index=0))]
    settings = SimulationConfig(
        iterations=5,
        discount_rate=0.1,
        metrics=(SimulationMetric.IRR,),
        seed=5,
    )
    irr_stats = run_monte_carlo(specs, settings).summaries[SimulationMetric.IRR]
    assert irr_stats.sample_size == 0
    assert irr_stats.failed_runs == settings.iterations
    assert math.isnan(irr_stats.mean)
def test_run_monte_carlo_distribution_missing_context_raises() -> None:
    """A scenario-sourced distribution whose key is absent from context must fail."""
    flow_spec = CashFlowSpec(
        cash_flow=CashFlow(amount=0.0, period_index=0),
        distribution=DistributionSpec(
            type=DistributionType.NORMAL,
            parameters={"std_dev": 1.0},
            source=DistributionSource.SCENARIO_FIELD,
            source_key="unknown",
        ),
    )
    settings = SimulationConfig(iterations=1, discount_rate=0.0)
    # The empty scenario_context has no "unknown" entry, so configuration is invalid.
    with pytest.raises(DistributionConfigError):
        run_monte_carlo([flow_spec], settings, scenario_context={})
def test_run_monte_carlo_can_return_samples() -> None:
    """With return_samples enabled, per-iteration metric arrays are exposed."""
    specs = [
        CashFlowSpec(cash_flow=CashFlow(amount=-40.0, period_index=0)),
        CashFlowSpec(cash_flow=CashFlow(amount=50.0, period_index=1)),
    ]
    settings = SimulationConfig(
        iterations=3,
        discount_rate=0.0,
        metrics=(SimulationMetric.NPV,),
        return_samples=True,
        seed=11,
    )
    outcome = run_monte_carlo(specs, settings)
    assert outcome.samples is not None
    assert SimulationMetric.NPV in outcome.samples
    npv_samples = outcome.samples[SimulationMetric.NPV]
    # One sample per iteration, delivered as a flat numpy array.
    assert isinstance(npv_samples, np.ndarray)
    assert npv_samples.shape == (settings.iterations,)