Enhance UI rendering and add unit tests for simulation functionality

- Updated the `_render` function in `ui.py` to correctly pass the request object to `TemplateResponse`.
- Initialized `upcoming_maintenance` as a typed list in `_load_dashboard` for better type safety.
- Added new unit tests in `test_simulation.py` to cover triangular sampling and uniform distribution defaults.
- Implemented a test to ensure that running the simulation without parameters returns an empty result.
- Created a parameterized test in `test_ui_routes.py` to verify that additional UI routes render the correct templates and context.
This commit is contained in:
2025-10-21 09:26:39 +02:00
parent 9114b584c2
commit 139ae04538
6 changed files with 2319 additions and 8 deletions

View File

@@ -1,3 +1,5 @@
from math import isclose
from random import Random
from uuid import uuid4
import pytest
@@ -7,7 +9,7 @@ from sqlalchemy.orm import Session
from typing import Any, Dict, List
from models.simulation_result import SimulationResult
from services.simulation import run_simulation
from services.simulation import DEFAULT_UNIFORM_SPAN_RATIO, run_simulation
@pytest.fixture
@@ -97,6 +99,46 @@ def test_run_simulation_normal_std_dev_fallback():
assert all("result" in entry for entry in results)
def test_run_simulation_triangular_sampling_path():
    """A triangular-distribution parameter samples via Random.triangular with
    bounds placed DEFAULT_UNIFORM_SPAN_RATIO * value on either side of value.

    Reproduces the expected sequence with an identically-seeded Random and
    compares element-wise with a tight relative tolerance.
    """
    seed, iterations = 21, 4
    base_value = 10.0
    params: List[Dict[str, Any]] = [
        {"name": "tri", "value": base_value, "distribution": "triangular"}
    ]

    results = run_simulation(params, iterations=iterations, seed=seed)

    assert len(results) == iterations
    half_span = base_value * DEFAULT_UNIFORM_SPAN_RATIO
    reference_rng = Random(seed)
    for entry in results:
        expected = reference_rng.triangular(
            base_value - half_span, base_value + half_span, base_value
        )
        assert isclose(entry["result"], expected, rel_tol=1e-9)
def test_run_simulation_uniform_defaults_apply_bounds():
    """A uniform-distribution parameter with no explicit bounds defaults to
    value +/- DEFAULT_UNIFORM_SPAN_RATIO * value.

    Verified by replaying the draws with an identically-seeded Random and
    asserting per-sample closeness.
    """
    seed, iterations = 17, 3
    base_value = 200.0
    params: List[Dict[str, Any]] = [
        {"name": "uniform-auto", "value": base_value, "distribution": "uniform"}
    ]

    results = run_simulation(params, iterations=iterations, seed=seed)

    assert len(results) == iterations
    half_span = base_value * DEFAULT_UNIFORM_SPAN_RATIO
    reference_rng = Random(seed)
    for entry in results:
        expected = reference_rng.uniform(base_value - half_span, base_value + half_span)
        assert isclose(entry["result"], expected, rel_tol=1e-9)
def test_run_simulation_without_parameters_returns_empty():
    """An empty parameter list must produce an empty result list."""
    results = run_simulation([], iterations=5)
    assert results == []
def test_simulation_endpoint_no_params(client: TestClient):
scenario_payload: Dict[str, Any] = {
"name": f"NoParamScenario-{uuid4()}",

View File

@@ -1,5 +1,6 @@
from typing import Any, Dict, cast
import pytest
from fastapi.testclient import TestClient
from models.scenario import Scenario
@@ -98,3 +99,33 @@ def test_dashboard_data_endpoint_returns_aggregates(
activity_labels = payload["scenario_activity_chart"]["labels"]
activity_idx = activity_labels.index(scenario.name)
assert payload["scenario_activity_chart"]["production"][activity_idx] == 800.0
@pytest.mark.parametrize(
    ("path", "template_name"),
    [
        ("/", "Dashboard.html"),
        ("/ui/parameters", "ParameterInput.html"),
        ("/ui/costs", "costs.html"),
        ("/ui/consumption", "consumption.html"),
        ("/ui/production", "production.html"),
        ("/ui/equipment", "equipment.html"),
        ("/ui/maintenance", "maintenance.html"),
        ("/ui/simulations", "simulations.html"),
    ],
)
def test_additional_ui_routes_render_templates(
    api_client: TestClient,
    seeded_ui_data: Dict[str, Any],
    path: str,
    template_name: str,
) -> None:
    """Every additional UI route responds 200 and renders the expected
    template with a non-empty context (seeded_ui_data ensures renderable data).
    """
    response = api_client.get(path)
    assert response.status_code == 200

    # TestClient only exposes `template`/`context` when the app's Jinja2
    # templating is instrumented; guard with getattr to fail with a clear
    # assertion rather than an AttributeError.
    rendered_template = getattr(response, "template", None)
    assert rendered_template is not None
    assert rendered_template.name == template_name

    template_context = cast(Dict[str, Any], getattr(response, "context", {}))
    assert template_context