Enhance UI rendering and add unit tests for simulation functionality
- Updated the `_render` function in `ui.py` to correctly pass the request object to `TemplateResponse`.
- Initialized `upcoming_maintenance` as a typed list in `_load_dashboard` for better type safety.
- Added new unit tests in `test_simulation.py` to cover triangular sampling and uniform distribution defaults.
- Implemented a test to ensure that running the simulation without parameters returns an empty result.
- Created a parameterized test in `test_ui_routes.py` to verify that additional UI routes render the correct templates and context.
@@ -113,12 +113,11 @@ pytest
 - The fixture now polls `http://localhost:8001` until it responds (up to ~30s), ensuring the uvicorn subprocess is ready before Playwright starts navigation, then preloads `/` and waits for a `networkidle` state so sidebar navigation and global assets are ready for each test.
 - Latest run (`pytest tests/e2e/` on 2025-10-21) passes end-to-end smoke and form coverage after aligning form selectors, titles, and the live server startup behaviour.

-### Coverage Snapshot (2025-10-20)
+### Coverage Snapshot (2025-10-21)

-- `pytest --cov=. --cov-report=term-missing` reports **95%** overall coverage across the project.
-- Lower coverage hotspots to target next: `services/simulation.py` (79%), `middleware/validation.py` (78%), `routes/ui.py` (82%), and several API routers around lines 12-22 that create database sessions only.
-- Deprecation cleanup migrated routes to Pydantic v2 patterns (`model_config = ConfigDict(...)`, `model_dump()`) and updated SQLAlchemy's `declarative_base`; reran `pytest` to confirm the suite passes without warnings.
-- Coverage for route-heavy modules is primarily limited by error paths (e.g., bad request branches) that still need explicit tests.
+- `pytest --cov=. --cov-report=term-missing` reports **91%** overall coverage.
+- Recent additions pushed `routes/ui.py` and `services/simulation.py` to 100%; remaining gaps are concentrated in `config/database.py`, several `models/*.py` loaders, and `services/reporting.py` (95%).
+- Playwright specs under `tests/e2e/` are excluded from the coverage run to keep browser automation optional; their files show as uncovered because they are not executed in the `pytest --cov` workflow.

 ## Database Objects
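The live-server behaviour described in the e2e notes above (poll `http://localhost:8001` for up to ~30s, then preload `/` and wait for `networkidle`) could be implemented roughly as sketched below. This is a minimal sketch, not the repository's actual `conftest.py`: the fixture names, the `uvicorn main:app` invocation, and the fixture scope are assumptions.

```python
import subprocess
import time
import urllib.request

import pytest
from playwright.sync_api import sync_playwright

BASE_URL = "http://localhost:8001"


@pytest.fixture(scope="session")
def live_server():
    # Start the app under test; module path and port are assumptions.
    proc = subprocess.Popen(["uvicorn", "main:app", "--port", "8001"])
    try:
        deadline = time.monotonic() + 30  # ~30s readiness budget, as in the notes
        while time.monotonic() < deadline:
            try:
                urllib.request.urlopen(BASE_URL, timeout=1)
                break  # server responded, stop polling
            except OSError:
                time.sleep(0.5)  # not ready yet, retry
        else:
            raise RuntimeError("uvicorn did not become ready within ~30s")
        yield BASE_URL
    finally:
        proc.terminate()
        proc.wait()


@pytest.fixture()
def page(live_server):
    with sync_playwright() as pw:
        browser = pw.chromium.launch()
        page = browser.new_page()
        # Preload the dashboard and wait for network idle so sidebar navigation
        # and global assets are ready before each test navigates.
        page.goto(f"{live_server}/", wait_until="networkidle")
        yield page
        browser.close()
```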
@@ -100,6 +100,8 @@ pytest tests/e2e/ --headed
 - Run with coverage: `pytest --cov --cov-report=term` for quick baselines (use `--cov-report=html` when visualizing hotspots).
 - Target 95%+ overall coverage. Focus on historically low modules: `services/simulation.py`, `services/reporting.py`, `middleware/validation.py`, and `routes/ui.py`.
 - Recent additions include unit tests that validate Monte Carlo parameter errors, reporting fallbacks, and JSON middleware rejection paths to guard against malformed inputs.
+- Latest snapshot (2025-10-21): `pytest --cov=. --cov-report=term-missing` returns **91%** overall coverage after achieving full coverage in `routes/ui.py` and `services/simulation.py`.
+- Archive coverage artifacts by running `pytest --cov=. --cov-report=xml:reports/coverage/coverage-2025-10-21.xml --cov-report=term-missing`; the generated XML lives under `reports/coverage/` for CI uploads or historical comparisons.

 ## CI Integration
reports/coverage/coverage-2025-10-21.xml: new file, 2236 lines (diff suppressed because it is too large).
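For the historical comparisons mentioned above, the archived report can be read back programmatically. A small illustrative helper, assuming the standard Cobertura-style XML that `coverage.py` emits (root element carries the project-wide `line-rate`):

```python
# Quick sketch for comparing archived snapshots: read the overall line rate
# from a coverage XML report under reports/coverage/.
import xml.etree.ElementTree as ET


def overall_line_rate(path: str) -> float:
    root = ET.parse(path).getroot()
    # coverage.py's Cobertura-compatible XML stores the project-wide ratio
    # as the "line-rate" attribute on the root <coverage> element.
    return float(root.attrib["line-rate"])


print(overall_line_rate("reports/coverage/coverage-2025-10-21.xml"))  # e.g. ~0.91
```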
@@ -40,7 +40,8 @@ def _render(
     template_name: str,
     extra: Optional[Dict[str, Any]] = None,
 ):
-    return templates.TemplateResponse(template_name, _context(request, extra))
+    context = _context(request, extra)
+    return templates.TemplateResponse(request, template_name, context)


 def _format_currency(value: float) -> str:
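Context for the hunk above: Starlette now expects the request to be passed to `TemplateResponse` positionally, and the older `TemplateResponse(name, {"request": ...})` form raises a deprecation warning. A minimal, self-contained sketch of the updated helper; the `_context` body and the templates directory are assumptions, only the request-first call comes from the diff:

```python
from typing import Any, Dict, Optional

from fastapi import Request
from fastapi.templating import Jinja2Templates

templates = Jinja2Templates(directory="templates")  # directory assumed


def _context(request: Request, extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    context: Dict[str, Any] = {"request": request}
    if extra:
        context.update(extra)
    return context


def _render(
    request: Request,
    template_name: str,
    extra: Optional[Dict[str, Any]] = None,
):
    context = _context(request, extra)
    # Passing the request positionally first avoids the deprecated
    # TemplateResponse(name, {"request": ...}) calling convention.
    return templates.TemplateResponse(request, template_name, context)
```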
@@ -436,7 +437,7 @@ def _load_dashboard(db: Session) -> Dict[str, Any]:
     recent_simulations.sort(key=lambda item: item["iterations"], reverse=True)
     recent_simulations = recent_simulations[:5]

-    upcoming_maintenance = []
+    upcoming_maintenance: list[Dict[str, Any]] = []
     for record in (
         db.query(Maintenance)
         .order_by(Maintenance.maintenance_date.asc())
@@ -1,3 +1,5 @@
+from math import isclose
+from random import Random
 from uuid import uuid4

 import pytest
@@ -7,7 +9,7 @@ from sqlalchemy.orm import Session
 from typing import Any, Dict, List

 from models.simulation_result import SimulationResult
-from services.simulation import run_simulation
+from services.simulation import DEFAULT_UNIFORM_SPAN_RATIO, run_simulation


 @pytest.fixture
@@ -97,6 +99,46 @@ def test_run_simulation_normal_std_dev_fallback():
     assert all("result" in entry for entry in results)


+def test_run_simulation_triangular_sampling_path():
+    params: List[Dict[str, Any]] = [
+        {"name": "tri", "value": 10.0, "distribution": "triangular"}
+    ]
+    seed = 21
+    iterations = 4
+    results = run_simulation(params, iterations=iterations, seed=seed)
+    assert len(results) == iterations
+    span = 10.0 * DEFAULT_UNIFORM_SPAN_RATIO
+    rng = Random(seed)
+    expected_samples = [
+        rng.triangular(10.0 - span, 10.0 + span, 10.0) for _ in range(iterations)
+    ]
+    actual_samples = [entry["result"] for entry in results]
+    for actual, expected in zip(actual_samples, expected_samples):
+        assert isclose(actual, expected, rel_tol=1e-9)
+
+
+def test_run_simulation_uniform_defaults_apply_bounds():
+    params: List[Dict[str, Any]] = [
+        {"name": "uniform-auto", "value": 200.0, "distribution": "uniform"}
+    ]
+    seed = 17
+    iterations = 3
+    results = run_simulation(params, iterations=iterations, seed=seed)
+    assert len(results) == iterations
+    span = 200.0 * DEFAULT_UNIFORM_SPAN_RATIO
+    rng = Random(seed)
+    expected_samples = [
+        rng.uniform(200.0 - span, 200.0 + span) for _ in range(iterations)
+    ]
+    actual_samples = [entry["result"] for entry in results]
+    for actual, expected in zip(actual_samples, expected_samples):
+        assert isclose(actual, expected, rel_tol=1e-9)
+
+
+def test_run_simulation_without_parameters_returns_empty():
+    assert run_simulation([], iterations=5) == []
+
+
 def test_simulation_endpoint_no_params(client: TestClient):
     scenario_payload: Dict[str, Any] = {
         "name": f"NoParamScenario-{uuid4()}",
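The two sampling tests replay the service's random sequence by seeding their own `Random(seed)` and deriving bounds from `DEFAULT_UNIFORM_SPAN_RATIO`. That only works if `run_simulation` follows roughly the shape sketched below. This is an illustration of the contract the tests imply, not the actual `services/simulation.py` code; the ratio value and the result dictionary keys other than `"result"` are placeholders.

```python
from random import Random
from typing import Any, Dict, List, Optional

# Placeholder value for illustration; the real constant lives in services/simulation.py.
DEFAULT_UNIFORM_SPAN_RATIO = 0.1


def run_simulation_sketch(
    parameters: List[Dict[str, Any]],
    iterations: int = 100,
    seed: Optional[int] = None,
) -> List[Dict[str, Any]]:
    """Illustrative stand-in for run_simulation, matching what the tests assume."""
    if not parameters:
        # Mirrors test_run_simulation_without_parameters_returns_empty.
        return []
    rng = Random(seed)  # one seeded generator drives every iteration
    results: List[Dict[str, Any]] = []
    for _ in range(iterations):
        for param in parameters:
            value = float(param["value"])
            span = value * DEFAULT_UNIFORM_SPAN_RATIO  # default bounds: value +/- span
            if param.get("distribution") == "triangular":
                sample = rng.triangular(value - span, value + span, value)
            else:
                # Uniform is the fallback the "uniform defaults" test exercises.
                sample = rng.uniform(value - span, value + span)
            results.append({"name": param["name"], "result": sample})
    return results
```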
@@ -1,5 +1,6 @@
 from typing import Any, Dict, cast

+import pytest
 from fastapi.testclient import TestClient

 from models.scenario import Scenario
@@ -98,3 +99,33 @@ def test_dashboard_data_endpoint_returns_aggregates(
     activity_labels = payload["scenario_activity_chart"]["labels"]
     activity_idx = activity_labels.index(scenario.name)
     assert payload["scenario_activity_chart"]["production"][activity_idx] == 800.0
+
+
+@pytest.mark.parametrize(
+    ("path", "template_name"),
+    [
+        ("/", "Dashboard.html"),
+        ("/ui/parameters", "ParameterInput.html"),
+        ("/ui/costs", "costs.html"),
+        ("/ui/consumption", "consumption.html"),
+        ("/ui/production", "production.html"),
+        ("/ui/equipment", "equipment.html"),
+        ("/ui/maintenance", "maintenance.html"),
+        ("/ui/simulations", "simulations.html"),
+    ],
+)
+def test_additional_ui_routes_render_templates(
+    api_client: TestClient,
+    seeded_ui_data: Dict[str, Any],
+    path: str,
+    template_name: str,
+) -> None:
+    response = api_client.get(path)
+    assert response.status_code == 200
+
+    template = getattr(response, "template", None)
+    assert template is not None
+    assert template.name == template_name
+
+    context = cast(Dict[str, Any], getattr(response, "context", {}))
+    assert context
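The parametrized test leans on the template introspection that Starlette's `TestClient` provides for `Jinja2Templates` responses: the rendered template object and its context are attached to the response, which is exactly what the `getattr(response, "template", None)` checks above use. A standalone usage sketch follows; the `main:app` import path and the test name are assumptions.

```python
from fastapi.testclient import TestClient

from main import app  # application import path assumed for the sketch


def test_costs_page_renders_expected_template() -> None:
    client = TestClient(app)
    response = client.get("/ui/costs")
    assert response.status_code == 200
    # Jinja2Templates responses expose the rendered template and its context
    # to Starlette's TestClient, so UI routes can be verified without parsing HTML.
    assert response.template.name == "costs.html"
    assert response.context  # the context dict passed to the template
```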