calminer/tests/unit/test_reporting.py
zwitschi 434be86b76 feat: Enhance dashboard metrics and summary statistics
- Added new summary fields: variance, 5th percentile, 95th percentile, VaR (95%), and expected shortfall (95%) to the dashboard.
- Updated the display logic for summary metrics to handle non-finite values gracefully.
- Modified the chart rendering to include additional percentile points and tail risk metrics in tooltips.

test: Introduce unit tests for consumption, costs, and other modules

- Created a comprehensive test suite for consumption, costs, equipment, maintenance, production, reporting, and simulation modules.
- Implemented fixtures for database setup and teardown using an in-memory SQLite database for isolated testing.
- Added tests for creating, listing, and validating various entities, ensuring proper error handling and response validation.

refactor: Consolidate parameter tests and remove deprecated files

- Merged parameter-related tests into a new test file for better organization and clarity.
- Removed the old parameter test file that was no longer in use.
- Improved test coverage for parameter creation, listing, and validation scenarios.

fix: Ensure proper validation and error handling in API endpoints

- Added validation to reject negative amounts in consumption and production records.
- Implemented checks to prevent duplicate scenario creation and ensure proper error messages are returned.
- Enhanced reporting endpoint tests to validate input formats and expected outputs.
2025-10-20 22:06:39 +02:00


import math
from typing import Any, Dict, List

import pytest
from fastapi.testclient import TestClient

from services.reporting import generate_report


def test_generate_report_empty():
    report = generate_report([])
    assert report == {
        "count": 0,
        "mean": 0.0,
        "median": 0.0,
        "min": 0.0,
        "max": 0.0,
        "std_dev": 0.0,
        "variance": 0.0,
        "percentile_10": 0.0,
        "percentile_90": 0.0,
        "percentile_5": 0.0,
        "percentile_95": 0.0,
        "value_at_risk_95": 0.0,
        "expected_shortfall_95": 0.0,
    }


def test_generate_report_with_values():
    values: List[Dict[str, float]] = [
        {"iteration": 1, "result": 10.0},
        {"iteration": 2, "result": 20.0},
        {"iteration": 3, "result": 30.0},
    ]
    report = generate_report(values)
    assert report["count"] == 3
    assert math.isclose(float(report["mean"]), 20.0)
    assert math.isclose(float(report["median"]), 20.0)
    assert math.isclose(float(report["min"]), 10.0)
    assert math.isclose(float(report["max"]), 30.0)
    assert math.isclose(float(report["std_dev"]), 8.1649658, rel_tol=1e-6)
    assert math.isclose(float(report["variance"]), 66.6666666, rel_tol=1e-6)
    assert math.isclose(float(report["percentile_10"]), 12.0)
    assert math.isclose(float(report["percentile_90"]), 28.0)
    assert math.isclose(float(report["percentile_5"]), 11.0)
    assert math.isclose(float(report["percentile_95"]), 29.0)
    assert math.isclose(float(report["value_at_risk_95"]), 11.0)
    assert math.isclose(float(report["expected_shortfall_95"]), 10.0)


@pytest.fixture
def client(api_client: TestClient) -> TestClient:
    return api_client
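
# The api_client fixture is assumed to be provided by conftest.py; per the
# commit notes it wires the app to an in-memory SQLite database for isolated
# testing. A hypothetical sketch (the factory name and wiring are assumptions):
#
#   @pytest.fixture
#   def api_client() -> TestClient:
#       app = create_app(database_url="sqlite:///:memory:")
#       with TestClient(app) as test_client:
#           yield test_client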


def test_reporting_endpoint_invalid_input(client: TestClient):
    resp = client.post("/api/reporting/summary", json={})
    assert resp.status_code == 400
    assert resp.json()["detail"] == "Invalid input format"


def test_reporting_endpoint_success(client: TestClient):
    input_data: List[Dict[str, float]] = [
        {"iteration": 1, "result": 10.0},
        {"iteration": 2, "result": 20.0},
        {"iteration": 3, "result": 30.0},
    ]
    resp = client.post("/api/reporting/summary", json=input_data)
    assert resp.status_code == 200
    data: Dict[str, Any] = resp.json()
    assert data["count"] == 3
    assert math.isclose(float(data["mean"]), 20.0)
    assert math.isclose(float(data["variance"]), 66.6666666, rel_tol=1e-6)
    assert math.isclose(float(data["value_at_risk_95"]), 11.0)
    assert math.isclose(float(data["expected_shortfall_95"]), 10.0)


validation_error_cases: List[tuple[List[Any], str]] = [
    (["not-a-dict"], "Entry at index 0 must be an object"),
    ([{"iteration": 1}], "Entry at index 0 must include numeric 'result'"),
    (
        [{"iteration": 1, "result": "bad"}],
        "Entry at index 0 must include numeric 'result'",
    ),
]


@pytest.mark.parametrize("payload,expected_detail", validation_error_cases)
def test_reporting_endpoint_validation_errors(
    client: TestClient, payload: List[Any], expected_detail: str
):
    resp = client.post("/api/reporting/summary", json=payload)
    assert resp.status_code == 400
    assert resp.json()["detail"] == expected_detail