- Updated README.md to reflect new features and usage instructions.
- Removed deprecated Dashboard.html component and integrated dashboard functionality directly into the main application.
- Revised architecture documentation for clarity and added module map and request flow diagrams.
- Enhanced maintenance model to include equipment association and cost tracking.
- Updated requirements.txt to include new dependencies (httpx, pandas, numpy).
- Improved consumption, maintenance, production, and reporting routes with better validation and response handling.
- Added unit tests for maintenance and production routes, ensuring proper CRUD operations and validation.
- Enhanced reporting service to calculate and return detailed summary statistics (see the sketch after this list).
- Redesigned Dashboard.html for improved user experience and integrated Chart.js for visualizing simulation results.
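The summary-statistics contract behind the reporting bullet is pinned down by the tests further below: zeroed statistics for empty input, a population standard deviation, and linearly interpolated percentiles. The following is a minimal sketch of what `services/reporting.generate_report` could look like under those assumptions, using the numpy dependency added to requirements.txt; the actual implementation in the repository may differ.

```python
# Hypothetical sketch of services/reporting.py -- the real module may differ.
from typing import Any

import numpy as np


def generate_report(results: list[dict[str, Any]]) -> dict[str, float]:
    """Compute summary statistics over simulation results.

    Each entry is expected to be a dict with a numeric "result" key,
    matching the payloads used in the tests below.
    """
    values = [float(entry["result"]) for entry in results]
    if not values:
        # Empty input: return zeroed statistics, as asserted in
        # test_generate_report_empty.
        return {
            "count": 0,
            "mean": 0.0,
            "median": 0.0,
            "min": 0.0,
            "max": 0.0,
            "std_dev": 0.0,
            "percentile_10": 0.0,
            "percentile_90": 0.0,
        }
    arr = np.asarray(values)
    return {
        "count": len(values),
        "mean": float(arr.mean()),
        "median": float(np.median(arr)),
        "min": float(arr.min()),
        "max": float(arr.max()),
        # Population standard deviation (ddof=0), consistent with the
        # expected 8.1649658 for [10.0, 20.0, 30.0].
        "std_dev": float(arr.std()),
        # Linear-interpolated percentiles (numpy's default), giving
        # 12.0 and 28.0 for [10.0, 20.0, 30.0].
        "percentile_10": float(np.percentile(arr, 10)),
        "percentile_90": float(np.percentile(arr, 90)),
    }
```

The zeroed-dictionary branch mirrors `test_generate_report_empty`, and `arr.std()` defaults to the population form (ddof=0), which is what the 8.1649658 expectation implies.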
The reporting test suite (71 lines, 2.2 KiB, Python):
```python
from fastapi.testclient import TestClient
import pytest

from main import app
from services.reporting import generate_report


def test_generate_report_empty():
    report = generate_report([])
    assert report == {
        "count": 0,
        "mean": 0.0,
        "median": 0.0,
        "min": 0.0,
        "max": 0.0,
        "std_dev": 0.0,
        "percentile_10": 0.0,
        "percentile_90": 0.0,
    }


def test_generate_report_with_values():
    values = [{"iteration": 1, "result": 10.0}, {
        "iteration": 2, "result": 20.0}, {"iteration": 3, "result": 30.0}]
    report = generate_report(values)
    assert report["count"] == 3
    assert report["mean"] == pytest.approx(20.0)
    assert report["median"] == pytest.approx(20.0)
    assert report["min"] == pytest.approx(10.0)
    assert report["max"] == pytest.approx(30.0)
    assert report["std_dev"] == pytest.approx(8.1649658, rel=1e-6)
    assert report["percentile_10"] == pytest.approx(12.0)
    assert report["percentile_90"] == pytest.approx(28.0)


def test_reporting_endpoint_invalid_input():
    client = TestClient(app)
    resp = client.post("/api/reporting/summary", json={})
    assert resp.status_code == 400
    assert resp.json()["detail"] == "Invalid input format"


def test_reporting_endpoint_success():
    client = TestClient(app)
    input_data = [
        {"iteration": 1, "result": 10.0},
        {"iteration": 2, "result": 20.0},
        {"iteration": 3, "result": 30.0},
    ]
    resp = client.post("/api/reporting/summary", json=input_data)
    assert resp.status_code == 200
    data = resp.json()
    assert data["count"] == 3
    assert data["mean"] == pytest.approx(20.0)


@pytest.mark.parametrize(
    "payload,expected_detail",
    [
        (["not-a-dict"], "Entry at index 0 must be an object"),
        ([{"iteration": 1}], "Entry at index 0 must include numeric 'result'"),
        ([{"iteration": 1, "result": "bad"}],
         "Entry at index 0 must include numeric 'result'"),
    ],
)
def test_reporting_endpoint_validation_errors(payload, expected_detail):
    client = TestClient(app)
    resp = client.post("/api/reporting/summary", json=payload)
    assert resp.status_code == 400
    assert resp.json()["detail"] == expected_detail
```
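For context, the endpoint tests above assume that the `/api/reporting/summary` route validates the raw JSON body itself and reports failures as 400 responses with specific `detail` strings. Below is a rough sketch of a route compatible with those assertions; the module path, router name, and registration in `main.py` are assumptions, not the repository's actual code.

```python
# Hypothetical sketch of routes/reporting.py -- the actual router may differ.
from fastapi import APIRouter, HTTPException, Request

from services.reporting import generate_report

router = APIRouter(prefix="/api/reporting")


@router.post("/summary")
async def summary(request: Request):
    payload = await request.json()

    # The tests post a bare object ({}) and expect a 400, so anything that
    # is not a JSON array is rejected up front.
    if not isinstance(payload, list):
        raise HTTPException(status_code=400, detail="Invalid input format")

    for i, entry in enumerate(payload):
        if not isinstance(entry, dict):
            raise HTTPException(
                status_code=400,
                detail=f"Entry at index {i} must be an object",
            )
        result = entry.get("result")
        # bool is excluded explicitly because isinstance(True, int) is True.
        if isinstance(result, bool) or not isinstance(result, (int, float)):
            raise HTTPException(
                status_code=400,
                detail=f"Entry at index {i} must include numeric 'result'",
            )

    return generate_report(payload)
```

Assuming `main.py` registers this with `app.include_router(router)`, `TestClient(app)` exercises exactly this path, which is why the parametrized test can assert the literal `detail` strings.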