Refactor and enhance CalMiner application

- Updated README.md to reflect new features and usage instructions.
- Removed deprecated Dashboard.html component and integrated dashboard functionality directly into the main application.
- Revised architecture documentation for clarity and added module map and request flow diagrams.
- Enhanced maintenance model to include equipment association and cost tracking (a hypothetical model sketch follows this list).
- Updated requirements.txt to include new dependencies (httpx, pandas, numpy).
- Improved consumption, maintenance, production, and reporting routes with better validation and response handling (see the route sketch below).
- Added unit tests for maintenance and production routes, ensuring proper CRUD operations and validation.
- Enhanced reporting service to calculate and return detailed summary statistics (see the service sketch below).
- Redesigned Dashboard.html for improved user experience and integrated Chart.js for visualizing simulation results.
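
The maintenance model diff is not shown on this page, so the following Pydantic sketch is purely illustrative: the field names `equipment_id`, `cost`, and `performed_on` are assumptions standing in for the equipment association and cost tracking mentioned above, not the actual CalMiner schema.

```python
from datetime import date
from typing import Optional

from pydantic import BaseModel


class MaintenanceRecord(BaseModel):
    """Illustrative shape only; actual CalMiner field names may differ."""
    id: Optional[int] = None
    equipment_id: int                    # assumed name for the equipment association
    description: str
    cost: float = 0.0                    # assumed name for the cost-tracking field
    performed_on: Optional[date] = None  # assumed name for the service date
```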
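The reporting tests below pin the summary statistics down exactly: population standard deviation, linearly interpolated percentiles, and an all-zero report for empty input. A minimal sketch of `generate_report` that satisfies them, assuming the new numpy dependency is what backs the calculation:

```python
import numpy as np


def generate_report(values: list) -> dict:
    """Summarize per-iteration simulation results."""
    results = [entry["result"] for entry in values]
    if not results:
        # Empty input yields the all-zero report asserted in the tests.
        return {
            "count": 0, "mean": 0.0, "median": 0.0, "min": 0.0, "max": 0.0,
            "std_dev": 0.0, "percentile_10": 0.0, "percentile_90": 0.0,
        }
    arr = np.asarray(results, dtype=float)
    return {
        "count": int(arr.size),
        "mean": float(arr.mean()),
        "median": float(np.median(arr)),
        "min": float(arr.min()),
        "max": float(arr.max()),
        "std_dev": float(arr.std()),                     # population std dev: ~8.1649658 for [10, 20, 30]
        "percentile_10": float(np.percentile(arr, 10)),  # linear interpolation: 12.0 for [10, 20, 30]
        "percentile_90": float(np.percentile(arr, 90)),  # 28.0 for [10, 20, 30]
    }
```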
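The endpoint tests likewise fix the validation contract: a 400 with a per-index detail message for non-object entries or a missing/non-numeric `result`. A hedged sketch of a handler that satisfies them; the actual body of `routes/reporting.py` may differ:

```python
from fastapi import APIRouter, HTTPException

from services.reporting import generate_report

router = APIRouter(prefix="/api/reporting")


@router.post("/summary")
def summary(payload: list) -> dict:
    # Non-array bodies (e.g. the bare `{}` in the invalid-input test) are
    # rejected by FastAPI's own request validation before this handler runs.
    for i, entry in enumerate(payload):
        if not isinstance(entry, dict):
            raise HTTPException(
                status_code=400,
                detail=f"Entry at index {i} must be an object",
            )
        result = entry.get("result")
        # bool is a subclass of int, so exclude it explicitly.
        if isinstance(result, bool) or not isinstance(result, (int, float)):
            raise HTTPException(
                status_code=400,
                detail=f"Entry at index {i} must include numeric 'result'",
            )
    return generate_report(payload)
```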
2025-10-20 20:53:55 +02:00
parent fee857637f
commit e73a987d25
19 changed files with 794 additions and 184 deletions


@@ -1,15 +1,38 @@
import pytest
from fastapi.testclient import TestClient

from main import app
from services.reporting import generate_report


# Function test
def test_generate_report_empty():
    report = generate_report([])
    assert isinstance(report, dict)
    assert report == {
        "count": 0,
        "mean": 0.0,
        "median": 0.0,
        "min": 0.0,
        "max": 0.0,
        "std_dev": 0.0,
        "percentile_10": 0.0,
        "percentile_90": 0.0,
    }


def test_generate_report_with_values():
    values = [
        {"iteration": 1, "result": 10.0},
        {"iteration": 2, "result": 20.0},
        {"iteration": 3, "result": 30.0},
    ]
    report = generate_report(values)
    assert report["count"] == 3
    assert report["mean"] == pytest.approx(20.0)
    assert report["median"] == pytest.approx(20.0)
    assert report["min"] == pytest.approx(10.0)
    assert report["max"] == pytest.approx(30.0)
    # Population standard deviation of [10, 20, 30] is sqrt(200 / 3).
    assert report["std_dev"] == pytest.approx(8.1649658, rel=1e-6)
    assert report["percentile_10"] == pytest.approx(12.0)
    assert report["percentile_90"] == pytest.approx(28.0)


# Endpoint test
def test_reporting_endpoint_invalid_input():
    client = TestClient(app)
    resp = client.post("/api/reporting/summary", json={})
@@ -19,9 +42,29 @@ def test_reporting_endpoint_invalid_input():
def test_reporting_endpoint_success():
    client = TestClient(app)
    # Input: a list of per-iteration result dicts
    input_data = [
        {"iteration": 1, "result": 10.0},
        {"iteration": 2, "result": 20.0},
        {"iteration": 3, "result": 30.0},
    ]
    resp = client.post("/api/reporting/summary", json=input_data)
    assert resp.status_code == 200
    data = resp.json()
    assert isinstance(data, dict)
    assert data["count"] == 3
    assert data["mean"] == pytest.approx(20.0)


@pytest.mark.parametrize(
    "payload,expected_detail",
    [
        (["not-a-dict"], "Entry at index 0 must be an object"),
        ([{"iteration": 1}], "Entry at index 0 must include numeric 'result'"),
        (
            [{"iteration": 1, "result": "bad"}],
            "Entry at index 0 must include numeric 'result'",
        ),
    ],
)
def test_reporting_endpoint_validation_errors(payload, expected_detail):
    client = TestClient(app)
    resp = client.post("/api/reporting/summary", json=payload)
    assert resp.status_code == 400
    assert resp.json()["detail"] == expected_detail