- Added new summary fields to the dashboard: variance, 5th percentile, 95th percentile, VaR (95%), and expected shortfall (95%); a computation sketch follows this summary.
- Updated the display logic for summary metrics to handle non-finite values gracefully.
- Modified the chart rendering to include additional percentile points and tail risk metrics in tooltips.

test: Introduce unit tests for consumption, costs, and other modules

- Created a comprehensive test suite for the consumption, costs, equipment, maintenance, production, reporting, and simulation modules.
- Implemented fixtures for database setup and teardown using an in-memory SQLite database for isolated testing (see the conftest sketch after the test file below).
- Added tests for creating, listing, and validating the various entities, checking both error handling and response contents.

refactor: Consolidate parameter tests and remove deprecated files

- Merged the parameter-related tests into a single test file for better organization and clarity.
- Removed the old parameter test file, which was no longer in use.
- Improved test coverage for parameter creation, listing, and validation scenarios.

fix: Ensure proper validation and error handling in API endpoints

- Added validation to reject negative amounts in consumption and production records (see the schema sketch after the test file).
- Implemented checks to prevent duplicate scenario creation and return clear error messages (see the endpoint sketch below).
- Enhanced the reporting endpoint tests to validate input formats and expected outputs.
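The new tail-risk fields can all be derived from the array of simulated outcomes. Below is a minimal sketch of that computation, assuming the simulation yields a NumPy array of per-run profit/loss values; summarize_tail_risk and the returned key names are illustrative, not the dashboard's actual code.

import numpy as np


def summarize_tail_risk(outcomes: np.ndarray) -> dict[str, float]:
    # Ignore NaN/inf runs so a few bad simulations cannot poison every metric.
    finite = outcomes[np.isfinite(outcomes)]
    if finite.size == 0:
        return {}

    p5, p95 = np.percentile(finite, [5, 95])
    var_95 = -p5                     # VaR (95%): negated 5th percentile of P&L (one common convention)
    tail = finite[finite <= p5]      # worst 5% of runs
    es_95 = -tail.mean() if tail.size else var_95  # expected shortfall (95%): mean loss in that tail

    return {
        "variance": float(np.var(finite)),
        "p5": float(p5),
        "p95": float(p95),
        "var_95": float(var_95),
        "expected_shortfall_95": float(es_95),
    }

Any metric that still ends up non-finite (for example when every run is filtered out) can then be shown as a placeholder such as "n/a" rather than breaking the summary panel, which is the intent of the display-logic change.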
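The duplicate-scenario check from the fix: entry presumably amounts to looking the name up before inserting. A rough FastAPI sketch under that assumption; Scenario, ScenarioCreate, and get_db are stand-ins for the project's actual model, request schema, and session dependency, and the import paths are invented for illustration.

from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session

from app.database import get_db         # assumed session dependency
from app.models import Scenario         # assumed ORM model
from app.schemas import ScenarioCreate  # assumed request schema

router = APIRouter(prefix="/api/scenarios")


@router.post("/")
def create_scenario(payload: ScenarioCreate, db: Session = Depends(get_db)):
    # Refuse to create a second scenario with the same name and explain why.
    if db.query(Scenario).filter(Scenario.name == payload.name).first() is not None:
        raise HTTPException(status_code=409, detail=f"Scenario '{payload.name}' already exists")
    scenario = Scenario(name=payload.name, description=payload.description)
    db.add(scenario)
    db.commit()
    db.refresh(scenario)
    return scenario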
from uuid import uuid4

import pytest
from fastapi.testclient import TestClient


@pytest.fixture
def client(api_client: TestClient) -> TestClient:
    return api_client


def _create_scenario(client: TestClient) -> int:
    payload = {
        "name": f"Consumption Scenario {uuid4()}",
        "description": "Scenario for consumption tests",
    }
    response = client.post("/api/scenarios/", json=payload)
    assert response.status_code == 200
    return response.json()["id"]


def test_create_consumption(client: TestClient) -> None:
    scenario_id = _create_scenario(client)
    payload = {
        "scenario_id": scenario_id,
        "amount": 125.5,
        "description": "Fuel usage baseline",
    }

    response = client.post("/api/consumption/", json=payload)
    assert response.status_code == 201
    body = response.json()
    assert body["id"] > 0
    assert body["scenario_id"] == scenario_id
    assert body["amount"] == pytest.approx(125.5)
    assert body["description"] == "Fuel usage baseline"


def test_list_consumption_returns_created_items(client: TestClient) -> None:
    scenario_id = _create_scenario(client)
    values = [50.0, 80.75]
    for amount in values:
        response = client.post(
            "/api/consumption/",
            json={
                "scenario_id": scenario_id,
                "amount": amount,
                "description": f"Consumption {amount}",
            },
        )
        assert response.status_code == 201

    list_response = client.get("/api/consumption/")
    assert list_response.status_code == 200
    items = [
        item for item in list_response.json()
        if item["scenario_id"] == scenario_id
    ]
    assert {item["amount"] for item in items} == set(values)


def test_create_consumption_rejects_negative_amount(client: TestClient) -> None:
    scenario_id = _create_scenario(client)
    payload = {
        "scenario_id": scenario_id,
        "amount": -10,
        "description": "Invalid negative amount",
    }

    response = client.post("/api/consumption/", json=payload)
    assert response.status_code == 422
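
The 422 expected above comes from request validation rather than handler logic. One way to express the "no negative amounts" rule is a constrained Pydantic field; ConsumptionCreate is an assumed name for the request schema, not necessarily the project's.

from pydantic import BaseModel, Field


class ConsumptionCreate(BaseModel):
    scenario_id: int
    amount: float = Field(ge=0)  # ge=0 makes FastAPI return the 422 the test expects for negatives
    description: str = ""

A matching constraint on the production schema would cover the other half of that fix.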
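The tests above rely on an api_client fixture defined elsewhere, typically in a shared conftest.py. A minimal sketch of such a fixture, assuming a SQLAlchemy declarative Base, a get_db session dependency, and a FastAPI app importable from the project; the module paths are assumptions.

# conftest.py (sketch)
from collections.abc import Iterator

import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from app.database import Base, get_db  # assumed declarative base and session dependency
from app.main import app               # assumed FastAPI application


@pytest.fixture
def api_client() -> Iterator[TestClient]:
    # Fresh in-memory SQLite database per test keeps the suite isolated.
    engine = create_engine(
        "sqlite://",
        connect_args={"check_same_thread": False},
        poolclass=StaticPool,
    )
    Base.metadata.create_all(bind=engine)
    TestingSession = sessionmaker(bind=engine, autoflush=False)

    def override_get_db():
        db = TestingSession()
        try:
            yield db
        finally:
            db.close()

    app.dependency_overrides[get_db] = override_get_db
    try:
        with TestClient(app) as test_client:
            yield test_client
    finally:
        app.dependency_overrides.clear()
        Base.metadata.drop_all(bind=engine)  # teardown mirrors the setup above

Because the engine is created inside the fixture, every test starts from an empty database, which is the setup/teardown behaviour described in the test: entry of the commit summary.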