calminer/tests/unit/test_production.py
zwitschi 434be86b76 feat: Enhance dashboard metrics and summary statistics
- Added new summary fields to the dashboard: variance, 5th percentile, 95th percentile, VaR (95%), and expected shortfall (95%); a sketch of these computations follows this list.
- Updated the display logic for summary metrics to handle non-finite values gracefully.
- Modified the chart rendering to include additional percentile points and tail risk metrics in tooltips.
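
The metric computations themselves are not shown on this page. As a minimal sketch under stated assumptions, the tail-risk summaries named above are commonly derived from simulation samples roughly as follows (the helper name summarize_outcomes is hypothetical, and the actual dashboard code may use different sign conventions):

import numpy as np


def summarize_outcomes(samples: np.ndarray) -> dict[str, float]:
    # Hypothetical helper: VaR(95%) is read off the 5th percentile and
    # expected shortfall (95%) is the mean of outcomes at or below it.
    p5 = float(np.percentile(samples, 5))
    tail = samples[samples <= p5]
    return {
        "variance": float(np.var(samples)),
        "p5": p5,
        "p95": float(np.percentile(samples, 95)),
        "var_95": p5,
        # Mean of the worst 5% of outcomes; NaN if the tail is empty,
        # which the display layer is said to handle gracefully.
        "expected_shortfall_95": float(tail.mean()) if tail.size else float("nan"),
    }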

test: Introduce unit tests for consumption, costs, and other modules

- Created a comprehensive test suite for consumption, costs, equipment, maintenance, production, reporting, and simulation modules.
- Implemented fixtures for database setup and teardown using an in-memory SQLite database for isolated testing (see the sketch after this list).
- Added tests for creating, listing, and validating various entities, ensuring proper error handling and response validation.
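
The fixture wiring is not part of the file shown below. A minimal sketch of the in-memory SQLite pattern described above, assuming SQLAlchemy models and a FastAPI dependency override (the imports from calminer.main and calminer.database, and the names app, Base, and get_db, are assumptions about the project layout):

import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from calminer.main import app              # assumed application entry point
from calminer.database import Base, get_db  # assumed module layout


@pytest.fixture
def api_client() -> TestClient:
    # StaticPool keeps a single in-memory database alive across connections.
    engine = create_engine(
        "sqlite://",
        connect_args={"check_same_thread": False},
        poolclass=StaticPool,
    )
    Base.metadata.create_all(engine)  # set up schema
    TestingSession = sessionmaker(bind=engine)

    def override_get_db():
        db = TestingSession()
        try:
            yield db
        finally:
            db.close()

    app.dependency_overrides[get_db] = override_get_db
    with TestClient(app) as client:
        yield client
    app.dependency_overrides.clear()
    Base.metadata.drop_all(engine)  # tear down schema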

refactor: Consolidate parameter tests and remove deprecated files

- Merged parameter-related tests into a new test file for better organization and clarity.
- Removed the old parameter test file that was no longer in use.
- Improved test coverage for parameter creation, listing, and validation scenarios.

fix: Ensure proper validation and error handling in API endpoints

- Added validation to reject negative amounts in consumption and production records; a schema sketch follows this list.
- Implemented checks to prevent duplicate scenario creation and ensure proper error messages are returned.
- Enhanced reporting endpoint tests to validate input formats and expected outputs.
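
The endpoint-side validation is likewise not shown here. One plausible way to produce the 422 responses the tests below expect is a Pydantic constraint on the request schema; the model name ProductionCreate is an assumption, though the field names match the test payloads:

from pydantic import BaseModel, Field


class ProductionCreate(BaseModel):
    # Hypothetical request schema for POST /api/production/.
    scenario_id: int
    # ge=0 rejects negative amounts, which FastAPI surfaces as HTTP 422.
    amount: float = Field(ge=0)
    description: str | None = None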
2025-10-20 22:06:39 +02:00


"""Unit tests for the production API endpoints."""

from uuid import uuid4

import pytest
from fastapi.testclient import TestClient


@pytest.fixture
def client(api_client: TestClient) -> TestClient:
    # api_client is expected to come from the suite's shared fixtures
    # (the in-memory SQLite setup described in the commit message).
    return api_client


def _create_scenario(client: TestClient) -> int:
    # Each test gets a uniquely named scenario to avoid duplicate-name rejection.
    payload = {
        "name": f"Production Scenario {uuid4()}",
        "description": "Scenario for production tests",
    }
    response = client.post("/api/scenarios/", json=payload)
    assert response.status_code == 200
    return response.json()["id"]


def test_create_production_record(client: TestClient) -> None:
    scenario_id = _create_scenario(client)
    payload = {
        "scenario_id": scenario_id,
        "amount": 475.25,
        "description": "Daily output",
    }
    response = client.post("/api/production/", json=payload)
    assert response.status_code == 201
    created = response.json()
    assert created["scenario_id"] == scenario_id
    assert created["amount"] == pytest.approx(475.25)
    assert created["description"] == "Daily output"


def test_list_production_filters_by_scenario(client: TestClient) -> None:
    target_scenario = _create_scenario(client)
    other_scenario = _create_scenario(client)
    for scenario_id, amount in [
        (target_scenario, 100.0),
        (target_scenario, 150.0),
        (other_scenario, 200.0),
    ]:
        response = client.post(
            "/api/production/",
            json={
                "scenario_id": scenario_id,
                "amount": amount,
                "description": f"Output {amount}",
            },
        )
        assert response.status_code == 201
    list_response = client.get("/api/production/")
    assert list_response.status_code == 200
    items = [
        item for item in list_response.json()
        if item["scenario_id"] == target_scenario
    ]
    assert {item["amount"] for item in items} == {100.0, 150.0}


def test_create_production_rejects_negative_amount(client: TestClient) -> None:
    scenario_id = _create_scenario(client)
    response = client.post(
        "/api/production/",
        json={
            "scenario_id": scenario_id,
            "amount": -5,
            "description": "Invalid output",
        },
    )
    assert response.status_code == 422