feat: Enhance project and scenario creation with monitoring metrics

- Added monitoring metrics for project creation success and error handling in `ProjectRepository`.
- Implemented similar monitoring for scenario creation in `ScenarioRepository`.
- Refactored the `run_monte_carlo` function in `simulation.py` to record timing and success/error metrics (see the sketch after this list).
- Introduced new CSS styles for headers, alerts, and navigation buttons in `main.css` and `projects.css`.
- Created a new JavaScript file for navigation logic to handle chevron buttons.
- Updated HTML templates to include the new navigation buttons and improve button styling.
- Added tests for reporting service and routes to ensure proper functionality and access control.
- Removed unused imports and optimized existing test files for better clarity and performance.
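
As an illustration of the timing and success/error pattern described for `run_monte_carlo`, a minimal sketch follows. The function signature, the `_simulate_once` placeholder, and the metric names are assumptions for illustration only; `store_metric` matches the `MetricsService` API added in this commit.

import random
import time
from typing import List, Sequence

def _simulate_once(values: Sequence[float]) -> float:
    # Placeholder for a single Monte Carlo draw; the real logic lives in simulation.py.
    return random.choice(values)

def run_monte_carlo(values: Sequence[float], iterations: int, metrics_service) -> List[float]:
    # Time the whole run and record an error or duration metric, mirroring the
    # pattern described in the commit message (metric names are illustrative).
    start = time.perf_counter()
    try:
        results = [_simulate_once(values) for _ in range(iterations)]
    except Exception:
        metrics_service.store_metric("monte_carlo_error", 1.0)
        raise
    metrics_service.store_metric(
        "monte_carlo_duration_seconds",
        time.perf_counter() - start,
        labels={"iterations": iterations},
    )
    return results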
2025-11-12 10:36:24 +01:00
parent f68321cd04
commit ce9c174b53
61 changed files with 2124 additions and 308 deletions

services/metrics.py (new file, +96 lines)

@@ -0,0 +1,96 @@
from __future__ import annotations

import json
from datetime import datetime
from typing import Any, Dict, Optional

from sqlalchemy.orm import Session

from config.database import get_db
from models.performance_metric import PerformanceMetric


class MetricsService:
    def __init__(self, db: Session):
        self.db = db

    def store_metric(
        self,
        metric_name: str,
        value: float,
        labels: Optional[Dict[str, Any]] = None,
        endpoint: Optional[str] = None,
        method: Optional[str] = None,
        status_code: Optional[int] = None,
        duration_seconds: Optional[float] = None,
    ) -> PerformanceMetric:
        """Store a performance metric in the database."""
        metric = PerformanceMetric(
            timestamp=datetime.utcnow(),
            metric_name=metric_name,
            value=value,
            labels=json.dumps(labels) if labels else None,
            endpoint=endpoint,
            method=method,
            status_code=status_code,
            duration_seconds=duration_seconds,
        )
        self.db.add(metric)
        self.db.commit()
        self.db.refresh(metric)
        return metric

    def get_metrics(
        self,
        metric_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[PerformanceMetric]:
        """Retrieve stored metrics with optional filtering."""
        query = self.db.query(PerformanceMetric)
        if metric_name:
            query = query.filter(PerformanceMetric.metric_name == metric_name)
        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)
        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)
        return query.order_by(PerformanceMetric.timestamp.desc()).limit(limit).all()

    def get_aggregated_metrics(
        self,
        metric_name: str,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> Dict[str, Any]:
        """Get aggregated statistics for a metric."""
        query = self.db.query(PerformanceMetric).filter(
            PerformanceMetric.metric_name == metric_name
        )
        if start_time:
            query = query.filter(PerformanceMetric.timestamp >= start_time)
        if end_time:
            query = query.filter(PerformanceMetric.timestamp <= end_time)
        metrics = query.all()
        if not metrics:
            return {"count": 0, "avg": 0, "min": 0, "max": 0}
        values = [m.value for m in metrics]
        return {
            "count": len(values),
            "avg": sum(values) / len(values),
            "min": min(values),
            "max": max(values),
        }


def get_metrics_service(db: Session) -> MetricsService:
    return MetricsService(db)
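
A brief usage sketch of the service above. It relies on the definitions in this file and assumes `get_db` (imported here but unused in the shown code) is a generator-style session dependency; the metric name is illustrative.

# Illustrative usage only; assumes get_db yields a SQLAlchemy Session.
if __name__ == "__main__":
    db = next(get_db())  # pull one Session from the dependency generator (assumption)
    try:
        service = get_metrics_service(db)
        service.store_metric("project_create_success", 1.0, labels={"source": "example"})
        print(service.get_aggregated_metrics("project_create_success"))
    finally:
        db.close()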