From 39c45e720c5215691402efe24404210d647b12f9 Mon Sep 17 00:00:00 2001 From: zwitschi Date: Mon, 20 Oct 2025 18:37:57 +0200 Subject: [PATCH] Add initial implementation of CalMiner with project structure, environment setup, and core features - Create .env.example for environment variables - Update README with project structure and development setup instructions - Implement FastAPI application with API routes for scenarios and parameters - Add database models for scenarios, parameters, and simulation results - Introduce validation middleware for JSON requests - Create services for running simulations and generating reports - Add testing strategy and directory structure in documentation --- .env.example | 4 + README.md | 62 +++++++++++--- components/Dashboard.html | 25 ++++++ config/database.py | 14 +++ docs/architecture.md | 15 +++- docs/development_setup.md | 67 ++++++++++----- docs/implementation_plan.md | 166 ++++++++++++++++++++++++++++++------ docs/testing.md | 45 ++++++++++ main.py | 17 ++++ middleware/validation.py | 13 +++ models/parameters.py | 17 ++++ models/scenario.py | 20 +++++ models/simulation_result.py | 14 +++ requirements.txt | 7 ++ routes/parameters.py | 50 +++++++++++ routes/reporting.py | 23 +++++ routes/scenarios.py | 52 +++++++++++ routes/simulations.py | 25 ++++++ services/reporting.py | 15 ++++ services/simulation.py | 17 ++++ 20 files changed, 604 insertions(+), 64 deletions(-) create mode 100644 .env.example create mode 100644 components/Dashboard.html create mode 100644 config/database.py create mode 100644 main.py create mode 100644 middleware/validation.py create mode 100644 models/parameters.py create mode 100644 models/scenario.py create mode 100644 models/simulation_result.py create mode 100644 requirements.txt create mode 100644 routes/parameters.py create mode 100644 routes/reporting.py create mode 100644 routes/scenarios.py create mode 100644 routes/simulations.py create mode 100644 services/reporting.py create mode 100644 services/simulation.py diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..f059443 --- /dev/null +++ b/.env.example @@ -0,0 +1,4 @@ +# Example environment variables for CalMiner + +# PostgreSQL database connection URL +database_url=postgresql://:@localhost:5432/calminer \ No newline at end of file diff --git a/README.md b/README.md index 592958f..e3b040a 100644 --- a/README.md +++ b/README.md @@ -24,29 +24,67 @@ A range of features are implemented to support these functionalities. The architecture is documented in [docs/architecture.md](docs/architecture.md). +## Project Structure + +The project is organized into several key directories: + +- `models/`: Contains SQLAlchemy models representing database tables. +- `routes/`: Defines FastAPI routes for API endpoints. +- `services/`: Business logic and service layer. +- `components/`: Frontend components (to be defined). +- `config/`: Configuration files and settings. +- `middleware/`: Custom middleware for request/response processing. +- `tests/`: Unit and integration tests. +- `templates/`: HTML templates (if applicable). +- `docs/`: Documentation files. + +Key files include: + +- `main.py`: FastAPI application entry point. +- `.env`: Environment variables for configuration. +- `requirements.txt`: Python dependencies. + ## Development The development setup instructions are provided in [docs/development_setup.md](docs/development_setup.md). 
+To get started locally: + +```powershell +# Clone the repository +git clone https://git.allucanget.biz/allucanget/calminer.git +cd calminer + +# Create and activate a virtual environment +python -m venv .venv +.\.venv\Scripts\Activate.ps1 + +# Install dependencies +pip install -r requirements.txt + +# Start the development server +uvicorn main:app --reload +``` + ## Testing Testing guidelines and best practices are outlined in [docs/testing.md](docs/testing.md). ## Database Objects -The database is composed of several tables that store different types of information. All tables are under schema `bricsium_platform`. See `structure.sql` for full DDL. Here are some of the most important ones: +The database is composed of several tables that store different types of information. -- **CAPEX** — `bricsium_platform.capex`: Stores data on capital expenditures. -- **OPEX** — `bricsium_platform.opex`: Contains information on operational expenditures. -- **Chemical consumption** — `bricsium_platform.chemical_consumption`: Tracks the consumption of chemical reagents. -- **Fuel consumption** — `bricsium_platform.fuel_consumption`: Records the amount of fuel consumed. -- **Water consumption** — `bricsium_platform.water_consumption`: Monitors the use of water. -- **Scrap consumption** — `bricsium_platform.scrap_consumption`: Tracks the consumption of scrap materials. -- **Production output** — `bricsium_platform.production_output`: Stores data on production output, such as tons produced and recovery rates. -- **Equipment operation** — `bricsium_platform.equipment_operation`: Contains operational data for each piece of equipment. -- **Ore batch** — `bricsium_platform.ore_batch`: Stores information on ore batches, including their grade and other characteristics. -- **Exchange rate** — `bricsium_platform.exchange_rate`: Contains currency exchange rates. -- **Simulation result** — `bricsium_platform.simulation_result`: Stores the results of the Monte Carlo simulations. +- **CAPEX** — `capex`: Stores data on capital expenditures. +- **OPEX** — `opex`: Contains information on operational expenditures. +- **Chemical consumption** — `chemical_consumption`: Tracks the consumption of chemical reagents. +- **Fuel consumption** — `fuel_consumption`: Records the amount of fuel consumed. +- **Water consumption** — `water_consumption`: Monitors the use of water. +- **Scrap consumption** — `scrap_consumption`: Tracks the consumption of scrap materials. +- **Production output** — `production_output`: Stores data on production output, such as tons produced and recovery rates. +- **Equipment operation** — `equipment_operation`: Contains operational data for each piece of equipment. +- **Ore batch** — `ore_batch`: Stores information on ore batches, including their grade and other characteristics. +- **Exchange rate** — `exchange_rate`: Contains currency exchange rates. +- **Simulation result** — `simulation_result`: Stores the results of the Monte Carlo simulations. ## Static Parameters diff --git a/components/Dashboard.html b/components/Dashboard.html new file mode 100644 index 0000000..d4f06bf --- /dev/null +++ b/components/Dashboard.html @@ -0,0 +1,25 @@ + + + + + CalMiner Dashboard + + +

+    <h1>Simulation Results Dashboard</h1>
+ + + + \ No newline at end of file diff --git a/config/database.py b/config/database.py new file mode 100644 index 0000000..9a3328e --- /dev/null +++ b/config/database.py @@ -0,0 +1,14 @@ +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +import os +from dotenv import load_dotenv + +load_dotenv() +DATABASE_URL = os.environ.get("DATABASE_URL") +if not DATABASE_URL: + raise RuntimeError("DATABASE_URL environment variable is not set") + +engine = create_engine(DATABASE_URL, echo=True, future=True) +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) +Base = declarative_base() diff --git a/docs/architecture.md b/docs/architecture.md index b19a70c..0d1c459 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -8,8 +8,10 @@ CalMiner is a web application for planning mining projects, estimating costs, re - **Frontend**: Web interface for user interaction (to be defined). - **Backend**: Python API server (e.g., FastAPI) handling business logic. -- **Database**: PostgreSQL with schema `bricsium_platform` (see `structure.sql`). +- **Database**: PostgreSQL. +- **Configuration**: Environment variables and settings loaded via `python-dotenv` and stored in `config/` directory. - **Simulation Engine**: Python-based Monte Carlo runs and stochastic calculations. +- **API Routes**: FastAPI routers defined in `routes/` for scenarios, simulations, consumptions, and reporting endpoints. ## Data Flow @@ -22,8 +24,15 @@ CalMiner is a web application for planning mining projects, estimating costs, re ## Database Architecture - Schema: `bricsium_platform` -- Key tables: Scenarios, parameters, consumptions, outputs, simulations. -- Relationships: Foreign keys link scenarios to parameters, consumptions, and results. +- Key tables include: + + - `scenario` (scenario metadata and parameters) + - `capex`, `opex` (capital and operational expenditures) + - `chemical_consumption`, `fuel_consumption`, `water_consumption`, `scrap_consumption` + - `production_output`, `equipment_operation`, `ore_batch` + - `exchange_rate`, `simulation_result` + +- Relationships: Foreign keys link scenarios to parameters, consumptions, and simulation results. ## Next Steps diff --git a/docs/development_setup.md b/docs/development_setup.md index 22800aa..ad33c53 100644 --- a/docs/development_setup.md +++ b/docs/development_setup.md @@ -6,41 +6,66 @@ - PostgreSQL (version 13+) - Git +## Clone and Project Setup + +```powershell +# Clone the repository +git clone https://git.allucanget.biz/allucanget/calminer.git +cd calminer +``` + +## Virtual Environment + +```powershell +# Create and activate a virtual environment +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +``` + +## Install Dependencies + +```powershell +pip install -r requirements.txt +``` + ## Database Setup -1. Install PostgreSQL and create a database named `calminer`. - -2. Create schema `bricsium_platform`: +1. Create database user: ```sql - CREATE SCHEMA bricsium_platform; + CREATE USER calminer_user WITH PASSWORD 'your_password'; ``` -3. Load the schema from `structure.sql`: +2. Create database: - ```bash - psql -d calminer -f structure.sql + ```sql + CREATE DATABASE calminer; ``` -## Backend Setup +## Environment Variables -1. Clone the repo. -2. Create a virtual environment: `python -m venv .venv` -3. Activate it: `.venv\Scripts\activate` (Windows) or `source .venv/bin/activate` (Linux/Mac) -4. Install dependencies: `pip install -r requirements.txt` -5. 
Set up environment variables (e.g., DB connection string in .env). -6. Run migrations if any. -7. Start server: `python main.py` or `uvicorn main:app --reload` +1. Copy `.env.example` to `.env` at project root. +2. Edit `.env` to set database connection string: -## Frontend Setup + ```dotenv + DATABASE_URL=postgresql://:@localhost:5432/calminer + ``` -(TBD - add when implemented) +3. The application uses `python-dotenv` to load these variables. -## Running Locally +## Running the Application -- Backend: `uvicorn main:app --reload` -- Frontend: (TBD) +```powershell +# Start the FastAPI server +uvicorn main:app --reload +``` ## Testing -- Run tests: `pytest` +```powershell +pytest +``` + +## Frontend Setup + +(TBD - add when frontend implemented) diff --git a/docs/implementation_plan.md b/docs/implementation_plan.md index 4b0ad91..7e2f373 100644 --- a/docs/implementation_plan.md +++ b/docs/implementation_plan.md @@ -1,43 +1,153 @@ # Implementation Plan -## Feature: Scenario Creation and Management +This document outlines the MVP features and implementation steps for CalMiner. -### Scenario Implementation Steps +Refer to the following for context alignment: -1. Create `models/scenario.py` for DB interactions. -2. Implement API endpoints in `routes/scenarios.py`: GET, POST, PUT, DELETE. -3. Add frontend component `components/ScenarioForm.html` for CRUD. -4. Update `README.md` with API docs. +- System architecture: [docs/architecture.md](architecture.md) +- Development setup: [docs/development_setup.md](development_setup.md) -## Feature: Parameter Input and Validation +## Project Setup -### Parameter Implementation Steps +1. Connect to PostgreSQL database with schema `calminer`. +2. Create and activate a virtual environment and install dependencies via `requirements.txt`. +3. Define environment variables in `.env`, including `DATABASE_URL`. +4. Configure FastAPI entrypoint in `main.py` to include routers. -1. Define parameter schemas in `models/parameters.py`. -2. Create validation middleware in `middleware/validation.py`. -3. Build input form in `components/ParameterInput.html`. -4. Integrate with scenario management. +## Feature: Scenario Management -## Feature: Monte Carlo Simulation Run +### Implementation Steps -### Simulation Implementation Steps +1. Create `models/scenario.py` for scenario CRUD. +2. Implement API endpoints in `routes/scenarios.py` (GET, POST, PUT, DELETE). +3. Write unit tests in `tests/unit/test_scenario.py`. +4. Build UI component `components/ScenarioForm.html`. -1. Implement simulation logic in `services/simulation.py`. -2. Add endpoint `POST /api/simulations/run`. -3. Store results in `models/simulation_result.py`. -4. Add progress tracking UI. +## Feature: Process Parameters -## Feature: Basic Reporting +### Implementation Steps -### Reporting Implementation Steps +1. Create `models/parameters.py` for process parameters. +2. Implement Pydantic schemas in `routes/parameters.py`. +3. Add validation middleware in `middleware/validation.py`. +4. Write unit tests in `tests/unit/test_parameter.py`. +5. Build UI component `components/ParameterInput.html`. -1. Create report service `services/reporting.py`. -2. Build dashboard component `components/Dashboard.html`. -3. Fetch data from simulation results. -4. Add charts using Chart.js. +## Feature: Stochastic Variables -## Next Steps +### Implementation Steps -- Assign issues in GitHub. -- Estimate effort for each step. -- Start with backend models. +1. Create `models/distribution.py` for variable distributions. +2. 
Implement API routes in `routes/distributions.py`. +3. Write Pydantic schemas and validations. +4. Write unit tests in `tests/unit/test_distribution.py`. +5. Build UI component `components/DistributionEditor.html`. + +## Feature: Cost Tracking + +### Implementation Steps + +1. Create `models/capex.py` and `models/opex.py`. +2. Implement API routes in `routes/costs.py`. +3. Write Pydantic schemas for CAPEX/OPEX. +4. Write unit tests in `tests/unit/test_costs.py`. +5. Build UI component `components/CostForm.html`. + +## Feature: Consumption Tracking + +### Implementation Steps + +1. Create models for consumption: `chemical_consumption.py`, `fuel_consumption.py`, `water_consumption.py`, `scrap_consumption.py`. +2. Implement API routes in `routes/consumption.py`. +3. Write Pydantic schemas for consumption data. +4. Write unit tests in `tests/unit/test_consumption.py`. +5. Build UI component `components/ConsumptionDashboard.html`. + +## Feature: Production Output + +### Implementation Steps + +1. Create `models/production_output.py`. +2. Implement API routes in `routes/production.py`. +3. Write Pydantic schemas for production output. +4. Write unit tests in `tests/unit/test_production.py`. +5. Build UI component `components/ProductionChart.html`. + +## Feature: Equipment Management + +### Implementation Steps + +1. Create `models/equipment.py` for equipment data. +2. Implement API routes in `routes/equipment.py`. +3. Write Pydantic schemas for equipment. +4. Write unit tests in `tests/unit/test_equipment.py`. +5. Build UI component `components/EquipmentList.html`. + +## Feature: Maintenance Logging + +### Implementation Steps + +1. Create `models/maintenance.py` for maintenance events. +2. Implement API routes in `routes/maintenance.py`. +3. Write Pydantic schemas for maintenance logs. +4. Write unit tests in `tests/unit/test_maintenance.py`. +5. Build UI component `components/MaintenanceLog.html`. + +## Feature: Monte Carlo Simulation Engine + +### Implementation Steps + +1. Implement Monte Carlo logic in `services/simulation.py`. +2. Persist results in `models/simulation_result.py`. +3. Expose endpoint in `routes/simulations.py`. +4. Write integration tests in `tests/unit/test_simulation.py`. +5. Build UI component `components/SimulationRunner.html`. + +## Feature: Reporting / Dashboard + +### Implementation Steps + +1. Implement report calculations in `services/reporting.py`. +2. Add detailed and summary endpoints in `routes/reporting.py`. +3. Write unit tests in `tests/unit/test_reporting.py`. +4. Enhance UI in `components/Dashboard.html` with charts. + +## MVP Feature Analysis (summary) + +Goal: Identify core MVP features, acceptance criteria, and quick estimates. + +Features: + +- Scenario Management + + - Acceptance: create/read/update/delete scenarios; persist to DB; API coverage with tests. + - Estimate: 3-5 days (backend + minimal UI). + +- Parameter Input & Validation + + - Acceptance: define parameter schemas, validate inputs, surface errors to API/UI. + - Estimate: 2-3 days. + +- Monte Carlo Simulation Engine + + - Acceptance: run parameterised simulations, store results, ability to rerun with different seeds, basic progress reporting. + - Estimate: 1-2 weeks (core engine + persistence). + +- Reporting / Dashboard + - Acceptance: display simulation outputs (NPV, IRR distributions), basic charts, export CSV. + - Estimate: 4-7 days. + +Edge cases to consider: + +- Large simulation runs (memory / timeouts) — use streaming, chunking, or background workers. 
+- DB migration and schema versioning. +- Authentication/authorization for scenario access. + +Next actionable items: + +1. Break Scenario Management into sub-issues (models, routes, tests, simple UI). +2. Scaffold Parameter Input & Validation (models/parameters.py, middleware, routes, tests). +3. Prototype the simulation engine with a small deterministic runner and unit tests. +4. Scaffold Monte Carlo Simulation endpoints (`services/simulation.py`, `routes/simulations.py`, tests). +5. Scaffold Reporting endpoints (`services/reporting.py`, `routes/reporting.py`, front-end Dashboard, tests). +6. Add CI job for tests and coverage. diff --git a/docs/testing.md b/docs/testing.md index 525fe16..8e568aa 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -27,3 +27,48 @@ CalMiner will use a combination of unit, integration, and end-to-end tests to en - Unit: `pytest tests/unit/` - Integration: `pytest tests/integration/` - All: `pytest` + +## Test Directory Structure + +Organize tests under the `tests/` directory mirroring the application structure: + +```bash +tests/ + unit/ + test_.py + integration/ + test_.py + fixtures/ + conftest.py +``` + +## Writing Tests + +- Name tests with the `test_` prefix. +- Group related tests in classes or modules. +- Use descriptive assertion messages. + +## Fixtures and Test Data + +- Define reusable fixtures in `tests/fixtures/conftest.py`. +- Use temporary in-memory databases or isolated schemas for DB tests. +- Load sample data via fixtures for consistent test environments. + +## Mocking and Dependency Injection + +- Use `unittest.mock` to mock external dependencies. +- Inject dependencies via function parameters or FastAPI's dependency overrides in tests. + +## Code Coverage + +- Install `pytest-cov` to generate coverage reports. +- Run with coverage: `pytest --cov=calminer --cov-report=html`. +- Ensure coverage meets the 80% threshold. 
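The "Mocking and Dependency Injection" bullets above reference FastAPI dependency overrides. As a minimal sketch (not part of this patch), a unit test could swap the `get_db` dependency from `routes/scenarios.py` for an in-memory SQLite session. The file name, the SQLite/`StaticPool` setup, and the standalone test app below are assumptions; the sketch also needs `DATABASE_URL` set to some value because `config/database.py` builds an engine at import time, and it presumes the installed Pydantic release honors the `orm_mode` config used by the route schemas and that `httpx` (required by Starlette's `TestClient`) is available.

```python
# tests/unit/test_scenario.py -- illustrative sketch only; names are assumptions.
from fastapi import FastAPI
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from config.database import Base  # requires DATABASE_URL to be set (any value)
# Import all model modules so the string-based relationships ("Parameter",
# "SimulationResult") resolve when the Scenario mapper is configured.
import models.parameters  # noqa: F401
import models.simulation_result  # noqa: F401
from routes.scenarios import router as scenarios_router, get_db

# Shared in-memory SQLite database for this test module.
engine = create_engine(
    "sqlite://",
    connect_args={"check_same_thread": False},
    poolclass=StaticPool,
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)

app = FastAPI()
app.include_router(scenarios_router)


def override_get_db():
    db = TestingSessionLocal()
    try:
        yield db
    finally:
        db.close()


# Route handlers now receive the SQLite session instead of the Postgres one.
app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)


def test_create_scenario():
    response = client.post("/api/scenarios/", json={"name": "Base case"})
    assert response.status_code == 200, response.text
    assert response.json()["name"] == "Base case"
```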
+ +## CI Integration + +- Configure GitHub Actions workflow in `.github/workflows/ci.yml` to: + - Install dependencies + - Run `pytest` with coverage + - Fail on coverage <80% + - Upload coverage artifact diff --git a/main.py b/main.py new file mode 100644 index 0000000..b336f2e --- /dev/null +++ b/main.py @@ -0,0 +1,17 @@ +from routes.parameters import router as parameters_router +from fastapi import FastAPI +from fastapi.middleware import Middleware +from middleware.validation import validate_json +from config.database import Base, engine +from routes.scenarios import router as scenarios_router + +# Initialize database schema +Base.metadata.create_all(bind=engine) + +app = FastAPI() +# Register validation middleware +app.middleware("http")(validate_json) + +# Include API routers +app.include_router(scenarios_router) +app.include_router(parameters_router) diff --git a/middleware/validation.py b/middleware/validation.py new file mode 100644 index 0000000..0819eaa --- /dev/null +++ b/middleware/validation.py @@ -0,0 +1,13 @@ +from fastapi import Request, HTTPException + + +async def validate_json(request: Request, call_next): + # Only validate JSON for requests with a body + if request.method in ("POST", "PUT", "PATCH"): + try: + # attempt to parse json body + await request.json() + except Exception: + raise HTTPException(status_code=400, detail="Invalid JSON payload") + response = await call_next(request) + return response diff --git a/models/parameters.py b/models/parameters.py new file mode 100644 index 0000000..3de252a --- /dev/null +++ b/models/parameters.py @@ -0,0 +1,17 @@ +from sqlalchemy import Column, Integer, String, Float, ForeignKey +from sqlalchemy.orm import relationship +from config.database import Base + + +class Parameter(Base): + __tablename__ = "parameter" + + id = Column(Integer, primary_key=True, index=True) + scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False) + name = Column(String, nullable=False) + value = Column(Float, nullable=False) + + scenario = relationship("Scenario", back_populates="parameters") + + def __repr__(self): + return f"" diff --git a/models/scenario.py b/models/scenario.py new file mode 100644 index 0000000..2e0a611 --- /dev/null +++ b/models/scenario.py @@ -0,0 +1,20 @@ +from sqlalchemy import Column, Integer, String, DateTime, func +from sqlalchemy.orm import relationship +from config.database import Base + + +class Scenario(Base): + __tablename__ = "scenario" + + id = Column(Integer, primary_key=True, index=True) + name = Column(String, unique=True, nullable=False) + description = Column(String) + created_at = Column(DateTime(timezone=True), server_default=func.now()) + updated_at = Column(DateTime(timezone=True), onupdate=func.now()) + parameters = relationship("Parameter", back_populates="scenario") + simulation_results = relationship( + "SimulationResult", back_populates="scenario") + + # relationships can be defined later + def __repr__(self): + return f"" diff --git a/models/simulation_result.py b/models/simulation_result.py new file mode 100644 index 0000000..c5edac7 --- /dev/null +++ b/models/simulation_result.py @@ -0,0 +1,14 @@ +from sqlalchemy import Column, Integer, Float, ForeignKey +from sqlalchemy.orm import relationship +from config.database import Base + + +class SimulationResult(Base): + __tablename__ = "simulation_result" + + id = Column(Integer, primary_key=True, index=True) + scenario_id = Column(Integer, ForeignKey("scenario.id"), nullable=False) + iteration = Column(Integer, nullable=False) + result = 
Column(Float, nullable=False) + + scenario = relationship("Scenario", back_populates="simulation_results") diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..935896e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +fastapi +uvicorn +sqlalchemy +psycopg2-binary +python-dotenv +pytest +pytest-cov \ No newline at end of file diff --git a/routes/parameters.py b/routes/parameters.py new file mode 100644 index 0000000..9909bfb --- /dev/null +++ b/routes/parameters.py @@ -0,0 +1,50 @@ +from fastapi import APIRouter, Depends, HTTPException +from sqlalchemy.orm import Session +from config.database import SessionLocal +from models.parameters import Parameter +from models.scenario import Scenario +from pydantic import BaseModel +from typing import Optional, List + +router = APIRouter(prefix="/api/parameters", tags=["parameters"]) + + +class ParameterCreate(BaseModel): + scenario_id: int + name: str + value: float + + +class ParameterRead(ParameterCreate): + id: int + + class Config: + orm_mode = True + +# Dependency + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() + + +@router.post("/", response_model=ParameterRead) +def create_parameter(param: ParameterCreate, db: Session = Depends(get_db)): + scen = db.query(Scenario).filter(Scenario.id == param.scenario_id).first() + if not scen: + raise HTTPException(status_code=404, detail="Scenario not found") + new_param = Parameter(scenario_id=param.scenario_id, + name=param.name, value=param.value) + db.add(new_param) + db.commit() + db.refresh(new_param) + return new_param + + +@router.get("/", response_model=List[ParameterRead]) +def list_parameters(db: Session = Depends(get_db)): + return db.query(Parameter).all() diff --git a/routes/reporting.py b/routes/reporting.py new file mode 100644 index 0000000..1d6a6fa --- /dev/null +++ b/routes/reporting.py @@ -0,0 +1,23 @@ +from fastapi import APIRouter, HTTPException, Depends +from typing import Dict, Any + +from services.reporting import generate_report +from sqlalchemy.orm import Session +from config.database import SessionLocal + +router = APIRouter(prefix="/api/reporting", tags=["Reporting"]) + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() + +@router.post("/summary", response_model=Dict[str, float]) +async def summary_report(results: Any): + # Expect a list of simulation result dicts + if not isinstance(results, list): + raise HTTPException(status_code=400, detail="Invalid input format") + report = generate_report(results) + return report diff --git a/routes/scenarios.py b/routes/scenarios.py new file mode 100644 index 0000000..1619b76 --- /dev/null +++ b/routes/scenarios.py @@ -0,0 +1,52 @@ +from fastapi import APIRouter, Depends, HTTPException +from sqlalchemy.orm import Session +from config.database import SessionLocal +from models.scenario import Scenario +from pydantic import BaseModel +from typing import Optional +from datetime import datetime + +router = APIRouter(prefix="/api/scenarios", tags=["scenarios"]) + +# Pydantic schemas + + +class ScenarioCreate(BaseModel): + name: str + description: Optional[str] = None + + +class ScenarioRead(ScenarioCreate): + id: int + created_at: datetime + updated_at: Optional[datetime] = None + + class Config: + orm_mode = True + +# Dependency + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() + + +@router.post("/", response_model=ScenarioRead) +def create_scenario(scenario: ScenarioCreate, db: Session = Depends(get_db)): + db_s = 
db.query(Scenario).filter(Scenario.name == scenario.name).first() + if db_s: + raise HTTPException(status_code=400, detail="Scenario already exists") + new_s = Scenario(name=scenario.name, description=scenario.description) + db.add(new_s) + db.commit() + db.refresh(new_s) + return new_s + + +@router.get("/", response_model=list[ScenarioRead]) +def list_scenarios(db: Session = Depends(get_db)): + return db.query(Scenario).all() diff --git a/routes/simulations.py b/routes/simulations.py new file mode 100644 index 0000000..0a9f100 --- /dev/null +++ b/routes/simulations.py @@ -0,0 +1,25 @@ +from fastapi import APIRouter, HTTPException, Depends +from typing import List + +from services.simulation import run_simulation +from sqlalchemy.orm import Session +from config.database import SessionLocal + +router = APIRouter(prefix="/api/simulations", tags=["Simulations"]) + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() + + +@router.post("/run", response_model=List[dict]) +async def simulate(params: List[dict], iterations: int = 1000, db: Session = Depends(get_db)): + if not params: + raise HTTPException(status_code=400, detail="No parameters provided") + # TODO: you might use db to fetch scenario info or persist results + results = run_simulation(params, iterations) + return results diff --git a/services/reporting.py b/services/reporting.py new file mode 100644 index 0000000..1e6a9ce --- /dev/null +++ b/services/reporting.py @@ -0,0 +1,15 @@ +from typing import List, Dict + + +def generate_report(simulation_results: List[Dict[str, float]]) -> Dict[str, float]: + """ + Generate summary report from simulation results. + + Args: + simulation_results: List of dicts with 'iteration' and 'result'. + + Returns: + Dictionary with summary statistics (e.g., mean, median). + """ + # TODO: implement reporting logic (e.g., calculate mean, median, percentiles) + return {} diff --git a/services/simulation.py b/services/simulation.py new file mode 100644 index 0000000..db4a4ed --- /dev/null +++ b/services/simulation.py @@ -0,0 +1,17 @@ +from typing import Dict, List + + +def run_simulation(parameters: List[Dict[str, float]], iterations: int = 1000) -> List[Dict[str, float]]: + """ + Run Monte Carlo simulation with given parameters. + + Args: + parameters: List of parameter dicts with keys 'name' and 'value'. + iterations: Number of simulation iterations. + + Returns: + List of simulation result dicts for each iteration. + """ + # TODO: implement Monte Carlo logic + results: List[Dict[str, float]] = [] + return results
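Both service modules above end in `TODO` stubs. Purely as an illustration of the shape a first implementation could take, the bodies might look like the sketch below while keeping the existing signatures, so `routes/simulations.py` and `routes/reporting.py` would work unchanged. The patch defers distribution definitions to `models/distribution.py`, so the fixed 10% Gaussian noise, the summed result metric, and the percentile choices here are assumptions, not the intended engine.

```python
# Hypothetical fill-in for the TODOs in services/simulation.py and
# services/reporting.py; the sampling model and summary metrics are assumptions.
import random
import statistics
from typing import Dict, List, Optional


def run_simulation(parameters: List[Dict[str, float]], iterations: int = 1000,
                   seed: Optional[int] = None) -> List[Dict[str, float]]:
    """Perturb each parameter with ~10% Gaussian noise and sum the samples per iteration."""
    rng = random.Random(seed)
    results: List[Dict[str, float]] = []
    for i in range(1, iterations + 1):
        sampled = [rng.gauss(p["value"], abs(p["value"]) * 0.1) for p in parameters]
        results.append({"iteration": i, "result": sum(sampled)})
    return results


def generate_report(simulation_results: List[Dict[str, float]]) -> Dict[str, float]:
    """Summarise the 'result' values with a few descriptive statistics."""
    values = sorted(r["result"] for r in simulation_results)
    if not values:
        return {}
    return {
        "count": float(len(values)),
        "mean": statistics.fmean(values),
        "median": statistics.median(values),
        "p10": values[int(0.10 * (len(values) - 1))],
        "p90": values[int(0.90 * (len(values) - 1))],
        "min": values[0],
        "max": values[-1],
    }
```

The optional `seed` argument is an addition over the stub signature: a fixed seed keeps runs reproducible and lines up with the "rerun with different seeds" acceptance criterion in the MVP analysis, without breaking the existing `run_simulation(params, iterations)` call in `routes/simulations.py`.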