# Gunicorn configuration file

import multiprocessing

# Server socket
bind = "0.0.0.0:8000"
backlog = 2048

# Worker processes
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = "sync"
worker_connections = 1000  # only used by async workers (eventlet/gevent); ignored by "sync"
max_requests = 1000
max_requests_jitter = 50  # stagger recycling so workers do not all restart at once
timeout = 30
keepalive = 2

# Logging
loglevel = "info"
accesslog = "-"  # "-" writes the access log to stdout
errorlog = "-"   # "-" writes the error log to stderr

# Process naming
proc_name = "jobs_app"

# Server mechanics
daemon = False
pidfile = "/tmp/gunicorn.pid"
user = None
group = None
tmp_upload_dir = None

# SSL (if needed)
keyfile = None
certfile = None

# Application
# Gunicorn has no "wsgi_module" or "callable" settings; unknown names in a
# config file are silently ignored. The supported setting (Gunicorn >= 20.1)
# is wsgi_app, which takes the "module:callable" pattern directly. On older
# versions, pass the app on the command line instead.
wsgi_app = "web.app:app"


def when_ready(server):
    """Start the background scheduler once, in the Gunicorn master, if enabled."""
    import os

    flag = (os.environ.get("SCRAPE_SCHEDULER_ENABLED") or "").strip().lower()
    if flag not in {"1", "true", "yes", "on"}:
        return
    try:
        from web.craigslist import start_scheduler_in_background

        start_scheduler_in_background()
        server.log.info("Background scraper scheduler started in Gunicorn master.")
    except Exception as exc:
        server.log.warning("Failed to start background scraper scheduler: %s", exc)
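
# For reference only: a minimal sketch of what web.craigslist's
# start_scheduler_in_background might look like, assuming a daemon-thread
# polling loop and a hypothetical run_scrape() entry point. The real module
# may differ; this is kept commented out so the config file stays free of
# side effects.
#
#     import threading
#     import time
#
#     def start_scheduler_in_background(interval_seconds=3600):
#         def _loop():
#             while True:
#                 run_scrape()  # hypothetical scrape entry point
#                 time.sleep(interval_seconds)
#         threading.Thread(target=_loop, daemon=True).start()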
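
# Gunicorn offers other lifecycle hooks alongside when_ready. As an
# illustrative example (not required by this app), post_fork runs in each
# worker just after it is forked, a good place for per-worker setup such as
# reinitialising connections that must not be shared across processes.
def post_fork(server, worker):
    """Log each worker as it comes up (illustrative example hook)."""
    server.log.info("Worker spawned (pid: %s)", worker.pid)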
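
# Usage: save this file as gunicorn.conf.py and start the server with
#   gunicorn -c gunicorn.conf.py
# On Gunicorn < 20.1 (no wsgi_app setting), name the app explicitly:
#   gunicorn -c gunicorn.conf.py web.app:app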