""" Error handlers with memory awareness for production deployment. """ import logging from flask import Flask, jsonify from src.llm.llm_configuration_error import LLMConfigurationError from src.utils.memory_utils import get_memory_usage, optimize_memory logger = logging.getLogger(__name__) def register_error_handlers(app: Flask): """Register memory-aware error handlers.""" @app.errorhandler(500) def handle_internal_error(error): """Handle internal server errors with memory optimization.""" memory_mb = get_memory_usage() logger.error(f"Internal server error (Memory: {memory_mb:.1f}MB): {error}") # If memory is high, try to optimize if memory_mb > 400: logger.warning("High memory usage detected, optimizing...") optimize_memory() return ( jsonify( { "status": "error", "message": "Internal server error", "memory_mb": round(memory_mb, 1), } ), 500, ) @app.errorhandler(503) def handle_service_unavailable(error): """Handle service unavailable errors.""" memory_mb = get_memory_usage() logger.error(f"Service unavailable (Memory: {memory_mb:.1f}MB): {error}") return ( jsonify( { "status": "error", "message": "Service temporarily unavailable", "memory_mb": round(memory_mb, 1), } ), 503, ) @app.errorhandler(LLMConfigurationError) def handle_llm_configuration_error(error): """Handle LLM configuration errors with consistent JSON response.""" memory_mb = get_memory_usage() logger.error(f"LLM configuration error (Memory: {memory_mb:.1f}MB): {error}") return ( jsonify( { "status": "error", "message": f"LLM service configuration error: {str(error)}", "details": ("Please ensure OPENROUTER_API_KEY or GROQ_API_KEY " "environment variables are set"), } ), 503, )