Logging System
Build production logging with the logging module and structlog — levels, handlers, formatters, and structured output.
Installation
pip install structlog python-json-logger
logging — Standard Library
import logging
import logging.handlers
import sys
from pathlib import Path

# ── Basic setup ───────────────────────────────────────────
# Configure the root logger once, at startup; loggers created afterwards
# inherit this level and format unless configured explicitly.
logging.basicConfig(
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.DEBUG,
)

# Module-level logger, named after the importing module (standard idiom).
logger = logging.getLogger(__name__)

# One event per severity, lowest to highest.
for emit, message in (
    (logger.debug, "Debug info"),
    (logger.info, "General info"),
    (logger.warning, "Warning"),
    (logger.error, "Error"),
    (logger.critical, "Critical failure"),
):
    emit(message)

# Include exception stack trace
try:
    1 / 0
except ZeroDivisionError:
    # logger.exception == logger.error + automatic traceback capture;
    # only meaningful inside an except block.
    logger.exception("Exception occurred")  # ERROR level + stack trace
Production Logging (dictConfig)
import logging
import logging.config
from pathlib import Path

# All log files live under ./logs — create it up front so the file
# handlers can open their targets when dictConfig runs.
LOG_DIR = Path("logs")
LOG_DIR.mkdir(exist_ok=True)

# Formatters: two human-readable variants plus machine-parseable JSON.
_FORMATTERS = {
    "detailed": {
        "format": "%(asctime)s [%(levelname)s] %(name)s:%(lineno)d - %(message)s",
        "datefmt": "%Y-%m-%d %H:%M:%S",
    },
    "simple": {
        "format": "[%(levelname)s] %(message)s",
    },
    "json": {
        # "()" tells dictConfig to instantiate this custom formatter class
        # (requires the third-party python-json-logger package).
        "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
        "format": "%(asctime)s %(name)s %(levelname)s %(message)s",
    },
}

# Handlers: console for humans, size-rotated files for everything /
# errors-only, and a daily-rotated JSON file for log shippers.
_HANDLERS = {
    "console": {
        "class": "logging.StreamHandler",
        "stream": "ext://sys.stdout",
        "formatter": "simple",
        "level": "DEBUG",
    },
    "file": {
        "class": "logging.handlers.RotatingFileHandler",
        "filename": str(LOG_DIR / "app.log"),
        "maxBytes": 10 * 1024 * 1024,  # 10 MB
        "backupCount": 5,
        "formatter": "detailed",
        "encoding": "utf-8",
    },
    "error_file": {
        "class": "logging.handlers.RotatingFileHandler",
        "filename": str(LOG_DIR / "error.log"),
        "maxBytes": 5 * 1024 * 1024,
        "backupCount": 3,
        "formatter": "detailed",
        "level": "ERROR",  # only ERROR and above reach this file
        "encoding": "utf-8",
    },
    "timed_file": {
        "class": "logging.handlers.TimedRotatingFileHandler",
        "filename": str(LOG_DIR / "daily.log"),
        "when": "midnight",  # roll over once per day
        "interval": 1,
        "backupCount": 30,  # keep ~a month of history
        "formatter": "json",
        "encoding": "utf-8",
    },
}

# Per-namespace loggers; propagate=False keeps events from reaching the
# root logger twice.
_LOGGERS = {
    "myapp": {
        "level": "DEBUG",
        "handlers": ["console", "file", "error_file"],
        "propagate": False,
    },
    "uvicorn": {
        "level": "INFO",
        "handlers": ["console"],
        "propagate": False,
    },
    "sqlalchemy.engine": {
        "level": "WARNING",  # suppress SQL query logs
        "handlers": ["file"],
        "propagate": False,
    },
}

LOGGING_CONFIG = {
    "version": 1,
    # Keep loggers that were created before this config is applied.
    "disable_existing_loggers": False,
    "formatters": _FORMATTERS,
    "handlers": _HANDLERS,
    "loggers": _LOGGERS,
    # Catch-all for any logger not listed above.
    "root": {
        "level": "WARNING",
        "handlers": ["console"],
    },
}

logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("myapp.service")
logger.info("Server started")
structlog — Structured Logging
import structlog
import logging

# ── Configuration ─────────────────────────────────────────
# The processor pipeline runs left-to-right on every event dict;
# the final processor renders the event to its output form.
_processors = [
    structlog.contextvars.merge_contextvars,      # fold in bound contextvars
    structlog.processors.add_log_level,           # attach the "level" key
    structlog.processors.TimeStamper(fmt="iso"),  # ISO-8601 timestamp
    structlog.dev.ConsoleRenderer(),  # development: colored output
    # structlog.processors.JSONRenderer(),  # production: JSON
]

structlog.configure(
    processors=_processors,
    context_class=dict,
    logger_factory=structlog.PrintLoggerFactory(),
    wrapper_class=structlog.make_filtering_bound_logger(logging.DEBUG),
)

log = structlog.get_logger()

# ── Structured log events ─────────────────────────────────
# Key/value pairs become structured fields instead of interpolated text.
log.info("User login", user_id=42, ip="192.168.1.1", method="jwt")
log.warning("Rate limit exceeded", user_id=42, limit=100, current=150)
log.error("DB connection failed", host="db.prod.internal", port=5432, retry=3)

# Context binding — the bound logger repeats these fields on every event.
request_log = log.bind(request_id="abc-123", user_id=42)
request_log.info("Request started", path="/api/users", method="GET")
request_log.info("Query executed", table="users", duration_ms=12.3)
request_log.info("Response sent", status=200, duration_ms=45.7)

# contextvars — share context across entire request (async-safe)
from structlog.contextvars import bind_contextvars, clear_contextvars


async def handle_request(request_id: str, user_id: int):
    """Handle one request; bound contextvars tag every log line it emits."""
    clear_contextvars()  # drop anything left over from a previous request
    bind_contextvars(request_id=request_id, user_id=user_id)
    log.info("Processing started")  # request_id and user_id included automatically
    # ... logic
    log.info("Processing complete")
FastAPI Logging Middleware
import time
import uuid
import logging
import structlog
from fastapi import FastAPI, Request, Response

logger = structlog.get_logger()
app = FastAPI()


@app.middleware("http")
async def logging_middleware(request: Request, call_next):
    """Log each request/response pair, tagged with a short correlation id.

    Binds request_id/method/path into structlog contextvars so every log
    line emitted while handling this request carries them automatically.
    The id is echoed back to the client in the X-Request-ID header.
    """
    rid = str(uuid.uuid4())[:8]  # short id is enough for log correlation
    started = time.perf_counter()

    # Fresh context per request — contextvars are async-safe, so
    # concurrent requests do not see each other's bindings.
    structlog.contextvars.clear_contextvars()
    structlog.contextvars.bind_contextvars(
        request_id=rid,
        method=request.method,
        path=request.url.path,
    )
    logger.info("Request received")

    try:
        response: Response = await call_next(request)
    except Exception as exc:
        # Log with traceback, then re-raise so FastAPI's error handling runs.
        logger.exception("Unhandled exception", exc_info=exc)
        raise

    elapsed_ms = (time.perf_counter() - started) * 1000
    logger.info(
        "Request complete",
        status=response.status_code,
        duration_ms=round(elapsed_ms, 2),
    )
    response.headers["X-Request-ID"] = rid
    return response
Log Level Guide
DEBUG — Detailed flow during development (disable in production)
INFO — Normal operation events (server start, request complete)
WARNING — Needs attention but still working (rate limit near, retry succeeded)
ERROR — Feature failure (DB error, payment failure) — alert required
CRITICAL — Immediate action needed (server down, risk of data loss)
Summary
| Tool | Characteristics | Best For |
|---|---|---|
| logging | Standard library, flexible config | All projects (foundation) |
| structlog | Structured JSON, context binding | API servers, microservices |
| Rich logging | Colored terminal output | Development environment |
- Development: `logging.basicConfig` or `structlog.dev.ConsoleRenderer`
- Production: `dictConfig` + JSON formatter + file rotation + centralized log collection