Merge pull request 'Unified logging behaviour' (#4) from feature/extended-logging into main
All checks were successful
Build & Publish to GHCR / build (push) Successful in 39s

Reviewed-on: #4
This commit is contained in:
florian 2025-10-17 08:54:04 +02:00
commit 38c91cb62d
6 changed files with 66 additions and 58 deletions

View File

@ -1,4 +1,3 @@
import mysql.connector
from mysql.connector import pooling, Error from mysql.connector import pooling, Error
import threading import threading
from secret_handler import return_credentials from secret_handler import return_credentials
@ -35,6 +34,7 @@ def create_connection_pool():
global _connection_pool global _connection_pool
with _pool_lock: with _pool_lock:
if _connection_pool is not None: if _connection_pool is not None:
logger.debug("[MySQL] Pool already exists, returning existing pool")
return return
for attempt in range (1,MAX_RETRIES+1): for attempt in range (1,MAX_RETRIES+1):
try: try:
@ -53,6 +53,7 @@ def create_connection_pool():
except Error as e: except Error as e:
logger.warning(f"[MySQL] Attempt {attempt} failed: {e}") logger.warning(f"[MySQL] Attempt {attempt} failed: {e}")
if attempt < MAX_RETRIES: if attempt < MAX_RETRIES:
logger.debug(f"[MySQL] Retrying in {RETRY_DELAY} seconds...")
time.sleep(RETRY_DELAY) time.sleep(RETRY_DELAY)
logger.critical(f"[MySQL] Failed to connect after {MAX_RETRIES} attempts.") logger.critical(f"[MySQL] Failed to connect after {MAX_RETRIES} attempts.")
@ -62,34 +63,43 @@ def close_connection_pool():
global _connection_pool global _connection_pool
with _pool_lock: with _pool_lock:
if _connection_pool: if _connection_pool:
logger.debug(f"[MySQL] Closing pool: {_connection_pool}")
_connection_pool = None _connection_pool = None
logger.info("[MySQL] Connection pool closed") logger.info("[MySQL] Connection pool closed")
_stop_healthcheck.set() _stop_healthcheck.set()
logger.debug("[MySQL] Healthcheck stop flag set")
def get_connection_pool(): def get_connection_pool():
global _connection_pool global _connection_pool
with _pool_lock: with _pool_lock:
if _connection_pool is None: if _connection_pool is None:
logger.debug("[MySQL] No pool found, creating one")
create_connection_pool() create_connection_pool()
else:
logger.debug(f"[MySQL] Returning existing pool: {_connection_pool}")
return _connection_pool return _connection_pool
def get_db():
    """FastAPI dependency: yield a pooled MySQL connection, returning it on exit.

    Pings the connection first; if it is dead, the pool is recreated and a
    fresh connection acquired.  Yields exactly once (generator dependency).
    """
    pool = get_connection_pool()
    logger.debug(f"[MySQL] Acquiring connection from pool: {pool}")
    conn = pool.get_connection()
    try:
        conn.ping(reconnect=True, attempts=3, delay=1)
        logger.debug("[MySQL] Connection alive")
    except Error as e:
        logger.warning(f"[MySQL] Connection dead, recreating pool: {e}")
        # Release the dead connection instead of leaking it; best-effort,
        # since a broken socket may raise again on close.
        try:
            conn.close()
        except Error:
            pass
        create_connection_pool()
        pool = get_connection_pool()
        conn = pool.get_connection()
        logger.debug("[MySQL] Reconnected successfully")
    try:
        yield conn
    finally:
        # Returning the connection to the pool; only close if still usable.
        if conn and conn.is_connected():
            conn.close()
            logger.debug("[MySQL] Connection returned to pool")
def _pool_healthcheck(): def _pool_healthcheck():
while not _stop_healthcheck.is_set(): while not _stop_healthcheck.is_set():
@ -97,11 +107,13 @@ def _pool_healthcheck():
with _pool_lock: with _pool_lock:
pool = _connection_pool pool = _connection_pool
if not pool: if not pool:
logger.debug("[MySQL] Healthcheck skipped, pool not initialized")
continue continue
try: try:
conn = pool.get_connection() conn = pool.get_connection()
conn.ping(reconnect=True, attempts=3, delay=1) conn.ping(reconnect=True, attempts=3, delay=1)
conn.close() conn.close()
logger.debug(f"[MySQL] Pool healthcheck succeeded")
except Error as e: except Error as e:
logger.warning(f"[MySQL] Pool healthcheck failed: {e}") logger.warning(f"[MySQL] Pool healthcheck failed: {e}")
create_connection_pool() create_connection_pool()
@ -110,6 +122,7 @@ def _pool_healthcheck():
def start_healthcheck_thread(): def start_healthcheck_thread():
global _health_thread global _health_thread
if _health_thread and _health_thread.is_alive(): if _health_thread and _health_thread.is_alive():
logger.debug("[MySQL] Healthcheck thread already running")
return return
_stop_healthcheck.clear() _stop_healthcheck.clear()
_health_thread = threading.Thread(target=_pool_healthcheck, daemon=True) _health_thread = threading.Thread(target=_pool_healthcheck, daemon=True)

View File

@ -1,4 +1,9 @@
import logging import logging
import os
# Resolve the log level from the environment, defaulting to INFO and
# rejecting anything that is not a standard logging level name.
_requested_level = os.getenv("LOG_LEVEL", "INFO").upper()
LOG_LEVEL = (
    _requested_level
    if _requested_level in {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
    else "INFO"
)
def setup_logger(name: str) -> logging.Logger: def setup_logger(name: str) -> logging.Logger:
logger = logging.getLogger(name) logger = logging.getLogger(name)
@ -9,5 +14,5 @@ def setup_logger(name: str) -> logging.Logger:
) )
handler.setFormatter(formatter) handler.setFormatter(formatter)
logger.addHandler(handler) logger.addHandler(handler)
logger.setLevel(logging.DEBUG) logger.setLevel(getattr(logging, LOG_LEVEL))
return logger return logger

View File

@ -7,11 +7,10 @@ from pydantic import BaseModel
from validator import is_valid_platform,is_valid_token,verify_api_key from validator import is_valid_platform,is_valid_token,verify_api_key
from secret_handler import encrypt_token from secret_handler import encrypt_token
from db import get_db, create_connection_pool, close_connection_pool, start_healthcheck_thread from db import get_db, create_connection_pool, close_connection_pool, start_healthcheck_thread
from logger_handler import setup_logger from logger_handler import setup_logger, LOG_LEVEL
import uuid import uuid
from hashlib import sha256 from hashlib import sha256
import uvicorn import uvicorn
from uvicorn_logging_config import LOGGING_CONFIG
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from metrics_server import REQUEST_COUNTER from metrics_server import REQUEST_COUNTER
import asyncio import asyncio
@ -23,7 +22,10 @@ api_key_header = APIKeyHeader(name="X-API-Key")
def hash_token(token: str) -> str:
    """Return the SHA-256 hex digest of *token*.

    SECURITY: deliberately no logging here — device push tokens are
    credentials, and writing the raw token (or even its digest, which is
    used as a lookup key) to DEBUG logs would leak them (CWE-532).
    """
    return sha256(token.encode()).hexdigest()
class TokenRequest(BaseModel): class TokenRequest(BaseModel):
token : str token : str
@ -58,22 +60,28 @@ api = FastAPI(
@api.middleware("http")
async def prometheus_middleware(request: Request, call_next):
    """Count every HTTP request in Prometheus, including ones that raise.

    ``status`` defaults to 500 so an exception path is still counted as a
    server error before the exception is re-raised.
    """
    status = 500
    logger.debug(f"Incoming request: {request.method} {request.url.path}")
    try:
        response = await call_next(request)
        status = response.status_code
        logger.debug(f"Request processed with status {status}")
    except Exception:
        # BUG FIX: the previous message interpolated `e`, but this clause
        # never bound `e` (`except Exception:`), so the handler itself raised
        # NameError.  logger.exception already attaches the traceback.
        logger.exception("Exception during request processing")
        raise
    finally:
        REQUEST_COUNTER.labels(request.method, request.url.path, status).inc()
        logger.debug(f"Prometheus counter incremented for {request.method} {request.url.path} {status}")
    return response
@api.get("/health", tags=["Health"]) @api.get("/health", tags=["Health"])
def return_health(db=Depends(get_db)): def return_health(db=Depends(get_db)):
logger.debug("Health check initiated")
try: try:
cursor = db.cursor() cursor = db.cursor()
cursor.execute("SELECT 1") cursor.execute("SELECT 1")
cursor.fetchone() cursor.fetchone()
db_status = "ok" db_status = "ok"
logger.debug("Database health check passed")
except Exception as e: except Exception as e:
logger.error(f"Health check DB failed: {e}") logger.error(f"Health check DB failed: {e}")
db_status = "error" db_status = "error"
@ -85,15 +93,19 @@ def return_health(db=Depends(get_db)):
def verify_api_key_dependency(db=Depends(get_db), api_key: str = Depends(api_key_header)) -> int:
    """Resolve the caller's user_id from the X-API-Key header.

    Compares the presented key against every active user's hashed key
    (necessary because the stored keys are salted hashes).  Raises 403 when
    no match is found.
    """
    # SECURITY: never log the raw API key — it is a credential (CWE-532).
    logger.debug("Verifying API key")
    cursor = db.cursor()
    cursor.execute("SELECT user_id, api_key FROM users WHERE status = 'active'")
    for user_id, hashed_key in cursor.fetchall():
        if verify_api_key(api_key=api_key, hashed=hashed_key):
            logger.debug(f"API key verified for user_id={user_id}")
            return user_id
    logger.warning("API key verification failed")
    raise HTTPException(status_code=403, detail="Unauthorized here")
@api.exception_handler(StarletteHTTPException) @api.exception_handler(StarletteHTTPException)
async def custom_http_exception_handler(request,exc): async def custom_http_exception_handler(request,exc):
logger.debug(f"Handling HTTP exception: {exc}")
if exc.status_code == 404: if exc.status_code == 404:
return JSONResponse( return JSONResponse(
status_code=401, status_code=401,
@ -111,11 +123,14 @@ def register_token(
user_id: int = Depends(verify_api_key_dependency) user_id: int = Depends(verify_api_key_dependency)
): ):
logger.info(f"Registering token for user_id={user_id}, platform={request_data.platform}") logger.info(f"Registering token for user_id={user_id}, platform={request_data.platform}")
logger.debug(f"Request data: {request_data}")
if not is_valid_platform(request_data.platform) or not is_valid_token(request_data.token): if not is_valid_platform(request_data.platform) or not is_valid_token(request_data.token):
logger.warning(f"Invalid platform or token for user_id={user_id}")
raise HTTPException(status_code=403,detail="Unathorized") raise HTTPException(status_code=403,detail="Unathorized")
secure_token = encrypt_token(request_data.token) secure_token = encrypt_token(request_data.token)
hashed_token = hash_token(request_data.token) hashed_token = hash_token(request_data.token)
logger.debug(f"Secure token: {secure_token}, Hashed token: {hashed_token}")
try: try:
cursor = db.cursor() cursor = db.cursor()
@ -123,8 +138,10 @@ def register_token(
"SELECT * FROM device_tokens WHERE user_id=%s AND hashed_token=%s", "SELECT * FROM device_tokens WHERE user_id=%s AND hashed_token=%s",
(user_id,hashed_token)) (user_id,hashed_token))
existing = cursor.fetchone() existing = cursor.fetchone()
logger.debug(f"Existing token fetched: {existing}")
if existing: if existing:
logger.debug(f"Updating existing token for user_id={user_id}")
cursor.execute(""" cursor.execute("""
UPDATE device_tokens UPDATE device_tokens
SET platform=%s, app_ver=%s, locale=%s, topics=%s, last_seen_at=NOW() SET platform=%s, app_ver=%s, locale=%s, topics=%s, last_seen_at=NOW()
@ -138,7 +155,7 @@ def register_token(
)) ))
else: else:
token_id = str(uuid.uuid4()) token_id = str(uuid.uuid4())
logger.info(f"Creating new entry user_id={user_id}, token_id={token_id}") logger.debug(f"Creating new entry user_id={user_id}, token_id={token_id}")
cursor.execute(""" cursor.execute("""
INSERT INTO device_tokens INSERT INTO device_tokens
(token_id, user_id, platform, token, hashed_token, status, app_ver, locale, topics, created_at) (token_id, user_id, platform, token, hashed_token, status, app_ver, locale, topics, created_at)
@ -157,6 +174,7 @@ def register_token(
db.commit() db.commit()
logger.info(f"Success: Registering token for user_id={user_id}, platform={request_data.platform}") logger.info(f"Success: Registering token for user_id={user_id}, platform={request_data.platform}")
except Exception as e: except Exception as e:
logger.exception(f"Failed to register token for user_id={user_id}: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
return {"status":"registered"} return {"status":"registered"}
@ -170,6 +188,7 @@ def unregister_token(
): ):
logger.info(f"Unregistering token for user_id={user_id}, platform={request_data.platform}") logger.info(f"Unregistering token for user_id={user_id}, platform={request_data.platform}")
hashed_token = hash_token(request_data.token) hashed_token = hash_token(request_data.token)
logger.debug(f"Hashed token for removal: {hashed_token}")
try: try:
cursor = db.cursor() cursor = db.cursor()
cursor.execute(""" cursor.execute("""
@ -177,20 +196,22 @@ def unregister_token(
SET status=%s, last_seen_at=NOW() SET status=%s, last_seen_at=NOW()
WHERE hashed_token=%s WHERE hashed_token=%s
""", ('expired', hashed_token)) """, ('expired', hashed_token))
except Exception as e: except Exception as e:
logger.exception(f"Failed to unregister token for user_id={user_id}: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
logger.info(f"Success: Unregistering token for user_id={user_id}, platform={request_data.platform}") logger.info(f"Success: Unregistering token for user_id={user_id}, platform={request_data.platform}")
return {"status":"unregistered"} return {"status":"unregistered"}
async def start_servers():
    """Run the main API (port 8100) and metrics API (port 9000) concurrently."""
    logger.debug("Starting servers...")
    # BUG FIX: uvicorn's `log_level` expects a lowercase level name
    # ("info", "debug", ...); LOG_LEVEL is normalized to UPPERCASE, which
    # makes uvicorn's level lookup fail at startup.  Lowercase it here.
    config_main = uvicorn.Config("main:api", host="0.0.0.0", port=8100, log_level=LOG_LEVEL.lower())
    config_metrics = uvicorn.Config("metrics_server:metrics_api", host="0.0.0.0", port=9000, log_level=LOG_LEVEL.lower())
    server_main = uvicorn.Server(config_main)
    server_metrics = uvicorn.Server(config_metrics)
    await asyncio.gather(server_main.serve(), server_metrics.serve())
    # NOTE: reached only after both servers have shut down.
    logger.debug("Servers started")
if __name__ == "__main__": if __name__ == "__main__":
asyncio.run(start_servers()) asyncio.run(start_servers())

View File

@ -1,15 +1,17 @@
from cryptography.fernet import Fernet from cryptography.fernet import Fernet
import sys from logger_handler import setup_logger
logger = setup_logger(__name__)
# Load the Fernet encryption key at import time; the service cannot run
# without it, so failures are logged at CRITICAL and re-raised to abort startup.
try:
    with open("/etc/secrets/encryption_key", "rb") as file:
        encryption_key = file.read()
except FileNotFoundError:
    # logger.fatal is a deprecated alias — use logger.critical.
    logger.critical("[FATAL] Encryption key not found")
    raise
except Exception as e:
    logger.critical(f"[FATAL] Failed to read encryption key: {e}")
    raise

fernet = Fernet(encryption_key)
@ -24,10 +26,10 @@ def return_credentials(path: str)->str:
with open (path) as file: with open (path) as file:
return file.read().strip() return file.read().strip()
except FileNotFoundError: except FileNotFoundError:
print(f"[FATAL] Secret file not found: {path}") logger.fatal(f"[FATAL] Secret file not found: {path}")
sys.exit(1) raise
except Exception as e: except Exception as e:
print(f"[FATAL] Failed to read secret file {path}: {e}") logger.fatal(f"[FATAL] Failed to read secret file {path}: {e}")
sys.exit(1) raise

View File

@ -1,39 +0,0 @@
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"default": {
"class": "logging.StreamHandler",
"formatter": "default",
"stream": "ext://sys.stdout"
}
},
"loggers": {
"": { # root logger
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn.error": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn.access": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
}
}
}

View File

@ -1,9 +1,14 @@
from argon2 import PasswordHasher from argon2 import PasswordHasher
import re import re
from logger_handler import setup_logger
logger = setup_logger(__name__)
def is_valid_platform(platform) -> bool:
    """Return True when *platform* is a supported push platform."""
    supported = platform in ("android",)
    if supported:
        logger.debug(f"Valid platform: {platform}")
    else:
        logger.debug(f"Invalid platform: {platform}")
    return supported
def is_valid_token(token: str) -> bool: def is_valid_token(token: str) -> bool:
@ -13,8 +18,9 @@ def is_valid_token(token: str) -> bool:
pattern = r"^ExponentPushToken\[([A-Za-z0-9]{22})\]$" pattern = r"^ExponentPushToken\[([A-Za-z0-9]{22})\]$"
if not re.match(pattern, token): if not re.match(pattern, token):
logger.debug(f"Invalid token format: {token}")
return False return False
logger.debug(f"Valid token format: {token}")
return True return True