Unified logging behaviour and added exponential backoff

- The logger no longer defaults to log level DEBUG; it now reads the `LOG_LEVEL` environment variable
- The secret handler raises exceptions instead of calling `sys.exit`
- Added extensive debug logging
- Added detailed function docstrings
- Added exponential backoff when parsing the RSS feed (see the sketch below)
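
The retry delay grows as `2 ** (attempt - 1)` seconds, as implemented in `feed_handler.py`; a quick illustration of the schedule:

```python
# Exponential backoff schedule used for feed retries: 1s, 2s, 4s, ...
for attempt in range(1, 4):
    print(f"attempt {attempt}: wait {2 ** (attempt - 1)}s")
```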
This commit is contained in:
Florian 2025-10-17 15:12:02 +02:00
parent 14f974f4ed
commit abc894869f
8 changed files with 217 additions and 107 deletions

View File

@@ -1,3 +1,58 @@
# service-royalroad-chapters
# Service: Royalroad Chapters
Fetches new RR chapter data
## Overview
`service-royalroad-chapters` is a FastAPI-based webserver that monitors Royalroad stories for new chapters and sends notification data to `backend-api-internal`.
## Features
- Fetches the latest chapters from Royalroad RSS feeds.
- Updates the database when new chapters are released.
- Sends push notifications for new chapters.
- Exposes Prometheus metrics for monitoring request counts.
## Endpoints
### GET `/royalroad`
- Checks for new chapters for all active stories in the database.
- Updates the `lastChapter` field in the database.
- Sends notifications if a new chapter is found.
- Response:
```json
{
"status": "checked"
}
```
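A minimal manual check (a sketch assuming the service is reachable on `localhost:5000`, the default port in `main.py`):
```python
import requests

# Trigger a chapter check for all active stories.
resp = requests.get("http://localhost:5000/royalroad", timeout=30)
resp.raise_for_status()
print(resp.json())  # expected: {'status': 'checked'}
```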
## Requirements
- Python 3.12+
- MySQL database
- Python packages from requirements.txt
## Configuration
**Environment variables:**
- `LOG_LEVEL` (DEBUG, INFO, WARNING, ERROR, CRITICAL)
- Database credentials: `MYSQL_HOST`, `MYSQL_USER`, `MYSQL_PASSWORD`, `MYSQL_DATABASE`
- `API_KEY_INTERNAL`
- `BACKEND_API_URL`
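
For local experiments, `LOG_LEVEL` can be set in-process (a minimal sketch; `logger_handler.py` reads the variable at import time and falls back to `INFO` if it is unset or invalid):

```python
import os

# Must be set before logger_handler is imported, since LOG_LEVEL
# is read at module import time.
os.environ["LOG_LEVEL"] = "DEBUG"

from logger_handler import setup_logger

logger = setup_logger(__name__)
logger.debug("debug logging enabled")
```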
## Metrics
Metrics are exposed on port `9000` for Prometheus scraping.
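
To verify the exporter locally, a sketch (the `/metrics` path is an assumption; only the port is documented here):

```python
import requests

# Fetch the raw Prometheus exposition text; the /metrics path is assumed.
metrics_text = requests.get("http://localhost:9000/metrics", timeout=5).text
print("\n".join(metrics_text.splitlines()[:5]))
```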
## Running the Service
- From inside the `src` folder, run `python main.py`

View File

@@ -1,4 +1,3 @@
import mysql.connector
from mysql.connector import pooling, Error
import threading
from secret_handler import return_credentials
@@ -37,12 +36,13 @@ def create_connection_pool():
global _connection_pool
with _pool_lock:
if _connection_pool is not None:
logger.debug("[MySQL] Pool already exists, returning existing pool")
return
for attempt in range(1, MAX_RETRIES + 1):
try:
logger.info(f"[MySQL] Attempt {attempt} to connect...")
pool = pooling.MySQLConnectionPool(
pool_name="royalroadPool",
pool_name="royalRoadPool",
pool_size=5,
pool_reset_session=True,
**MYSQL_CONFIG
@@ -55,6 +55,7 @@ def create_connection_pool():
except Error as e:
logger.warning(f"[MySQL] Attempt {attempt} failed: {e}")
if attempt < MAX_RETRIES:
logger.debug(f"[MySQL] Retrying in {RETRY_DELAY} seconds...")
time.sleep(RETRY_DELAY)
logger.critical(f"[MySQL] Failed to connect after {MAX_RETRIES} attempts.")
@@ -64,34 +65,43 @@ def close_connection_pool():
global _connection_pool
with _pool_lock:
if _connection_pool:
logger.debug(f"[MySQL] Closing pool: {_connection_pool}")
_connection_pool = None
logger.info("[MySQL] Connection pool closed")
_stop_healthcheck.set()
logger.debug("[MySQL] Healthcheck stop flag set")
def get_connection_pool():
global _connection_pool
with _pool_lock:
if _connection_pool is None:
logger.debug("[MySQL] No pool found, creating one")
create_connection_pool()
else:
logger.debug(f"[MySQL] Returning existing pool: {_connection_pool}")
return _connection_pool
def get_db():
pool = get_connection_pool()
logger.debug(f"[MySQL] Acquiring connection from pool: {pool}")
conn = pool.get_connection()
try:
conn.ping(reconnect=True, attempts=3, delay=1)
logger.debug("[MySQL] Connection alive")
except Error as e:
logger.warning(f"[MySQL] Connection dead, recreating pool: {e}")
create_connection_pool()
pool = get_connection_pool()
conn = pool.get_connection()
logger.debug("[MySQL] Reconnected successfully")
try:
yield conn
finally:
if conn and conn.is_connected():
conn.close()
logger.debug("[MySQL] Connection returned to pool")
def _pool_healthcheck():
while not _stop_healthcheck.is_set():
@@ -99,12 +109,13 @@ def _pool_healthcheck():
with _pool_lock:
pool = _connection_pool
if not pool:
logger.debug("[MySQL] Healthcheck skipped, pool not initialized")
continue
try:
conn = pool.get_connection()
conn.ping(reconnect=True, attempts=3, delay=1)
conn.close()
logger.debug("[MySQL] Pool healthcheck OK.")
logger.debug(f"[MySQL] Pool healthcheck succeeded")
except Error as e:
logger.warning(f"[MySQL] Pool healthcheck failed: {e}")
create_connection_pool()
@@ -113,6 +124,7 @@ def _pool_healthcheck():
def start_healthcheck_thread():
global _health_thread
if _health_thread and _health_thread.is_alive():
logger.debug("[MySQL] Healthcheck thread already running")
return
_stop_healthcheck.clear()
_health_thread = threading.Thread(target=_pool_healthcheck, daemon=True)

View File

@@ -1,26 +1,64 @@
import feedparser
import re
from logger_handler import setup_logger
import time
from urllib.error import URLError
from typing import Tuple
logger = setup_logger(__name__)
def extract_number(s: str) -> int | None:
"""Extracts the first integer found in a string and returns either it or None"""
match = re.search(r"\d+", s)
if match:
return int(match.group())
else:
return None
def grab_latest_chapter_information(id:str)->tuple[int | None, str, str]:
url = f"https://www.royalroad.com/fiction/syndication/{id}"
feed = feedparser.parse(url)
def grab_latest_chapter_information(id: str, max_retries: int = 3) -> Tuple[int | None, str, str]:
"""
Fetches the latest chapter information from a Royalroad RSS feed, with retries on network-related errors.
if not feed.entries:
raise ValueError(f"No entries found for feed {id}")
latest_chapter_data = feed["entries"][0]
chapter_number = extract_number(latest_chapter_data["title"])
chapter_link = latest_chapter_data["link"]
title = feed["title"]
Parameters:
id: Royalroad story ID as a string.
max_retries: Number of retry attempts if fetching the feed fails due to network issues.
return chapter_number,chapter_link,title
Returns:
A tuple: (chapter_number, chapter_link, story_title)
Raises:
ValueError: If the feed has no entries.
Other network-related exceptions: If fetching fails after retries.
"""
rss_feed_url = f"https://www.royalroad.com/fiction/syndication/{id}"
attempt = 1
while attempt <= max_retries:
logger.debug(f"[Feed] Parsing feed URL: {rss_feed_url} (attempt {attempt}/{max_retries})")
try:
feed = feedparser.parse(rss_feed_url)
if not feed.entries:
raise ValueError(f"No entries found for feed {id}")
latest_chapter_data = feed["entries"][0]
chapter_number = extract_number(latest_chapter_data["title"])
chapter_link = latest_chapter_data["link"]
story_title = feed["title"]
logger.info(f"[Feed] Latest chapter for story '{story_title}' (ID {id}): {chapter_number} -> {chapter_link}")
return chapter_number, chapter_link, story_title
except (URLError, OSError) as e:
logger.warning(f"[Feed] Network error on attempt {attempt} for feed {id}: {e}")
if attempt == max_retries:
logger.error(f"[Feed] All {max_retries} attempts failed for feed {id}")
raise
backoff = 2 ** (attempt - 1)
logger.debug(f"[Feed] Retrying in {backoff} seconds...")
time.sleep(backoff)
attempt += 1
if __name__ == "__main__":
print(grab_latest_chapter_information("118891"))

View File

@@ -1,4 +1,9 @@
import logging
import os
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
if LOG_LEVEL not in {"ERROR", "DEBUG", "INFO", "WARNING", "CRITICAL"}:
LOG_LEVEL = "INFO"
def setup_logger(name: str) -> logging.Logger:
logger = logging.getLogger(name)
@@ -9,5 +14,6 @@ def setup_logger(name: str) -> logging.Logger:
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.setLevel(getattr(logging, LOG_LEVEL))
logger.debug(f"Logger {name} initialized with level {LOG_LEVEL}")
return logger

View File

@@ -2,7 +2,7 @@ from fastapi import FastAPI, Depends, HTTPException, Response, Request
import uvicorn
from contextlib import asynccontextmanager
from db import get_db, create_connection_pool, close_connection_pool, start_healthcheck_thread
from logger_handler import setup_logger
from logger_handler import setup_logger, LOG_LEVEL
from feed_handler import grab_latest_chapter_information
from send_notification import send_notification
from metrics_server import REQUEST_COUNTER
@@ -14,17 +14,18 @@ logger = setup_logger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
logger.info("Starting application...")
logger.info("[App] Starting application...")
logger.info("Creating MySQL connection pool...")
logger.info("[DB] Creating MySQL connection pool...")
create_connection_pool()
start_healthcheck_thread()
logger.info("MySQL healthcheck thread started.")
logger.info("[DB] MySQL healthcheck thread started.")
yield
logger.info("Closing MySQL connection pool...")
logger.info("[App] Closing MySQL connection pool...")
close_connection_pool()
logger.info("[App] Shutdown complete.")
api = FastAPI(
title="Docker Repository Query",
@@ -35,50 +36,73 @@ api = FastAPI(
@api.middleware("http")
async def prometheus_middleware(request: Request, call_next):
logger.debug(f"[Metrics] Incoming request: {request.method} {request.url.path}")
status = 500
try:
response = await call_next(request)
status = response.status_code
logger.debug(f"[Metrics] Request processed with status {status}")
except Exception as e:
logger.error(f"[Metrics] Exception occurred: {e}", exc_info=True)
raise
finally:
REQUEST_COUNTER.labels(request.method, request.url.path, status).inc()
logger.debug(f"[Metrics] Counter incremented for {request.method} {request.url.path} [{status}]")
return response
@api.get("/health")
def return_health():
return Response(status_code=200)
@api.get("/royalroad")
def get_chapters(
request: Request,
db = Depends(get_db)
):
try:
logger.info("Checking for new Royalroad chapters")
def get_chapters(request: Request, db=Depends(get_db)):
"""
Checks for new Royalroad chapters and updates the database accordingly.
Behaviour:
- Fetches all active stories and their last known chapter number.
- Queries Royalroad for each story's latest chapter.
- Updates `lastChapter` if a new chapter is found.
- Sends a notification for any newly discovered chapters.
Returns:
A JSON object indicating the check status.
"""
logger.info("[Royalroad] Checking for new chapters...")
try:
cursor = db.cursor()
cursor.execute("SELECT id,royalroadId,lastChapter FROM stories where active=1")
for id,royalroadId,last_chapter_db in cursor.fetchall():
chapter_number,chapter_link,title = grab_latest_chapter_information(royalroadId)
cursor.execute("SELECT id, royalroadId, lastChapter FROM stories WHERE active=1")
stories = cursor.fetchall()
logger.debug(f"[Royalroad] Found {len(stories)} active stories to check.")
for id, royalroadId, last_chapter_db in stories:
chapter_number, chapter_link, story_title = grab_latest_chapter_information(royalroadId)
logger.debug(f"[Royalroad] Story {id}: last={last_chapter_db}, latest={chapter_number}")
if chapter_number is not None and chapter_number > last_chapter_db:
cursor.execute("UPDATE stories SET lastChapter = %s WHERE id = %s",
(chapter_number, id))
logger.info(f"[Royalroad] New chapter detected for story ID {id}: {story_title}")
cursor.execute("UPDATE stories SET lastChapter = %s WHERE id = %s", (chapter_number, id))
db.commit()
send_notification(title,chapter_number,chapter_link)
send_notification(story_title, chapter_number, chapter_link)
logger.debug(f"[Royalroad] Notification sent for story ID {id}")
logger.info("[Royalroad] Chapter check completed successfully.")
return {"status": "checked"}
except ValueError as e:
logger.error(f"[Royalroad] Failed to fetch feed {royalroadId}: {e}")
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"[Royalroad] Error during chapter check: {e}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
async def start_servers():
config_main = uvicorn.Config("main:api", host="0.0.0.0", port=5000, log_level="info")
config_metrics = uvicorn.Config("metrics_server:metrics_api", host="0.0.0.0", port=9000, log_level="info")
logger.info("[Server] Starting main API (port 5000) and metrics server (port 9000)...")
config_main = uvicorn.Config("main:api", host="0.0.0.0", port=5000, log_level=LOG_LEVEL.lower())
config_metrics = uvicorn.Config("metrics_server:metrics_api", host="0.0.0.0", port=9000, log_level=LOG_LEVEL.lower())
server_main = uvicorn.Server(config_main)
server_metrics = uvicorn.Server(config_metrics)
await asyncio.gather(server_main.serve(), server_metrics.serve())
logger.info("[Server] Both servers started successfully.")
if __name__ == "__main__":
asyncio.run(start_servers())

View File

@@ -1,12 +1,14 @@
import sys
from logger_handler import setup_logger
logger = setup_logger(__name__)
def return_credentials(path: str)->str:
try:
with open(path) as file:
return file.read().strip()
except FileNotFoundError:
print(f"[FATAL] Secret file not found: {path}")
sys.exit(1)
logger.fatal(f"[FATAL] Secret file not found: {path}")
raise
except Exception as e:
print(f"[FATAL] Failed to read secret file {path}: {e}")
sys.exit(1)
logger.fatal(f"[FATAL] Failed to read secret file {path}: {e}")
raise

View File

@@ -12,43 +12,55 @@ api_key= return_credentials("/etc/secrets/api_key")
logger = setup_logger(__name__)
def send_notification(title:str,
chapter:int,
link:str,
max_retries: int = 5,
timeout: int = 5
):
def send_notification(title: str, chapter: int, link: str, max_retries: int = 5, timeout: int = 5):
"""
Sends a notification to the internal backend service when a new Royalroad chapter is released.
Parameters:
title: Name of the story.
chapter: Latest chapter number.
link: Direct link to the new chapter.
"""
headers = {
"X-API-Key-Internal": api_key,
"Content-Type": "application/json"
}
}
data = {
"receipent_user_id": 1,
"message": {
"title": title,
"info": f"Chapter {chapter} has been released",
"link": link
}}
"receipent_user_id": 1,
"message": {
"title": title,
"info": f"Chapter {chapter} has been released",
"link": link,
"category":"royal-road"
}
}
logger.debug(f"[Notify] Preparing to send notification: title='{title}', chapter={chapter}, link='{link}'")
with requests.Session() as session:
for attempt in range(1,max_retries+1):
for attempt in range(1, max_retries + 1):
try:
logger.debug(f"[Notify] Sending request to backend (attempt {attempt}/{max_retries})")
response = session.post(backend_api_url, headers=headers, json=data, timeout=timeout)
response.raise_for_status()
logger.info("Notification sent successfully")
logger.info(f"[Notify] Notification sent successfully for '{title}' (chapter {chapter})")
return response.text
except (Timeout,ConnectionError) as e:
logger.warning(f"Attempt {attempt}/{max_retries} failed: {type(e).__name__}")
except (Timeout, ConnectionError) as e:
logger.warning(f"[Notify] Attempt {attempt}/{max_retries} failed: {type(e).__name__}")
if attempt == max_retries:
logger.error(f"[Notify] All retry attempts failed for '{title}'")
raise HTTPException(status_code=503, detail=f"Notification service unavailable: {type(e).__name__}")
time.sleep(2 ** (attempt - 1))
sleep_time = 2 ** (attempt - 1)
logger.debug(f"[Notify] Retrying in {sleep_time} seconds...")
time.sleep(sleep_time)
except HTTPError as e:
logger.error(f"HTTP {e.response.status_code}: {e.response.text}")
logger.error(f"[Notify] HTTP {e.response.status_code}: {e.response.text}")
raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
except RequestException as e:
logger.error(f"Unexpected request failure: {e}")
logger.error(f"[Notify] Unexpected request failure: {e}")
raise HTTPException(status_code=500, detail=f"Request failed: {str(e)}")

View File

@@ -1,39 +0,0 @@
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"default": {
"class": "logging.StreamHandler",
"formatter": "default",
"stream": "ext://sys.stdout"
}
},
"loggers": {
"": { # root logger
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn.error": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
},
"uvicorn.access": {
"handlers": ["default"],
"level": "INFO",
"propagate": False
}
}
}