2025-09-10 11:46:57 -07:00
parent 2cc2b4c9ce
commit f443e7a64e
33 changed files with 887 additions and 1467 deletions

View File

@@ -1,11 +1,11 @@
-from pydantic_settings import BaseSettings
+from pydantic_settings import BaseSettings, SettingsConfigDict

class Settings(BaseSettings):
    DATABASE_URL: str
    GPX_STORAGE_PATH: str
    AI_MODEL: str = "openrouter/auto"
    API_KEY: str

-    class Config:
-        env_file = ".env"
+    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

settings = Settings()
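For reference, pydantic-settings 2.x replaces the inner `class Config` with `model_config = SettingsConfigDict(...)`, and `extra="ignore"` keeps unrelated variables in `.env` from failing validation. A minimal runnable sketch of how this resolves (values are made up, and the class is trimmed to two fields):

import os
from pydantic_settings import BaseSettings, SettingsConfigDict

class Settings(BaseSettings):
    API_KEY: str
    AI_MODEL: str = "openrouter/auto"
    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

os.environ["API_KEY"] = "sk-example"      # normally supplied via .env
os.environ["UNRELATED_VAR"] = "whatever"  # tolerated because extra="ignore"

settings = Settings()
print(settings.AI_MODEL)  # "openrouter/auto" (the declared default)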

View File

@@ -1,7 +1,8 @@
+import os
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import declarative_base, sessionmaker

-DATABASE_URL = "postgresql+asyncpg://appuser:password@db:5432/cyclingdb"
+DATABASE_URL = os.getenv("DATABASE_URL", "postgresql+asyncpg://postgres:password@db:5432/cycling")

engine = create_async_engine(DATABASE_URL, echo=True)
AsyncSessionLocal = sessionmaker(
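The hunk cuts off at `AsyncSessionLocal = sessionmaker(`. A minimal sketch of how the factory and the `get_db` dependency used by the routes typically complete with async SQLAlchemy (the keyword arguments here are assumptions, not the committed code):

import os
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker

DATABASE_URL = os.getenv("DATABASE_URL", "postgresql+asyncpg://postgres:password@db:5432/cycling")
engine = create_async_engine(DATABASE_URL, echo=True)

# class_=AsyncSession makes the factory hand out async sessions;
# expire_on_commit=False keeps ORM objects usable after commit.
AsyncSessionLocal = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)

async def get_db():
    # FastAPI dependency: one session per request, closed automatically.
    async with AsyncSessionLocal() as session:
        yield session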

View File

@@ -1,6 +1,9 @@
+import logging
+import json
+from datetime import datetime
from fastapi import FastAPI, Depends, Request, HTTPException
from fastapi.middleware.cors import CORSMiddleware
-from .database import get_db, get_database_url
+from .database import get_db
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import text
from alembic.config import Config
@@ -14,6 +17,45 @@ from .routes import prompts as prompt_routes
from .routes import dashboard as dashboard_routes
from .config import settings
+# Configure structured JSON logging
+class StructuredJSONFormatter(logging.Formatter):
+    def format(self, record):
+        log_data = {
+            "timestamp": datetime.utcnow().isoformat(),
+            "level": record.levelname,
+            "message": record.getMessage(),
+            "logger": record.name,
+            "module": record.module,
+            "function": record.funcName,
+            "line": record.lineno,
+            "thread": record.threadName,
+        }
+        if hasattr(record, 'extra'):
+            log_data.update(record.extra)
+        if record.exc_info:
+            log_data["exception"] = self.formatException(record.exc_info)
+        return json.dumps(log_data)
+
+# Set up logging
+logger = logging.getLogger("ai_cycling_coach")
+logger.setLevel(logging.INFO)
+
+# Create console handler with structured JSON format
+console_handler = logging.StreamHandler()
+console_handler.setFormatter(StructuredJSONFormatter())
+logger.addHandler(console_handler)
+
+# Configure rotating file handler
+from logging.handlers import RotatingFileHandler
+file_handler = RotatingFileHandler(
+    filename="/app/logs/app.log",
+    maxBytes=10*1024*1024,  # 10 MB
+    backupCount=5,
+    encoding='utf-8'
+)
+file_handler.setFormatter(StructuredJSONFormatter())
+logger.addHandler(file_handler)
app = FastAPI(
title="AI Cycling Coach API",
description="Backend service for AI-assisted cycling training platform",
@@ -49,61 +91,16 @@ app.include_router(workout_routes.router, prefix="/workouts", tags=["workouts"])
app.include_router(prompt_routes.router, prefix="/prompts", tags=["prompts"])
app.include_router(dashboard_routes.router, prefix="/api/dashboard", tags=["dashboard"])
-async def check_migration_status():
-    """Check if database migrations are up to date."""
-    try:
-        # Get Alembic configuration
-        config = Config("alembic.ini")
-        config.set_main_option("sqlalchemy.url", get_database_url())
-        script = ScriptDirectory.from_config(config)
-
-        # Get current database revision
-        from sqlalchemy import create_engine
-        engine = create_engine(get_database_url())
-        with engine.connect() as conn:
-            context = MigrationContext.configure(conn)
-            current_rev = context.get_current_revision()
-
-        # Get head revision
-        head_rev = script.get_current_head()
-
-        return {
-            "current_revision": current_rev,
-            "head_revision": head_rev,
-            "migrations_up_to_date": current_rev == head_rev
-        }
-    except Exception as e:
-        return {
-            "error": str(e),
-            "migrations_up_to_date": False
-        }

@app.get("/health")
-async def health_check(db: AsyncSession = Depends(get_db)):
-    """Enhanced health check with migration verification."""
-    health_status = {
+async def health_check():
+    """Simplified health check endpoint."""
+    return {
        "status": "healthy",
        "version": "0.1.0",
-        "timestamp": "2024-01-15T10:30:00Z"  # Should be dynamic
+        "timestamp": datetime.utcnow().isoformat()
    }

-    # Database connection check
-    try:
-        await db.execute(text("SELECT 1"))
-        health_status["database"] = "connected"
-    except Exception as e:
-        health_status["status"] = "unhealthy"
-        health_status["database"] = f"error: {str(e)}"
-
-    # Migration status check
-    migration_info = await check_migration_status()
-    health_status["migrations"] = migration_info
-    if not migration_info.get("migrations_up_to_date", False):
-        health_status["status"] = "unhealthy"
-
-    return health_status

if __name__ == "__main__":
    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    logger.info("Starting AI Cycling Coach API server")
+    uvicorn.run(app, host="0.0.0.0", port=8000, log_config=None)
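A note on how the formatter above actually receives context fields: the standard-library `extra=` kwarg copies each key directly onto the record as an attribute, so `hasattr(record, 'extra')` is only true when a caller passes a key literally named `extra`. A minimal sketch, assuming the handler setup above:

import logging

logger = logging.getLogger("ai_cycling_coach")

# A key named "extra" -> record.extra exists, so its fields are merged into the JSON line:
logger.info("Workout synced", extra={"extra": {"workout_id": 42, "source": "garmin"}})
# {"timestamp": "...", "level": "INFO", "message": "Workout synced", ..., "workout_id": 42, "source": "garmin"}

# Plain keys -> record.workout_id is set but record.extra is not, so the field is dropped:
logger.info("Workout synced", extra={"workout_id": 42})
# {"timestamp": "...", "level": "INFO", "message": "Workout synced", ...}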

View File

@@ -1,4 +1,4 @@
-from sqlalchemy import Column, Integer, String, ForeignKey, JSON, Boolean, DateTime
+from sqlalchemy import Column, Integer, String, ForeignKey, JSON, Boolean, DateTime, func
from sqlalchemy.orm import relationship
from .base import BaseModel
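`func` typically enters a models file for database-side defaults. A hedged sketch of the usual pattern (the column is illustrative, not taken from this commit):

from sqlalchemy import Column, DateTime, func

# Hypothetical column: the timestamp is assigned by PostgreSQL at INSERT time,
# not by Python, so it stays correct across app instances.
created_at = Column(DateTime(timezone=True), server_default=func.now())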

View File

@@ -0,0 +1,12 @@
+from sqlalchemy import Column, Integer, ForeignKey
+from sqlalchemy.orm import relationship
+from .base import BaseModel
+
+class PlanRule(BaseModel):
+    __tablename__ = "plan_rules"
+
+    plan_id = Column(Integer, ForeignKey('plans.id'), primary_key=True)
+    rule_id = Column(Integer, ForeignKey('rules.id'), primary_key=True)
+
+    plan = relationship("Plan", back_populates="rules")
+    rule = relationship("Rule", back_populates="plans")
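PlanRule is an association object with a composite primary key over (plan_id, rule_id). Its `back_populates` targets only resolve if Plan and Rule declare the mirror relationships; a sketch of what those declarations would look like (assumed, not part of this commit):

from sqlalchemy.orm import relationship

class Plan(BaseModel):
    __tablename__ = "plans"
    # ... existing columns ...
    rules = relationship("PlanRule", back_populates="plan")

class Rule(BaseModel):
    __tablename__ = "rules"
    # ... existing columns ...
    plans = relationship("PlanRule", back_populates="rule")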

View File

@@ -1,5 +1,6 @@
-from sqlalchemy import Column, Integer, ForeignKey, Boolean
+from sqlalchemy import Column, Integer, ForeignKey, Boolean, String
+from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship
from .base import BaseModel
class Rule(BaseModel):
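The new String and JSONB imports point at columns defined further down the file, outside this hunk. JSONB, unlike the generic JSON type, supports indexed containment queries in PostgreSQL; a hedged sketch with a made-up `parameters` column:

from sqlalchemy import select

# Hypothetical: parameters = Column(JSONB) on Rule. The @> containment
# operator finds rules whose JSON parameters include {"zone": 2}.
stmt = select(Rule).where(Rule.parameters.contains({"zone": 2}))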

View File

@@ -1,9 +1,54 @@
from fastapi import APIRouter
+from fastapi.responses import PlainTextResponse, JSONResponse
from app.services.health_monitor import HealthMonitor
+from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, Gauge
+from pathlib import Path
+import json

router = APIRouter()
monitor = HealthMonitor()

+# Prometheus metrics
+SYNC_QUEUE = Gauge('sync_queue_size', 'Current Garmin sync queue size')
+PENDING_ANALYSES = Gauge('pending_analyses', 'Number of pending workout analyses')

@router.get("/health")
async def get_health():
-    return monitor.check_system_health()
+    return monitor.check_system_health()

+@router.get("/metrics")
+async def prometheus_metrics():
+    # Update metrics with latest values
+    health_data = monitor.check_system_health()
+    SYNC_QUEUE.set(health_data['services'].get('sync_queue_size', 0))
+    PENDING_ANALYSES.set(health_data['services'].get('pending_analyses', 0))
+
+    return PlainTextResponse(
+        content=generate_latest(),
+        media_type=CONTENT_TYPE_LATEST
+    )
+
+@router.get("/dashboard/health", response_class=JSONResponse)
+async def health_dashboard():
+    """Health dashboard endpoint with aggregated monitoring data"""
+    health_data = monitor.check_system_health()
+
+    # Get recent logs (last 100 lines)
+    log_file = Path("/app/logs/app.log")
+    recent_logs = []
+    try:
+        with log_file.open() as f:
+            lines = f.readlines()[-100:]
+            recent_logs = [json.loads(line.strip()) for line in lines]
+    except FileNotFoundError:
+        pass
+
+    return {
+        "system": health_data,
+        "logs": recent_logs,
+        "statistics": {
+            "log_entries": len(recent_logs),
+            "error_count": sum(1 for log in recent_logs if log.get('level') == 'ERROR'),
+            "warning_count": sum(1 for log in recent_logs if log.get('level') == 'WARNING')
+        }
+    }
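`generate_latest()` serializes every collector in the default registry into the Prometheus text exposition format, so a scrape of /metrics would contain lines like these for the two gauges (the value is illustrative):

from prometheus_client import Gauge, generate_latest

queue = Gauge('sync_queue_size', 'Current Garmin sync queue size')
queue.set(3)
print(generate_latest().decode())
# ... default process/GC collectors, then:
# # HELP sync_queue_size Current Garmin sync queue size
# # TYPE sync_queue_size gauge
# sync_queue_size 3.0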

View File

@@ -8,8 +8,9 @@ from app.models.workout import Workout
from app.models.analysis import Analysis
from app.models.garmin_sync_log import GarminSyncLog
from app.models.plan import Plan
-from app.schemas.workout import Workout as WorkoutSchema, WorkoutSyncStatus
+from app.schemas.workout import Workout as WorkoutSchema, WorkoutSyncStatus, WorkoutMetric
from app.schemas.analysis import Analysis as AnalysisSchema
+from app.schemas.plan import Plan as PlanSchema
from app.services.workout_sync import WorkoutSyncService
from app.services.ai_service import AIService
from app.services.plan_evolution import PlanEvolutionService
@@ -32,7 +33,7 @@ async def read_workout(workout_id: int, db: AsyncSession = Depends(get_db)):
        raise HTTPException(status_code=404, detail="Workout not found")
    return workout

-@router.get("/{workout_id}/metrics", response_model=list[schemas.WorkoutMetric])
+@router.get("/{workout_id}/metrics", response_model=list[WorkoutMetric])
async def get_workout_metrics(
    workout_id: int,
    db: AsyncSession = Depends(get_db)
@@ -153,7 +154,7 @@ async def approve_analysis(
return {"message": "Analysis approved"}
@router.get("/plans/{plan_id}/evolution", response_model=List[schemas.Plan])
@router.get("/plans/{plan_id}/evolution", response_model=List[PlanSchema])
async def get_plan_evolution(
plan_id: int,
db: AsyncSession = Depends(get_db)
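Both response_model fixes address the same latent bug: the module never binds a name `schemas`, so `schemas.WorkoutMetric` and `schemas.Plan` would raise NameError when FastAPI evaluates the route decorators at import time. A minimal self-contained sketch of the corrected direct-import pattern (the schema fields are made up):

from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()

class WorkoutMetric(BaseModel):  # stand-in for app.schemas.workout.WorkoutMetric
    name: str
    value: float

@router.get("/{workout_id}/metrics", response_model=list[WorkoutMetric])
async def get_workout_metrics(workout_id: int):
    # response_model validates and serializes the return value.
    return [WorkoutMetric(name="avg_power", value=210.0)]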

View File

@@ -1,4 +1,4 @@
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
from datetime import datetime
from typing import List, Optional
from uuid import UUID
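The added Field import is presumably used for constraints further down the schema module, outside this hunk; a hedged illustration of the typical use:

from typing import Optional
from pydantic import BaseModel, Field

class WorkoutBase(BaseModel):  # hypothetical schema
    duration_minutes: int = Field(ge=0)                       # reject negatives
    notes: Optional[str] = Field(default=None, max_length=500)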

View File

@@ -36,38 +36,51 @@ class HealthMonitor:
        return {
            'database': self._check_database(),
            'garmin_sync': self._check_garmin_sync(),
-            'ai_service': self._check_ai_service()
+            'ai_service': self._check_ai_service(),
+            'sync_queue_size': self._get_sync_queue_size(),
+            'pending_analyses': self._count_pending_analyses()
        }

+    def _get_sync_queue_size(self) -> int:
+        """Get number of pending sync operations"""
+        from app.models.garmin_sync_log import GarminSyncLog, SyncStatus
+        return GarminSyncLog.query.filter_by(status=SyncStatus.PENDING).count()
+
+    def _count_pending_analyses(self) -> int:
+        """Count workouts needing analysis"""
+        from app.models.workout import Workout
+        return Workout.query.filter_by(analysis_status='pending').count()
+
    def _check_database(self) -> str:
        try:
            with get_db() as db:
                db.execute(text("SELECT 1"))
            return "ok"
        except Exception as e:
-            logger.error(f"Database check failed: {str(e)}")
+            logger.error("Database check failed", extra={"component": "database", "error": str(e)})
            return "down"

    def _check_garmin_sync(self) -> str:
        try:
            last_sync = GarminSyncLog.get_latest()
            if last_sync and last_sync.status == SyncStatus.FAILED:
+                logger.warning("Garmin sync has failed status", extra={"component": "garmin_sync", "status": last_sync.status.value})
                return "warning"
            return "ok"
        except Exception as e:
-            logger.error(f"Garmin sync check failed: {str(e)}")
+            logger.error("Garmin sync check failed", extra={"component": "garmin_sync", "error": str(e)})
            return "down"

    def _check_ai_service(self) -> str:
        try:
            response = requests.get(
-                f"{settings.AI_SERVICE_URL}/ping",
+                f"{settings.AI_SERVICE_URL}/ping",
                timeout=5,
                headers={"Authorization": f"Bearer {settings.OPENROUTER_API_KEY}"}
            )
            return "ok" if response.ok else "down"
        except Exception as e:
-            logger.error(f"AI service check failed: {str(e)}")
+            logger.error("AI service check failed", extra={"component": "ai_service", "error": str(e)})
            return "down"

    def _log_anomalies(self, metrics: Dict[str, Any]):
@@ -75,6 +88,7 @@ class HealthMonitor:
        for metric, value in metrics.items():
            if metric in self.warning_thresholds and value > self.warning_thresholds[metric]:
                alerts.append(f"{metric} {value}%")
+                logger.warning("System threshold exceeded", extra={"metric": metric, "value": value, "threshold": self.warning_thresholds[metric]})

        if alerts:
-            logger.warning(f"System thresholds exceeded: {', '.join(alerts)}")
+            logger.warning("System thresholds exceeded", extra={"alerts": alerts})

View File

@@ -6,6 +6,7 @@ from app.models.garmin_sync_log import GarminSyncLog
from app.models.garmin_sync_log import GarminSyncLog
from datetime import datetime, timedelta
import logging
+from typing import Dict, Any
import asyncio

logger = logging.getLogger(__name__)