371 lines
15 KiB
Python
371 lines
15 KiB
Python
from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, Query
|
|
from pydantic import BaseModel
|
|
from typing import Optional, List, Dict, Any
|
|
from datetime import datetime, timedelta
|
|
from ..models.api_token import APIToken
|
|
from ..services.sync_app import SyncApp
|
|
from ..services.garmin.client import GarminClient
|
|
from ..services.postgresql_manager import PostgreSQLManager
|
|
from sqlalchemy.orm import Session
|
|
from ..utils.config import config
|
|
from ..services.job_manager import job_manager
|
|
import logging
|
|
import json
|
|
import garth
|
|
import time
|
|
from garth.auth_tokens import OAuth1Token, OAuth2Token
|
|
from ..services.fitbit_client import FitbitClient
|
|
from fitbit import exceptions
|
|
from ..models.weight_record import WeightRecord
|
|
from ..models.config import Configuration
|
|
from enum import Enum
|
|
|
|
# Router for the sync/job endpoints; mounted by the application elsewhere.
router = APIRouter()

# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
|
|
|
|
class SyncActivityRequest(BaseModel):
    """Request body for POST /sync/activities."""
    days_back: int = 30  # number of days of activity history to sync
|
|
|
|
class SyncMetricsRequest(BaseModel):
    """Request body for POST /sync/metrics."""
    days_back: int = 30  # number of days of health metrics history to sync
|
|
|
|
class UploadWeightRequest(BaseModel):
    """Request body for the Garmin weight upload endpoint."""
    limit: int = 50  # maximum number of weight records to upload in one job
|
|
|
|
class SyncResponse(BaseModel):
    """Generic response for sync endpoints (started/completed/failed)."""
    status: str    # e.g. "started", "completed", "failed"
    message: str   # human-readable summary
    job_id: Optional[str] = None  # background job id when one was created
|
|
|
|
class WeightComparisonResponse(BaseModel):
    """Result of comparing Fitbit vs Garmin weight records by day."""
    fitbit_total: int        # unique days with a Fitbit weight record
    garmin_total: int        # unique days with a Garmin weight metric
    missing_in_garmin: int   # days present in Fitbit but absent in Garmin
    missing_dates: List[str] # those days as ISO date strings, newest first
    message: str             # human-readable summary of the comparison
|
|
|
|
class FitbitSyncScope(str, Enum):
    """How much Fitbit history a weight sync should cover."""
    LAST_30_DAYS = "30d"  # single 30-day window ending today
    ALL_HISTORY = "all"   # everything since 2015, fetched in ~31-day chunks
|
|
|
|
class WeightSyncRequest(BaseModel):
    """Request body for POST /sync/fitbit/weight."""
    scope: FitbitSyncScope = FitbitSyncScope.LAST_30_DAYS  # history range to pull
|
|
|
|
class JobStatusResponse(BaseModel):
    """Serialized state of a background job as tracked by job_manager."""
    id: str                 # unique job identifier
    operation: str          # human-readable operation name
    status: str             # current job state
    progress: int           # progress indicator (presumably 0-100 — TODO confirm in job_manager)
    message: str            # latest status message
    cancel_requested: bool  # whether cooperative cancellation was requested
|
|
|
|
def get_db():
    """FastAPI dependency yielding a request-scoped SQLAlchemy session.

    Session lifecycle (commit/rollback/close) is delegated to the
    PostgreSQLManager context manager.
    """
    manager = PostgreSQLManager(config.DATABASE_URL)
    with manager.get_db_session() as session:
        yield session
|
|
|
|
from ..services.garth_helper import load_and_verify_garth_session
|
|
from ..tasks.definitions import (
|
|
run_activity_sync_task,
|
|
run_metrics_sync_task,
|
|
run_health_scan_job,
|
|
run_fitbit_sync_job,
|
|
run_garmin_upload_job,
|
|
run_health_sync_job
|
|
)
|
|
|
|
@router.post("/sync/activities", response_model=SyncResponse)
def sync_activities(request: SyncActivityRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Start a background Garmin activity sync covering ``request.days_back`` days.

    Verifies the stored Garmin session up front so the caller gets an
    immediate 401 instead of a silently failing background job.

    Raises:
        HTTPException: 401 when the Garmin session cannot be verified.
    """
    try:
        load_and_verify_garth_session(db)
    except Exception as e:
        # Chain the cause so the original auth failure stays in the traceback.
        raise HTTPException(status_code=401, detail=f"Garmin auth failed: {str(e)}") from e

    job_id = job_manager.create_job("Activity Sync")
    # Pass a fresh session factory so the task does not depend on this
    # request-scoped `db` session.
    db_manager = PostgreSQLManager(config.DATABASE_URL)
    background_tasks.add_task(run_activity_sync_task, job_id, request.days_back, db_manager.get_db_session)

    return SyncResponse(
        status="started",
        message="Activity sync started in background",
        job_id=job_id
    )
|
|
|
|
@router.post("/sync/metrics", response_model=SyncResponse)
def sync_metrics(request: SyncMetricsRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Start a background Garmin health-metrics sync covering ``request.days_back`` days.

    Raises:
        HTTPException: 401 when the Garmin session cannot be verified.
    """
    try:
        load_and_verify_garth_session(db)
    except Exception as e:
        # Chain the cause so the original auth failure stays in the traceback.
        raise HTTPException(status_code=401, detail=f"Garmin auth failed: {str(e)}") from e

    job_id = job_manager.create_job("Health Metrics Sync")
    # Pass a fresh session factory so the task does not depend on this
    # request-scoped `db` session.
    db_manager = PostgreSQLManager(config.DATABASE_URL)
    background_tasks.add_task(run_metrics_sync_task, job_id, request.days_back, db_manager.get_db_session)

    return SyncResponse(
        status="started",
        message="Health metrics sync started in background",
        job_id=job_id
    )
|
|
|
|
@router.post("/metrics/sync/scan", response_model=SyncResponse)
async def scan_health_trigger(
    background_tasks: BackgroundTasks,
    days_back: int = Query(30, description="Number of days to scan back")
):
    """Trigger background scan of health gaps"""
    job_id = job_manager.create_job("scan_health_metrics")

    # Hand the job its own session factory rather than a request-bound session.
    session_factory = PostgreSQLManager(config.DATABASE_URL).get_db_session
    background_tasks.add_task(run_health_scan_job, job_id, days_back, session_factory)

    return SyncResponse(
        status="started",
        message="Health metrics scan started in background",
        job_id=job_id,
    )
|
|
|
|
@router.post("/sync/fitbit/weight", response_model=SyncResponse)
def sync_fitbit_weight(request: WeightSyncRequest, db: Session = Depends(get_db)):
    """Pull weight logs from Fitbit into the local database.

    NOTE(review): unlike the Garmin endpoints, this runs synchronously inside
    the request — an ALL_HISTORY sync can block for minutes (chunked fetches
    plus rate-limit sleeps). Consider moving it onto job_manager like the
    other sync routes.
    """
    return sync_fitbit_weight_impl(request, db)
|
|
|
|
def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session) -> SyncResponse:
    """Synchronously fetch Fitbit weight logs and upsert them as WeightRecord rows.

    Fetches the requested date range in <=31-day chunks (Fitbit's range API
    limit), retrying each chunk on rate limits, and commits after every chunk
    so partial progress survives a later failure.

    Args:
        request: Scope selector (last 30 days vs. full history since 2015).
        db: Open SQLAlchemy session used for token lookup and record upserts.

    Raises:
        HTTPException: 401 if no Fitbit token is stored, 400 if client
            credentials are missing, 500 if the Fitbit client cannot be built.
    """
    logger.info(f"Starting Fitbit weight sync with scope: {request.scope}")

    # 1. Get credentials and token.
    token = db.query(APIToken).filter_by(token_type='fitbit').first()
    config_entry = db.query(Configuration).first()

    if not token or not token.access_token:
        raise HTTPException(status_code=401, detail="No Fitbit token found. Please authenticate first.")

    if not config_entry or not config_entry.fitbit_client_id or not config_entry.fitbit_client_secret:
        raise HTTPException(status_code=400, detail="Fitbit credentials missing.")

    # 2. Init client, with a callback that persists refreshed OAuth tokens.
    def refresh_cb(token_dict):
        logger.info("Fitbit token refreshed via callback")
        try:
            # `token` is still attached to this request's session, so we can
            # write the refreshed values straight onto it.
            token.access_token = token_dict.get('access_token')
            token.refresh_token = token_dict.get('refresh_token')
            # NOTE(review): fromtimestamp() yields a naive local-time datetime
            # — confirm the column is meant to store local time.
            token.expires_at = datetime.fromtimestamp(token_dict.get('expires_at')) if token_dict.get('expires_at') else None
            db.commit()
            logger.info("New Fitbit token saved to DB")
        except Exception as e:
            # Best-effort: a failed save only means re-authentication later.
            logger.error(f"Failed to save refreshed token: {e}")

    try:
        fitbit_client = FitbitClient(
            config_entry.fitbit_client_id,
            config_entry.fitbit_client_secret,
            access_token=token.access_token,
            refresh_token=token.refresh_token,
            redirect_uri=config_entry.fitbit_redirect_uri,
            refresh_cb=refresh_cb
        )
    except Exception as e:
        logger.error(f"Failed to initialize Fitbit client: {e}")
        raise HTTPException(status_code=500, detail="Failed to initialize Fitbit client")

    # 3. Determine the date ranges to fetch.
    today = datetime.now().date()
    ranges = _build_date_ranges(request.scope, today)

    # 4. Fetch and sync.
    total_processed = 0
    total_new = 0
    total_updated = 0

    try:
        total_chunks = len(ranges)
        print(f"Starting sync for {total_chunks} time chunks.", flush=True)

        for i, (start, end) in enumerate(ranges):
            start_str = start.strftime('%Y-%m-%d')
            end_str = end.strftime('%Y-%m-%d')

            print(f"Processing chunk {i+1}/{total_chunks}: {start_str} to {end_str}", flush=True)

            logs = _fetch_chunk_with_retry(fitbit_client, start_str, end_str)
            if logs is None:
                # Rate-limit retries exhausted for this chunk; skip it.
                continue

            # Throttle to stay under Fitbit's rate limit (150 calls/hour).
            time.sleep(2)

            for log in logs:
                new_count, updated_count = _upsert_weight_log(db, log)
                total_new += new_count
                total_updated += updated_count
                total_processed += 1

            db.commit()  # Commit after each chunk.

    except Exception as e:
        logger.error(f"Sync failed: {e}", exc_info=True)
        return SyncResponse(
            status="failed",
            message=f"Sync failed: {str(e)}",
            job_id=f"fitbit-weight-sync-{datetime.now().strftime('%Y%m%d%H%M%S')}"
        )

    return SyncResponse(
        status="completed",
        message=f"Fitbit Weight Sync ({request.scope}) completed. Processed: {total_processed} (New: {total_new}, Updated: {total_updated})",
        job_id=f"fitbit-weight-sync-{datetime.now().strftime('%Y%m%d%H%M%S')}"
    )


def _build_date_ranges(scope, today):
    """Return a list of (start, end) date pairs covering the requested scope."""
    if scope == FitbitSyncScope.LAST_30_DAYS:
        return [(today - timedelta(days=30), today)]

    # Full history: chunk the requests because Fitbit caps range queries at
    # 31 days (and huge responses risk timeouts).
    ranges = []
    current_start = datetime(2015, 1, 1).date()
    while current_start < today:
        chunk_end = min(current_start + timedelta(days=30), today)
        ranges.append((current_start, chunk_end))
        current_start = chunk_end + timedelta(days=1)
    return ranges


def _fetch_chunk_with_retry(fitbit_client, start_str, end_str, max_retries=3):
    """Fetch weight logs for one date range, retrying on rate limits.

    Returns the list of log dicts, or None when every retry was rate limited
    (callers skip the chunk in that case). Non-rate-limit errors propagate.
    """
    retry_count = 0
    while retry_count < max_retries:
        try:
            logs = fitbit_client.get_weight_logs(start_str, end_str)
            print(f" > Found {len(logs)} records in chunk.", flush=True)
            return logs
        except Exception as e:
            error_msg = str(e).lower()
            is_rate_limit = (
                "rate limit" in error_msg
                or "retry-after" in error_msg
                or isinstance(e, exceptions.HTTPTooManyRequests)
            )
            if not is_rate_limit:
                raise  # Not a rate limit; abort the sync with traceback intact.

            wait_time = 65  # Default safe wait.
            if "retry-after" in error_msg and ":" in str(e):
                try:
                    parts = str(e).split("Retry-After:")
                    if len(parts) > 1:
                        wait_time = int(float(parts[1].strip().replace('s', ''))) + 5
                except ValueError:
                    pass  # Unparseable Retry-After value; keep the default.

            print(f" > Rate limit hit. Waiting {wait_time} seconds before retrying chunk (Attempt {retry_count+1}/{max_retries})...", flush=True)
            time.sleep(wait_time)
            retry_count += 1

    print(f" > Max retries reached for chunk. Skipping.", flush=True)
    return None


def _upsert_weight_log(db, log):
    """Insert or update one Fitbit weight log; returns (new_count, updated_count).

    Expected payload shape: {'bmi': 23.5, 'date': '2023-01-01', 'logId': 12345,
    'time': '23:59:59', 'weight': 70.5, 'source': 'API'}.
    """
    fitbit_id = str(log.get('logId'))
    weight_val = log.get('weight')
    bmi_val = log.get('bmi')

    # Combine date and time into one timestamp.
    timestamp = datetime.strptime(f"{log.get('date')} {log.get('time')}", '%Y-%m-%d %H:%M:%S')

    existing = db.query(WeightRecord).filter_by(fitbit_id=fitbit_id).first()
    if existing:
        # Update only when the weight changed or BMI was never recorded.
        if abs(existing.weight - weight_val) > 0.01 or existing.bmi is None:
            existing.weight = weight_val
            existing.bmi = bmi_val
            existing.unit = 'kg'  # Force unit update too.
            existing.date = timestamp
            existing.timestamp = timestamp
            existing.sync_status = 'unsynced'  # Re-flag for a future Garmin sync.
            return 0, 1
        return 0, 0

    db.add(WeightRecord(
        fitbit_id=fitbit_id,
        weight=weight_val,
        bmi=bmi_val,
        unit='kg',
        date=timestamp,
        timestamp=timestamp,
        sync_status='unsynced'
    ))
    return 1, 0
|
|
|
|
|
|
|
|
@router.post("/sync/compare-weight", response_model=WeightComparisonResponse)
def compare_weight_records(db: Session = Depends(get_db)):
    """Compare weight records between Fitbit (WeightRecord) and Garmin (HealthMetric)."""
    logger.info("Comparing Fitbit vs Garmin weight records...")

    # Compare at day granularity: timestamps for the same measurement can
    # differ slightly between the two sources.
    fitbit_days = {
        row[0].date()
        for row in db.query(WeightRecord.date).all()
        if row[0]
    }

    from ..models.health_metric import HealthMetric
    garmin_rows = db.query(HealthMetric.date).filter(
        HealthMetric.metric_type == 'weight',
        HealthMetric.source == 'garmin'
    ).all()
    garmin_days = {row[0].date() for row in garmin_rows if row[0]}

    # Days Fitbit knows about that Garmin is missing, newest first.
    missing = fitbit_days - garmin_days
    missing_sorted = sorted((d.isoformat() for d in missing), reverse=True)

    return WeightComparisonResponse(
        fitbit_total=len(fitbit_days),
        garmin_total=len(garmin_days),
        missing_in_garmin=len(missing),
        missing_dates=missing_sorted,
        message=f"Comparison Complete. Fitbit has {len(fitbit_days)} unique days, Garmin has {len(garmin_days)}. {len(missing)} days from Fitbit are missing in Garmin."
    )
|
|
|
|
@router.post("/sync/garmin/weight")
def upload_weight_to_garmin(request: UploadWeightRequest, background_tasks: BackgroundTasks):
    """Start a background job uploading up to ``request.limit`` weight records to Garmin.

    NOTE(review): the decorator and signature of this endpoint were missing in
    the original file — the body referenced undefined names `request` and
    `background_tasks`. Reconstructed from UploadWeightRequest and the imported
    run_garmin_upload_job task; confirm the route path against its caller.
    """
    limit = request.limit
    job_id = job_manager.create_job("garmin_weight_upload")

    # Pass a fresh session factory so the task does not depend on request state.
    db_manager = PostgreSQLManager(config.DATABASE_URL)
    background_tasks.add_task(run_garmin_upload_job, job_id, limit, db_manager.get_db_session)
    return {"job_id": job_id, "status": "started"}
|
|
|
|
@router.get("/jobs/active", response_model=List[JobStatusResponse])
def get_active_jobs():
    """Return all jobs job_manager currently considers active."""
    return job_manager.get_active_jobs()
|
|
|
|
@router.post("/jobs/{job_id}/stop")
def stop_job(job_id: str):
    """Request cooperative cancellation of a job; 404 when the id is unknown."""
    if not job_manager.request_cancel(job_id):
        raise HTTPException(status_code=404, detail="Job not found")
    return {"status": "cancelled", "message": f"Cancellation requested for job {job_id}"}
|
|
|
|
@router.get("/jobs/{job_id}", response_model=JobStatusResponse)
def get_job_status(job_id: str):
    """Get status of a specific job; 404 when it does not exist."""
    if not (job := job_manager.get_job(job_id)):
        raise HTTPException(status_code=404, detail="Job not found")
    return job
|