Files
FitTrack2/FitnessSync/backend/src/api/analysis.py
sstent d1cfd0fd8e feat: implement Fitbit OAuth, Garmin MFA, and optimize segment discovery
- Add Fitbit authentication flow (save credentials, OAuth callback handling)
- Implement Garmin MFA support with successful session/cookie handling
- Optimize segment discovery with new sampling and activity query services
- Refactor database session management in discovery API for better testability
- Enhance activity data parsing for charts and analysis
- Update tests to use testcontainers and proper dependency injection
- Clean up repository by ignoring and removing tracked transient files (.pyc, .db)
2026-01-16 15:35:26 -08:00

266 lines
11 KiB
Python

# Standard library
import json
from datetime import date, datetime, timedelta
from io import StringIO
from typing import List, Optional, Dict, Any

# Third-party
from fastapi import APIRouter, Depends, HTTPException, Body
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.orm import Session

# Local
from ..models.segment_effort import SegmentEffort
from ..models.activity import Activity
from ..models.bike_setup import BikeSetup
from ..models.health_metric import HealthMetric
from ..services.postgresql_manager import PostgreSQLManager
from ..services.parsers import extract_activity_data
from ..utils.config import config
from .status import get_db

router = APIRouter()
class EffortAnalysisData(BaseModel):
    """Flattened, enriched view of a single segment effort for comparison."""
    effort_id: int
    activity_id: int
    activity_name: str
    date: str  # ISO-8601 activity start time
    elapsed_time: float # seconds
    avg_power: Optional[int]
    max_power: Optional[int]
    avg_hr: Optional[int]
    avg_cadence: Optional[int]  # activity-level average, used as a proxy for the effort
    avg_speed: Optional[float]  # activity-level average, used as a proxy for the effort
    avg_temperature: Optional[float]
    bike_name: Optional[str]
    bike_weight: Optional[float]  # kg
    body_weight: Optional[float]  # kg, from the nearest HealthMetric on/before the activity date
    total_weight: Optional[float]  # body_weight + bike_weight, kg
    watts_per_kg: Optional[float]  # avg_power / body_weight, rounded to 2 decimals
class ComparisonResponse(BaseModel):
    """Response payload for the effort-comparison endpoint."""
    efforts: List[EffortAnalysisData]
    winners: Dict[str, Optional[int]] # metric_key -> effort_id of winner (None when no effort has data)
@router.post("/segments/efforts/compare", response_model=ComparisonResponse)
def compare_efforts(effort_ids: List[int] = Body(...), db: Session = Depends(get_db)):
    """
    Compare multiple segment efforts with enriched data.

    Args:
        effort_ids: IDs of SegmentEffort rows to compare (request body).
        db: SQLAlchemy session (injected via get_db).

    Returns:
        ComparisonResponse with one enriched row per found effort, in the
        order the IDs were requested, plus a per-metric map of the
        "winning" effort id.

    Raises:
        HTTPException: 400 when no IDs are supplied, 404 when none exist.
    """
    if not effort_ids:
        raise HTTPException(status_code=400, detail="No effort IDs provided")

    efforts = db.query(SegmentEffort).filter(SegmentEffort.id.in_(effort_ids)).all()
    if not efforts:
        raise HTTPException(status_code=404, detail="No efforts found")

    # IN() returns rows in arbitrary DB order; preserve the caller's order.
    by_id = {e.id: e for e in efforts}
    ordered = [by_id[i] for i in effort_ids if i in by_id]

    results: List[EffortAnalysisData] = []
    for effort in ordered:
        activity = effort.activity
        if activity is None:
            # Orphaned effort (activity row gone): skip rather than 500.
            continue

        bike_weight, bike_name = _bike_info(activity)
        body_weight = _body_weight_kg(db, activity.start_time.date())
        total_weight = body_weight + bike_weight

        # Watts per kg of body weight (not total system weight).
        w_kg = 0.0
        if effort.avg_power and body_weight > 0:
            w_kg = effort.avg_power / body_weight

        results.append(EffortAnalysisData(
            effort_id=effort.id,
            activity_id=activity.id,
            activity_name=activity.activity_name or f"Activity {activity.id}",
            date=activity.start_time.isoformat(),
            elapsed_time=effort.elapsed_time,
            avg_power=effort.avg_power,
            max_power=effort.max_power,
            avg_hr=effort.avg_hr,
            avg_cadence=activity.avg_cadence,  # activity avg as proxy; effort-specific value not in DB
            avg_speed=activity.avg_speed,  # proxy
            avg_temperature=activity.avg_temperature,
            bike_name=bike_name,
            bike_weight=bike_weight if bike_weight > 0 else None,
            body_weight=body_weight if body_weight > 0 else None,
            total_weight=total_weight if total_weight > 0 else None,
            watts_per_kg=round(w_kg, 2) if w_kg > 0 else None,
        ))

    return ComparisonResponse(efforts=results, winners=_pick_winners(results))


def _bike_info(activity):
    """Return (weight_kg, name) for the activity's bike setup, with safe defaults."""
    if activity.bike_setup:
        return (
            activity.bike_setup.weight_kg or 0.0,
            activity.bike_setup.name or activity.bike_setup.frame,
        )
    return 0.0, "Unknown"


def _body_weight_kg(db: Session, act_date) -> float:
    """
    Best-effort body weight in kg from the most recent 'weight' HealthMetric
    on or before *act_date*; 0.0 when no such metric exists.
    """
    metric = db.query(HealthMetric).filter(
        HealthMetric.metric_type == 'weight',
        HealthMetric.date <= act_date
    ).order_by(HealthMetric.date.desc()).first()
    if not metric:
        return 0.0
    val = metric.metric_value
    unit = (metric.unit or '').lower()
    # Heuristic: value > 150 is likely lbs (150 kg is ~330 lbs).
    # Fitbit data showed ~200 marked as 'kg', which is definitely lbs.
    if unit in ['lbs', 'lb', 'pounds'] or val > 150:
        return val * 0.453592
    return val


def _pick_winners(results: List[EffortAnalysisData]) -> Dict[str, Optional[int]]:
    """Map metric key -> effort_id that 'wins' the metric (None if no effort has data)."""
    if not results:
        return {}

    def best(key: str, mode: str = 'max') -> Optional[int]:
        valid = [r for r in results if getattr(r, key) is not None]
        if not valid:
            return None
        pick = max if mode == 'max' else min
        return pick(valid, key=lambda r: getattr(r, key)).effort_id

    return {
        'elapsed_time': best('elapsed_time', 'min'),
        'avg_power': best('avg_power'),
        'max_power': best('max_power'),
        # Lower HR for the same output suggests efficiency; context-dependent.
        'avg_hr': best('avg_hr', 'min'),
        'watts_per_kg': best('watts_per_kg'),
        'avg_speed': best('avg_speed'),
    }
@router.post("/segments/efforts/export")
def export_analysis(effort_ids: List[int] = Body(...), db: Session = Depends(get_db)):
    """
    Export structured JSON (comparison summary plus per-effort raw streams)
    for LLM analysis, returned as a downloadable attachment.

    Args:
        effort_ids: IDs of SegmentEffort rows to export (request body).
        db: SQLAlchemy session (injected via get_db).
    """
    # Reuse comparison logic for the summary rows.
    # TODO: refactor into a shared service function instead of calling the route.
    comparison = compare_efforts(effort_ids, db)

    data = []
    for summary in comparison.efforts:
        e_dict = summary.dict()
        # N+1 query, but export is a rare/manual action so this is acceptable.
        effort = db.query(SegmentEffort).get(e_dict['effort_id'])
        if effort and effort.activity:
            try:
                e_dict['streams'] = _extract_effort_streams(effort)
            except Exception as e:
                print(f"Error extracting streams for effort {effort.id}: {e}")
                e_dict['streams'] = {"error": str(e)}
        data.append(e_dict)

    file_content = json.dumps(data, indent=2)
    # StringIO (already imported) instead of the late-imported io.BytesIO;
    # Starlette encodes str chunks itself.
    return StreamingResponse(
        StringIO(file_content),
        media_type="application/json",
        headers={"Content-Disposition": "attachment; filename=efforts_analysis.json"}
    )


def _align_tz(dt, target_tz):
    """Coerce *dt* to the same tz-awareness (and zone) as *target_tz*."""
    if target_tz is None:
        return dt.replace(tzinfo=None)  # make naive
    if dt.tzinfo == target_tz:
        return dt
    if dt.tzinfo is None:
        return dt.replace(tzinfo=target_tz)
    return dt.astimezone(target_tz)


def _extract_effort_streams(effort) -> Dict[str, Any]:
    """
    Slice the parent activity's data streams down to this effort's time
    window. Prefers the ActivityStream table; falls back to re-parsing the
    stored activity file. Returns {} when no stream data is available.
    """
    act = effort.activity
    if act.streams and act.streams.time_offset:
        return _streams_from_table(effort, act)
    if act.file_content:
        return _streams_from_file(effort, act)
    return {}


def _streams_from_table(effort, act) -> Dict[str, Any]:
    """Slice the stream columns stored on the ActivityStream row."""
    streams: Dict[str, Any] = {}
    ast = act.streams
    # Reconstruct absolute timestamps from per-sample second offsets.
    full_timestamps = [act.start_time + timedelta(seconds=t) for t in ast.time_offset]
    tz = full_timestamps[0].tzinfo if full_timestamps else None
    start_time = _align_tz(effort.start_time, tz)
    end_time = _align_tz(effort.end_time, tz)
    # Timestamps are sorted so this could binary-search, but a linear scan
    # is fine for a rare export action.
    indices = [i for i, t in enumerate(full_timestamps) if start_time <= t <= end_time]
    if not indices:
        return streams
    first, last = indices[0], indices[-1] + 1
    streams['timestamps'] = [t.isoformat() for t in full_timestamps[first:last]]
    for key in ('heart_rate', 'power', 'speed', 'cadence', 'temperature', 'elevation'):
        column = getattr(ast, key)
        if column:
            streams[key] = column[first:last]
    # Lat/lng are stored as parallel arrays; zip truncates to the shorter one.
    if ast.latitude and ast.longitude:
        lats = ast.latitude[first:last]
        lngs = ast.longitude[first:last]
        streams['latlng'] = [[la, ln] for la, ln in zip(lats, lngs)]
    return streams


def _streams_from_file(effort, act) -> Dict[str, Any]:
    """Slice streams parsed out of the stored raw activity file."""
    streams: Dict[str, Any] = {}
    raw_data = extract_activity_data(act.file_content, act.file_type or 'fit')
    timestamps = raw_data.get('timestamps', [])
    if not (timestamps and timestamps[0]):
        return streams
    tz = timestamps[0].tzinfo
    start_time = _align_tz(effort.start_time, tz)
    end_time = _align_tz(effort.end_time, tz)
    indices = [i for i, t in enumerate(timestamps) if t and start_time <= t <= end_time]
    if not indices:
        return streams
    first, last = indices[0], indices[-1] + 1
    for k in ('heart_rate', 'power', 'speed', 'cadence', 'temperature'):
        if k in raw_data:
            streams[k] = raw_data[k][first:last]
    if 'points' in raw_data:
        sliced_points = raw_data['points'][first:last]
        # Parser points appear to be (lon, lat, [ele]) — note the swap to [lat, lng].
        streams['latlng'] = [[p[1], p[0]] for p in sliced_points] # lat, lon
        streams['elevation'] = [p[2] if len(p) > 2 else None for p in sliced_points]
    streams['timestamps'] = [t.isoformat() if t else None for t in timestamps[first:last]]
    return streams
import io