many updates
This commit is contained in:
232
FitnessSync/backend/src/api/analysis.py
Normal file
232
FitnessSync/backend/src/api/analysis.py
Normal file
@@ -0,0 +1,232 @@
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Body
|
||||
from typing import List, Optional, Dict, Any
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import func
|
||||
from datetime import date, datetime
|
||||
from pydantic import BaseModel
|
||||
|
||||
import json
|
||||
from io import StringIO
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from ..models.segment_effort import SegmentEffort
|
||||
from ..models.activity import Activity
|
||||
from ..models.bike_setup import BikeSetup
|
||||
from ..models.health_metric import HealthMetric
|
||||
from ..services.postgresql_manager import PostgreSQLManager
|
||||
from ..services.parsers import extract_activity_data
|
||||
from ..utils.config import config
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
def get_db():
    """FastAPI dependency that yields a database session.

    Opens a session through the application's PostgreSQLManager and
    relies on the manager's context manager to release it when the
    request completes.
    """
    manager = PostgreSQLManager(config.DATABASE_URL)
    with manager.get_db_session() as session:
        yield session
|
||||
|
||||
class EffortAnalysisData(BaseModel):
    """One segment effort's metrics, enriched with bike/body-weight context.

    Serialized per-effort row returned by the comparison endpoint.
    """

    # Identity / provenance
    effort_id: int    # SegmentEffort primary key
    activity_id: int  # Parent Activity primary key
    activity_name: str
    date: str  # ISO-8601 activity start time
    elapsed_time: float  # seconds

    # Core performance metrics (None when the source data lacks them)
    avg_power: Optional[int]
    max_power: Optional[int]
    avg_hr: Optional[int]
    avg_cadence: Optional[int]
    avg_speed: Optional[float]
    avg_temperature: Optional[float]

    # Weight context used to derive watts/kg
    bike_name: Optional[str]
    bike_weight: Optional[float]   # kg
    body_weight: Optional[float]   # kg (unit-normalized from health metrics)
    total_weight: Optional[float]  # kg, body + bike
    watts_per_kg: Optional[float]  # avg_power / body_weight, rounded to 2 dp
|
||||
|
||||
class ComparisonResponse(BaseModel):
    """Response payload for the effort-comparison endpoint."""

    efforts: List[EffortAnalysisData]
    winners: Dict[str, int]  # metric_key -> effort_id of winner
|
||||
|
||||
@router.post("/segments/efforts/compare", response_model=ComparisonResponse)
|
||||
def compare_efforts(effort_ids: List[int] = Body(...), db: Session = Depends(get_db)):
|
||||
"""
|
||||
Compare multiple segment efforts with enriched data.
|
||||
"""
|
||||
if not effort_ids:
|
||||
raise HTTPException(status_code=400, detail="No effort IDs provided")
|
||||
|
||||
efforts = db.query(SegmentEffort).filter(SegmentEffort.id.in_(effort_ids)).all()
|
||||
if not efforts:
|
||||
raise HTTPException(status_code=404, detail="No efforts found")
|
||||
|
||||
results = []
|
||||
|
||||
for effort in efforts:
|
||||
activity = effort.activity
|
||||
|
||||
# 1. Bike Data
|
||||
bike_weight = 0.0
|
||||
bike_name = "Unknown"
|
||||
if activity.bike_setup:
|
||||
bike_weight = activity.bike_setup.weight_kg or 0.0
|
||||
bike_name = activity.bike_setup.name or activity.bike_setup.frame
|
||||
|
||||
# 2. Body Weight (approximate from HealthMetrics closest to date)
|
||||
# Find weight metric on or before activity date
|
||||
act_date = activity.start_time.date()
|
||||
weight_metric = db.query(HealthMetric).filter(
|
||||
HealthMetric.metric_type == 'weight',
|
||||
HealthMetric.date <= act_date
|
||||
).order_by(HealthMetric.date.desc()).first()
|
||||
|
||||
body_weight = 0.0
|
||||
if weight_metric:
|
||||
val = weight_metric.metric_value
|
||||
unit = (weight_metric.unit or '').lower()
|
||||
|
||||
# Heuristic: Value > 150 is likely lbs (unless user is very heavy, but 150kg is ~330lbs)
|
||||
# Fitbit data showed ~200 marked as 'kg', which is definitely lbs.
|
||||
if unit in ['lbs', 'lb', 'pounds'] or val > 150:
|
||||
body_weight = val * 0.453592
|
||||
else:
|
||||
body_weight = val
|
||||
|
||||
total_weight = (body_weight or 0.0) + bike_weight
|
||||
|
||||
# Watts/kg
|
||||
w_kg = 0.0
|
||||
if effort.avg_power and body_weight > 0:
|
||||
w_kg = effort.avg_power / body_weight
|
||||
|
||||
data = EffortAnalysisData(
|
||||
effort_id=effort.id,
|
||||
activity_id=activity.id,
|
||||
activity_name=activity.activity_name or f"Activity {activity.id}",
|
||||
date=activity.start_time.isoformat(),
|
||||
elapsed_time=effort.elapsed_time,
|
||||
avg_power=effort.avg_power,
|
||||
max_power=effort.max_power,
|
||||
avg_hr=effort.avg_hr,
|
||||
avg_cadence=activity.avg_cadence, # Use activity avg as proxy if effort specific not available in DB
|
||||
avg_speed=activity.avg_speed, # Proxy
|
||||
avg_temperature=activity.avg_temperature,
|
||||
bike_name=bike_name,
|
||||
bike_weight=bike_weight if bike_weight > 0 else None,
|
||||
body_weight=body_weight if body_weight > 0 else None,
|
||||
total_weight=total_weight if total_weight > 0 else None,
|
||||
watts_per_kg=round(w_kg, 2) if w_kg > 0 else None
|
||||
)
|
||||
results.append(data)
|
||||
|
||||
# Calculate Winners
|
||||
winners = {}
|
||||
if results:
|
||||
# Helper to find min/max
|
||||
def find_winner(key, mode='max'):
|
||||
valid = [r for r in results if getattr(r, key) is not None]
|
||||
if not valid: return None
|
||||
if mode == 'max':
|
||||
return max(valid, key=lambda x: getattr(x, key)).effort_id
|
||||
else:
|
||||
return min(valid, key=lambda x: getattr(x, key)).effort_id
|
||||
|
||||
winners['elapsed_time'] = find_winner('elapsed_time', 'min')
|
||||
winners['avg_power'] = find_winner('avg_power', 'max')
|
||||
winners['max_power'] = find_winner('max_power', 'max')
|
||||
winners['avg_hr'] = find_winner('avg_hr', 'min') # Lower is usually better for same output, but depends on context. Assume efficiency.
|
||||
winners['watts_per_kg'] = find_winner('watts_per_kg', 'max')
|
||||
winners['avg_speed'] = find_winner('avg_speed', 'max')
|
||||
|
||||
return ComparisonResponse(efforts=results, winners=winners)
|
||||
|
||||
@router.post("/segments/efforts/export")
|
||||
def export_analysis(effort_ids: List[int] = Body(...), db: Session = Depends(get_db)):
|
||||
"""
|
||||
Export structured JSON for LLM analysis.
|
||||
"""
|
||||
# Reuse comparison logic to get data
|
||||
# In a real app, refactor to shared service function
|
||||
comparison = compare_efforts(effort_ids, db)
|
||||
|
||||
# Convert to dict
|
||||
# Convert to dict
|
||||
data = []
|
||||
for e_obj in comparison.efforts:
|
||||
e_dict = e_obj.dict()
|
||||
|
||||
# Fetch and slice streams
|
||||
# 1. Get Activity
|
||||
# We need the activity object. comparison.efforts only has IDs.
|
||||
# Efficient way: query them or cleaner: just query needed activities using the IDs.
|
||||
# Or simplistic: Fetch inside loop (N+1 query, but export is rare/manual action).
|
||||
|
||||
effort = db.query(SegmentEffort).get(e_dict['effort_id'])
|
||||
if effort and effort.activity and effort.activity.file_content:
|
||||
try:
|
||||
act = effort.activity
|
||||
raw_data = extract_activity_data(act.file_content, act.file_type or 'fit')
|
||||
|
||||
# Slice by time
|
||||
# Timestamps in raw_data['timestamps']
|
||||
timestamps = raw_data.get('timestamps', [])
|
||||
|
||||
start_time = effort.start_time
|
||||
end_time = effort.end_time
|
||||
|
||||
# Normalize start/end to match stream timestamps timezone
|
||||
if timestamps and timestamps[0]:
|
||||
stream_tz = timestamps[0].tzinfo
|
||||
|
||||
# Helper to align
|
||||
def align_tz(dt, target_tz):
|
||||
if dt.tzinfo == target_tz:
|
||||
return dt
|
||||
if dt.tzinfo is None and target_tz is not None:
|
||||
return dt.replace(tzinfo=target_tz) # Assume same ref
|
||||
if dt.tzinfo is not None and target_tz is None:
|
||||
return dt.replace(tzinfo=None) # Strip
|
||||
return dt.astimezone(target_tz)
|
||||
|
||||
start_time = align_tz(start_time, stream_tz)
|
||||
end_time = align_tz(end_time, stream_tz)
|
||||
|
||||
# Simple list comprehension to find indices
|
||||
indices = [i for i, t in enumerate(timestamps)
|
||||
if t and start_time <= t <= end_time]
|
||||
|
||||
streams = {}
|
||||
if indices:
|
||||
first = indices[0]
|
||||
last = indices[-1] + 1
|
||||
|
||||
# Keys to extract
|
||||
keys = ['heart_rate', 'power', 'speed', 'cadence', 'temperature']
|
||||
for k in keys:
|
||||
if k in raw_data:
|
||||
streams[k] = raw_data[k][first:last]
|
||||
|
||||
# Points/Elevation
|
||||
if 'points' in raw_data:
|
||||
sliced_points = raw_data['points'][first:last]
|
||||
streams['latlng'] = [[p[1], p[0]] for p in sliced_points] # lat, lon
|
||||
streams['elevation'] = [p[2] if len(p) > 2 else None for p in sliced_points]
|
||||
|
||||
streams['timestamps'] = [t.isoformat() if t else None for t in raw_data['timestamps'][first:last]]
|
||||
|
||||
e_dict['streams'] = streams
|
||||
except Exception as e:
|
||||
print(f"Error extracting streams for effort {effort.id}: {e}")
|
||||
e_dict['streams'] = {"error": str(e)}
|
||||
|
||||
data.append(e_dict)
|
||||
|
||||
# Create JSON file
|
||||
file_content = json.dumps(data, indent=2)
|
||||
|
||||
return StreamingResponse(
|
||||
io.BytesIO(file_content.encode()),
|
||||
media_type="application/json",
|
||||
headers={"Content-Disposition": "attachment; filename=efforts_analysis.json"}
|
||||
)
|
||||
import io
|
||||
Reference in New Issue
Block a user