many updates

2026-01-11 06:06:43 -08:00
parent 67357b5038
commit 4bb86b603e
73 changed files with 2881 additions and 59 deletions

View File

@@ -60,7 +60,20 @@ async def log_requests(request: Request, call_next):
logger.error(f"Request Failed: {e}")
raise
app.mount("/static", StaticFiles(directory="../static"), name="static")
from pathlib import Path
# Resolve absolute path to static directory
BASE_DIR = Path(__file__).resolve().parent
STATIC_DIR = BASE_DIR.parent / "static"
if not STATIC_DIR.exists():
# Warn, then create the directory so the mount below does not crash in dev
logging.warning(f"Static directory not found at {STATIC_DIR}")
STATIC_DIR.mkdir(parents=True, exist_ok=True)
app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
templates = Jinja2Templates(directory="templates")
from src.api import status, sync, auth, logs, metrics, activities, scheduling, config_routes
@@ -82,6 +95,10 @@ app.include_router(segments.router, prefix="/api")
from src.api import bike_setups
app.include_router(bike_setups.router)
from src.api import discovery
app.include_router(discovery.router, prefix="/api/discovery")
from src.routers import web

View File

@@ -141,6 +141,7 @@ async def query_activities(
start_date: Optional[str] = Query(None),
end_date: Optional[str] = Query(None),
download_status: Optional[str] = Query(None),
bike_setup_id: Optional[int] = Query(None),
db: Session = Depends(get_db)
):
"""
@@ -154,7 +155,21 @@ async def query_activities(
# Apply filters based on parameters
if activity_type:
if activity_type == 'cycling':
# Match outdoor cycling types
# Using OR filtering for various sub-types
from sqlalchemy import or_
query = query.filter(or_(
Activity.activity_type == 'cycling',
Activity.activity_type == 'road_biking',
Activity.activity_type == 'mountain_biking',
Activity.activity_type == 'gravel_cycling',
Activity.activity_type == 'cyclocross',
Activity.activity_type == 'track_cycling',
Activity.activity_type == 'commuting'
))
else:
query = query.filter(Activity.activity_type == activity_type)
if start_date:
from datetime import datetime
@@ -168,6 +183,9 @@ async def query_activities(
if download_status:
query = query.filter(Activity.download_status == download_status)
if bike_setup_id:
query = query.filter(Activity.bike_setup_id == bike_setup_id)
# Execute the query
activities = query.all()
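As an aside, the OR chain above is equivalent to a single `in_` filter; a minimal sketch, assuming the same `Activity` model:

# Generates WHERE activity_type IN (...), same rows as the or_() chain above
CYCLING_TYPES = [
    'cycling', 'road_biking', 'mountain_biking', 'gravel_cycling',
    'cyclocross', 'track_cycling', 'commuting',
]
query = query.filter(Activity.activity_type.in_(CYCLING_TYPES))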
@@ -376,7 +394,20 @@ async def redownload_activity_endpoint(activity_id: str, db: Session = Depends(g
success = sync_app.redownload_activity(activity_id)
if success:
return {"message": f"Successfully redownloaded activity {activity_id}", "status": "success"}
# Trigger bike matching
try:
from ..services.bike_matching import process_activity_matching
# sync_app has already committed; fetch the fresh activity by garmin_id
act_obj = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first()
if act_obj:
process_activity_matching(db, act_obj.id)
logger.info(f"Retriggered bike match for {activity_id} after redownload")
except Exception as match_err:
logger.error(f"Error matching bike after redownload: {match_err}")
return {"message": f"Successfully redownloaded and matched activity {activity_id}", "status": "success"}
else:
raise HTTPException(status_code=500, detail="Failed to redownload activity. Check logs for details.")
@@ -389,6 +420,48 @@ async def redownload_activity_endpoint(activity_id: str, db: Session = Depends(g
# New Sync Endpoints
class BikeMatchUpdate(BaseModel):
bike_setup_id: Optional[int] = None
manual_override: bool = True
@router.put("/activities/{activity_id}/bike")
async def update_activity_bike(activity_id: str, update: BikeMatchUpdate, db: Session = Depends(get_db)):
"""
Manually update the bike setup for an activity.
Sets bike_match_confidence to 2.0 to indicate manual override.
"""
try:
activity = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first()
if not activity:
raise HTTPException(status_code=404, detail="Activity not found")
# Verify bike setup exists if provided
if update.bike_setup_id is not None:
from ..models.bike_setup import BikeSetup
setup = db.query(BikeSetup).filter(BikeSetup.id == update.bike_setup_id).first()
if not setup:
raise HTTPException(status_code=404, detail="Bike Setup not found")
activity.bike_setup_id = setup.id
activity.bike_match_confidence = 2.0 # Manual Override
logger.info(f"Manual bike override for {activity_id} to setup {setup.id}")
else:
# Clear setup
activity.bike_setup_id = None
activity.bike_match_confidence = 2.0 # Manual Clear
logger.info(f"Manual bike override for {activity_id} to cleared")
db.commit()
return {"message": "Bike setup updated successfully", "status": "success"}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error updating activity bike: {e}")
db.rollback()
raise HTTPException(status_code=500, detail=str(e))
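For reference, a hypothetical client-side call exercising this endpoint (base URL and IDs are invented):

import requests

# PUT /api/activities/{activity_id}/bike with a manual bike selection
resp = requests.put(
    "http://localhost:8000/api/activities/1234567890/bike",  # host/port assumed
    json={"bike_setup_id": 2, "manual_override": True},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # {"message": "Bike setup updated successfully", "status": "success"}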
def run_scan_job(job_id: str, days_back: int, db_session_factory):
"""Background task wrapper for scan"""
try:
@@ -685,6 +758,23 @@ async def get_activity_streams(activity_id: str, db: Session = Depends(get_db)):
logger.error(f"Error getting streams: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/activities/{activity_id}/estimate_power")
async def estimate_activity_power(activity_id: int, db: Session = Depends(get_db)):
"""
Trigger physics-based power estimation.
"""
from ..services.power_estimator import PowerEstimatorService
try:
service = PowerEstimatorService(db)
result = service.estimate_power_for_activity(activity_id)
return {"message": "Power estimated successfully", "stats": result}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Error estimating power: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/activities/{activity_id}/navigation")
async def get_activity_navigation(activity_id: str, db: Session = Depends(get_db)):
"""

View File

@@ -2,7 +2,7 @@ from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from pydantic import BaseModel
from typing import List, Optional
from datetime import datetime, date
import logging
from ..models.bike_setup import BikeSetup
@@ -22,12 +22,18 @@ class BikeSetupCreate(BaseModel):
frame: str
chainring: int
rear_cog: int
weight_kg: Optional[float] = None
purchase_date: Optional[date] = None
retirement_date: Optional[date] = None
name: Optional[str] = None
class BikeSetupUpdate(BaseModel):
frame: Optional[str] = None
chainring: Optional[int] = None
rear_cog: Optional[int] = None
weight_kg: Optional[float] = None
purchase_date: Optional[date] = None
retirement_date: Optional[date] = None
name: Optional[str] = None
class BikeSetupRead(BaseModel):
@@ -35,9 +41,15 @@ class BikeSetupRead(BaseModel):
frame: str
chainring: int
rear_cog: int
year: Optional[int] = None
weight_kg: Optional[float] = None
purchase_date: Optional[date] = None
retirement_date: Optional[date] = None
name: Optional[str] = None
created_at: Optional[datetime]
updated_at: Optional[datetime]
activity_count: int = 0
total_distance: float = 0.0
class Config:
from_attributes = True
@@ -46,8 +58,40 @@ router = APIRouter(prefix="/api/bike-setups", tags=["bike-setups"])
@router.get("/", response_model=List[BikeSetupRead])
def get_bike_setups(db: Session = Depends(get_db)):
"""List all bike setups."""
return db.query(BikeSetup).all()
"""List all bike setups with usage stats."""
from sqlalchemy import func
from ..models.activity import Activity
# Query setups with aggregated activity stats
results = db.query(
BikeSetup,
func.count(Activity.id).label("count"),
func.sum(Activity.distance).label("dist")
).outerjoin(Activity, BikeSetup.id == Activity.bike_setup_id)\
.group_by(BikeSetup.id).all()
response = []
for setup, count, dist in results:
# Construct the response model explicitly; this avoids relying on ORM
# attribute mapping and works under both Pydantic 1.x and v2 patterns.
response.append(BikeSetupRead(
id=setup.id,
frame=setup.frame,
chainring=setup.chainring,
rear_cog=setup.rear_cog,
weight_kg=setup.weight_kg,
purchase_date=setup.purchase_date,
retirement_date=setup.retirement_date,
name=setup.name,
created_at=setup.created_at,
updated_at=setup.updated_at,
activity_count=count,
total_distance=dist if dist else 0.0
))
return response
@router.post("/", response_model=BikeSetupRead, status_code=status.HTTP_201_CREATED)
def create_bike_setup(setup: BikeSetupCreate, db: Session = Depends(get_db)):
@@ -56,6 +100,9 @@ def create_bike_setup(setup: BikeSetupCreate, db: Session = Depends(get_db)):
frame=setup.frame,
chainring=setup.chainring,
rear_cog=setup.rear_cog,
weight_kg=setup.weight_kg,
purchase_date=setup.purchase_date,
retirement_date=setup.retirement_date,
name=setup.name
)
db.add(new_setup)
@@ -85,6 +132,12 @@ def update_bike_setup(setup_id: int, setup_data: BikeSetupUpdate, db: Session =
setup.chainring = setup_data.chainring
if setup_data.rear_cog is not None:
setup.rear_cog = setup_data.rear_cog
if setup_data.weight_kg is not None:
setup.weight_kg = setup_data.weight_kg
if setup_data.purchase_date is not None:
setup.purchase_date = setup_data.purchase_date
if setup_data.retirement_date is not None:
setup.retirement_date = setup_data.retirement_date
if setup_data.name is not None:
setup.name = setup_data.name

View File

@@ -0,0 +1,83 @@
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from datetime import datetime, timedelta
from ..models import Base # Ensure models are loaded if needed
from ..services.postgresql_manager import PostgreSQLManager
from ..utils.config import config
from ..services.discovery import SegmentDiscoveryService
from ..schemas.discovery import DiscoveryFilter, DiscoveryResult, CandidateSegmentSchema, SingleDiscoveryRequest
router = APIRouter()
def get_db_session():
db_manager = PostgreSQLManager(config.DATABASE_URL)
with db_manager.get_db_session() as session:
yield session
@router.post("/segments", response_model=DiscoveryResult)
def discover_segments(
filter: DiscoveryFilter,
db: Session = Depends(get_db_session)
):
service = SegmentDiscoveryService(db)
# Default the window to roughly one year back when no start date is given
start = filter.start_date or (datetime.now() - timedelta(days=365))
candidates, debug_paths = service.discover_segments(
activity_type=filter.activity_type,
start_date=start,
end_date=filter.end_date
)
# Convert to schema
results = []
for c in candidates:
results.append(CandidateSegmentSchema(
points=c.points,
frequency=c.frequency,
distance=c.distance,
activity_ids=c.activity_ids
))
return DiscoveryResult(
candidates=results,
generated_at=datetime.now(),
activity_count=len(debug_paths),
debug_paths=debug_paths
)
@router.post("/single", response_model=DiscoveryResult)
def discover_single_activity(
request: SingleDiscoveryRequest,
db: Session = Depends(get_db_session)
):
service = SegmentDiscoveryService(db)
candidates = service.analyze_single_activity(request.activity_id)
# Convert to schema
results = []
for c in candidates:
results.append(CandidateSegmentSchema(
points=c.points,
frequency=c.frequency,
distance=c.distance,
activity_ids=c.activity_ids
))
return DiscoveryResult(
candidates=results,
generated_at=datetime.now(),
activity_count=1,
debug_paths=None
)

View File

@@ -44,7 +44,9 @@ class SegmentResponse(BaseModel):
distance: float
elevation_gain: Optional[float]
activity_type: str
points: List[List[float]]
effort_count: int = 0
@router.post("/segments/create")
def create_segment(payload: SegmentCreate, db: Session = Depends(get_db)):
@@ -120,9 +122,17 @@ def create_segment(payload: SegmentCreate, db: Session = Depends(get_db)):
@router.get("/segments", response_model=List[SegmentResponse])
def list_segments(db: Session = Depends(get_db)):
# Query segments with effort count
from sqlalchemy import func
# Outer join to count efforts, grouping by Segment
# SQLAlchemy < 2.0 style
results = db.query(Segment, func.count(SegmentEffort.id)) \
.outerjoin(SegmentEffort, Segment.id == SegmentEffort.segment_id) \
.group_by(Segment.id).all()
res = []
for s, count in results:
pts = json.loads(s.points) if isinstance(s.points, str) else s.points
res.append(SegmentResponse(
id=s.id,
@@ -130,7 +140,8 @@ def list_segments(db: Session = Depends(get_db)):
distance=s.distance,
elevation_gain=s.elevation_gain,
activity_type=s.activity_type,
points=pts,
effort_count=count
))
return res
@@ -222,5 +233,94 @@ def scan_segments(db: Session = Depends(get_db)):
# Run in background
thread = threading.Thread(target=job_manager.run_serialized, args=(job_id, run_segment_matching_job))
thread.start()
return {"message": "Segment scan started", "job_id": job_id}
@router.post("/segments/scan/{activity_id}")
def scan_activity_segments(activity_id: int, db: Session = Depends(get_db)):
"""Scan a specific activity for segment matches."""
from ..models.activity import Activity
from ..services.segment_matcher import SegmentMatcher
from ..services.parsers import extract_points_from_file
# Resolve ID
activity = db.query(Activity).filter(Activity.id == activity_id).first()
if not activity:
activity = db.query(Activity).filter(Activity.garmin_activity_id == str(activity_id)).first()
if not activity:
raise HTTPException(status_code=404, detail="Activity not found")
if not activity.file_content:
raise HTTPException(status_code=400, detail="Activity has no file content")
# Clear existing efforts
db.query(SegmentEffort).filter(SegmentEffort.activity_id == activity.id).delete()
db.commit() # Commit delete
try:
points = extract_points_from_file(activity.file_content, activity.file_type)
if not points:
return {"message": "No points found in activity", "matches": 0}
matcher = SegmentMatcher(db)
efforts = matcher.match_activity(activity, points)
# matcher commits internally
return {"message": "Scan complete", "matches": len(efforts), "segment_ids": [e.segment_id for e in efforts]}
except Exception as e:
print(f"Error scanning activity {activity.id}: {e}")
raise HTTPException(status_code=500, detail=str(e))
class SegmentCreateCustom(BaseModel):
name: str
description: Optional[str] = None
activity_type: str
points: List[List[float]] # [[lon, lat], ...] or [[lon, lat, ele], ...]
@router.post("/segments/save_custom")
def save_custom_segment(payload: SegmentCreateCustom, db: Session = Depends(get_db)):
"""Save a segment from custom points (e.g. discovery results)."""
from ..utils.geo import calculate_bounds, haversine_distance, ramer_douglas_peucker
if not payload.points or len(payload.points) < 2:
raise HTTPException(status_code=400, detail="Invalid points")
# Discovery results arrive pre-simplified, so no further simplification here;
# just compute distance/elevation metadata below.
# Calculate metadata
dist = 0.0
elev_gain = 0.0
for i in range(len(payload.points)-1):
p1 = payload.points[i]
p2 = payload.points[i+1]
dist += haversine_distance(p1[1], p1[0], p2[1], p2[0])
# Elevation if present
if len(p1) > 2 and len(p2) > 2 and p1[2] is not None and p2[2] is not None:
diff = p2[2] - p1[2]
if diff > 0:
elev_gain += diff
bounds = calculate_bounds(payload.points)
segment = Segment(
name=payload.name,
description=payload.description,
distance=dist,
elevation_gain=elev_gain,
activity_type=payload.activity_type,
points=json.dumps(payload.points),
bounds=json.dumps(bounds)
)
db.add(segment)
db.commit()
db.refresh(segment)
return {"message": "Segment saved", "id": segment.id}

View File

@@ -25,11 +25,37 @@ def run_segment_matching_job(job_id: str):
activities = db.query(Activity).all()
total_activities = len(activities)
# Optimization: Pre-fetch segment locations for coarse filtering
segments_list = db.query(Segment).all()
segment_locations = []
# Parse segment bounds once
import json
for s in segments_list:
try:
if s.bounds:
# bounds: [min_lat, min_lon, max_lat, max_lon], used directly for the coarse check
b = json.loads(s.bounds) if isinstance(s.bounds, str) else s.bounds
if b and len(b) == 4:
segment_locations.append(b)
except Exception:
pass  # malformed bounds: just exclude this segment from the coarse filter
has_segments = len(segment_locations) > 0
job_manager.update_job(job_id, progress=0, message=f"Starting scan of {total_activities} activities...")
matcher = SegmentMatcher(db)
total_matches = 0
skipped_far = 0
# Coarse buffer: roughly 1000 miles expressed in degrees.
# 1 degree of latitude ~ 69 miles, so 1000 miles ~ 14.5 degrees; round up to 15.
# Deliberately generous: the goal is only to skip activities that cannot
# possibly touch any segment, never to skip a potential match.
BUFFER_DEG = 15.0
for i, activity in enumerate(activities):
if job_manager.should_cancel(job_id):
logger.info(f"Job {job_id} cancelled.")
@@ -37,33 +63,57 @@ def run_segment_matching_job(job_id: str):
# Calculate progress
prog = int((i / total_activities) * 100)
job_manager.update_job(job_id, progress=prog, message=f"Scanning activity {i+1}/{total_activities} ({activity.id})")
job_manager.update_job(job_id, progress=prog, message=f"Scanning {i+1}/{total_activities} (Matches: {total_matches}, Skipped: {skipped_far})")
# Check for content first
if not activity.file_content:
continue
# OPTIMIZATION: Check Coarse Distance
# If activity has start location, check if it's "close" to ANY segment
if has_segments and activity.start_lat is not None and activity.start_lng is not None:
is_near_any = False
a_lat = activity.start_lat
a_lng = activity.start_lng
for b in segment_locations:
# b: [min_lat, min_lon, max_lat, max_lon]
# Expand bounds by buffer
# Check if point is inside expanded bounds
if (b[0] - BUFFER_DEG <= a_lat <= b[2] + BUFFER_DEG) and \
(b[1] - BUFFER_DEG <= a_lng <= b[3] + BUFFER_DEG):
is_near_any = True
break
if not is_near_any:
# Skip parsing!
skipped_far += 1
continue
# Extract points. Re-parsing on every scan is CPU-intensive but safe;
# caching parsed points is a possible later optimization.
try:
points = extract_points_from_file(activity.file_content, activity.file_type)
if points:
# SegmentMatcher only adds efforts and never checks for existing ones,
# so clear this activity's efforts first to avoid duplicates.
db.query(SegmentEffort).filter(SegmentEffort.activity_id == activity.id).delete()
efforts = matcher.match_activity(activity, points)
total_matches += len(efforts)
logger.info(f"Activity {activity.id}: {len(efforts)} matches")
if efforts:
logger.info(f"Activity {activity.id}: {len(efforts)} matches")
except Exception as e:
logger.error(f"Error processing activity {activity.id}: {e}")
# Continue to next
db.commit() # Final commit
job_manager.complete_job(job_id, result={"total_matches": total_matches, "activities_scanned": total_activities})
job_manager.complete_job(job_id, result={
"total_matches": total_matches,
"activities_scanned": total_activities,
"skipped_due_to_distance": skipped_far
})
except Exception as e:
logger.error(f"Job {job_id} failed: {e}")

View File

@@ -14,6 +14,10 @@ class Activity(Base):
duration = Column(Integer, nullable=True) # Duration in seconds
# Location (added for optimization)
start_lat = Column(Float, nullable=True)
start_lng = Column(Float, nullable=True)
# Extended Metrics
distance = Column(Float, nullable=True) # meters
calories = Column(Float, nullable=True) # kcal
@@ -42,4 +46,5 @@ class Activity(Base):
updated_at = Column(DateTime(timezone=True), onupdate=func.now())
bike_setup_id = Column(Integer, ForeignKey("bike_setups.id"), nullable=True)
bike_match_confidence = Column(Float, nullable=True) # 0.0-1.0 match confidence; 2.0 marks a manual override
bike_setup = relationship("BikeSetup")

View File

@@ -1,4 +1,4 @@
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.sql import func
from .base import Base
@@ -9,6 +9,9 @@ class BikeSetup(Base):
frame = Column(String, nullable=False)
chainring = Column(Integer, nullable=False)
rear_cog = Column(Integer, nullable=False)
weight_kg = Column(Float, nullable=True) # Weight of the bike in kg
purchase_date = Column(DateTime, nullable=True)
retirement_date = Column(DateTime, nullable=True)
name = Column(String, nullable=True) # Optional, can be derived or user-set
created_at = Column(DateTime(timezone=True), server_default=func.now())

View File

@@ -36,3 +36,9 @@ async def bike_setups_page(request: Request):
async def activity_view_page(request: Request, activity_id: str):
return templates.TemplateResponse("activity_view.html", {"request": request, "activity_id": activity_id})
@router.get("/discovery")
async def discovery_page(request: Request):
from datetime import datetime, timedelta
return templates.TemplateResponse("discovery.html", {"request": request, "now": datetime.now(), "timedelta": timedelta})

View File

@@ -0,0 +1,29 @@
from pydantic import BaseModel
from typing import List, Optional
from datetime import datetime
class DiscoveryFilter(BaseModel):
activity_type: str
start_date: Optional[datetime] = None
end_date: Optional[datetime] = None
lat_min: Optional[float] = None
lat_max: Optional[float] = None
lon_min: Optional[float] = None
lon_max: Optional[float] = None
class SingleDiscoveryRequest(BaseModel):
activity_id: int
class CandidateSegmentSchema(BaseModel):
points: List[List[float]]
frequency: int
distance: float
activity_ids: List[int]
class DiscoveryResult(BaseModel):
candidates: List[CandidateSegmentSchema]
debug_paths: Optional[List[List[List[float]]]] = None
generated_at: datetime
activity_count: int # how many activities were analyzed

View File

@@ -4,12 +4,70 @@ from sqlalchemy.orm import Session
from ..models.activity import Activity
from ..models.bike_setup import BikeSetup
import statistics
from ..services.parsers import extract_activity_data
logger = logging.getLogger(__name__)
# Constants
WHEEL_CIRCUMFERENCE_M = 2.1 # Approx 700x23c/28c generic
TOLERANCE_PERCENT = 0.15
def calculate_ratio_from_streams(speed_stream: List[float], cadence_stream: List[int], window_size: int = 10) -> float:
"""
Calculate median gear ratio from steady-state segments in streams using a sliding window.
"""
if not speed_stream or not cadence_stream or len(speed_stream) != len(cadence_stream):
return 0.0
ratios = []
# None values are handled inside the loop; a pure-Python pass is acceptable
# since rides are typically under 20k points.
n = len(speed_stream)
if n < window_size:
return 0.0
# The sliding window is O(N*W); with W=10 that is fine. Step by 5 to reduce
# overlapping windows and roughly halve the work.
for i in range(0, n - window_size, 5):
window_speeds = speed_stream[i:i+window_size]
window_cadences = cadence_stream[i:i+window_size]
# Quick check for None before processing
if any(v is None for v in window_speeds) or any(c is None for c in window_cadences):
continue
# Check thresholds
if all(c > 55 for c in window_cadences) and all(v > 2.5 for v in window_speeds):
# Check consistency: low stdev in both streams indicates steady pedaling
try:
cad_std = statistics.stdev(window_cadences)
spd_std = statistics.stdev(window_speeds)
if cad_std < 5 and spd_std < 0.5:
# Steady!
avg_speed = statistics.mean(window_speeds)
avg_cadence = statistics.mean(window_cadences)
# Ratio
ratio = (avg_speed * 60) / (avg_cadence * WHEEL_CIRCUMFERENCE_M)
ratios.append(ratio)
except statistics.StatisticsError:
# Variance requires at least two data points, window_size=10 is safe.
pass
if not ratios:
return 0.0
return statistics.median(ratios)
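A quick numeric sanity check of the ratio formula (round numbers, not real ride data):

# 8 m/s at 90 rpm on a 2.1 m wheel:
ratio = (8.0 * 60) / (90 * 2.1)   # distance per minute / distance per crank rev
print(round(ratio, 2))            # 2.54, between 53/21 (~2.52) and 50/19 (~2.63)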
def calculate_observed_ratio(speed_mps: float, cadence_rpm: float) -> float:
"""
Calculate gear ratio from speed and cadence.
@@ -25,7 +83,7 @@ def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetu
Match an activity to a bike setup based on gear ratio.
"""
if not activity.activity_type:
return None, 0.0
type_lower = activity.activity_type.lower()
@@ -45,23 +103,41 @@ def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetu
if not is_cycling:
# Not cycling
return None, 0.0
if 'indoor' in type_lower:
# Indoor cycling - ignore
return None, 0.0
observed_ratio = 0.0
# Prefer per-point streams ("smart matching") when file content is available
if activity.file_content:
try:
data = extract_activity_data(activity.file_content, activity.file_type)
speeds = data.get('speed') or []
cadences = data.get('cadence') or []
# If explicit streams exist, use them
if speeds and cadences and len(speeds) > 0:
observed_ratio = calculate_ratio_from_streams(speeds, cadences)
logger.debug(f"Smart Match Ratio for {activity.id}: {observed_ratio:.2f}")
except Exception as e:
logger.warning(f"Failed to extract streams for Smart Matching activity {activity.id}: {e}")
# Fallback to averages if Smart Matching failed or returned 0
if observed_ratio == 0:
if not activity.avg_speed or not activity.avg_cadence:
# Not enough data
return None, 0.0
observed_ratio = calculate_observed_ratio(activity.avg_speed, activity.avg_cadence)
if observed_ratio == 0:
return None, 0.0
setups = db.query(BikeSetup).all()
if not setups:
return None
return None, 0.0
best_match = None
min_diff = float('inf')
@@ -70,6 +146,33 @@ def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetu
if not setup.chainring or not setup.rear_cog:
continue
# Check Date Constraints
# Ignore if activity date is before purchase or after retirement
# Start time is datetime with timezone
act_date = activity.start_time
if setup.purchase_date and act_date:
p_date = setup.purchase_date
if p_date.tzinfo:
p_date = p_date.replace(tzinfo=None)
a_date = act_date
if a_date.tzinfo:
a_date = a_date.replace(tzinfo=None)
if a_date < p_date:
continue
if setup.retirement_date and act_date:
r_date = setup.retirement_date
if r_date.tzinfo:
r_date = r_date.replace(tzinfo=None)
a_date = act_date
if a_date.tzinfo:
a_date = a_date.replace(tzinfo=None)
if a_date > r_date:
continue
mechanical_ratio = setup.chainring / setup.rear_cog
diff = abs(observed_ratio - mechanical_ratio)
@@ -80,7 +183,15 @@ def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetu
min_diff = diff
best_match = setup
if best_match:
match_ratio = best_match.chainring / best_match.rear_cog
error_pct = min_diff / match_ratio
confidence = max(0.0, 1.0 - error_pct)
return best_match, confidence
return None, 0.0
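The confidence formula above in miniature, with invented numbers:

observed_ratio = 2.45
mechanical = 53 / 21                       # ~2.524 for a 53t ring on a 21t cog
error_pct = abs(observed_ratio - mechanical) / mechanical
confidence = max(0.0, 1.0 - error_pct)
print(round(confidence, 2))                # 0.97: a ~3% ratio error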
def process_activity_matching(db: Session, activity_id: int):
"""
@@ -90,17 +201,20 @@ def process_activity_matching(db: Session, activity_id: int):
if not activity:
return
match, confidence = match_activity_to_bike(db, activity)
if match:
activity.bike_setup_id = match.id
logger.info(f"Matched Activity {activity.id} to Setup {match.frame} (Found Ratio: {calculate_observed_ratio(activity.avg_speed, activity.avg_cadence):.2f})")
activity.bike_match_confidence = confidence
logger.info(f"Matched Activity {activity.id} to Setup {match.frame} (Ratio: {calculate_observed_ratio(activity.avg_speed, activity.avg_cadence):.2f}, Confidence: {confidence:.2f})")
else:
# Implicitly "Generic" if None, but user requested explicit default logic.
generic = db.query(BikeSetup).filter(BikeSetup.name == "GenericBike").first()
if generic:
activity.bike_setup_id = generic.id
activity.bike_match_confidence = 0.5 # Low confidence fallback
else:
activity.bike_setup_id = None # Truly unknown
activity.bike_match_confidence = 0.0
db.commit()
@@ -111,7 +225,8 @@ def run_matching_for_all(db: Session):
from sqlalchemy import or_
activities = db.query(Activity).filter(
# Activity.bike_setup_id == None,
# Re-match everything to enforce new rules/constraints
or_(
Activity.activity_type.ilike('%cycling%'),
Activity.activity_type.ilike('%road_biking%'),
@@ -119,7 +234,9 @@ def run_matching_for_all(db: Session):
Activity.activity_type.ilike('%mtb%'),
Activity.activity_type.ilike('%cyclocross%')
),
Activity.activity_type.notilike('%indoor%'),
# Skip manual overrides (confidence >= 2.0)
or_(Activity.bike_match_confidence == None, Activity.bike_match_confidence < 2.0)
).all()
count = 0

View File

@@ -0,0 +1,453 @@
from typing import List, Dict, Optional, Tuple, Set
from datetime import datetime, timedelta
import logging
import math
from sqlalchemy.orm import Session
from sqlalchemy import func
from ..models.activity import Activity
from ..models.segment import Segment
from ..utils.geo import haversine_distance, calculate_bounds, calculate_bearing, ramer_douglas_peucker_indices
from ..services.parsers import extract_points_from_file, extract_activity_data
logger = logging.getLogger(__name__)
class CandidateSegment:
def __init__(self, points: List[List[float]], frequency: int, activity_ids: List[int]):
self.points = points # [[lon, lat], ...]
self.frequency = frequency
self.activity_ids = activity_ids
self.distance = self._calculate_distance()
self.uuid = None # To be assigned for frontend reference
def _calculate_distance(self) -> float:
d = 0.0
for i in range(len(self.points) - 1):
p1 = self.points[i]
p2 = self.points[i+1]
d += haversine_distance(p1[1], p1[0], p2[1], p2[0])
return d
class SegmentDiscoveryService:
def __init__(self, db: Session):
self.db = db
def discover_segments(self,
activity_type: str,
start_date: Optional[datetime],
end_date: Optional[datetime] = None) -> Tuple[List[CandidateSegment], List[List[List[float]]]]:
logger.info(f"Starting segment discovery for {activity_type} since {start_date}")
# 1. Fetch activities
query = self.db.query(Activity).filter(Activity.activity_type == activity_type)
if start_date:
query = query.filter(Activity.start_time >= start_date)
if end_date:
query = query.filter(Activity.start_time <= end_date)
activities = query.all()
logger.info(f"Analyzing {len(activities)} activities.")
if len(activities) < 2:
return [], []
# 2. Extract and Simplify Points (The "Cloud")
# Structure: { activity_id: [[lon, lat], ...] }
# Decimate to ~15 m spacing (below the 20 m grid size) so paths stay connected
activity_paths = {}
for act in activities:
if not act.file_content:
continue
try:
raw_points = extract_points_from_file(act.file_content, act.file_type)
# Reduced min_dist to 15m (smaller than grid 20m) to ensure connectivity
simplified = self._decimate_points(raw_points, min_dist=15.0)
if len(simplified) > 5: # Ignore tiny paths
activity_paths[act.id] = simplified
except Exception as e:
logger.warning(f"Failed to process activity {act.id}: {e}")
# 3. Grid-Based Clustering
# We map every point to a grid cell (approx 20m x 20m).
# Count unique activities per cell.
grid_size_deg = 0.0002 # Approx 20m at equator
# cell_key -> set(activity_ids)
grid: Dict[Tuple[int, int], Set[int]] = {}
for act_id, points in activity_paths.items():
for p in points:
lon, lat = p[0], p[1]
xi = int(lon / grid_size_deg)
yi = int(lat / grid_size_deg)
if (xi, yi) not in grid:
grid[(xi, yi)] = set()
grid[(xi, yi)].add(act_id)
# 4. Filter Hotspots
# Keep cells with > 2 unique activities
min_freq = 2
hotspot_cells = {k: v for k, v in grid.items() if len(v) >= min_freq}
logger.info(f"Found {len(hotspot_cells)} hotspot cells.")
if not hotspot_cells:
return [], list(activity_paths.values())
# 5. Connect Hotspots (Stitching)
# Identify chains of adjacent hotspot cells.
# This is a graph traversal problem.
# Simple approach: Connected Components on grid.
clusters = self._find_connected_components(hotspot_cells, grid_size_deg)
# 6. Reconstruct Paths & Candidates
candidates = []
for cluster_cells in clusters:
# Reconstruct a representative path for this cluster. Ordering raw cell
# centers is unreliable without direction, so pick the single activity
# that traverses the cluster best (i.e. visits the most of its cells).
if best_act_id:
# Extract the segment sub-path from this activity
path_points = self._extract_subpath_from_activity(activity_paths[best_act_id], cluster_cells, grid_size_deg)
if path_points and self._calculate_path_length(path_points) > 200: # Min length 200m
# Estimate frequency for this path from the grid counts. A more robust
# check would verify how many activities actually follow the path
# (e.g. a Hausdorff test); the per-cell average is a reasonable MVP proxy.
# Collect all activity IDs involved in this cluster
cluster_activity_ids = set()
for cell in cluster_cells:
if cell in hotspot_cells:
cluster_activity_ids.update(hotspot_cells[cell])
cand = CandidateSegment(path_points, freq, list(cluster_activity_ids))
candidates.append(cand)
# 7. Deduplicate against DB
final_candidates = self._deduplicate_against_db(candidates, activity_type)
return final_candidates, list(activity_paths.values())
def analyze_single_activity(self, activity_id: int) -> List[CandidateSegment]:
act = self.db.query(Activity).filter(Activity.id == activity_id).first()
# Fallback to Garmin ID if not found by primary key
if not act:
act = self.db.query(Activity).filter(Activity.garmin_activity_id == str(activity_id)).first()
if not act or not act.file_content:
return []
# Parse data
data = extract_activity_data(act.file_content, act.file_type)
points = data.get('points', [])
timestamps = data.get('timestamps', [])
if not points or not timestamps or len(points) != len(timestamps):
logger.warning(f"Analysis failed for {activity_id}: Mismatched points/timestamps")
return []
# Build aligned point/timestamp lists, dropping entries where either is missing
aligned_points = []
aligned_ts = []
for p, ts in zip(points, timestamps):
if p and ts:
aligned_points.append(p)
aligned_ts.append(ts)
if len(aligned_points) < 10:
return []
# Step 1: Split by Pauses (> 10s)
sub_segments_indices = [] # List of [start_idx, end_idx]
seg_start = 0
for i in range(1, len(aligned_points)):
t1 = aligned_ts[i-1]
t2 = aligned_ts[i]
diff = (t2 - t1).total_seconds()
if diff > 10.0:
# Pause detected, split
if i - seg_start > 5:
sub_segments_indices.append([seg_start, i]) # end index is exclusive
seg_start = i
# Add last one
if len(aligned_points) - seg_start > 5:
sub_segments_indices.append([seg_start, len(aligned_points)])
final_segments = []
# Step 2: RDP Turn Detection on each sub-segment
for start_idx, end_idx in sub_segments_indices:
segment_points = aligned_points[start_idx:end_idx]
# Get RDP simplified INDICES (relative to segment_points start)
# Use epsilon=10.0m for robust major turn detection
rdp_indices = ramer_douglas_peucker_indices(segment_points, 10.0)
# Check turns at RDP vertices
split_points_relative = []
if len(rdp_indices) > 2:
last_bearing = None
# Iterate simplified vertices
for k in range(1, len(rdp_indices)):
idx1 = rdp_indices[k-1]
idx2 = rdp_indices[k]
p1 = segment_points[idx1]
p2 = segment_points[idx2]
bearing = calculate_bearing(p1[1], p1[0], p2[1], p2[0])
if last_bearing is not None:
diff = abs(bearing - last_bearing)
if diff > 180: diff = 360 - diff
if diff > 60:
# Turn detected at vertex k-1 (idx1)
# Convert relative idx1 to split point
split_points_relative.append(idx1)
last_bearing = bearing
# Split segment based on turns
current_rel_start = 0
for split_idx in split_points_relative:
# Check min length (e.g. 5 points)
if split_idx - current_rel_start > 5:
abs_start = start_idx + current_rel_start
# Turns happen AT a vertex: end this piece at the turn point (inclusive)
# and start the next piece at the same point, keeping the track continuous.
abs_end = start_idx + split_idx + 1
final_segments.append(aligned_points[abs_start : abs_end])
current_rel_start = split_idx
# Last piece
if len(segment_points) - current_rel_start > 5:
abs_start = start_idx + current_rel_start
abs_end = start_idx + len(segment_points)
final_segments.append(aligned_points[abs_start : abs_end])
# Step 3: Filter & Convert
candidates = []
for path in final_segments:
d = self._calculate_path_length(path)
if d > 100: # Min 100m
# Simple decimation for display
simplified = self._decimate_points(path, min_dist=10.0)
cand = CandidateSegment(simplified, 1, [activity_id])
candidates.append(cand)
return candidates
def _decimate_points(self, points: List[List[float]], min_dist: float) -> List[List[float]]:
if not points: return []
out = [points[0]]
last = points[0]
for p in points[1:]:
d = haversine_distance(last[1], last[0], p[1], p[0])
if d >= min_dist:
out.append(p)
last = p
return out
def _find_connected_components(self, cells: Dict[Tuple[int, int], Set[int]], grid_step: float) -> List[List[Tuple[int, int]]]:
# cells: map of (x,y) -> activity_ids
visited = set()
components = []
cell_keys = list(cells.keys())
for k in cell_keys:
if k in visited:
continue
# BFS
q = [k]
visited.add(k)
cluster = []
while q:
curr = q.pop(0)
cluster.append(curr)
cx, cy = curr
# Check 8 neighbors
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if dx == 0 and dy == 0: continue
neighbor = (cx + dx, cy + dy)
if neighbor in cells and neighbor not in visited:
visited.add(neighbor)
q.append(neighbor)
if len(cluster) > 5: # Min cluster size
components.append(cluster)
return components
def _find_representative_activity(self, cluster_cells: List[Tuple[int, int]],
activity_paths: Dict[int, List[List[float]]],
grid_step: float) -> Optional[int]:
# Count which activity ID appears most in these cells
counts = {}
cell_set = set(cluster_cells)
# The grid already maps cells to activity IDs, but only the cell keys are
# passed in here, so tally overlap by scanning each activity path against
# the cluster's cell set. Naive but acceptable for now; passing the grid
# contents in would be the obvious optimization.
best_id = None
max_overlap = 0
for act_id, points in activity_paths.items():
overlap = 0
for p in points:
xi = int(p[0] / grid_step)
yi = int(p[1] / grid_step)
if (xi, yi) in cell_set:
overlap += 1
if overlap > max_overlap:
max_overlap = overlap
best_id = act_id
return best_id
def _extract_subpath_from_activity(self, points: List[List[float]],
cluster_cells: List[Tuple[int, int]],
grid_step: float) -> List[List[float]]:
# Extract the contiguous sequence of points that lie within the cluster
cell_set = set(cluster_cells)
subpath = []
longest_subpath = []
for p in points:
xi = int(p[0] / grid_step)
yi = int(p[1] / grid_step)
if (xi, yi) in cell_set:
subpath.append(p)
else:
if len(subpath) > len(longest_subpath):
longest_subpath = subpath
subpath = []
if len(subpath) > len(longest_subpath):
longest_subpath = subpath
return longest_subpath
def _estimate_frequency(self, cluster_keys: List[Tuple[int, int]], grid: Dict[Tuple[int, int], Set[int]]) -> int:
# Average unique visitors per cell in cluster
if not cluster_keys: return 0
total = 0
for k in cluster_keys:
if k in grid:
total += len(grid[k])
return int(total / len(cluster_keys))
def _calculate_path_length(self, points: List[List[float]]) -> float:
d = 0.0
for i in range(len(points) - 1):
d += haversine_distance(points[i][1], points[i][0], points[i+1][1], points[i+1][0])
return d
def _deduplicate_against_db(self, candidates: List[CandidateSegment], activity_type: str) -> List[CandidateSegment]:
# Load all segments of type
existing = self.db.query(Segment).filter(Segment.activity_type == activity_type).all()
unique = []
for cand in candidates:
# Cheap duplicate test: matching start and end points plus similar length.
# A full curve comparison (Frechet/Hausdorff) is O(N*M) in pure Python and
# overkill here.
import json
is_duplicate = False
for ex in existing:
# Start/end proximity check (within 50 m); points may be stored as JSON
ex_points = json.loads(ex.points) if isinstance(ex.points, str) else ex.points
if not ex_points: continue
ex_start = ex_points[0]
ex_end = ex_points[-1]
cand_start = cand.points[0]
cand_end = cand.points[-1]
d_start = haversine_distance(ex_start[1], ex_start[0], cand_start[1], cand_start[0])
d_end = haversine_distance(ex_end[1], ex_end[0], cand_end[1], cand_end[0])
if d_start < 50 and d_end < 50:
# Likely duplicate
# Check length similarity
if abs(cand.distance - ex.distance) < 200:
is_duplicate = True
break
if not is_duplicate:
unique.append(cand)
return unique
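How the 20 m grid bucketing behaves, hand-run (coordinates invented). Note that `int()` truncates toward zero, so cells straddling longitude/latitude 0 are twice as wide; `math.floor` would avoid that if it ever matters:

grid_size_deg = 0.0002
lon, lat = -122.41942, 37.77493
cell = (int(lon / grid_size_deg), int(lat / grid_size_deg))
print(cell)  # (-612097, 188874); nearby points from other rides hash to the same key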

View File

@@ -14,7 +14,9 @@ def extract_activity_data(file_content: bytes, file_type: str) -> Dict[str, List
'points': [[lon, lat, ele], ...],
'timestamps': [datetime, ...],
'heart_rate': [int, ...],
'power': [int, ...]
'power': [int, ...],
'speed': [float, ...],
'cadence': [int, ...]
}
"""
if file_type == 'fit':
@@ -34,7 +36,7 @@ def extract_timestamps_from_file(file_content: bytes, file_type: str) -> List[Op
return data['timestamps']
def _extract_data_from_fit(file_content: bytes) -> Dict[str, List[Any]]:
data = {'points': [], 'timestamps': [], 'heart_rate': [], 'power': [], 'speed': [], 'cadence': []}
try:
with io.BytesIO(file_content) as f:
with fitdecode.FitReader(f) as fit:
@@ -64,6 +66,14 @@ def _extract_data_from_fit(file_content: bytes) -> Dict[str, List[Any]]:
ts = frame.get_value('timestamp') if frame.has_field('timestamp') else None
data['timestamps'].append(ts)
# Speed
if frame.has_field('enhanced_speed'):
speed = frame.get_value('enhanced_speed')
elif frame.has_field('speed'):
speed = frame.get_value('speed')
else:
speed = None
data['speed'].append(speed)
# Cadence
cad = frame.get_value('cadence') if frame.has_field('cadence') else None
data['cadence'].append(cad)
# HR
hr = frame.get_value('heart_rate') if frame.has_field('heart_rate') else None
data['heart_rate'].append(hr)

View File

@@ -0,0 +1,160 @@
import math
import logging
from typing import Any, List, Optional, Tuple, Dict
from ..models.activity import Activity
from ..models.bike_setup import BikeSetup
from ..models.weight_record import WeightRecord
from ..services.parsers import extract_activity_data
logger = logging.getLogger(__name__)
class PowerEstimatorService:
def __init__(self, db_session):
self.db = db_session
# Physics Constants
self.GRAVITY = 9.80665
self.RHO = 1.225 # Air density at sea level, standard temp (kg/m^3)
# Default Parameters if not provided/estimated
self.DEFAULT_CDA = 0.32 # Typical road cyclist
self.DEFAULT_CRR = 0.005 # Typical road tire on asphalt
self.DRIVETRAIN_LOSS = 0.03 # 3% loss
def estimate_power_for_activity(self, activity_id: int) -> Dict[str, Any]:
"""
Estimate a power stream for the activity using a physics model.
Returns summary stats.
"""
activity = self.db.query(Activity).filter(Activity.id == activity_id).first()
if not activity:
raise ValueError("Activity not found")
if not activity.file_content:
raise ValueError("No file content to analyze")
# 1. Get Setup and Weights
bike_weight = 9.0 # Default 9kg
if activity.bike_setup and activity.bike_setup.weight_kg:
bike_weight = activity.bike_setup.weight_kg
rider_weight = 75.0 # Default 75kg
# Use the latest weight record; matching the record closest to the activity
# date would be more accurate and is a possible refinement.
latest_weight = self.db.query(WeightRecord).order_by(WeightRecord.date.desc()).first()
if latest_weight and latest_weight.weight_kg:
rider_weight = latest_weight.weight_kg
total_mass = rider_weight + bike_weight
# 2. Extract Data
data = extract_activity_data(activity.file_content, activity.file_type)
# We need: Speed (m/s), Elevation (m) for Grade, Time (s) for acceleration
timestamps = data.get('timestamps')
speeds = data.get('speed') or []
# The parser exposes elevation only as the third element of each point,
# not as a separate stream, so pull it out here (None when absent).
pts = data.get('points') or []
elevations = [p[2] if p and len(p) > 2 else None for p in pts] if pts else None
if not speeds:
raise ValueError("No speed data available")
# Generate Power Stream
power_stream = []
total_power = 0.0
count = 0
# Raw point-to-point physics is noisy; the central differences below
# (grade = d_ele/d_dist, accel = d_v/d_t) give a little implicit smoothing.
for i in range(len(speeds)):
v = speeds[i] # m/s
# Skip if stopped
if v is None or v < 0.1:
power_stream.append(0)
continue
# Slope: looking ahead/behind (e.g. +/- 5 s) would smooth it better;
# a simple central difference for now.
grade = 0.0
accel = 0.0
if i > 0 and i < len(speeds) - 1:
# Central difference
d_t = (timestamps[i+1] - timestamps[i-1]).total_seconds()
if d_t > 0:
d_v = (speeds[i+1] - speeds[i-1]) # acc
d_e = 0
if elevations and elevations[i+1] is not None and elevations[i-1] is not None:
d_e = elevations[i+1] - elevations[i-1]
d_s = (v * d_t) # approximate distance covered
accel = d_v / d_t
if d_s > 1.0: # avoid div by zero/noise
grade = d_e / d_s
# Physics Formula
# F_total = F_grav + F_roll + F_aero + F_acc
# F_grav = m * g * sin(arctan(grade)) ~= m * g * grade
f_grav = total_mass * self.GRAVITY * grade
# F_roll = m * g * cos(arctan(grade)) * Crr ~= m * g * Crr
f_roll = total_mass * self.GRAVITY * self.DEFAULT_CRR
# F_aero = 0.5 * rho * CdA * v^2
# Assume no wind for now
f_aero = 0.5 * self.RHO * self.DEFAULT_CDA * (v**2)
# F_acc = m * a
f_acc = total_mass * accel
f_total = f_grav + f_roll + f_aero + f_acc
# Power = Force * Velocity
p_raw = f_total * v
# Apply Drivetrain Loss
p_mech = p_raw / (1 - self.DRIVETRAIN_LOSS)
# Power can't be negative for a human (braking/coasting = 0w output)
if p_mech < 0:
p_mech = 0
power_stream.append(int(p_mech))
total_power += p_mech
count += 1
avg_power = int(total_power / count) if count > 0 else 0
# The estimated stream cannot be written back into the FIT file, and the
# current schema has no column for derived streams, so persist only the
# scalar metrics (avg/max power) and return a sample of the stream.
activity.avg_power = avg_power
activity.max_power = max(power_stream) if power_stream else 0
self.db.commit()
return {
"avg_power": avg_power,
"max_power": activity.max_power,
"stream_sample": power_stream[:20]
}
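A back-of-envelope check of the force model with round numbers (all values illustrative):

m, v = 84.0, 8.0                       # 75 kg rider + 9 kg bike, ~29 km/h
g, grade, crr = 9.80665, 0.02, 0.005
rho, cda, loss = 1.225, 0.32, 0.03

f = m*g*grade + m*g*crr + 0.5*rho*cda*v**2   # gravity + rolling + aero, no accel
p = (f * v) / (1 - loss)
print(round(p))  # ~273 W on a steady 2% grade: a plausible figure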

View File

@@ -34,8 +34,15 @@ class SegmentMatcher:
# without special extensions. We'll fetch all and filter in Python.
# Ideally, we'd use PostGIS geometry types.
# Normalize activity type
act_type = activity.activity_type
if act_type in ['road_biking', 'mountain_biking', 'gravel_cycling', 'virtual_cycling', 'indoor_cycling']:
act_type = 'cycling'
elif act_type in ['trail_running', 'treadmill_running']:
act_type = 'running'
segments = self.db.query(Segment).filter(
(Segment.activity_type == activity.activity_type) | (Segment.activity_type == act_type)
).all()
matched_efforts = []

View File

@@ -317,3 +317,13 @@ class GarminActivitySync:
data.get('maxBikingCadenceInRevPerMinute') or
data.get('maxSwimCadenceInStrokesPerMinute')
)
# Location
if data.get('startingLatitude') and data.get('startingLongitude'):
activity.start_lat = data.get('startingLatitude')
activity.start_lng = data.get('startingLongitude')
elif data.get('startRecallLatitude') and data.get('startRecallLongitude'):
# Sometimes Garmin uses these
activity.start_lat = data.get('startRecallLatitude')
activity.start_lng = data.get('startRecallLongitude')

View File

@@ -7,7 +7,7 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl
on the earth (specified in decimal degrees)
"""
# Convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
# Haversine formula
dlon = lon2 - lon1
@@ -17,6 +17,21 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl
r = 6371000 # Radius of earth in meters
return c * r
def calculate_bearing(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""
Calculate initial bearing between two points.
Returns degrees [0, 360).
"""
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
dLon = lon2 - lon1
y = math.sin(dLon) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dLon)
brng = math.atan2(y, x)
return (math.degrees(brng) + 360) % 360
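A one-line check of the bearing helper: due east along the equator should come out at 90 degrees.

print(calculate_bearing(0.0, 0.0, 0.0, 1.0))  # 90.0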
def perpendicular_distance(point: List[float], line_start: List[float], line_end: List[float]) -> float:
"""
Calculate perpendicular distance from point to line segment.
@@ -85,6 +100,54 @@ def ramer_douglas_peucker(points: List[List[float]], epsilon: float) -> List[Lis
else:
return [points[0], points[end]]
def ramer_douglas_peucker_indices(points: List[List[float]], epsilon: float) -> List[int]:
"""
Simplify a list of [lon, lat] points using RDP algorithm.
Returns the INDICES of the simplified points in the original list.
"""
if len(points) < 3:
return list(range(len(points)))
# Recursing over slices would lose the original indices, so use a
# recursive helper that works on absolute indices into 'points'.
def _rdp(start_idx: int, end_idx: int) -> List[int]:
if end_idx - start_idx < 2:
return [start_idx, end_idx]
dmax = 0.0
index = 0
# Find the point with the maximum perpendicular distance to the chord
# from start_idx to end_idx (random access into the outer 'points' list)
p_start = points[start_idx]
p_end = points[end_idx]
for i in range(start_idx + 1, end_idx):
d = perpendicular_distance(points[i], p_start, p_end)
if d > dmax:
index = i
dmax = d
if dmax > epsilon:
res1 = _rdp(start_idx, index)
res2 = _rdp(index, end_idx)
return res1[:-1] + res2
else:
return [start_idx, end_idx]
return _rdp(0, len(points) - 1)
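A small usage sketch (points invented; `perpendicular_distance` is assumed to return metres, per the epsilon comments above):

pts = [[0.0, 0.0], [0.00005, 0.00001], [0.0001, 0.0],
       [0.00015, 0.00001], [0.0002, 0.0]]   # ~1 m wiggles over ~22 m
kept = ramer_douglas_peucker_indices(pts, 10.0)
print(kept)  # [0, 4]: every interior point is within 10 m of the chord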
def calculate_bounds(points: List[List[float]]) -> List[float]:
"""
Return [min_lat, min_lon, max_lat, max_lon]

View File

@@ -35,9 +35,9 @@
</div>
<div class="col-auto">
<select class="form-select" id="filter-type">
<option value="">All</option>
<option value="">All Types</option>
<option value="running">Running</option>
<option value="cycling">Cycling</option>
<option value="cycling">Cycling (All)</option>
<option value="swimming">Swimming</option>
<option value="walking">Walking</option>
<option value="hiking">Hiking</option>
@@ -45,6 +45,15 @@
<option value="yoga">Yoga</option>
</select>
</div>
<div class="col-auto">
<label for="filter-bike" class="col-form-label">Bike:</label>
</div>
<div class="col-auto">
<select class="form-select" id="filter-bike">
<option value="">All Bikes</option>
<!-- Populated by JS -->
</select>
</div>
<div class="col-auto">
<button class="btn btn-secondary" id="apply-filters-btn">Filter</button>
</div>
@@ -315,7 +324,10 @@
detailsModal = new bootstrap.Modal(document.getElementById('activityDetailsModal'));
loadActivities();
fetchBikeSetups();
document.getElementById('prev-page-btn').addEventListener('click', () => changePage(-1));
document.getElementById('next-page-btn').addEventListener('click', () => changePage(1));
@@ -375,10 +387,19 @@
tbody.innerHTML = '<tr><td colspan="9" class="text-center">Loading...</td></tr>';
const typeFilter = document.getElementById('filter-type').value;
const bikeFilter = document.getElementById('filter-bike').value;
let url = `/api/activities/list?limit=${limit}&offset=${currentPage * limit}`;
// If any filter is active, force query mode
if (typeFilter || bikeFilter) {
url = `/api/activities/query?`;
const params = new URLSearchParams();
if (typeFilter) params.append('activity_type', typeFilter);
if (bikeFilter) params.append('bike_setup_id', bikeFilter);
url += params.toString();
document.getElementById('prev-page-btn').disabled = true;
document.getElementById('next-page-btn').disabled = true;
} else {
@@ -438,8 +459,8 @@
<button class="btn btn-outline-primary" onclick="downloadFile('${act.garmin_activity_id}')" title="Download Local File">
<i class="bi bi-download"></i>
</button>
<button class="btn btn-outline-warning" onclick="redownload('${act.garmin_activity_id}')" title="Redownload from Garmin">
<i class="bi bi-cloud-download"></i>
<button class="btn btn-outline-warning" onclick="redownload('${act.garmin_activity_id}')" title="Refresh Data & Rematch Bike">
<i class="bi bi-arrow-clockwise"></i>
</button>
<a href="/activity/${act.garmin_activity_id}" target="_blank" class="btn btn-outline-info" title="View Details">
<i class="bi bi-eye"></i>
@@ -577,6 +598,44 @@
}
}
async function fetchBikeSetups() {
try {
const res = await fetch('/api/bike-setups');
if (!res.ok) throw new Error("Failed to fetch bikes");
const bikes = await res.json();
const select = document.getElementById('filter-bike');
bikes.forEach(bike => {
const opt = document.createElement('option');
opt.value = bike.id;
opt.textContent = bike.name ? `${bike.name} (${bike.frame})` : bike.frame;
select.appendChild(opt);
});
} catch (e) {
console.error(e);
}
}
async function refreshActivity(garminId) {
if (!confirm("Are you sure you want to re-download this activity from Garmin and run bike matching?")) {
return;
}
showToast("Processing...", "Refreshing activity data...", "info");
try {
const res = await fetch(`/api/activities/${garminId}/redownload`, { method: 'POST' });
const data = await res.json();
if (res.ok) {
showToast("Success", data.message, "success");
loadActivities(); // Reload table
} else {
throw new Error(data.detail || "Refresh failed");
}
} catch (e) {
showToast("Error", e.message, "error");
}
}
window.showActivityDetails = async function (id) {
// Reset fields
document.querySelectorAll('[id^="det-"]').forEach(el => el.textContent = '-');

View File

@@ -56,6 +56,12 @@
<button class="btn btn-primary" id="download-btn">
<i class="bi bi-download"></i> Download
</button>
<button class="btn btn-outline-warning" id="refresh-btn" onclick="refreshActivity()">
<i class="bi bi-arrow-clockwise"></i> Refresh & Match
</button>
<button class="btn btn-warning" id="estimate-power-btn" onclick="estimatePower()">
<i class="bi bi-lightning-fill"></i> Estimate Power
</button>
<button class="btn btn-success" id="create-segment-btn" onclick="toggleSegmentMode()">
<i class="bi bi-bezier2"></i> Create Segment
</button>
@@ -228,7 +234,10 @@
<!-- Bike Info (New) -->
<div class="col-md-4">
<div class="card h-100 metric-card border-light shadow-sm">
<div class="card-header bg-light text-dark">Bike Setup</div>
<div class="card-header bg-light text-dark d-flex justify-content-between align-items-center">
<span>Bike Setup</span>
<button class="btn btn-sm btn-link p-0 text-decoration-none" onclick="editBikeSetup()">Edit</button>
</div>
<div class="card-body">
<div id="m-bike-info" class="text-center text-muted">No Setup</div>
</div>
@@ -647,6 +656,132 @@
setupDrag(endMarker, false);
}
async function refreshActivity() {
if (!confirm("Are you sure you want to re-download this activity from Garmin and run bike matching? This will overwrite any manual bike selection.")) {
return;
}
const btn = document.getElementById('refresh-btn');
const origHtml = btn.innerHTML;
btn.disabled = true;
btn.innerHTML = '<span class="spinner-border spinner-border-sm"></span> Refreshing...';
// base.html normally defines showToast; fall back to alert() where it doesn't.
if (typeof showToast === 'function') showToast("Processing...", "Refreshing activity data...", "info");
try {
const res = await fetch(`/api/activities/${activityId}/redownload`, { method: 'POST' });
const data = await res.json();
if (res.ok) {
if (typeof showToast === 'function') showToast("Success", data.message, "success");
else alert(data.message);
// Reload details
loadDetails();
loadCharts();
} else {
throw new Error(data.detail || "Refresh failed");
}
} catch (e) {
console.error(e);
if (typeof showToast === 'function') showToast("Error", e.message, "error");
else alert("Error: " + e.message);
} finally {
btn.disabled = false;
btn.innerHTML = origHtml;
}
}
let allBikes = [];
async function fetchAllBikes() {
try {
const res = await fetch('/api/bike-setups');
if (res.ok) allBikes = await res.json();
} catch (e) { console.error(e); }
}
document.addEventListener('DOMContentLoaded', fetchAllBikes);
function editBikeSetup() {
const container = document.getElementById('m-bike-info');
if (container.querySelector('select')) return;
// Current text
const currentHtml = container.innerHTML;
let initialSelect = `<div class="input-group input-group-sm">
<select class="form-select" id="bike-select">
<option value="">-- No Bike --</option>`;
allBikes.forEach(b => {
initialSelect += `<option value="${b.id}">${b.name || b.frame} (${b.chainring}/${b.rear_cog})</option>`;
});
initialSelect += `</select>
<button class="btn btn-success" onclick="saveBikeSetup()"><i class="bi bi-check"></i></button>
<button class="btn btn-outline-secondary" onclick="loadDetails()"><i class="bi bi-x"></i></button>
</div>`;
container.innerHTML = initialSelect;
}
async function saveBikeSetup() {
const sel = document.getElementById('bike-select');
const newId = sel.value ? parseInt(sel.value) : null;
try {
const res = await fetch(`/api/activities/${activityId}/bike`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ bike_setup_id: newId, manual_override: true })
});
if (res.ok) {
if (typeof showToast === 'function') showToast("Success", "Bike setup updated", "success");
else alert("Bike setup updated");
loadDetails();
} else {
const err = await res.json();
alert("Error: " + err.detail);
}
} catch (e) {
console.error(e);
alert("Save failed");
}
}
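saveBikeSetup() above PUTs {bike_setup_id, manual_override} to /api/activities/{id}/bike. For orientation, a minimal sketch of what such a route could look like; the real handler lives in the backend API modules and may differ, and the get_db import path is an assumption.

from typing import Optional

from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from sqlalchemy.orm import Session

from src.models.activity import Activity
from src.database import get_db  # assumed location of the session dependency

router = APIRouter()

class BikeAssignment(BaseModel):
    bike_setup_id: Optional[int] = None
    manual_override: bool = False

@router.put("/activities/{activity_id}/bike")
def set_activity_bike(activity_id: int, body: BikeAssignment, db: Session = Depends(get_db)):
    act = db.query(Activity).filter(Activity.id == activity_id).first()
    if not act:
        raise HTTPException(status_code=404, detail="Activity not found")
    act.bike_setup_id = body.bike_setup_id
    if body.manual_override:
        # A manual choice should survive future automatic re-matching.
        act.bike_match_confidence = None
    db.commit()
    return {"status": "ok"}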
async function estimatePower() {
if (!confirm("Estimate power for this activity using physics usage calculation? This will update average/max power stats.")) return;
const btn = document.getElementById('estimate-power-btn');
const origText = btn.innerHTML;
btn.disabled = true;
btn.innerHTML = '<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Estimating...';
try {
const res = await fetch(`/api/activities/${window.currentDbId}/estimate_power`, {
method: 'POST'
});
if (res.ok) {
const data = await res.json();
alert("Power estimation complete! Avg: " + data.stats.avg_power + " W");
loadDetails(); // Refresh stats
loadCharts(); // Reload charts so any updated power stream is reflected
} else {
const err = await res.json();
alert("Error: " + err.detail);
}
} catch (e) {
console.error(e);
alert("Estimate failed: " + e.message);
} finally {
btn.disabled = false;
btn.innerHTML = origText;
}
}
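estimatePower() defers to the backend's physics-based service. For intuition, a simplified steady-state power model of the kind such services use: rolling resistance, gravity, and aerodynamic drag, all multiplied by ground speed. The coefficients here are illustrative assumptions, not the service's actual values.

G = 9.81  # gravitational acceleration, m/s^2

def steady_state_power(v: float, grade: float, mass_kg: float,
                       crr: float = 0.005, cda: float = 0.32,
                       rho: float = 1.225) -> float:
    """Watts needed to hold speed v (m/s) on a given grade (rise/run)."""
    f_roll = crr * mass_kg * G            # rolling resistance
    f_grav = mass_kg * G * grade          # gravity (small-angle approximation)
    f_aero = 0.5 * rho * cda * v ** 2     # aerodynamic drag
    return (f_roll + f_grav + f_aero) * v

# e.g. 85 kg rider+bike at 8 m/s on the flat: (4.2 + 0 + 12.5) * 8 ≈ 134 W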
async function saveSegment() {
if (startIndex >= endIndex) {
alert("Start point must be before End point.");

View File

@@ -75,6 +75,11 @@
<li class="nav-item">
<a class="nav-link {% if request.path == '/segments' %}active{% endif %}" href="/segments">Segments</a>
</li>
<li class="nav-item">
<a class="nav-link {% if request.path == '/discovery' %}active{% endif %}"
href="/discovery">Discovery</a>
</li>
<li class="nav-item">
<a class="nav-link {% if request.path == '/garmin-health' %}active{% endif %}"
href="/garmin-health">Garmin Health</a>

View File

@@ -21,6 +21,10 @@
<th>Chainring</th>
<th>Rear Cog</th>
<th>Gear Ratio</th>
<th>Rides</th>
<th>Distance</th>
<th>Weight</th>
<th>Active</th>
<th>Actions</th>
</tr>
</thead>
@@ -51,16 +55,33 @@
</div>
<div class="mb-3">
<label for="frame" class="form-label">Frame</label>
<input type="text" class="form-control" id="frame" name="frame" required placeholder="e.g. Dolan Pre Cursa">
<input type="text" class="form-control" id="frame" name="frame" required
placeholder="e.g. Dolan Pre Cursa">
</div>
<div class="row">
<div class="col-md-6 mb-3">
<label for="chainring" class="form-label">Chainring</label>
<input type="number" class="form-control" id="chainring" name="chainring" required min="1" placeholder="Teeth">
<input type="number" class="form-control" id="chainring" name="chainring" required min="1"
placeholder="Teeth">
</div>
<div class="col-md-6 mb-3">
<label for="rearCog" class="form-label">Rear Cog</label>
<input type="number" class="form-control" id="rearCog" name="rear_cog" required min="1" placeholder="Teeth">
<input type="number" class="form-control" id="rearCog" name="rear_cog" required min="1"
placeholder="Teeth">
</div>
</div>
<div class="row">
<div class="col-md-4 mb-3">
<label for="weightKg" class="form-label">Weight (kg)</label>
<input type="number" class="form-control" id="weightKg" name="weight_kg" step="0.1" min="0"
placeholder="e.g. 9.0">
</div>
<div class="col-md-4 mb-3">
<label for="purchaseDate" class="form-label">Purchase Date</label>
<input type="date" class="form-control" id="purchaseDate" name="purchase_date">
</div>
<div class="col-md-4 mb-3">
<label for="retirementDate" class="form-label">Retired Date</label>
<input type="date" class="form-control" id="retirementDate" name="retirement_date">
</div>
</div>
</form>
@@ -96,7 +117,7 @@
function renderTable() {
const tbody = document.getElementById('setupsTableBody');
tbody.innerHTML = '';
currentSetups.forEach(setup => {
const ratio = (setup.chainring / setup.rear_cog).toFixed(2);
const tr = document.createElement('tr');
@@ -106,6 +127,10 @@
<td>${setup.chainring}t</td>
<td>${setup.rear_cog}t</td>
<td>${ratio}</td>
<td><span class="badge bg-secondary">${setup.activity_count || 0}</span></td>
<td>${setup.total_distance ? (setup.total_distance / 1000).toFixed(0) + ' km' : '-'}</td>
<td>${setup.weight_kg ? setup.weight_kg + 'kg' : '-'}</td>
<td>${setup.retirement_date ? '<span class="badge bg-danger">Retired</span>' : '<span class="badge bg-success">Active</span>'}</td>
<td>
<button class="btn btn-sm btn-outline-primary me-1" onclick="editSetup(${setup.id})">
<i class="bi bi-pencil"></i>
@@ -128,11 +153,11 @@
// Hook into modal show event to reset form if adding
document.getElementById('addSetupModal').addEventListener('show.bs.modal', function (event) {
if (!event.relatedTarget || event.relatedTarget.getAttribute('data-bs-target')) {
// If triggered by button (not manual show for edit), reset
// Actually better to just check if we set an ID
}
});
// Reset on close
document.getElementById('addSetupModal').addEventListener('hidden.bs.modal', resetForm);
@@ -146,7 +171,10 @@
document.getElementById('frame').value = setup.frame;
document.getElementById('chainring').value = setup.chainring;
document.getElementById('rearCog').value = setup.rear_cog;
document.getElementById('weightKg').value = setup.weight_kg || '';
document.getElementById('purchaseDate').value = setup.purchase_date ? setup.purchase_date.split('T')[0] : '';
document.getElementById('retirementDate').value = setup.retirement_date ? setup.retirement_date.split('T')[0] : '';
document.getElementById('setupModalLabel').textContent = 'Edit Bike Setup';
setupModal.show();
}
@@ -157,7 +185,10 @@
name: document.getElementById('name').value || null,
frame: document.getElementById('frame').value,
chainring: parseInt(document.getElementById('chainring').value),
rear_cog: parseInt(document.getElementById('rearCog').value),
weight_kg: document.getElementById('weightKg').value ? parseFloat(document.getElementById('weightKg').value) : null,
purchase_date: document.getElementById('purchaseDate').value || null,
retirement_date: document.getElementById('retirementDate').value || null
};
if (!data.frame || !data.chainring || !data.rear_cog) {
@@ -168,7 +199,7 @@
try {
const method = id ? 'PUT' : 'POST';
const url = id ? `/api/bike-setups/${id}` : '/api/bike-setups/';
const response = await fetch(url, {
method: method,
headers: { 'Content-Type': 'application/json' },
@@ -176,7 +207,7 @@
});
if (!response.ok) throw new Error('Failed to save setup');
showToast('Setup saved successfully', 'success');
setupModal.hide();
loadSetups();
@@ -191,7 +222,7 @@
try {
const response = await fetch(`/api/bike-setups/${id}`, { method: 'DELETE' });
if (!response.ok) throw new Error('Failed to delete setup');
showToast('Setup deleted', 'success');
loadSetups();
} catch (error) {
@@ -199,4 +230,4 @@
}
}
</script>
{% endblock %}
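The new Rides/Distance columns assume the bike-setups API now returns activity_count and total_distance per setup. A hedged sketch of how that aggregation could be built with SQLAlchemy; Activity.distance (metres) is assumed, and the real query in src/api/bike_setups.py may differ.

from sqlalchemy import func
from sqlalchemy.orm import Session

from src.models.activity import Activity
from src.models.bike_setup import BikeSetup

def setups_with_stats(db: Session):
    # One row per setup: (BikeSetup, activity_count, total_distance_m).
    return (
        db.query(
            BikeSetup,
            func.count(Activity.id).label("activity_count"),
            func.coalesce(func.sum(Activity.distance), 0).label("total_distance"),
        )
        .outerjoin(Activity, Activity.bike_setup_id == BikeSetup.id)
        .group_by(BikeSetup.id)
        .all()
    )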

View File

@@ -0,0 +1,444 @@
{% extends "base.html" %}
{% block head %}
<link rel="stylesheet" href="https://unpkg.com/leaflet@1.9.4/dist/leaflet.css"
integrity="sha256-p4NxAoJBhIIN+hmNHrzRCf9tD/miZyoHS5obTRR9BMY=" crossorigin="" />
<script src="https://unpkg.com/leaflet@1.9.4/dist/leaflet.js"
integrity="sha256-20nQCchB9co0qIjJZRGuk2/Z9VM+kNiyxNV1lvTlZBo=" crossorigin=""></script>
<style>
#map {
height: 600px;
width: 100%;
border-radius: 8px;
}
.candidate-card {
cursor: pointer;
transition: transform 0.2s;
}
.candidate-card:hover {
transform: translateY(-2px);
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
.candidate-card.active {
border-color: #0d6efd;
background-color: #f8f9fa;
}
</style>
{% endblock %}
{% block content %}
<div class="row mb-4">
<div class="col-12">
<h2>Segment Discovery</h2>
<p class="text-muted">Find frequent routes in your activity history.</p>
</div>
</div>
<div class="card mb-4">
<div class="card-header">
<ul class="nav nav-tabs card-header-tabs" id="discoveryTabs" role="tablist">
<li class="nav-item" role="presentation">
<button class="nav-link active" id="global-tab" data-bs-toggle="tab" data-bs-target="#global-pane"
type="button" role="tab">Global Discovery</button>
</li>
<li class="nav-item" role="presentation">
<button class="nav-link" id="single-tab" data-bs-toggle="tab" data-bs-target="#single-pane"
type="button" role="tab">Single Activity</button>
</li>
</ul>
</div>
<div class="card-body">
<div class="tab-content" id="discoveryTabContent">
<!-- Global Discovery Pane -->
<div class="tab-pane fade show active" id="global-pane" role="tabpanel">
<form id="discoveryForm" class="row g-3 align-items-end">
<div class="col-md-4">
<label for="activityType" class="form-label">Activity Type</label>
<select id="activityType" class="form-select">
<option value="cycling">Cycling</option>
<option value="running">Running</option>
<option value="hiking">Hiking</option>
</select>
</div>
<div class="col-md-4">
<label for="startDate" class="form-label">Start Date</label>
<input type="date" class="form-control" id="startDate"
value="{{ (now - timedelta(days=90)).strftime('%Y-%m-%d') }}">
</div>
<div class="col-md-2 d-flex align-items-end mb-2">
<div class="form-check form-switch">
<input class="form-check-input" type="checkbox" id="showDebugLayer">
<label class="form-check-label small" for="showDebugLayer">Show All</label>
</div>
</div>
<div class="col-md-2">
<button type="submit" class="btn btn-primary w-100" id="searchBtn">
<span class="spinner-border spinner-border-sm d-none" role="status"
aria-hidden="true"></span>
Discover
</button>
</div>
</form>
</div>
<!-- Single Activity Pane -->
<div class="tab-pane fade" id="single-pane" role="tabpanel">
<form id="singleForm" class="row g-3 align-items-end">
<div class="col-md-8">
<label for="activityId" class="form-label">Activity ID</label>
<input type="number" class="form-control" id="activityId" placeholder="e.g. 12345" required>
<div class="form-text">Enter the ID of the activity to slice into segments.</div>
</div>
<div class="col-md-4">
<button type="submit" class="btn btn-primary w-100" id="singleSearchBtn">
<span class="spinner-border spinner-border-sm d-none" role="status"
aria-hidden="true"></span>
Analyze Activity
</button>
</div>
</form>
</div>
</div>
</div>
</div>
</div>
<!-- Save Modal -->
<div class="modal fade" id="saveSegmentModal" tabindex="-1">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title">Save Segment</h5>
<button type="button" class="btn-close" data-bs-dismiss="modal"></button>
</div>
<div class="modal-body">
<div class="mb-3">
<label for="segmentName" class="form-label">Segment Name</label>
<input type="text" class="form-control" id="segmentName" required>
</div>
<div class="mb-3">
<label for="segmentDesc" class="form-label">Description (Optional)</label>
<textarea class="form-control" id="segmentDesc" rows="2"></textarea>
</div>
<input type="hidden" id="saveCandidateIndex">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Cancel</button>
<button type="button" class="btn btn-primary" onclick="confirmSave()">Save</button>
</div>
</div>
</div>
</div>
<div class="row">
<!-- List Column -->
<div class="col-md-5">
<div id="resultsArea" class="row">
<!-- Results will be injected here -->
<div class="col-12 text-center text-muted" id="placeholder">
Run a search to see recommendations.
</div>
</div>
</div>
<!-- Map Column -->
<div class="col-md-7">
<div class="sticky-top" style="top: 20px; z-index: 0;">
<div id="map"></div>
</div>
</div>
</div>
<script>
// Initialize Map
const map = L.map('map').setView([0, 0], 2);
L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
attribution: '&copy; OpenStreetMap contributors'
}).addTo(map);
let currentLayers = [];
let debugLayerGroup = L.layerGroup().addTo(map);
let debugPathsData = [];
let currentCandidates = []; // Store data for saving
// Results DOM
const resultsArea = document.getElementById('resultsArea');
// Toggle listener
document.getElementById('showDebugLayer').addEventListener('change', function (e) {
if (e.target.checked) {
renderDebugLayer();
} else {
debugLayerGroup.clearLayers();
}
});
function renderDebugLayer() {
debugLayerGroup.clearLayers();
if (!debugPathsData || debugPathsData.length === 0) return;
debugPathsData.forEach(path => {
const latlngs = path.map(p => [p[1], p[0]]);
L.polyline(latlngs, {
color: '#999',
weight: 1,
opacity: 0.3,
dashArray: '5, 10'
}).addTo(debugLayerGroup);
});
}
function renderCandidates(candidates, append = false) {
if (!append) {
resultsArea.innerHTML = '';
currentCandidates = candidates;
} else {
currentCandidates = currentCandidates.concat(candidates);
}
const bounds = L.latLngBounds();
if (!candidates || candidates.length === 0) {
resultsArea.innerHTML = '<div class="col-12 text-center">No segments found matching criteria.</div>';
return;
}
candidates.forEach((cand, index) => {
// Skip candidates whose points cannot be mapped to [lat, lng]
let latlngs;
try {
latlngs = cand.points.map(p => [p[1], p[0]]);
} catch (e) { return; }
if (latlngs.length < 2) return;
const polyline = L.polyline(latlngs, {
color: 'blue',
weight: 4,
opacity: 0.7
}).addTo(map);
polyline.bindPopup(`<strong>Matches: ${cand.frequency}</strong><br>Dist: ${(cand.distance / 1000).toFixed(2)}km`);
// Add interactions
polyline.on('mouseover', function () {
this.setStyle({ color: 'red', weight: 6 });
highlightCard(index);
});
polyline.on('mouseout', function () {
this.setStyle({ color: 'blue', weight: 4 });
unhighlightCard(index);
});
currentLayers.push(polyline);
bounds.extend(latlngs);
const card = `
<div class="col-12 mb-2">
<div class="card candidate-card" id="card-${index}"
onmouseover="highlightMap(${index})"
onmouseout="unhighlightMap(${index})"
onclick="focusMap(${index})">
<div class="card-body p-3">
<div class="d-flex justify-content-between align-items-center">
<h6 class="mb-0">Candidate #${index + 1}</h6>
<span class="badge bg-success">${cand.frequency} Runs</span>
</div>
<small class="text-muted">Distance: ${(cand.distance / 1000).toFixed(2)} km</small>
<div class="mt-2">
<div class="mt-2">
<button class="btn btn-sm btn-outline-primary w-100" onclick="openSaveModal(${index})">Save to Library</button>
</div>
</div>
</div>
</div>
</div>
`;
resultsArea.insertAdjacentHTML('beforeend', card);
const cardEl = document.getElementById(`card-${index}`);
if (cardEl) cardEl.dataset.layerId = currentLayers.length - 1;
});
if (currentLayers.length > 0) {
map.fitBounds(bounds, { padding: [50, 50] });
}
}
document.getElementById('discoveryForm').addEventListener('submit', async function (e) {
e.preventDefault();
const btn = document.getElementById('searchBtn');
const spinner = btn.querySelector('.spinner-border');
btn.disabled = true;
spinner.classList.remove('d-none');
const type = document.getElementById('activityType').value;
const start = document.getElementById('startDate').value;
try {
const response = await fetch('/api/discovery/segments', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
activity_type: type,
start_date: start ? new Date(start).toISOString() : null
})
});
if (!response.ok) throw new Error('Discovery failed');
const data = await response.json();
// Clear map
currentLayers.forEach(l => map.removeLayer(l));
currentLayers = [];
debugLayerGroup.clearLayers();
debugPathsData = data.debug_paths || [];
if (document.getElementById('showDebugLayer').checked) {
renderDebugLayer();
}
renderCandidates(data.candidates);
} catch (err) {
console.error(err);
resultsArea.innerHTML = `<div class="alert alert-danger">Error: ${err.message}</div>`;
} finally {
btn.disabled = false;
spinner.classList.add('d-none');
}
});
document.getElementById('singleForm').addEventListener('submit', async function (e) {
e.preventDefault();
const btn = document.getElementById('singleSearchBtn');
const spinner = btn.querySelector('.spinner-border');
const actId = document.getElementById('activityId').value;
btn.disabled = true;
spinner.classList.remove('d-none');
try {
const response = await fetch('/api/discovery/single', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ activity_id: parseInt(actId) })
});
if (!response.ok) throw new Error('Single Analysis failed');
const data = await response.json();
// Clear map
currentLayers.forEach(l => map.removeLayer(l));
currentLayers = [];
debugLayerGroup.clearLayers();
renderCandidates(data.candidates);
} catch (err) {
console.error(err);
resultsArea.innerHTML = `<div class="alert alert-danger">Error: ${err.message}</div>`;
} finally {
btn.disabled = false;
spinner.classList.add('d-none');
}
});
// Helper functions for interaction
function highlightMap(index) {
const layer = currentLayers[index];
if (layer) {
layer.setStyle({ color: 'red', weight: 6 }).bringToFront();
}
}
function unhighlightMap(index) {
const layer = currentLayers[index];
if (layer) {
layer.setStyle({ color: 'blue', weight: 4 });
}
}
function focusMap(index) {
const layer = currentLayers[index];
if (layer) {
map.fitBounds(layer.getBounds(), { maxZoom: 14 });
}
}
function highlightCard(index) {
const card = document.getElementById(`card-${index}`);
if (card) card.classList.add('active');
}
function unhighlightCard(index) {
const card = document.getElementById(`card-${index}`);
if (card) card.classList.remove('active');
}
// Save Logic
let saveModal = null;
function openSaveModal(index) {
if (!saveModal) {
saveModal = new bootstrap.Modal(document.getElementById('saveSegmentModal'));
}
document.getElementById('saveCandidateIndex').value = index;
document.getElementById('segmentName').value = `New Segment #${index + 1}`;
document.getElementById('segmentDesc').value = 'Discovered from activity analysis';
saveModal.show();
}
async function confirmSave() {
const index = document.getElementById('saveCandidateIndex').value;
const name = document.getElementById('segmentName').value;
const desc = document.getElementById('segmentDesc').value;
const cand = currentCandidates[index];
if (!name) {
alert("Name is required");
return;
}
try {
// The candidate object carries no activity type, so fall back to the
// Global Discovery form's selector. On the Single Activity tab this is
// only a best guess (typically 'cycling').
let actType = document.getElementById('activityType').value;
const payload = {
name: name,
description: desc,
activity_type: actType, // Best guess
points: cand.points
};
const response = await fetch('/api/segments/save_custom', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload)
});
if (!response.ok) throw new Error('Save failed');
const data = await response.json();
alert('Segment saved successfully!');
saveModal.hide();
} catch (err) {
console.error(err);
alert('Error saving segment: ' + err.message);
}
}
</script>
{% endblock %}
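The page above reads cand.points ([lon, lat] pairs, swapped to [lat, lng] for Leaflet), cand.frequency, cand.distance, and data.debug_paths. A hypothetical Pydantic sketch of that contract; the real models in src/api/discovery.py may differ.

from pydantic import BaseModel

class CandidateOut(BaseModel):
    points: list[list[float]]      # [lon, lat] pairs
    frequency: int                 # number of activities matching the path
    distance: float                # metres
    activity_ids: list[int] = []

class DiscoveryResponse(BaseModel):
    candidates: list[CandidateOut]
    debug_paths: list[list[list[float]]] = []  # raw decimated paths for the debug layer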

View File

@@ -30,7 +30,9 @@
<th>Name</th>
<th>Type</th>
<th>Distance</th>
<th>Elevation</th>
<th>Efforts</th>
<th>Actions</th>
</tr>
</thead>
@@ -141,7 +143,9 @@
<td><strong>${seg.name}</strong></td>
<td><span class="badge bg-secondary">${seg.activity_type}</span></td>
<td>${(seg.distance / 1000).toFixed(2)} km</td>
<td>${seg.elevation_gain ? seg.elevation_gain.toFixed(1) + ' m' : '-'}</td>
<td><span class="badge bg-info text-dark">${seg.effort_count || 0}</span></td>
<td>
<button class="btn btn-sm btn-outline-primary me-1" onclick='viewSegment(${JSON.stringify(seg)})'>
<i class="bi bi-eye"></i> View

View File

@@ -0,0 +1,31 @@
from fastapi.testclient import TestClient
from main import app
from unittest.mock import MagicMock, patch
client = TestClient(app)
def test_discovery_endpoint():
# Mock the service to avoid DB calls
with patch('src.api.discovery.SegmentDiscoveryService') as MockService:
instance = MockService.return_value
instance.discover_segments.return_value = ([], []) # Empty candidates, empty debug paths
response = client.post("/api/discovery/segments", json={
"activity_type": "cycling",
"start_date": "2025-01-01T00:00:00"
})
assert response.status_code == 200
data = response.json()
assert "candidates" in data
assert isinstance(data["candidates"], list)
assert "debug_paths" in data
assert isinstance(data["debug_paths"], list)
def test_discovery_page_render():
response = client.get("/discovery")
assert response.status_code == 200
assert "Segment Discovery" in response.text

View File

@@ -0,0 +1,12 @@
from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_discovery_page_assets():
response = client.get("/discovery")
assert response.status_code == 200
assert "leaflet.js" in response.text
assert "leaflet.css" in response.text
assert "map" in response.text

View File

@@ -0,0 +1,43 @@
from fastapi.testclient import TestClient
from main import app
from unittest.mock import MagicMock, patch
client = TestClient(app)
def test_single_discovery_endpoint():
# Mock the service
with patch('src.api.discovery.SegmentDiscoveryService') as MockService:
instance = MockService.return_value
# Mock analyze_single_activity return value
mock_cand = MagicMock()
mock_cand.points = [[10.0, 50.0], [10.1, 50.1]]
mock_cand.frequency = 1
mock_cand.distance = 1000.0
mock_cand.activity_ids = [123]
instance.analyze_single_activity.return_value = [mock_cand]
response = client.post("/api/discovery/single", json={
"activity_id": 123
})
assert response.status_code == 200
data = response.json()
assert "candidates" in data
assert len(data["candidates"]) == 1
assert data["candidates"][0]["frequency"] == 1
assert data["candidates"][0]["distance"] == 1000.0
def test_single_discovery_not_found():
with patch('src.api.discovery.SegmentDiscoveryService') as MockService:
instance = MockService.return_value
instance.analyze_single_activity.return_value = []
response = client.post("/api/discovery/single", json={
"activity_id": 999
})
assert response.status_code == 200
data = response.json()
assert len(data["candidates"]) == 0

View File

@@ -0,0 +1,130 @@
import pytest
from src.services.discovery import SegmentDiscoveryService
from src.models.activity import Activity
from unittest.mock import MagicMock
from datetime import datetime
from unittest.mock import patch
def test_decimate_points():
service = SegmentDiscoveryService(None)
# 3 points in a line, 20m apart. Min dist 30m.
# p0 (0,0) -> 20m -> p1 (20m, 0) -> 20m -> p2 (40m, 0)
# Result should be p0, p2.
# Approx: 0.00018 deg lat ~ 20m
points = [
[0.0, 0.0],
[0.0, 0.00018],
[0.0, 0.00036]
]
decimated = service._decimate_points(points, min_dist=30.0)
assert len(decimated) == 2
assert decimated[0] == [0.0, 0.0]
assert decimated[1] == [0.0, 0.00036]
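For reference, a self-contained sketch of the distance-based decimation this test pins down: keep a point only once it is at least min_dist from the last kept point. It reproduces the expected output above, though the service's actual implementation may differ.

from math import asin, cos, radians, sin, sqrt

def haversine_m(lat1, lon1, lat2, lon2):
    # Great-circle distance in metres.
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 2 * 6371000 * asin(sqrt(a))

def decimate(points, min_dist=30.0):
    # points are [lon, lat]; keep a point once it is >= min_dist from the last kept one.
    kept = [points[0]]
    for p in points[1:]:
        if haversine_m(kept[-1][1], kept[-1][0], p[1], p[0]) >= min_dist:
            kept.append(p)
    return kept

assert decimate([[0.0, 0.0], [0.0, 0.00018], [0.0, 0.00036]]) == [[0.0, 0.0], [0.0, 0.00036]]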
def test_discovery_integration():
# Setup
mock_db = MagicMock()
service = SegmentDiscoveryService(mock_db)
# Mock Activity Objects
act1 = MagicMock(spec=Activity)
act1.id = 101
act1.activity_type = "cycling"
act1.start_time = datetime(2025,1,1)
act1.file_content = b"mock_content"
act1.file_type = "fit"
act2 = MagicMock(spec=Activity)
act2.id = 102
act2.activity_type = "cycling"
act2.start_time = datetime(2025,1,2)
act2.file_content = b"mock_content"
act2.file_type = "fit"
# Mock _decimate_points so both activities return the identical path below.
# Grid step is ~0.0002 deg (~20m); a 0.0010 deg span (~100m) would fall
# under the 200m minimum-length cutoff, so use ~300m (0.0030 deg).
path_shared = [[0.0, y*0.0001] for y in range(30)] # 0.0 to 0.0029 deg (~300m)
service._decimate_points = MagicMock(side_effect=[
path_shared, # act1
path_shared # act2
])
# Mock DB queries. The service queries Activity first, then Segment (for
# deduplication in _deduplicate_against_db). A single generic mock chain
# would return [act1, act2] for both, so dispatch on the model passed to
# db.query() instead.
def side_effect_query(model):
m = MagicMock()
if model == Activity:
# Depth 3
m.filter.return_value.filter.return_value.filter.return_value.all.return_value = [act1, act2]
# Depth 2 (type + start_date)
m.filter.return_value.filter.return_value.all.return_value = [act1, act2]
# Depth 1 (type only)
m.filter.return_value.all.return_value = [act1, act2]
else:
# Segment
m.filter.return_value.all.return_value = []
return m
mock_db.query.side_effect = side_effect_query
# discover_segments calls extract_points_from_file() before _decimate_points(),
# so patch it to avoid parsing the fake b"mock_content"; its return value is
# swallowed by the mocked _decimate_points anyway.
with patch('src.services.discovery.extract_points_from_file', return_value=[[0,0]]) as mock_extract:
candidates = service.discover_segments("cycling", datetime(2025,1,1))
# With identical decimated paths, the two activities must cluster into a
# single candidate whose activity_ids contain both source IDs.
assert len(candidates) == 1
c = candidates[0]
assert 101 in c.activity_ids
assert 102 in c.activity_ids
assert isinstance(c.activity_ids[0], int)
def test_connected_components():
service = SegmentDiscoveryService(None)
cells = {
(0,0): {1}, (0,1): {1}, (0,2): {1}, # Vertical loose chain
(0,3): {1}, (0,4): {1}, (0,5): {1}, # Total 6
(10,10): {2}, (10,11): {2} # Separate small cluster (length 2)
}
# The implementation drops components smaller than 5 cells.
comps = service._find_connected_components(cells, 0.0002)
assert len(comps) == 1
assert len(comps[0]) == 6
assert (0,0) in comps[0]
assert (10,10) not in comps[0]
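A sketch of the flood fill this test exercises: 8-connected components over occupied grid cells, dropping clusters below the minimum size. The real _find_connected_components also takes the cell size (0.0002 deg above); this simplified version assumes only the min-size-5 rule the test relies on.

def connected_components(cells, min_size=5):
    # cells: {(gx, gy): set(activity_ids)}; returns 8-connected components
    # of at least min_size cells.
    unvisited = set(cells)
    comps = []
    while unvisited:
        stack = [unvisited.pop()]
        comp = set(stack)
        while stack:
            x, y = stack.pop()
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    n = (x + dx, y + dy)
                    if n in unvisited:
                        unvisited.remove(n)
                        comp.add(n)
                        stack.append(n)
        if len(comp) >= min_size:
            comps.append(comp)
    return comps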

View File

@@ -0,0 +1,42 @@
============================= test session starts ==============================
platform linux -- Python 3.13.3, pytest-9.0.2, pluggy-1.6.0 -- /home/sstent/Projects/FitTrack2/FitnessSync/backend/.venv/bin/python3
cachedir: .pytest_cache
rootdir: /home/sstent/Projects/FitTrack2/FitnessSync/backend
configfile: pyproject.toml
plugins: asyncio-1.3.0, cov-7.0.0, anyio-4.12.1
asyncio: mode=Mode.AUTO, debug=False, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
collecting ... collected 0 items / 1 error
==================================== ERRORS ====================================
______________ ERROR collecting tests/services/test_discovery.py _______________
ImportError while importing test module '/home/sstent/Projects/FitTrack2/FitnessSync/backend/tests/services/test_discovery.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/lib/python3.13/importlib/__init__.py:88: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
backend/tests/services/test_discovery.py:2: in <module>
from backend.src.services.discovery import SegmentDiscoveryService
E ModuleNotFoundError: No module named 'backend'
=============================== warnings summary ===============================
backend/src/models/base.py:3
/home/sstent/Projects/FitTrack2/FitnessSync/backend/src/models/base.py:3: MovedIn20Warning: The ``declarative_base()`` function is now available as sqlalchemy.orm.declarative_base(). (deprecated since: 2.0) (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9)
Base = declarative_base()
backend/src/api/status.py:21
/home/sstent/Projects/FitTrack2/FitnessSync/backend/src/api/status.py:21: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.12/migration/
class SyncLogResponse(BaseModel):
backend/src/api/scheduling.py:23
/home/sstent/Projects/FitTrack2/FitnessSync/backend/src/api/scheduling.py:23: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.12/migration/
class ScheduledJobResponse(BaseModel):
backend/src/api/bike_setups.py:33
/home/sstent/Projects/FitTrack2/FitnessSync/backend/src/api/bike_setups.py:33: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.12/migration/
class BikeSetupRead(BaseModel):
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
ERROR backend/tests/services/test_discovery.py
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
========================= 4 warnings, 1 error in 0.14s =========================
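The collection error above comes from the `backend.` import prefix: with rootdir at backend/, the module resolves as src.services.discovery, which is what the committed test now uses. If the suite must instead run from the repository root, a minimal conftest.py sketch would be:

# conftest.py at the repository root (sketch; only needed when pytest is not
# run from backend/). Puts backend/ on sys.path so `from src...` resolves.
import os
import sys

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "backend"))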

View File

@@ -0,0 +1,73 @@
import sys
import os
sys.path.append('/app/backend')
import logging
from src.services.postgresql_manager import PostgreSQLManager
from src.utils.config import config
from src.models.activity import Activity
from src.models.bike_setup import BikeSetup
from sqlalchemy import or_, and_
logging.basicConfig(level=logging.WARNING)
def analyze():
print("Connecting to database...")
db_manager = PostgreSQLManager(config.DATABASE_URL)
session = db_manager.SessionLocal()
try:
# 1. Find the Stormchaser setup(s)
stormchasers = session.query(BikeSetup).filter(
or_(
BikeSetup.name.ilike('%Stormchaser%'),
BikeSetup.frame.ilike('%Stormchaser%')
)
).order_by(BikeSetup.purchase_date.asc()).all()
reference_date = None
for bike in stormchasers:
if bike.purchase_date:
if reference_date is None or bike.purchase_date < reference_date:
reference_date = bike.purchase_date
if not reference_date:
print("No Stormchaser purchase date found.")
return
print(f"Using Reference Date: {reference_date}")
# 2. Find Generic Bike ID
generic = session.query(BikeSetup).filter(BikeSetup.name == 'GenericBike').first()
if not generic:
print("GenericBike setup not found.")
return
# 3. Query Activities
# Find Generic Rides in range
query = session.query(Activity).filter(
Activity.bike_setup_id == generic.id,
Activity.start_time >= reference_date
)
count = query.count()
print(f"\nResult: {count} rides tagged as Generic since {reference_date}")
# Also check smart confidence stats on recently matched Stormchaser rides
print("\n[Smart Match Stats Sample (Stormchaser)]")
sc_rides = session.query(Activity).filter(
Activity.bike_setup_id.in_([b.id for b in stormchasers]),
Activity.start_time >= reference_date
).order_by(Activity.start_time.desc()).limit(10).all()
for r in sc_rides:
print(f"ID: {r.id} | Conf: {r.bike_match_confidence} | Type: {r.activity_type}")
except Exception as e:
print(f"Error during analysis: {e}")
finally:
session.close()
if __name__ == "__main__":
analyze()

View File

@@ -0,0 +1,138 @@
import logging
from sqlalchemy.orm import Session
from src.services.postgresql_manager import PostgreSQLManager
from src.utils.config import config
from src.models.activity import Activity
from src.models.segment import Segment
from src.services.segment_matcher import SegmentMatcher
from src.services.parsers import extract_points_from_file
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def debug_matching():
db_manager = PostgreSQLManager(config.DATABASE_URL)
with db_manager.get_db_session() as db:
# 1. Fetch Segment
segment = db.query(Segment).filter(Segment.name.like("%Etiwanda%")).first()
if not segment:
print("ERROR: Segment 'Etiwanda Climb' not found.")
return
print(f"DEBUG: Found Segment ID {segment.id}: {segment.name}")
print(f"DEBUG: Segment Distance: {segment.distance}")
# 2. Fetch Activity
# Try finding by ID or Garmin ID
act_id = 21072264737
activity = db.query(Activity).filter(Activity.id == act_id).first()
if not activity:
activity = db.query(Activity).filter(Activity.garmin_activity_id == str(act_id)).first()
if not activity:
print(f"ERROR: Activity {act_id} not found.")
return
print(f"DEBUG: Found Activity ID {activity.id} (Garmin: {activity.garmin_activity_id})")
# 3. Extract Points
if not activity.file_content:
print("ERROR: Activity has no file content")
return
points = extract_points_from_file(activity.file_content, activity.file_type)
print(f"DEBUG: Extracted {len(points)} points from activity.")
# 4. Trigger Match
matcher = SegmentMatcher(db)
# Manually invoke match parts to trace
# (Copying logic from match_activity wrapper)
import json
seg_points = json.loads(segment.points) if isinstance(segment.points, str) else segment.points
print(f"DEBUG: Segment has {len(seg_points)} points.")
print(f"DEBUG: Segment has {len(seg_points)} points.")
efforts = matcher.match_activity(activity, points)
if efforts:
print(f"SUCCESS: match_activity returned {len(efforts)} efforts.")
for e in efforts:
print(f" - Segment {e.segment_id} (Duration: {e.elapsed_time}s)")
else:
print("FAILURE: match_activity returned NO efforts.")
# --- Deep Trace ---
print("\n--- DEEP TRACE ---")
ENTRY_RADIUS = 25.0
CORRIDOR_RADIUS = 35.0
from src.utils.geo import haversine_distance, perpendicular_distance
start_node = seg_points[0]
end_node = seg_points[-1]
# Check Start Proximity
start_candidates = []
min_start_dist = float('inf')
for i, p in enumerate(points):
dist = haversine_distance(p[1], p[0], start_node[1], start_node[0])
if dist < min_start_dist: min_start_dist = dist
if dist <= ENTRY_RADIUS:
start_candidates.append(i)
print(f"Min distance to Start Node: {min_start_dist:.2f}m")
print(f"Start Candidates: {start_candidates}")
if not start_candidates:
print("FAIL: Start node never reached within 25m.")
return
# Trace Candidate 0 (or all)
for start_idx in start_candidates:
print(f"\nChecking candidate starting at index {start_idx}...")
effort_accum_dist = 0.0
deviated = False
completed = False
max_deviation = 0.0
for j in range(start_idx + 1, len(points)):
p = points[j]
prev_p = points[j-1]
# Accumulate distance
step_dist = haversine_distance(p[1], p[0], prev_p[1], prev_p[0])
effort_accum_dist += step_dist
# Check deviation
dev = matcher._min_dist_to_segment_path(p, seg_points)
if dev > max_deviation: max_deviation = dev
if dev > CORRIDOR_RADIUS:
print(f" DEVIATION at index {j}! Dist {dev:.2f}m > {CORRIDOR_RADIUS}m. AccumDist: {effort_accum_dist:.2f}m")
deviated = True
break
# Check completion
d_end = haversine_distance(p[1], p[0], end_node[1], end_node[0])
if d_end <= ENTRY_RADIUS:
if effort_accum_dist >= 0.8 * segment.distance:
print(f" COMPLETION possible at index {j}. d_end={d_end:.2f}m, dist={effort_accum_dist:.2f}m")
completed = True
# The matcher returns on the first valid end; keep scanning here only to
# log whether a later point would also qualify.
if not completed:
print(f" Candidate ended without completion. Max Deviation: {max_deviation:.2f}m. Total Dist: {effort_accum_dist:.2f}m vs Target {segment.distance:.2f}m")
if __name__ == "__main__":
debug_matching()
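For reference, a self-contained sketch of the corridor check that _min_dist_to_segment_path performs: the minimum point-to-polyline distance, here via a local equirectangular projection (adequate for the short paths traced above; the project's own geo helpers may compute it differently).

import math

def min_dist_to_path_m(point, path):
    # point and path vertices are [lon, lat]; returns metres.
    R = 6371000.0
    lat0 = math.radians(point[1])

    def proj(p):
        return (R * math.radians(p[0]) * math.cos(lat0), R * math.radians(p[1]))

    px, py = proj(point)
    best = float("inf")
    for a, b in zip(path, path[1:]):
        ax, ay = proj(a)
        bx, by = proj(b)
        dx, dy = bx - ax, by - ay
        if dx == 0 and dy == 0:
            t = 0.0
        else:
            t = max(0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / (dx * dx + dy * dy)))
        best = min(best, math.hypot(px - (ax + t * dx), py - (ay + t * dy)))
    return best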

View File

@@ -0,0 +1,168 @@
import sys
import os
import statistics
import logging
sys.path.append('/app/backend')
from src.services.postgresql_manager import PostgreSQLManager
from src.utils.config import config
from src.models.activity import Activity
from src.models.bike_setup import BikeSetup
from src.services.parsers import extract_activity_data
from src.services.bike_matching import WHEEL_CIRCUMFERENCE_M
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Re-implement logic to capture samples
def analyze_streams_debug(speed_stream, cadence_stream, window_size=10):
if not speed_stream or not cadence_stream or len(speed_stream) != len(cadence_stream):
print(" - Streams missing or mismatched length")
return [], 0.0
ratios = []
samples = []
rejected_reasons = {'none': 0, 'threshold': 0, 'variance': 0}
n = len(speed_stream)
for i in range(0, n - window_size, 5):
window_speeds = speed_stream[i:i+window_size]
window_cadences = cadence_stream[i:i+window_size]
if any(v is None for v in window_speeds) or any(c is None for c in window_cadences):
rejected_reasons['none'] += 1
continue
if all(c > 55 for c in window_cadences) and all(v > 2.5 for v in window_speeds):
try:
cad_std = statistics.stdev(window_cadences)
spd_std = statistics.stdev(window_speeds)
if cad_std < 5 and spd_std < 0.5:
avg_speed = statistics.mean(window_speeds)
avg_cadence = statistics.mean(window_cadences)
ratio = (avg_speed * 60) / (avg_cadence * WHEEL_CIRCUMFERENCE_M)
ratios.append(ratio)
if len(samples) < 10:
samples.append({
'time_idx': i,
'avg_spd': avg_speed,
'avg_cad': avg_cadence,
'ratio': ratio,
'cad_std': cad_std
})
else:
rejected_reasons['variance'] += 1
except statistics.StatisticsError:
pass
else:
rejected_reasons['threshold'] += 1
if not ratios:
print(f" - No steady segments. Rejections: {rejected_reasons}")
return [], 0.0
return samples, statistics.median(ratios)
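A quick worked check of the ratio formula above (wheel revolutions per minute divided by crank revolutions per minute). The circumference value is an assumption for illustration; the real constant comes from src.services.bike_matching.

WHEEL_CIRCUMFERENCE_M = 2.096  # assumed ~700c road wheel

def observed_gear_ratio(speed_ms: float, cadence_rpm: float) -> float:
    # wheel rpm = speed * 60 / circumference; gear ratio = wheel rpm / crank rpm
    return (speed_ms * 60) / (cadence_rpm * WHEEL_CIRCUMFERENCE_M)

# ~30 km/h at 90 rpm implies roughly a 2.65 ratio (e.g. 45/17 = 2.647)
assert round(observed_gear_ratio(8.33, 90.0), 2) == 2.65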
def main():
target_ids = ['21072264737', '18469350198', '18349164690']
db = PostgreSQLManager(config.DATABASE_URL).SessionLocal()
# Load all setups
setups = db.query(BikeSetup).all()
print(f"Loaded {len(setups)} bike setups.")
for s in setups:
if s.rear_cog == 0:
print(f" - {s.name or s.frame}: No gears configured (skipped)")
continue
mech_ratio = s.chainring / s.rear_cog
print(f" - {s.name or s.frame}: {s.chainring}/{s.rear_cog} = {mech_ratio:.3f} (Active: {s.purchase_date} to {s.retirement_date})")
print("\n" + "="*80)
# Add a control activity to verify script works
print("\n" + "="*80)
print("CONTROL CHECK: Finding a random activity WITH cadence to verify script logic...")
control_activity = db.query(Activity).filter(Activity.avg_cadence > 0, Activity.file_content != None).first()
if control_activity:
target_ids.append(control_activity.garmin_activity_id)
else:
print("No control activity found!")
for gid in target_ids:
print(f"\nAnalyzing Activity Garmin ID: {gid}")
activity = db.query(Activity).filter(Activity.garmin_activity_id == str(gid)).first()
if not activity:
print(" - Not found in DB")
continue
print(f" - Type: {activity.activity_type}")
print(f" - Date: {activity.start_time}")
print(f" - Global Avg Speed: {activity.avg_speed:.2f} m/s" if activity.avg_speed else " - Global Avg Speed: None")
print(f" - Global Avg Cadence: {activity.avg_cadence:.1f} rpm" if activity.avg_cadence else " - Global Avg Cadence: None")
if not activity.file_content:
print(" - No file content available")
continue
data = extract_activity_data(activity.file_content, activity.file_type)
speeds = data.get('speed') or []
cadences = data.get('cadence') or []
# Check if actual data exists
valid_speeds = [x for x in speeds if x is not None]
valid_cadences = [x for x in cadences if x is not None]
if len(valid_cadences) < 10:
print(" - CRITICAL: No cadence data stream found in file.")
print(" - Result: IMPOSSIBLE TO MATCH GEAR RATIO.")
continue
samples, observed_ratio = analyze_streams_debug(speeds, cadences)
print(f" - Steady Segments Found: {len(samples) if samples else 0}")
print(f" - Observed Ratio (Median): {observed_ratio:.3f}")
if samples:
print(" - First 10 Steady Samples:")
for s in samples:
print(f" - T={s['time_idx']}s | Spd={s['avg_spd']:.1f} | Cad={s['avg_cad']:.1f} | R={s['ratio']:.3f} (std_cad={s['cad_std']:.1f})")
# ... logic continues ...
print("\n - Matching Against Setups:")
if observed_ratio > 0:
for bike in setups:
if bike.rear_cog == 0:
continue
# Date Check
active = True
if bike.purchase_date and activity.start_time.date() < bike.purchase_date:
active = False
if bike.retirement_date and activity.start_time.date() > bike.retirement_date:
active = False
status_str = "ACTIVE" if active else "INACTIVE"
mech_ratio = bike.chainring / bike.rear_cog
diff = abs(observed_ratio - mech_ratio)
error_pct = diff / mech_ratio
confidence = max(0.0, 1.0 - error_pct)
marker = "<<< BEST MATCH" if confidence > 0.9 else ""
if not active: marker = "(Date Mismatch)"
print(f" - {bike.name or bike.frame} ({bike.chainring}/{bike.rear_cog}): Mech={mech_ratio:.3f} | Diff={diff:.3f} | Conf={confidence:.3f} [{status_str}] {marker}")
else:
print(" - Could not calculate valid observed ratio from streams.")
db.close()
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,28 @@
import requests
import json
URL = "http://localhost:8000/api/segments/save_custom"
def test_save_custom():
payload = {
"name": "Test Segment Discovered",
"description": "Created via API test",
"activity_type": "cycling",
"points": [
[151.2093, -33.8688, 10],
[151.2100, -33.8690, 15],
[151.2110, -33.8700, 20]
]
}
try:
res = requests.post(URL, json=payload)
print(f"Status: {res.status_code}")
print(f"Response: {res.text}")
except Exception as e:
print(f"Error: {e}")
if __name__ == "__main__":
test_save_custom()

View File

@@ -0,0 +1,146 @@
import sys
import os
import math
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import logging
sys.path.append('/app/backend')
from src.services.discovery import SegmentDiscoveryService
from src.models.activity import Activity
from src.utils.geo import calculate_bearing, haversine_distance
from src.services.parsers import extract_activity_data
logging.basicConfig(level=logging.INFO)
def trace_activity(activity_id):
DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://postgres:password@localhost:5433/fitbit_garmin_sync")
engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
db = SessionLocal()
# 1. Fetch Activity
act = db.query(Activity).filter(Activity.garmin_activity_id == str(activity_id)).first()
if not act:
print(f"Activity {activity_id} NOT FOUND.")
return
print(f"Tracing Activity {act.id} ({act.garmin_activity_id})")
# 2. Extract Data
# Parser returns a dict: {'points': [], 'timestamps': [], ...}
result = extract_activity_data(act.file_content, act.file_type)
points = result['points']
timestamps = result['timestamps']
# Filter points (same logic as service)
clean_points = []
clean_ts = []
for i in range(len(points)):
if points[i][0] is not None and points[i][1] is not None:
clean_points.append(points[i])
clean_ts.append(timestamps[i])
print(f"Clean points: {len(clean_points)}")
# 3. Simulate Analysis Loop
current_segment = [clean_points[0]]
last_bearing = None
segments_found = 0
skipped_dist = 0
max_dist = 0
max_time_diff = 0
max_bearing_diff = 0
for i in range(1, len(clean_points)):
p1 = clean_points[i-1]
p2 = clean_points[i]
# dist
dist = haversine_distance(p1[1], p1[0], p2[1], p2[0])
max_dist = max(max_dist, dist)
if dist < 2.0:
skipped_dist += 1
continue
bearing = calculate_bearing(p1[1], p1[0], p2[1], p2[0])
is_turn = False
diff = 0
if last_bearing is not None:
diff = abs(bearing - last_bearing)
if diff > 180: diff = 360 - diff
max_bearing_diff = max(max_bearing_diff, diff)
if diff > 60:
is_turn = True
print(f"TURN DETECTED at index {i}: Bearing {last_bearing:.1f} -> {bearing:.1f} (Diff: {diff:.1f})")
elif diff > 10: # Log even smaller turns to see noise
print(f" Minor Turn at index {i}: Bearing {last_bearing:.1f} -> {bearing:.1f} (Diff: {diff:.1f})")
last_bearing = bearing
t1 = clean_ts[i-1]
t2 = clean_ts[i]
time_diff = (t2 - t1).total_seconds()
max_time_diff = max(max_time_diff, time_diff)
is_pause = time_diff > 10
if is_pause:
print(f"PAUSE DETECTED at index {i}: {time_diff}s")
if is_pause or is_turn:
if len(current_segment) > 10:
segments_found += 1
print(f" -> Segment Created (Points: {len(current_segment)})")
current_segment = [p2]
else:
current_segment.append(p2)
print(f"\nStats:")
print(f" Total Points: {len(clean_points)}")
print(f" Skipped (dist < 5m): {skipped_dist}")
print(f" Max Point-to-Point Dist: {max_dist:.2f}m")
print(f" Max Time Diff: {max_time_diff}s")
print(f" Max Bearing Diff: {max_bearing_diff:.1f}")
print(f" Segments Found (from splits): {segments_found}")
from src.utils.geo import ramer_douglas_peucker
for eps in [2.0, 5.0, 10.0, 15.0]:
print(f"\n--- Riper RDP Trace (epsilon={eps}m) ---")
simplified = ramer_douglas_peucker(clean_points, eps)
print(f"Simplified points: {len(simplified)} (from {len(clean_points)})")
last_bearing = None
turns_found = 0
for i in range(1, len(simplified)):
p1 = simplified[i-1]
p2 = simplified[i]
bearing = calculate_bearing(p1[1], p1[0], p2[1], p2[0])
if last_bearing is not None:
diff = abs(bearing - last_bearing)
if diff > 180: diff = 360 - diff
if diff > 60:
print(f"TURN: {last_bearing:.1f} -> {bearing:.1f} (Diff: {diff:.1f})")
turns_found += 1
elif diff > 45:
print(f" Minor Turn (>45): {last_bearing:.1f} -> {bearing:.1f} (Diff: {diff:.1f})")
last_bearing = bearing
print(f"Total Turns > 60: {turns_found}")
if __name__ == "__main__":
trace_activity(21465710074)
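The turn detection above normalises bearing differences inline (diff > 180 becomes 360 - diff). A tiny self-contained check of that wraparound, since it is easy to get wrong:

def bearing_diff(b1: float, b2: float) -> float:
    # Smallest absolute angle between two compass bearings, in [0, 180].
    d = abs(b2 - b1) % 360
    return 360 - d if d > 180 else d

assert bearing_diff(350.0, 10.0) == 20.0   # wraps through north
assert bearing_diff(10.0, 350.0) == 20.0
assert bearing_diff(90.0, 270.0) == 180.0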

View File

@@ -0,0 +1,19 @@
import requests
import json
# Activity ID 31 matches Garmin ID 21072264737
ACT_ID = 21072264737
URL = f"http://localhost:8000/api/segments/scan/{ACT_ID}"
def trigger():
print(f"Triggering scan for Activity {ACT_ID}...")
try:
res = requests.post(URL)
print(f"Status: {res.status_code}")
print(f"Response: {res.text}")
except Exception as e:
print(f"Error: {e}")
if __name__ == "__main__":
trigger()

View File

@@ -0,0 +1,58 @@
import logging
import sys
import os
# Ensure 'src' is importable: in the container the app runs from /app,
# so /app/backend must be on sys.path.
sys.path.append('/app/backend')
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.utils.config import config
from src.models.activity import Activity
from src.services.parsers import extract_points_from_file
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def backfill_location():
db_string = config.DATABASE_URL
db = create_engine(db_string)
Session = sessionmaker(bind=db)
session = Session()
try:
activities = session.query(Activity).filter(
Activity.file_content != None,
Activity.start_lat == None
).all()
logger.info(f"Scanning {len(activities)} activities for backfill...")
count = 0
for act in activities:
try:
points = extract_points_from_file(act.file_content, act.file_type)
if points and len(points) > 0:
first_point = points[0]
# points are [lon, lat] or [lon, lat, ele]
act.start_lng = first_point[0]
act.start_lat = first_point[1]
count += 1
if count % 100 == 0:
session.commit()
logger.info(f"Processed {count} activities...")
except Exception as e:
logger.error(f"Failed to parse activity {act.id}: {e}")
session.commit()
logger.info(f"Backfill complete. Updated {count} activities.")
finally:
session.close()
if __name__ == "__main__":
backfill_location()