mirror of
https://github.com/sstent/AICycling_mcp.git
synced 2026-01-25 08:35:03 +00:00
restructured repo again
@@ -54,6 +54,16 @@ class TrainingLoad:
     fatigue: float  # Acute Training Load
     form: float  # Training Stress Balance

+@dataclass
+class PerformanceTrend:
+    """Performance trend for a metric"""
+    metric_name: str
+    current_value: float
+    trend_7day: float  # % change over 7 days
+    trend_30day: float  # % change over 30 days
+    trend_direction: str  # "improving", "stable", "declining"
+    confidence: float  # 0-1 based on data points
+

 class CyclingMetricsCalculator:
     """Calculate deterministic cycling metrics"""
@@ -360,6 +370,301 @@ class CyclingMetricsCalculator:
             return "declining"
         else:
             return "stable"

+    def calculate_performance_trends(self, workout_history: List[Dict[str, Any]],
+                                     days: int = 30) -> List[PerformanceTrend]:
+        """Calculate performance trends over specified period"""
+        if not workout_history:
+            return []
+
+        cutoff_date = datetime.now() - timedelta(days=days)
+        recent_metrics = []
+
+        for record in workout_history:
+            workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
+            if workout_date >= cutoff_date:
+                recent_metrics.append({
+                    'date': workout_date,
+                    'metrics': self.calculate_workout_metrics(record)
+                })
+
+        if len(recent_metrics) < 2:
+            return []
+
+        # Sort by date
+        recent_metrics.sort(key=lambda x: x['date'])
+
+        trends = []
+
+        # Calculate trends for key metrics
+        metrics_to_track = [
+            ('avg_speed_kmh', 'Average Speed'),
+            ('avg_hr', 'Average Heart Rate'),
+            ('avg_power', 'Average Power'),
+            ('estimated_ftp', 'Estimated FTP'),
+            ('training_stress_score', 'Training Stress Score')
+        ]
+
+        for metric_attr, metric_name in metrics_to_track:
+            trend = self._calculate_single_metric_trend(recent_metrics, metric_attr, metric_name, days)
+            if trend:
+                trends.append(trend)
+
+        return trends
+
+    def _calculate_single_metric_trend(self, recent_metrics: List[Dict],
+                                       metric_attr: str, metric_name: str,
+                                       days: int) -> Optional[PerformanceTrend]:
+        """Calculate trend for a single metric"""
+        # Extract values, filtering out None values
+        values_with_dates = []
+        for record in recent_metrics:
+            value = getattr(record['metrics'], metric_attr)
+            if value is not None:
+                values_with_dates.append((record['date'], value))
+
+        if len(values_with_dates) < 2:
+            return None
+
+        # Calculate current value (average of last 3 workouts)
+        recent_values = [v for _, v in values_with_dates[-3:]]
+        current_value = sum(recent_values) / len(recent_values)
+
+        # Calculate 7-day trend if we have enough data
+        week_ago = datetime.now() - timedelta(days=7)
+        week_values = [v for d, v in values_with_dates if d >= week_ago]
+
+        if len(week_values) >= 2:
+            week_old_avg = sum(week_values[:len(week_values)//2]) / (len(week_values)//2)
+            week_recent_avg = sum(week_values[len(week_values)//2:]) / (len(week_values) - len(week_values)//2)
+            trend_7day = ((week_recent_avg - week_old_avg) / week_old_avg * 100) if week_old_avg > 0 else 0
+        else:
+            trend_7day = 0
+
+        # Calculate 30-day trend
+        if len(values_with_dates) >= 4:
+            old_avg = sum(v for _, v in values_with_dates[:len(values_with_dates)//2]) / (len(values_with_dates)//2)
+            recent_avg = sum(v for _, v in values_with_dates[len(values_with_dates)//2:]) / (len(values_with_dates) - len(values_with_dates)//2)
+            trend_30day = ((recent_avg - old_avg) / old_avg * 100) if old_avg > 0 else 0
+        else:
+            trend_30day = 0
+
+        # Determine trend direction
+        primary_trend = trend_7day if abs(trend_7day) > abs(trend_30day) else trend_30day
+        if primary_trend > 2:
+            trend_direction = "improving"
+        elif primary_trend < -2:
+            trend_direction = "declining"
+        else:
+            trend_direction = "stable"
+
+        # Calculate confidence based on data points
+        confidence = min(len(values_with_dates) / 10, 1.0)  # Max confidence at 10+ data points
+
+        return PerformanceTrend(
+            metric_name=metric_name,
+            current_value=round(current_value, 2),
+            trend_7day=round(trend_7day, 1),
+            trend_30day=round(trend_30day, 1),
+            trend_direction=trend_direction,
+            confidence=round(confidence, 2)
+        )
+
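The split-half comparison above is easiest to sanity-check in isolation. A minimal standalone sketch of the same arithmetic (hypothetical helper, not part of the commit):

from typing import Sequence

def split_half_trend_pct(values: Sequence[float]) -> float:
    """Percent change between the older half and the newer half of a series."""
    half = len(values) // 2
    old_avg = sum(values[:half]) / half
    new_avg = sum(values[half:]) / (len(values) - half)
    return (new_avg - old_avg) / old_avg * 100 if old_avg > 0 else 0.0

# Five average-speed samples, oldest first: old half avg = 25.0, new half avg = 27.0
print(round(split_half_trend_pct([24.0, 26.0, 26.5, 27.0, 27.5]), 1))  # 8.0 -> "improving" (> 2)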
+    def get_ftp_estimates_history(self, performance_history: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Get historical FTP estimates for tracking progress"""
+        ftp_history = []
+
+        for record in performance_history:
+            metrics = self.calculate_workout_metrics(record)
+            if metrics.estimated_ftp:
+                ftp_history.append({
+                    "date": record['date'],
+                    "activity_id": record['activity_id'],
+                    "estimated_ftp": metrics.estimated_ftp,
+                    "workout_type": record['metrics'].get('workout_classification', 'unknown')
+                })
+
+        # Sort by date and return recent estimates
+        ftp_history.sort(key=lambda x: x['date'], reverse=True)
+        return ftp_history[:20]  # Last 20 estimates
+
+    def get_gear_usage_analysis(self, performance_history: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """Get single speed gear usage analysis"""
+        gear_data = []
+
+        for record in performance_history:
+            metrics = self.calculate_workout_metrics(record)
+            if metrics.estimated_gear_ratio:
+                gear_data.append({
+                    "date": record['date'],
+                    "estimated_ratio": metrics.estimated_gear_ratio,
+                    "chainring": metrics.estimated_chainring,
+                    "cog": metrics.estimated_cog,
+                    "avg_speed": metrics.avg_speed_kmh,
+                    "elevation_gain": metrics.elevation_gain_m,
+                    "terrain_type": self._classify_terrain(metrics)
+                })
+
+        if not gear_data:
+            return {"message": "No gear data available"}
+
+        # Analyze gear preferences by terrain
+        gear_preferences = {}
+        for data in gear_data:
+            terrain = data['terrain_type']
+            gear = f"{data['chainring']}x{data['cog']}"
+
+            if terrain not in gear_preferences:
+                gear_preferences[terrain] = {}
+            if gear not in gear_preferences[terrain]:
+                gear_preferences[terrain][gear] = 0
+            gear_preferences[terrain][gear] += 1
+
+        # Calculate most common gears
+        all_gears = {}
+        for data in gear_data:
+            gear = f"{data['chainring']}x{data['cog']}"
+            all_gears[gear] = all_gears.get(gear, 0) + 1
+
+        most_common_gear = max(all_gears.items(), key=lambda x: x[1])
+
+        return {
+            "total_workouts_analyzed": len(gear_data),
+            "most_common_gear": {
+                "gear": most_common_gear[0],
+                "usage_count": most_common_gear[1],
+                "usage_percentage": round(most_common_gear[1] / len(gear_data) * 100, 1)
+            },
+            "gear_by_terrain": gear_preferences,
+            "gear_recommendations": self._recommend_gears(gear_data)
+        }
+
+    def _classify_terrain(self, metrics: WorkoutMetrics) -> str:
+        """Classify terrain type from workout metrics"""
+        if metrics.distance_km == 0:
+            return "unknown"
+
+        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
+
+        if elevation_per_km > 15:
+            return "steep_climbing"
+        elif elevation_per_km > 8:
+            return "moderate_climbing"
+        elif elevation_per_km > 3:
+            return "rolling_hills"
+        else:
+            return "flat_terrain"
+
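A quick check of the elevation-per-km bands used above, with illustrative rides (not data from the repo):

rides = {"40 km / 400 m": 400 / 40, "30 km / 100 m": 100 / 30, "20 km / 350 m": 350 / 20}
for label, epk in rides.items():
    band = ("steep_climbing" if epk > 15 else
            "moderate_climbing" if epk > 8 else
            "rolling_hills" if epk > 3 else "flat_terrain")
    print(f"{label}: {epk:.1f} m/km -> {band}")
# 40 km / 400 m: 10.0 m/km -> moderate_climbing
# 30 km / 100 m: 3.3 m/km -> rolling_hills
# 20 km / 350 m: 17.5 m/km -> steep_climbing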
+    def _recommend_gears(self, gear_data: List[Dict]) -> Dict[str, str]:
+        """Recommend optimal gears for different conditions"""
+        if not gear_data:
+            return {}
+
+        # Group by terrain and find most efficient gears
+        terrain_efficiency = {}
+
+        for data in gear_data:
+            terrain = data['terrain_type']
+            gear = f"{data['chainring']}x{data['cog']}"
+            speed = data['avg_speed']
+
+            if terrain not in terrain_efficiency:
+                terrain_efficiency[terrain] = {}
+            if gear not in terrain_efficiency[terrain]:
+                terrain_efficiency[terrain][gear] = []
+
+            terrain_efficiency[terrain][gear].append(speed)
+
+        # Calculate average speeds for each gear/terrain combo
+        recommendations = {}
+        for terrain, gears in terrain_efficiency.items():
+            best_gear = None
+            best_avg_speed = 0
+
+            for gear, speeds in gears.items():
+                avg_speed = sum(speeds) / len(speeds)
+                if avg_speed > best_avg_speed:
+                    best_avg_speed = avg_speed
+                    best_gear = gear
+
+            if best_gear:
+                recommendations[terrain] = best_gear
+
+        return recommendations
+
+    def load_metrics_history(self, metrics_file: str) -> List[Dict[str, Any]]:
+        """Load performance history from file"""
+        performance_history = []
+        try:
+            with open(metrics_file, 'r') as f:
+                data = json.load(f)
+                performance_history = data.get('performance_history', [])
+            logger.info(f"Loaded {len(performance_history)} workout records")
+        except Exception as e:
+            logger.error(f"Error loading metrics history: {e}")
+            performance_history = []
+        return performance_history
+
+    def save_metrics_history(self, performance_history: List[Dict[str, Any]], metrics_file: str) -> None:
+        """Save performance history to file"""
+        try:
+            # Keep only last 200 workouts to prevent file from growing too large
+            performance_history = performance_history[-200:]
+
+            data = {
+                'performance_history': performance_history,
+                'last_updated': datetime.now().isoformat()
+            }
+
+            with open(metrics_file, 'w') as f:
+                json.dump(data, f, indent=2, default=str)
+
+            logger.debug(f"Saved {len(performance_history)} workout records")
+        except Exception as e:
+            logger.error(f"Error saving metrics history: {e}")
+
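For reference, the persisted file written above looks roughly like this; record fields follow the workout_record built by the cache manager later in this commit, and all values here are hypothetical:

# Illustrative shape of the persisted metrics file (values hypothetical):
example = {
    "performance_history": [
        {
            "activity_id": "12345",
            "date": "2025-01-20T09:15:00+00:00",
            "metrics": {"avg_speed_kmh": 26.4, "estimated_ftp": 215.0},  # asdict(WorkoutMetrics), truncated
            "data_quality": "complete",
        }
    ],
    "last_updated": "2025-01-21T08:00:00",
}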
+    def get_workout_summary_for_llm(self, activity_id: str, performance_history: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """Get structured workout summary optimized for LLM analysis"""
+        # Find the workout
+        workout_record = next((record for record in performance_history if record['activity_id'] == activity_id), None)
+        if not workout_record:
+            return {"error": "No workout found for activity"}
+
+        metrics = self.calculate_workout_metrics(workout_record)
+        training_load = self.calculate_training_load(performance_history)
+        performance_trends = self.calculate_performance_trends(performance_history)
+
+        # Generate standardized assessment
+        assessment = generate_standardized_assessment(metrics, training_load)
+
+        # Format data for LLM consumption
+        summary = {
+            "workout_classification": assessment["workout_classification"],
+            "intensity_rating": f"{assessment['intensity_rating']}/10",
+            "key_metrics": {
+                "duration": f"{metrics.duration_minutes:.0f} minutes",
+                "distance": f"{metrics.distance_km:.1f} km",
+                "avg_speed": f"{metrics.avg_speed_kmh:.1f} km/h",
+                "elevation_gain": f"{metrics.elevation_gain_m:.0f} m"
+            },
+            "performance_indicators": {
+                "efficiency_score": assessment["efficiency_score"],
+                "estimated_ftp": metrics.estimated_ftp,
+                "intensity_factor": metrics.intensity_factor
+            },
+            "recovery_guidance": assessment["recovery_recommendation"],
+            "training_load_context": {
+                "fitness_level": training_load.fitness if training_load else None,
+                "fatigue_level": training_load.fatigue if training_load else None,
+                "form": training_load.form if training_load else None
+            } if training_load else None,
+            "single_speed_analysis": {
+                "estimated_gear": f"{metrics.estimated_chainring}x{metrics.estimated_cog}" if metrics.estimated_gear_ratio else None,
+                "gear_ratio": metrics.estimated_gear_ratio
+            } if metrics.estimated_gear_ratio else None
+        }
+
+        return summary
+

 # Deterministic analysis helper
 def generate_standardized_assessment(metrics: WorkoutMetrics,
@@ -157,16 +157,16 @@ class CLI:
     async def _enhanced_analysis(self):
         """Enhanced analysis menu"""
         print("\nSelect analysis type:")
-        print("a) Performance trends")
-        print("b) Training load analysis")
-        print("c) Recovery assessment")
+        print("a) FTP estimation")
+        print("b) Gear analysis")
+        print("c) Training load analysis")

         choice = input("Enter choice (a-c): ").strip().lower()

         analysis_types = {
-            'a': 'performance trends',
-            'b': 'training load',
-            'c': 'recovery assessment'
+            'a': 'ftp_estimation',
+            'b': 'gear_analysis',
+            'c': 'training_load'
         }

         if choice not in analysis_types:
@@ -174,7 +174,7 @@ class CLI:
             return

         analysis_type = analysis_types[choice]
-        print(f"\nPerforming {analysis_type} analysis...")
+        print(f"\nPerforming {analysis_type.replace('_', ' ').title()} analysis...")

         # Load training rules
         rules = self._load_training_rules()
@@ -185,7 +185,7 @@ class CLI:
         )

         print(f"\n{'='*50}")
-        print(f"ENHANCED {analysis_type.upper()} ANALYSIS")
+        print(f"ENHANCED {analysis_type.replace('_', ' ').upper()} ANALYSIS")
         print("="*50)
         console = Console()
         console.print(Markdown(result))
@@ -1,12 +1,17 @@
 #!/usr/bin/env python3
 """
-Cache Manager - Handles caching of MCP responses and other data
+Cache Manager - Unified caching with TTL support, metrics tracking, and persistent history
 """

 import time
+import json
 import logging
+from pathlib import Path
+from datetime import datetime, timedelta
 from typing import Any, Dict, Optional, List
-from dataclasses import dataclass
+from dataclasses import dataclass, asdict
+
+from ..analysis.cycling_metrics import CyclingMetricsCalculator, WorkoutMetrics, TrainingLoad, PerformanceTrend, generate_standardized_assessment

 logger = logging.getLogger(__name__)
@@ -18,12 +23,17 @@ class CacheEntry:
     ttl: int  # Time to live in seconds

 class CacheManager:
-    """Manages caching of data with TTL support"""
+    """Unified cache manager for data with TTL, metrics tracking, and persistent history"""

-    def __init__(self, default_ttl: int = 300):
+    def __init__(self, default_ttl: int = 300, metrics_file: str = "metrics_history.json"):
         self.default_ttl = default_ttl
         self._cache: Dict[str, CacheEntry] = {}
+        self.metrics_calculator = None
+        self.metrics_file = Path(metrics_file)
+        self.performance_history = []
+        self.load_metrics_history()

+    # Base caching methods
     def set(self, key: str, data: Any, ttl: Optional[int] = None) -> None:
         """Set cache entry with TTL"""
         ttl = ttl or self.default_ttl
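A minimal usage sketch of the TTL cache, assuming get() falls back to None once an entry has expired (the commit does not show get()'s body, so the expiry behaviour is an assumption here):

# Minimal usage sketch (assumes get() returns None for an expired entry):
import time

cache = CacheManager(default_ttl=2)               # 2-second default TTL
cache.set("recent_activities", [{"activityId": 1}])
print(cache.get("recent_activities"))             # [{'activityId': 1}]
time.sleep(3)
print(cache.get("recent_activities"))             # None - entry has outlived its TTL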
@@ -109,11 +119,8 @@ class CacheManager:
     def get_multiple(self, keys: List[str]) -> Dict[str, Any]:
         """Get multiple cache entries at once"""
         return {key: self.get(key) for key in keys}

-# Specialized cache for common cycling data patterns
-class CyclingDataCache(CacheManager):
-    """Specialized cache for cycling data with helper methods"""
-
+    # Cycling-specific helper methods (from CyclingDataCache)
     def cache_user_profile(self, profile_data: Dict[str, Any]) -> None:
         """Cache user profile data"""
         self.set("user_profile", profile_data, ttl=3600)  # 1 hour TTL
@@ -144,4 +151,474 @@ class CyclingDataCache(CacheManager):

     def get_workout_analysis(self, workout_id: str) -> Optional[str]:
         """Get cached workout analysis"""
         return self.get(f"analysis_{workout_id}")

+    # Enhanced metrics tracking methods
+    def set_user_profile_metrics(self, ftp: Optional[float] = None, max_hr: Optional[int] = None):
+        """Set user profile for accurate calculations (renamed for clarity)"""
+        self.metrics_calculator = CyclingMetricsCalculator(user_ftp=ftp, user_max_hr=max_hr)
+        logger.info(f"Metrics calculator configured: FTP={ftp}, Max HR={max_hr}")
+
+    def cache_workout_with_metrics(self, activity_id: str, activity_data: Dict[str, Any]) -> WorkoutMetrics:
+        """Cache workout data and calculate comprehensive metrics with validation"""
+        if not self.metrics_calculator:
+            # Initialize with defaults if not set
+            self.metrics_calculator = CyclingMetricsCalculator()
+
+        # Validate and normalize input data
+        validated_data = self._validate_activity_data(activity_data)
+
+        # Calculate metrics with safe handling
+        metrics = self.metrics_calculator.calculate_workout_metrics(validated_data)
+
+        # Cache the raw data and calculated metrics
+        self.set(f"activity_raw_{activity_id}", activity_data, ttl=3600)
+        self.set(f"activity_metrics_{activity_id}", asdict(metrics), ttl=3600)
+
+        # Add to performance history
+        workout_record = {
+            "activity_id": activity_id,
+            "date": validated_data.get('startTimeGmt', datetime.now().isoformat()),
+            "metrics": asdict(metrics),
+            "data_quality": validated_data.get('data_quality', 'complete')
+        }
+
+        self.performance_history.append(workout_record)
+        self.save_metrics_history()
+
+        # Update performance trends
+        self._update_performance_trends()
+
+        logger.info(f"Cached workout {activity_id} with calculated metrics (quality: {workout_record['data_quality']})")
+        return metrics
+
+    def _validate_activity_data(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Validate and normalize activity data for safe metric calculation"""
+        if not isinstance(activity_data, dict):
+            logger.warning("Invalid activity data - creating minimal structure")
+            return {"data_quality": "invalid", "summaryDTO": {}}
+
+        summary_dto = activity_data.get('summaryDTO', {})
+        if not isinstance(summary_dto, dict):
+            summary_dto = {}
+
+        data_quality = "complete"
+        warnings = []
+
+        # Check critical fields
+        critical_fields = ['duration', 'distance']
+        for field in critical_fields:
+            if summary_dto.get(field) is None:
+                data_quality = "incomplete"
+                warnings.append(f"Missing {field}")
+                # Set reasonable defaults
+                if field == 'duration':
+                    summary_dto['duration'] = 0
+                elif field == 'distance':
+                    summary_dto['distance'] = 0
+
+        # Indoor activity adjustments
+        is_indoor = activity_data.get('is_indoor', False)
+        if is_indoor:
+            # For indoor, speed may be None - estimate from power if available
+            if summary_dto.get('averageSpeed') is None and summary_dto.get('averagePower') is not None:
+                # Rough estimate: speed = power / (weight * constant), but without weight, use placeholder
+                summary_dto['averageSpeed'] = None  # Keep None, let calculator handle
+                warnings.append("Indoor activity - speed estimated from power")
+
+            # Elevation not applicable for indoor
+            if 'elevationGain' in summary_dto:
+                summary_dto['elevationGain'] = 0
+                summary_dto['elevationLoss'] = 0
+                warnings.append("Indoor activity - elevation set to 0")
+
+        # Ensure all expected fields exist (from custom_garth_mcp normalization)
+        expected_fields = [
+            'averageSpeed', 'maxSpeed', 'averageHR', 'maxHR', 'averagePower',
+            'maxPower', 'normalizedPower', 'trainingStressScore', 'elevationGain',
+            'elevationLoss', 'distance', 'duration'
+        ]
+        for field in expected_fields:
+            if field not in summary_dto:
+                summary_dto[field] = None
+                if data_quality == "complete":
+                    data_quality = "incomplete"
+                warnings.append(f"Missing {field}")
+
+        activity_data['summaryDTO'] = summary_dto
+        activity_data['data_quality'] = data_quality
+        activity_data['validation_warnings'] = warnings
+
+        if warnings:
+            logger.debug(f"Activity validation warnings: {', '.join(warnings)}")
+
+        return activity_data
+
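An illustrative walk-through of the validator above on a sparse indoor activity (input values hypothetical):

# Illustrative input for _validate_activity_data (field values hypothetical):
raw = {"is_indoor": True, "summaryDTO": {"duration": 3600, "averagePower": 180}}
# After validation, per the logic above:
#   - 'distance' was missing (a critical field), so it is defaulted to 0 and flagged;
#   - averageSpeed is None with power present, so it is left None for the calculator;
#   - the remaining expected fields are filled in as None placeholders;
#   - data_quality ends up "incomplete" and validation_warnings lists each gap.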
+    def get_workout_metrics(self, activity_id: str) -> Optional[WorkoutMetrics]:
+        """Get calculated metrics for a workout"""
+        metrics_data = self.get(f"activity_metrics_{activity_id}")
+        if metrics_data:
+            return WorkoutMetrics(**metrics_data)
+        return None
+
+    def get_training_load(self, days: int = 42) -> Optional[TrainingLoad]:
+        """Calculate current training load metrics"""
+        if not self.metrics_calculator:
+            return None
+
+        # Get recent workout history
+        cutoff_date = datetime.now() - timedelta(days=days)
+        recent_workouts = []
+
+        for record in self.performance_history:
+            workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
+            if workout_date >= cutoff_date:
+                # Reconstruct activity data for training load calculation
+                activity_data = self.get(f"activity_raw_{record['activity_id']}")
+                if activity_data:
+                    recent_workouts.append(activity_data)
+
+        if not recent_workouts:
+            return None
+
+        training_load = self.metrics_calculator.calculate_training_load(recent_workouts)
+
+        # Cache training load
+        self.set("current_training_load", asdict(training_load), ttl=3600)
+
+        return training_load
+
+    def get_performance_trends(self, days: int = 30) -> List[PerformanceTrend]:
+        """Get performance trends for key metrics"""
+        trends = self.get(f"performance_trends_{days}d")
+        if trends:
+            return [PerformanceTrend(**trend) for trend in trends]
+
+        # Calculate if not cached
+        return self._calculate_performance_trends(days)
+
+    def _calculate_performance_trends(self, days: int) -> List[PerformanceTrend]:
+        """Calculate performance trends over specified period"""
+        if not self.performance_history:
+            return []
+
+        cutoff_date = datetime.now() - timedelta(days=days)
+        recent_metrics = []
+
+        for record in self.performance_history:
+            workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
+            if workout_date >= cutoff_date:
+                recent_metrics.append({
+                    'date': workout_date,
+                    'metrics': WorkoutMetrics(**record['metrics'])
+                })
+
+        if len(recent_metrics) < 2:
+            return []
+
+        # Sort by date
+        recent_metrics.sort(key=lambda x: x['date'])
+
+        trends = []
+
+        # Calculate trends for key metrics
+        metrics_to_track = [
+            ('avg_speed_kmh', 'Average Speed'),
+            ('avg_hr', 'Average Heart Rate'),
+            ('avg_power', 'Average Power'),
+            ('estimated_ftp', 'Estimated FTP'),
+            ('training_stress_score', 'Training Stress Score')
+        ]
+
+        for metric_attr, metric_name in metrics_to_track:
+            trend = self._calculate_single_metric_trend(recent_metrics, metric_attr, metric_name, days)
+            if trend:
+                trends.append(trend)
+
+        # Cache trends
+        self.set(f"performance_trends_{days}d", [asdict(trend) for trend in trends], ttl=1800)
+
+        return trends
+
+    def _calculate_single_metric_trend(self, recent_metrics: List[Dict],
+                                       metric_attr: str, metric_name: str,
+                                       days: int) -> Optional[PerformanceTrend]:
+        """Calculate trend for a single metric"""
+        # Extract values, filtering out None values
+        values_with_dates = []
+        for record in recent_metrics:
+            value = getattr(record['metrics'], metric_attr)
+            if value is not None:
+                values_with_dates.append((record['date'], value))
+
+        if len(values_with_dates) < 2:
+            return None
+
+        # Calculate current value (average of last 3 workouts)
+        recent_values = [v for _, v in values_with_dates[-3:]]
+        current_value = sum(recent_values) / len(recent_values)
+
+        # Calculate 7-day trend if we have enough data
+        week_ago = datetime.now() - timedelta(days=7)
+        week_values = [v for d, v in values_with_dates if d >= week_ago]
+
+        if len(week_values) >= 2:
+            week_old_avg = sum(week_values[:len(week_values)//2]) / (len(week_values)//2)
+            week_recent_avg = sum(week_values[len(week_values)//2:]) / (len(week_values) - len(week_values)//2)
+            trend_7day = ((week_recent_avg - week_old_avg) / week_old_avg * 100) if week_old_avg > 0 else 0
+        else:
+            trend_7day = 0
+
+        # Calculate 30-day trend
+        if len(values_with_dates) >= 4:
+            old_avg = sum(v for _, v in values_with_dates[:len(values_with_dates)//2]) / (len(values_with_dates)//2)
+            recent_avg = sum(v for _, v in values_with_dates[len(values_with_dates)//2:]) / (len(values_with_dates) - len(values_with_dates)//2)
+            trend_30day = ((recent_avg - old_avg) / old_avg * 100) if old_avg > 0 else 0
+        else:
+            trend_30day = 0
+
+        # Determine trend direction
+        primary_trend = trend_7day if abs(trend_7day) > abs(trend_30day) else trend_30day
+        if primary_trend > 2:
+            trend_direction = "improving"
+        elif primary_trend < -2:
+            trend_direction = "declining"
+        else:
+            trend_direction = "stable"
+
+        # Calculate confidence based on data points
+        confidence = min(len(values_with_dates) / 10, 1.0)  # Max confidence at 10+ data points
+
+        return PerformanceTrend(
+            metric_name=metric_name,
+            current_value=round(current_value, 2),
+            trend_7day=round(trend_7day, 1),
+            trend_30day=round(trend_30day, 1),
+            trend_direction=trend_direction,
+            confidence=round(confidence, 2)
+        )
+
+    def _update_performance_trends(self):
+        """Update cached performance trends after new workout"""
+        # Clear cached trends to force recalculation
+        keys_to_clear = [key for key in self._cache.keys() if key.startswith("performance_trends_")]
+        for key in keys_to_clear:
+            self.delete(key)
+
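The invalidation above is the usual cache-aside companion: writes clear the derived keys, and the next read recomputes them. A condensed illustration with a plain dict standing in for the real cache (same key scheme, hypothetical entries):

# Plain dict standing in for the real cache; same "performance_trends_{days}d" scheme:
cache = {"performance_trends_30d": ["..."], "performance_trends_14d": ["..."], "user_profile": {}}
for key in [k for k in cache if k.startswith("performance_trends_")]:
    del cache[key]
print(sorted(cache))  # ['user_profile'] - trend entries recompute on the next read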
+    def get_deterministic_analysis_data(self, activity_id: str) -> Dict[str, Any]:
+        """Get all deterministic data for analysis with validation"""
+        metrics = self.get_workout_metrics(activity_id)
+        training_load = self.get_training_load()
+        performance_trends = self.get_performance_trends()
+
+        if not metrics:
+            return {"error": "No metrics available for activity"}
+
+        # Generate standardized assessment with safe handling
+        try:
+            assessment = generate_standardized_assessment(metrics, training_load)
+        except Exception as e:
+            logger.warning(f"Could not generate standardized assessment: {e}")
+            assessment = {"error": "Assessment calculation failed", "workout_classification": "unknown"}
+
+        return {
+            "workout_metrics": asdict(metrics),
+            "training_load": asdict(training_load) if training_load else None,
+            "performance_trends": [asdict(trend) for trend in performance_trends if trend],
+            "standardized_assessment": assessment,
+            "analysis_timestamp": datetime.now().isoformat()
+        }
+
+    def get_ftp_estimates_history(self) -> List[Dict[str, Any]]:
+        """Get historical FTP estimates for tracking progress"""
+        ftp_history = []
+
+        for record in self.performance_history:
+            metrics = WorkoutMetrics(**record['metrics'])
+            if metrics.estimated_ftp:
+                ftp_history.append({
+                    "date": record['date'],
+                    "activity_id": record['activity_id'],
+                    "estimated_ftp": metrics.estimated_ftp,
+                    "workout_type": record['metrics'].get('workout_classification', 'unknown')
+                })
+
+        # Sort by date and return recent estimates
+        ftp_history.sort(key=lambda x: x['date'], reverse=True)
+        return ftp_history[:20]  # Last 20 estimates
+
+    def get_gear_usage_analysis(self) -> Dict[str, Any]:
+        """Get single speed gear usage analysis"""
+        gear_data = []
+
+        for record in self.performance_history:
+            metrics = WorkoutMetrics(**record['metrics'])
+            if metrics.estimated_gear_ratio:
+                gear_data.append({
+                    "date": record['date'],
+                    "estimated_ratio": metrics.estimated_gear_ratio,
+                    "chainring": metrics.estimated_chainring,
+                    "cog": metrics.estimated_cog,
+                    "avg_speed": metrics.avg_speed_kmh,
+                    "elevation_gain": metrics.elevation_gain_m,
+                    "terrain_type": self._classify_terrain(metrics)
+                })
+
+        if not gear_data:
+            return {"message": "No gear data available"}
+
+        # Analyze gear preferences by terrain
+        gear_preferences = {}
+        for data in gear_data:
+            terrain = data['terrain_type']
+            gear = f"{data['chainring']}x{data['cog']}"
+
+            if terrain not in gear_preferences:
+                gear_preferences[terrain] = {}
+            if gear not in gear_preferences[terrain]:
+                gear_preferences[terrain][gear] = 0
+            gear_preferences[terrain][gear] += 1
+
+        # Calculate most common gears
+        all_gears = {}
+        for data in gear_data:
+            gear = f"{data['chainring']}x{data['cog']}"
+            all_gears[gear] = all_gears.get(gear, 0) + 1
+
+        most_common_gear = max(all_gears.items(), key=lambda x: x[1])
+
+        return {
+            "total_workouts_analyzed": len(gear_data),
+            "most_common_gear": {
+                "gear": most_common_gear[0],
+                "usage_count": most_common_gear[1],
+                "usage_percentage": round(most_common_gear[1] / len(gear_data) * 100, 1)
+            },
+            "gear_by_terrain": gear_preferences,
+            "gear_recommendations": self._recommend_gears(gear_data)
+        }
+
+    def _classify_terrain(self, metrics: WorkoutMetrics) -> str:
+        """Classify terrain type from workout metrics"""
+        if metrics.distance_km == 0:
+            return "unknown"
+
+        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
+
+        if elevation_per_km > 15:
+            return "steep_climbing"
+        elif elevation_per_km > 8:
+            return "moderate_climbing"
+        elif elevation_per_km > 3:
+            return "rolling_hills"
+        else:
+            return "flat_terrain"
+
+    def _recommend_gears(self, gear_data: List[Dict]) -> Dict[str, str]:
+        """Recommend optimal gears for different conditions"""
+        if not gear_data:
+            return {}
+
+        # Group by terrain and find most efficient gears
+        terrain_efficiency = {}
+
+        for data in gear_data:
+            terrain = data['terrain_type']
+            gear = f"{data['chainring']}x{data['cog']}"
+            speed = data['avg_speed']
+
+            if terrain not in terrain_efficiency:
+                terrain_efficiency[terrain] = {}
+            if gear not in terrain_efficiency[terrain]:
+                terrain_efficiency[terrain][gear] = []
+
+            terrain_efficiency[terrain][gear].append(speed)
+
+        # Calculate average speeds for each gear/terrain combo
+        recommendations = {}
+        for terrain, gears in terrain_efficiency.items():
+            best_gear = None
+            best_avg_speed = 0
+
+            for gear, speeds in gears.items():
+                avg_speed = sum(speeds) / len(speeds)
+                if avg_speed > best_avg_speed:
+                    best_avg_speed = avg_speed
+                    best_gear = gear
+
+            if best_gear:
+                recommendations[terrain] = best_gear
+
+        return recommendations
+
+    def load_metrics_history(self):
+        """Load performance history from file"""
+        if self.metrics_file.exists():
+            try:
+                with open(self.metrics_file, 'r') as f:
+                    data = json.load(f)
+                    self.performance_history = data.get('performance_history', [])
+                logger.info(f"Loaded {len(self.performance_history)} workout records")
+            except Exception as e:
+                logger.error(f"Error loading metrics history: {e}")
+                self.performance_history = []
+        else:
+            self.performance_history = []
+
+    def save_metrics_history(self):
+        """Save performance history to file"""
+        try:
+            # Keep only last 200 workouts to prevent file from growing too large
+            self.performance_history = self.performance_history[-200:]
+
+            data = {
+                'performance_history': self.performance_history,
+                'last_updated': datetime.now().isoformat()
+            }
+
+            with open(self.metrics_file, 'w') as f:
+                json.dump(data, f, indent=2, default=str)
+
+            logger.debug(f"Saved {len(self.performance_history)} workout records")
+        except Exception as e:
+            logger.error(f"Error saving metrics history: {e}")
+
+    def get_workout_summary_for_llm(self, activity_id: str) -> Dict[str, Any]:
+        """Get structured workout summary optimized for LLM analysis"""
+        deterministic_data = self.get_deterministic_analysis_data(activity_id)
+
+        if "error" in deterministic_data:
+            return deterministic_data
+
+        # Format data for LLM consumption
+        metrics = deterministic_data["workout_metrics"]
+        assessment = deterministic_data["standardized_assessment"]
+        training_load = deterministic_data.get("training_load")
+
+        summary = {
+            "workout_classification": assessment["workout_classification"],
+            "intensity_rating": f"{assessment['intensity_rating']}/10",
+            "key_metrics": {
+                "duration": f"{metrics['duration_minutes']:.0f} minutes",
+                "distance": f"{metrics['distance_km']:.1f} km",
+                "avg_speed": f"{metrics['avg_speed_kmh']:.1f} km/h",
+                "elevation_gain": f"{metrics['elevation_gain_m']:.0f} m"
+            },
+            "performance_indicators": {
+                "efficiency_score": assessment["efficiency_score"],
+                "estimated_ftp": metrics.get("estimated_ftp"),
+                "intensity_factor": metrics.get("intensity_factor")
+            },
+            "recovery_guidance": assessment["recovery_recommendation"],
+            "training_load_context": {
+                "fitness_level": training_load["fitness"] if training_load else None,
+                "fatigue_level": training_load["fatigue"] if training_load else None,
+                "form": training_load["form"] if training_load else None
+            } if training_load else None,
+            "single_speed_analysis": {
+                "estimated_gear": f"{metrics.get('estimated_chainring', 'N/A')}x{metrics.get('estimated_cog', 'N/A')}",
+                "gear_ratio": metrics.get("estimated_gear_ratio")
+            } if metrics.get("estimated_gear_ratio") else None
+        }
+
+        return summary
core/core_app.py
@@ -1,87 +1,163 @@
 #!/usr/bin/env python3
 """
-Core Application - Clean skeleton with separated concerns
+Core Application - Unified orchestrator with enhanced metrics and analysis
 """

 import asyncio
 import logging
 from pathlib import Path
 from typing import Dict, Any, Optional
+from datetime import datetime, timedelta

 from ..config import Config, load_config
 from ..llm.llm_client import LLMClient
 from ..mcp.mcp_client import MCPClient
 from .cache_manager import CacheManager
 from .template_engine import TemplateEngine
+from ..analysis.cycling_metrics import CyclingMetricsCalculator, generate_standardized_assessment, WorkoutMetrics, TrainingLoad, PerformanceTrend

 logger = logging.getLogger(__name__)

 class CyclingAnalyzerApp:
-    """Main application class - orchestrates all components"""
+    """Unified main application class - orchestrates all components with enhanced metrics support"""

     def __init__(self, config: Config, test_mode: bool = False):
-        self.config = config
-        self.test_mode = test_mode
-        self.llm_client = LLMClient(config)
-        self.mcp_client = MCPClient(config)
-        self.cache_manager = CacheManager()
-        self.template_engine = TemplateEngine(config.templates_dir)
-
-        logger.info("DEBUG: Cache contents after init:")
-        for key in ["user_profile", "last_cycling_details"]:
-            data = self.cache_manager.get(key, {})
-            logger.info(f"  {key}: keys={list(data.keys()) if data else 'EMPTY'}, length={len(data) if data else 0}")
+        self.config = config
+        self.test_mode = test_mode
+        self.llm_client = LLMClient(config)
+        self.mcp_client = MCPClient(config)
+
+        # Use unified cache manager with metrics tracking
+        self.cache_manager = CacheManager(
+            default_ttl=config.cache_ttl if hasattr(config, 'cache_ttl') else 300,
+            metrics_file="workout_metrics.json"
+        )
+
+        self.template_engine = TemplateEngine(config.templates_dir)
+
+        # User settings for accurate calculations
+        self.user_ftp = None
+        self.user_max_hr = None
+
+        logger.info("DEBUG: Cache contents after init:")
+        for key in ["user_profile", "last_cycling_details"]:
+            data = self.cache_manager.get(key, {})
+            logger.info(f"  {key}: keys={list(data.keys()) if data else 'EMPTY'}, length={len(data) if data else 0}")
+
     async def initialize(self):
-        """Initialize all components"""
+        """Initialize all components with metrics support"""
         logger.info("Initializing application components...")

         await self.llm_client.initialize()
         await self.mcp_client.initialize()
-        await self._preload_cache()
+        await self._setup_user_metrics()
+        await self._preload_cache_with_metrics()

         logger.info("Application initialization complete")

     async def cleanup(self):
         """Cleanup all components"""
+        # Save metrics before cleanup
+        self.cache_manager.save_metrics_history()
+
         await self.mcp_client.cleanup()
         await self.llm_client.cleanup()
-    async def _preload_cache(self):
-        """Pre-load and cache common MCP responses"""
-        logger.info("Pre-loading cache...")
-
-        # Cache user profile
-        if await self.mcp_client.has_tool("user_profile"):
-            profile = await self.mcp_client.call_tool("user_profile", {})
-            self.cache_manager.set("user_profile", profile)
-
-        # Cache recent activities
-        if await self.mcp_client.has_tool("get_activities"):
-            activities = await self.mcp_client.call_tool("get_activities", {"limit": 10})
-            self.cache_manager.set("recent_activities", activities)
-
-            # Find and cache last cycling activity details
-            cycling_activity = self._find_last_cycling_activity(activities)
-            if cycling_activity and await self.mcp_client.has_tool("get_activity_details"):
-                details = await self.mcp_client.call_tool(
-                    "get_activity_details",
-                    {"activity_id": cycling_activity["activityId"]}
-                )
-                self.cache_manager.set("last_cycling_details", details)
+    async def _setup_user_metrics(self):
+        """Setup user profile for accurate metric calculations"""
+        try:
+            # Try to get user profile from MCP
+            if await self.mcp_client.has_tool("user_profile"):
+                profile = await self.mcp_client.call_tool("user_profile", {})
+
+                # Extract FTP and max HR if available
+                self.user_ftp = profile.get("ftp") or profile.get("functionalThresholdPower")
+                self.user_max_hr = profile.get("maxHR") or profile.get("maxHeartRate")
+
+            # Also try user settings
+            if await self.mcp_client.has_tool("user_settings"):
+                settings = await self.mcp_client.call_tool("user_settings", {})
+                if not self.user_ftp:
+                    self.user_ftp = settings.get("ftp")
+                if not self.user_max_hr:
+                    self.user_max_hr = settings.get("maxHeartRate")
+
+            logger.info(f"User metrics configured: FTP={self.user_ftp}W, Max HR={self.user_max_hr}bpm")
+
+            # Set up cache manager with user profile (set_user_profile_metrics is the renamed helper)
+            self.cache_manager.set_user_profile_metrics(ftp=self.user_ftp, max_hr=self.user_max_hr)
+
+        except Exception as e:
+            logger.warning(f"Could not setup user metrics: {e}")
+            # Initialize with defaults
+            self.cache_manager.set_user_profile_metrics()
+
+    async def _preload_cache_with_metrics(self):
+        """Pre-load cache with calculated metrics (enhanced version of _preload_cache)"""
+        logger.info("Pre-loading cache with metrics calculation...")
+
+        try:
+            # Cache user profile (from base)
+            if await self.mcp_client.has_tool("user_profile"):
+                profile = await self.mcp_client.call_tool("user_profile", {})
+                self.cache_manager.set("user_profile", profile)
+
+            # Cache recent activities
+            if await self.mcp_client.has_tool("get_activities"):
+                activities = await self.mcp_client.call_tool("get_activities", {"limit": 15})
+                self.cache_manager.set("recent_activities", activities)
+
+                # Process cycling activities with metrics
+                cycling_count = 0
+                for activity in activities:
+                    activity_type = activity.get("activityType", {})
+                    if isinstance(activity_type, dict):
+                        type_key = activity_type.get("typeKey", "").lower()
+                    else:
+                        type_key = str(activity_type).lower()
+
+                    if "cycling" in type_key or "bike" in type_key:
+                        activity_id = activity.get("activityId")
+                        if activity_id and cycling_count < 5:  # Limit to 5 recent cycling activities
+                            try:
+                                # Get detailed activity data
+                                if await self.mcp_client.has_tool("get_activity_details"):
+                                    details = await self.mcp_client.call_tool(
+                                        "get_activity_details",
+                                        {"activity_id": str(activity_id)}
+                                    )
+
+                                    # Calculate and cache metrics
+                                    metrics = self.cache_manager.cache_workout_with_metrics(
+                                        str(activity_id), details
+                                    )
+
+                                    logger.info(f"Processed activity {activity_id}: {metrics.workout_classification}")
+                                    cycling_count += 1
+
+                            except Exception as e:
+                                logger.warning(f"Could not process activity {activity_id}: {e}")
+
+                logger.info(f"Processed {cycling_count} cycling activities with metrics")
+
+        except Exception as e:
+            logger.error(f"Error preloading cache with metrics: {e}")
+
     def _find_last_cycling_activity(self, activities: list) -> Optional[Dict[str, Any]]:
-        """Find the most recent cycling activity from activities list"""
+        """Find the most recent cycling activity from activities list (from base)"""
         cycling_activities = [
             act for act in activities
             if "cycling" in act.get("activityType", {}).get("typeKey", "").lower()
         ]
         return max(cycling_activities, key=lambda x: x.get("startTimeGmt", 0)) if cycling_activities else None

-    # Core functionality methods
+    # Core functionality methods (enhanced)
     async def analyze_workout(self, analysis_type: str = "last_workout", **kwargs) -> str:
-        """Analyze workout using LLM with cached data"""
+        """Analyze workout with deterministic metrics (enhanced version)"""
+        if analysis_type == "deterministic":
+            return await self.analyze_workout_deterministic(**kwargs)
+        # Fallback to base logic if needed
         template_name = f"workflows/{analysis_type}.txt"

         # Prepare enhanced context with data quality assessment
@@ -104,30 +180,30 @@ class CyclingAnalyzerApp:
         return await self.llm_client.generate(prompt)

     def _prepare_analysis_context(self, **kwargs) -> Dict[str, Any]:
-        """Prepare analysis context with data quality assessment"""
+        """Prepare analysis context with data quality assessment (from base)"""
         user_info = self.cache_manager.get("user_profile", {})
         activity_summary = self.cache_manager.get("last_cycling_details", {})

         logger.info(f"DEBUG: user_info keys: {list(user_info.keys()) if user_info else 'EMPTY'}, length: {len(user_info) if user_info else 0}")
         logger.info(f"DEBUG: activity_summary keys: {list(activity_summary.keys()) if activity_summary else 'EMPTY'}, length: {len(activity_summary) if activity_summary else 0}")

         # Assess data quality
         data_quality = self._assess_data_quality(activity_summary)
         logger.info(f"DEBUG: data_quality: {data_quality}")

         context = {
             "user_info": user_info,
             "activity_summary": activity_summary,
             "data_quality": data_quality,
             "missing_metrics": data_quality.get("missing", []),
             **kwargs
         }

         logger.debug(f"Prepared context with data quality: {data_quality.get('overall', 'N/A')}")
         return context

     def _assess_data_quality(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Assess quality and completeness of activity data"""
+        """Assess quality and completeness of activity data (from base)"""
         summary_dto = activity_data.get('summaryDTO', {})
         is_indoor = activity_data.get('is_indoor', False)
@@ -163,54 +239,393 @@ class CyclingAnalyzerApp:
         }

     async def suggest_next_workout(self, **kwargs) -> str:
-        """Generate workout suggestion using MCP tools and LLM"""
-        # Use MCP-enabled agent for dynamic tool usage
-        template_name = "workflows/suggest_next_workout.txt"
-
-        # Prepare enhanced context
-        context = self._prepare_analysis_context(**kwargs)
-        context["training_rules"] = kwargs.get("training_rules", "")
-
-        prompt = self.template_engine.render(template_name, **context)
-
-        if self.test_mode:
-            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
-            print("\n" + "="*60)
-            print("RENDERED PROMPT FOR LLM WITH TOOLS:")
-            print("="*60)
-            print(prompt)
-            print("="*60 + "\n")
-            return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"
-
-        # Use MCP-enabled LLM client for this
-        return await self.llm_client.generate_with_tools(prompt, self.mcp_client)
+        """Generate data-driven workout suggestion (enhanced version)"""
+        return await self.suggest_next_workout_data_driven(**kwargs)

     async def enhanced_analysis(self, analysis_type: str, **kwargs) -> str:
-        """Perform enhanced analysis with full MCP tool access"""
-        template_name = "workflows/enhanced_analysis.txt"
+        """Perform enhanced analysis based on type (enhanced version)"""
+        if analysis_type == "ftp_estimation":
+            return await self.estimate_ftp_without_power(**kwargs)
+        elif analysis_type == "gear_analysis":
+            return await self.analyze_single_speed_gears(**kwargs)
+        elif analysis_type == "training_load":
+            return await self.get_training_load_analysis(**kwargs)
+        else:
+            # Fallback to deterministic analysis
+            return await self.analyze_workout_deterministic(**kwargs)
+    # Enhanced analysis methods from enhanced_core_app
+    async def analyze_workout_deterministic(self, activity_id: str = None, **kwargs) -> str:
+        """Analyze workout using deterministic metrics"""
+        if not activity_id:
+            activity_id = self._get_last_cycling_activity_id()
+
-        # Prepare enhanced context
-        context = self._prepare_analysis_context(**kwargs)
-        context.update({
-            "analysis_type": analysis_type,
-            "cached_data": self.cache_manager.get_all(),
-        })
+        if not activity_id:
+            return "No cycling activity found for analysis"
+
+        # Get deterministic analysis data
+        analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)
+
+        if "error" in analysis_data:
+            return f"Error: {analysis_data['error']}"
+
+        # Get performance trends
+        performance_trends = self.cache_manager.get_performance_trends(30)
+
+        # Use enhanced template
+        template_name = "workflows/analyze_workout_with_metrics.txt"
+
+        context = {
+            "workout_summary": analysis_data,
+            "performance_trends": [
+                {
+                    "metric_name": trend.metric_name,
+                    "current_value": trend.current_value,
+                    "trend_direction": trend.trend_direction,
+                    "trend_7day": trend.trend_7day
+                }
+                for trend in performance_trends
+            ],
+            "training_rules": kwargs.get("training_rules", ""),
+            **kwargs
+        }
+
+        prompt = self.template_engine.render(template_name, **context)
+
+        if self.test_mode:
+            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
+            print("\n" + "="*60)
+            print("RENDERED PROMPT FOR ENHANCED ANALYSIS:")
+            print("="*60)
+            print(prompt)
+            print("="*60 + "\n")
+            return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"
+
-        return await self.llm_client.generate_with_tools(prompt, self.mcp_client)
+        return await self.llm_client.generate(prompt)

     # Utility methods
+    async def estimate_ftp_without_power(self, activity_id: str = None, **kwargs) -> str:
+        """Estimate FTP for workouts without power meter"""
+        if not activity_id:
+            activity_id = self._get_last_cycling_activity_id()
+
+        if not activity_id:
+            return "No cycling activity found for FTP estimation"
+
+        # Get workout metrics
+        metrics = self.cache_manager.get_workout_metrics(activity_id)
+        if not metrics:
+            return "No metrics available for FTP estimation"
+
+        # Get FTP estimation history
+        ftp_history = self.cache_manager.get_ftp_estimates_history()
+
+        # Calculate additional metrics for FTP estimation
+        hr_intensity = 0
+        if metrics.avg_hr and self.user_max_hr:
+            hr_intensity = metrics.avg_hr / self.user_max_hr
+        elif metrics.avg_hr:
+            # Estimate max HR if not provided
+            estimated_max_hr = 220 - 30  # Assume 30 years old, should be configurable
+            hr_intensity = metrics.avg_hr / estimated_max_hr
+
+        # Estimate power from speed
+        avg_speed_ms = metrics.avg_speed_kmh / 3.6
+        estimated_power_from_speed = (avg_speed_ms ** 2.5) * 3.5
+
+        # Adjust for elevation
+        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
+        elevation_factor = 1 + (elevation_per_km / 1000) * 0.1
+        elevation_adjusted_power = estimated_power_from_speed * elevation_factor
+
+        template_name = "workflows/estimate_ftp_no_power.txt"
+
+        context = {
+            "duration_minutes": metrics.duration_minutes,
+            "distance_km": metrics.distance_km,
+            "avg_speed_kmh": metrics.avg_speed_kmh,
+            "elevation_gain_m": metrics.elevation_gain_m,
+            "avg_hr": metrics.avg_hr,
+            "max_hr": metrics.max_hr,
+            "hr_intensity": hr_intensity,
+            "estimated_power_from_speed": round(estimated_power_from_speed, 0),
+            "elevation_adjusted_power": round(elevation_adjusted_power, 0),
+            "estimated_ftp": metrics.estimated_ftp,
+            "elevation_per_km": round(elevation_per_km, 1),
+            "elevation_factor": elevation_factor,
+            "ftp_history": ftp_history[:10],  # Last 10 estimates
+            **kwargs
+        }
+
+        prompt = self.template_engine.render(template_name, **context)
+        return await self.llm_client.generate(prompt)
+
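A worked instance of the speed-to-power heuristic above; note it is a rough placeholder rather than a physics-based power model, so the absolute numbers should be treated with caution:

# Worked example of the heuristic above (illustrative only):
avg_speed_kmh = 27.0
avg_speed_ms = avg_speed_kmh / 3.6                 # 7.5 m/s
estimated_power = (avg_speed_ms ** 2.5) * 3.5      # ~539 W
elevation_factor = 1 + (10 / 1000) * 0.1           # 10 m/km of climbing -> 1.001
print(round(estimated_power * elevation_factor))   # ~540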
+    async def analyze_single_speed_gears(self, activity_id: str = None, **kwargs) -> str:
+        """Analyze single speed gear selection and optimization"""
+        if not activity_id:
+            activity_id = self._get_last_cycling_activity_id()
+
+        if not activity_id:
+            return "No cycling activity found for gear analysis"
+
+        # Get workout metrics
+        metrics = self.cache_manager.get_workout_metrics(activity_id)
+        if not metrics:
+            return "No metrics available for gear analysis"
+
+        # Get gear usage analysis
+        gear_analysis = self.cache_manager.get_gear_usage_analysis()
+
+        # Calculate additional gear metrics
+        chainrings = [46, 38]
+        cogs = [14, 15, 16, 17, 18, 19, 20]
+        wheel_circumference = 2.096  # meters
+
+        available_gears = []
+        for chainring in chainrings:
+            for cog in cogs:
+                ratio = chainring / cog
+                gear_inches = ratio * 27  # 700c wheel ≈ 27" diameter
+                development = ratio * wheel_circumference
+                available_gears.append({
+                    "chainring": chainring,
+                    "cog": cog,
+                    "ratio": round(ratio, 2),
+                    "gear_inches": round(gear_inches, 1),
+                    "development": round(development, 1)
+                })
+
+        # Estimate cadence
+        if metrics.avg_speed_kmh > 0 and metrics.estimated_gear_ratio:
+            speed_ms = metrics.avg_speed_kmh / 3.6
+            estimated_cadence = (speed_ms / (metrics.estimated_gear_ratio * wheel_circumference)) * 60
+        else:
+            estimated_cadence = 85  # Default assumption
+
+        # Classify terrain
+        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
+        if elevation_per_km > 15:
+            terrain_type = "steep_climbing"
+        elif elevation_per_km > 8:
+            terrain_type = "moderate_climbing"
+        elif elevation_per_km > 3:
+            terrain_type = "rolling_hills"
+        else:
+            terrain_type = "flat_terrain"
+
+        template_name = "workflows/single_speed_gear_analysis.txt"
+
+        context = {
+            "avg_speed_kmh": metrics.avg_speed_kmh,
+            "duration_minutes": metrics.duration_minutes,
+            "elevation_gain_m": metrics.elevation_gain_m,
+            "terrain_type": terrain_type,
+            "estimated_chainring": metrics.estimated_chainring,
+            "estimated_cog": metrics.estimated_cog,
+            "estimated_gear_ratio": metrics.estimated_gear_ratio,
+            "gear_inches": round((metrics.estimated_gear_ratio or 2.5) * 27, 1),
+            "development_meters": round((metrics.estimated_gear_ratio or 2.5) * wheel_circumference, 1),
+            "available_gears": available_gears,
+            "gear_usage_by_terrain": gear_analysis.get("gear_by_terrain", {}),
+            "best_flat_gear": "46x16",  # Example, should be calculated
+            "best_climbing_gear": "38x20",  # Example, should be calculated
+            "most_versatile_gear": gear_analysis.get("most_common_gear", {}).get("gear", "46x17"),
+            "efficiency_rating": 7,  # Should be calculated based on speed/effort
+            "estimated_cadence": round(estimated_cadence, 0),
+            "elevation_per_km": round(elevation_per_km, 1),
+            **kwargs
+        }
+
+        prompt = self.template_engine.render(template_name, **context)
+        return await self.llm_client.generate(prompt)
+
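A worked instance of the cadence estimate above, for a 46x16 gear at 27 km/h (same wheel-circumference assumption as the method):

# Worked example of the cadence estimate above:
ratio = 46 / 16                              # 2.875
wheel_circumference = 2.096                  # meters per wheel revolution (assumed 700c)
development = ratio * wheel_circumference    # ~6.03 m travelled per crank revolution
speed_ms = 27.0 / 3.6                        # 7.5 m/s
cadence = speed_ms / development * 60        # ~74.7 rpm
print(round(cadence))                        # 75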
async def get_training_load_analysis(self, **kwargs) -> str:
|
||||
"""Analyze training load and recovery status"""
|
||||
training_load = self.cache_manager.get_training_load()
|
||||
if not training_load:
|
||||
return "Insufficient workout history for training load analysis"
|
||||
|
||||
# Get performance trends
|
||||
performance_trends = self.cache_manager.get_performance_trends(42) # 6 weeks
|
||||
|
||||
# Classify training load status
|
||||
if training_load.training_stress_balance > 5:
|
||||
form_status = "fresh_and_ready"
|
||||
elif training_load.training_stress_balance > -5:
|
||||
form_status = "maintaining_fitness"
|
||||
elif training_load.training_stress_balance > -15:
|
||||
form_status = "building_fitness"
|
||||
else:
|
||||
form_status = "high_fatigue_risk"

        template_name = "workflows/training_load_analysis.txt"

        context = {
            "training_load": {
                "fitness": training_load.fitness,
                "fatigue": training_load.fatigue,
                "form": training_load.form,
                "acute_load": training_load.acute_training_load,
                "chronic_load": training_load.chronic_training_load
            },
            "form_status": form_status,
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "trend_direction": trend.trend_direction,
                    "trend_7day": trend.trend_7day,
                    "trend_30day": trend.trend_30day,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def suggest_next_workout_data_driven(self, **kwargs) -> str:
        """Generate data-driven workout suggestions"""
        # Get training load status
        training_load = self.cache_manager.get_training_load()
        performance_trends = self.cache_manager.get_performance_trends(14)  # 2 weeks

        # Get recent workout pattern
        recent_activities = self.cache_manager.get("recent_activities", [])
        recent_cycling = [act for act in recent_activities
                          if "cycling" in act.get("activityType", {}).get("typeKey", "").lower()]

        # Analyze recent workout pattern
        recent_intensities = []
        recent_durations = []
        recent_types = []

        for activity in recent_cycling[:7]:  # Last 7 cycling activities
            activity_id = str(activity.get("activityId"))
            metrics = self.cache_manager.get_workout_metrics(activity_id)
            if metrics:
                recent_intensities.append(self._rate_intensity(metrics))
                recent_durations.append(metrics.duration_minutes)
                recent_types.append(self._classify_workout(metrics))

        # Calculate training pattern analysis
        avg_intensity = sum(recent_intensities) / len(recent_intensities) if recent_intensities else 5
        avg_duration = sum(recent_durations) / len(recent_durations) if recent_durations else 60

        # Determine workout recommendation based on data
        if training_load and training_load.form < -10:
            recommendation_type = "recovery_focus"
        elif avg_intensity > 7:
            recommendation_type = "endurance_focus"
        elif avg_intensity < 4:
            recommendation_type = "intensity_focus"
        else:
            recommendation_type = "balanced_progression"
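        # The form check deliberately comes first: a rider at form -12 with a
        # recent average intensity of 6 still gets "recovery_focus", since
        # accumulated fatigue outweighs an otherwise balanced intensity mix.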

        template_name = "workflows/suggest_next_workout_data_driven.txt"

        context = {
            "training_load": training_load,
            "performance_trends": performance_trends,
            "recent_workout_analysis": {
                "avg_intensity": round(avg_intensity, 1),
                "avg_duration": round(avg_duration, 0),
                "workout_types": recent_types,
                "pattern_analysis": self._analyze_workout_pattern(recent_types)
            },
            "recommendation_type": recommendation_type,
            "user_ftp": self.user_ftp,
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    # Utility methods from enhanced
    def _get_last_cycling_activity_id(self) -> Optional[str]:
        """Get the ID of the most recent cycling activity"""
        activities = self.cache_manager.get("recent_activities", [])
        for activity in activities:
            activity_type = activity.get("activityType", {})
            if isinstance(activity_type, dict):
                type_key = activity_type.get("typeKey", "").lower()
            else:
                type_key = str(activity_type).lower()

            if "cycling" in type_key or "bike" in type_key:
                return str(activity.get("activityId"))
        return None

    def _rate_intensity(self, metrics) -> int:
        """Rate workout intensity 1-10 based on metrics"""
        factors = []

        # Speed factor
        if metrics.avg_speed_kmh > 40:
            factors.append(9)
        elif metrics.avg_speed_kmh > 35:
            factors.append(7)
        elif metrics.avg_speed_kmh > 25:
            factors.append(5)
        else:
            factors.append(3)

        # Duration factor
        duration_intensity = min(metrics.duration_minutes / 60 * 2, 6)
        factors.append(duration_intensity)

        # Elevation factor
        if metrics.distance_km > 0:
            elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
            if elevation_per_km > 15:
                factors.append(8)
            elif elevation_per_km > 10:
                factors.append(6)
            elif elevation_per_km > 5:
                factors.append(4)
            else:
                factors.append(2)

        return min(int(sum(factors) / len(factors)), 10)
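        # Worked example: a 90-minute ride at 32 km/h with 12 m/km of climbing
        # collects factors [5, 3.0, 6]; the mean is ~4.7, truncated to an
        # intensity rating of 4 out of 10.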

    def _classify_workout(self, metrics) -> str:
        """Classify workout type"""
        duration = metrics.duration_minutes
        avg_speed = metrics.avg_speed_kmh
        elevation_gain = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0

        if duration < 30:
            return "short_intensity"
        elif duration > 180:
            return "long_endurance"
        elif elevation_gain > 10:
            return "climbing_focused"
        elif avg_speed > 35:
            return "high_speed"
        elif avg_speed < 20:
            return "recovery_easy"
        else:
            return "moderate_endurance"

    def _analyze_workout_pattern(self, recent_types: list) -> str:
        """Analyze recent workout pattern"""
        if not recent_types:
            return "insufficient_data"

        type_counts = {}
        for workout_type in recent_types:
            type_counts[workout_type] = type_counts.get(workout_type, 0) + 1

        total_workouts = len(recent_types)
        intensity_workouts = sum(1 for t in recent_types if "intensity" in t or "speed" in t)
        endurance_workouts = sum(1 for t in recent_types if "endurance" in t)
        recovery_workouts = sum(1 for t in recent_types if "recovery" in t)

        intensity_ratio = intensity_workouts / total_workouts
        endurance_ratio = endurance_workouts / total_workouts

        if intensity_ratio > 0.5:
            return "high_intensity_bias"
        elif recovery_workouts > total_workouts * 0.4:
            return "recovery_heavy"
        elif endurance_ratio > 0.6:
            return "endurance_focused"
        else:
            return "balanced_training"

    # Utility methods (from base and enhanced)
    async def list_available_tools(self) -> list:
        """Get list of available MCP tools"""
        return await self.mcp_client.list_tools()
@@ -222,6 +637,46 @@ class CyclingAnalyzerApp:
    def get_cached_data(self, key: str = None) -> Any:
        """Get cached data by key, or all if no key provided"""
        return self.cache_manager.get(key) if key else self.cache_manager.get_all()

    # New deterministic data access methods from enhanced
    def get_performance_summary(self) -> Dict[str, Any]:
        """Get comprehensive performance summary"""
        performance_trends = self.cache_manager.get_performance_trends(30)
        training_load = self.cache_manager.get_training_load()
        ftp_history = self.cache_manager.get_ftp_estimates_history()
        gear_analysis = self.cache_manager.get_gear_usage_analysis()

        return {
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "current": trend.current_value,
                    "trend_7d": f"{trend.trend_7day:+.1f}%",
                    "trend_30d": f"{trend.trend_30day:+.1f}%",
                    "direction": trend.trend_direction,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_load": {
                "fitness": training_load.fitness if training_load else None,
                "fatigue": training_load.fatigue if training_load else None,
                "form": training_load.form if training_load else None
            },
            "ftp_estimates": {
                "latest": ftp_history[0]["estimated_ftp"] if ftp_history else None,
                "trend": "improving" if len(ftp_history) > 1 and ftp_history[0]["estimated_ftp"] > ftp_history[1]["estimated_ftp"] else "stable",
                "history_count": len(ftp_history)
            },
            "gear_usage": {
                "most_common": gear_analysis.get("most_common_gear", {}),
                "total_analyzed": gear_analysis.get("total_workouts_analyzed", 0)
            }
        }

    def get_metrics_for_activity(self, activity_id: str) -> Optional[Dict[str, Any]]:
        """Get all calculated metrics for a specific activity"""
        return self.cache_manager.get_deterministic_analysis_data(activity_id)

async def main():
    """Main entry point"""

@@ -1,595 +0,0 @@
#!/usr/bin/env python3
"""
Enhanced Cache Manager with Metrics Tracking
"""

import json
import logging
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict

from cache_manager import CacheManager
from cycling_metrics import CyclingMetricsCalculator, WorkoutMetrics, TrainingLoad

logger = logging.getLogger(__name__)

@dataclass
class PerformanceTrend:
    """Track performance trends over time"""
    metric_name: str
    current_value: float
    trend_7day: float  # % change over 7 days
    trend_30day: float  # % change over 30 days
    trend_direction: str  # "improving", "stable", "declining"
    confidence: float  # 0-1, based on data points available

class MetricsTrackingCache(CacheManager):
    """Enhanced cache that calculates and tracks cycling metrics"""

    def __init__(self, default_ttl: int = 300, metrics_file: str = "metrics_history.json"):
        super().__init__(default_ttl)
        self.metrics_calculator = None
        self.metrics_file = Path(metrics_file)
        self.performance_history = []
        self.load_metrics_history()

    def set_user_profile(self, ftp: Optional[float] = None, max_hr: Optional[int] = None):
        """Set user profile for accurate calculations"""
        self.metrics_calculator = CyclingMetricsCalculator(user_ftp=ftp, user_max_hr=max_hr)
        logger.info(f"Metrics calculator configured: FTP={ftp}, Max HR={max_hr}")

    def cache_workout_with_metrics(self, activity_id: str, activity_data: Dict[str, Any]) -> WorkoutMetrics:
        """Cache workout data and calculate comprehensive metrics with validation"""
        if not self.metrics_calculator:
            # Initialize with defaults if not set
            self.metrics_calculator = CyclingMetricsCalculator()

        # Validate and normalize input data
        validated_data = self._validate_activity_data(activity_data)

        # Calculate metrics with safe handling
        metrics = self.metrics_calculator.calculate_workout_metrics(validated_data)

        # Cache the raw data and calculated metrics
        self.set(f"activity_raw_{activity_id}", activity_data, ttl=3600)
        self.set(f"activity_metrics_{activity_id}", asdict(metrics), ttl=3600)

        # Add to performance history
        workout_record = {
            "activity_id": activity_id,
            "date": validated_data.get('startTimeGmt', datetime.now().isoformat()),
            "metrics": asdict(metrics),
            "data_quality": validated_data.get('data_quality', 'complete')
        }

        self.performance_history.append(workout_record)
        self.save_metrics_history()

        # Update performance trends
        self._update_performance_trends()

        logger.info(f"Cached workout {activity_id} with calculated metrics (quality: {workout_record['data_quality']})")
        return metrics

    def _validate_activity_data(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate and normalize activity data for safe metric calculation"""
        if not isinstance(activity_data, dict):
            logger.warning("Invalid activity data - creating minimal structure")
            return {"data_quality": "invalid", "summaryDTO": {}}

        summary_dto = activity_data.get('summaryDTO', {})
        if not isinstance(summary_dto, dict):
            summary_dto = {}

        data_quality = "complete"
        warnings = []

        # Check critical fields
        critical_fields = ['duration', 'distance']
        for field in critical_fields:
            if summary_dto.get(field) is None:
                data_quality = "incomplete"
                warnings.append(f"Missing {field}")
                # Set reasonable defaults
                if field == 'duration':
                    summary_dto['duration'] = 0
                elif field == 'distance':
                    summary_dto['distance'] = 0

        # Indoor activity adjustments
        is_indoor = activity_data.get('is_indoor', False)
        if is_indoor:
            # For indoor, speed may be None - estimate from power if available
            if summary_dto.get('averageSpeed') is None and summary_dto.get('averagePower') is not None:
                # Rough estimate: speed = power / (weight * constant), but without weight, use placeholder
                summary_dto['averageSpeed'] = None  # Keep None, let calculator handle
                warnings.append("Indoor activity - speed estimated from power")

            # Elevation not applicable for indoor
            if 'elevationGain' in summary_dto:
                summary_dto['elevationGain'] = 0
                summary_dto['elevationLoss'] = 0
                warnings.append("Indoor activity - elevation set to 0")

        # Ensure all expected fields exist (from custom_garth_mcp normalization)
        expected_fields = [
            'averageSpeed', 'maxSpeed', 'averageHR', 'maxHR', 'averagePower',
            'maxPower', 'normalizedPower', 'trainingStressScore', 'elevationGain',
            'elevationLoss', 'distance', 'duration'
        ]
        for field in expected_fields:
            if field not in summary_dto:
                summary_dto[field] = None
                if data_quality == "complete":
                    data_quality = "incomplete"
                warnings.append(f"Missing {field}")

        activity_data['summaryDTO'] = summary_dto
        activity_data['data_quality'] = data_quality
        activity_data['validation_warnings'] = warnings

        if warnings:
            logger.debug(f"Activity validation warnings: {', '.join(warnings)}")

        return activity_data

    def get_workout_metrics(self, activity_id: str) -> Optional[WorkoutMetrics]:
        """Get calculated metrics for a workout"""
        metrics_data = self.get(f"activity_metrics_{activity_id}")
        if metrics_data:
            return WorkoutMetrics(**metrics_data)
        return None

    def get_training_load(self, days: int = 42) -> Optional[TrainingLoad]:
        """Calculate current training load metrics"""
        if not self.metrics_calculator:
            return None

        # Get recent workout history
        cutoff_date = datetime.now() - timedelta(days=days)
        recent_workouts = []

        for record in self.performance_history:
            workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
            if workout_date >= cutoff_date:
                # Reconstruct activity data for training load calculation
                activity_data = self.get(f"activity_raw_{record['activity_id']}")
                if activity_data:
                    recent_workouts.append(activity_data)

        if not recent_workouts:
            return None

        training_load = self.metrics_calculator.calculate_training_load(recent_workouts)

        # Cache training load
        self.set("current_training_load", asdict(training_load), ttl=3600)

        return training_load

    def get_performance_trends(self, days: int = 30) -> List[PerformanceTrend]:
        """Get performance trends for key metrics"""
        trends = self.get(f"performance_trends_{days}d")
        if trends:
            return [PerformanceTrend(**trend) for trend in trends]

        # Calculate if not cached
        return self._calculate_performance_trends(days)

    def _calculate_performance_trends(self, days: int) -> List[PerformanceTrend]:
        """Calculate performance trends over specified period"""
        if not self.performance_history:
            return []

        cutoff_date = datetime.now() - timedelta(days=days)
        recent_metrics = []

        for record in self.performance_history:
            workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
            if workout_date >= cutoff_date:
                recent_metrics.append({
                    'date': workout_date,
                    'metrics': WorkoutMetrics(**record['metrics'])
                })

        if len(recent_metrics) < 2:
            return []

        # Sort by date
        recent_metrics.sort(key=lambda x: x['date'])

        trends = []

        # Calculate trends for key metrics
        metrics_to_track = [
            ('avg_speed_kmh', 'Average Speed'),
            ('avg_hr', 'Average Heart Rate'),
            ('avg_power', 'Average Power'),
            ('estimated_ftp', 'Estimated FTP'),
            ('training_stress_score', 'Training Stress Score')
        ]

        for metric_attr, metric_name in metrics_to_track:
            trend = self._calculate_single_metric_trend(recent_metrics, metric_attr, metric_name, days)
            if trend:
                trends.append(trend)

        # Cache trends
        self.set(f"performance_trends_{days}d", [asdict(trend) for trend in trends], ttl=1800)

        return trends

    def _calculate_single_metric_trend(self, recent_metrics: List[Dict],
                                       metric_attr: str, metric_name: str,
                                       days: int) -> Optional[PerformanceTrend]:
        """Calculate trend for a single metric"""
        # Extract values, filtering out None values
        values_with_dates = []
        for record in recent_metrics:
            value = getattr(record['metrics'], metric_attr)
            if value is not None:
                values_with_dates.append((record['date'], value))

        if len(values_with_dates) < 2:
            return None

        # Calculate current value (average of last 3 workouts)
        recent_values = [v for _, v in values_with_dates[-3:]]
        current_value = sum(recent_values) / len(recent_values)

        # Calculate 7-day trend if we have enough data
        week_ago = datetime.now() - timedelta(days=7)
        week_values = [v for d, v in values_with_dates if d >= week_ago]

        if len(week_values) >= 2:
            week_old_avg = sum(week_values[:len(week_values)//2]) / (len(week_values)//2)
            week_recent_avg = sum(week_values[len(week_values)//2:]) / (len(week_values) - len(week_values)//2)
            trend_7day = ((week_recent_avg - week_old_avg) / week_old_avg * 100) if week_old_avg > 0 else 0
        else:
            trend_7day = 0

        # Calculate 30-day trend
        if len(values_with_dates) >= 4:
            old_avg = sum(v for _, v in values_with_dates[:len(values_with_dates)//2]) / (len(values_with_dates)//2)
            recent_avg = sum(v for _, v in values_with_dates[len(values_with_dates)//2:]) / (len(values_with_dates) - len(values_with_dates)//2)
            trend_30day = ((recent_avg - old_avg) / old_avg * 100) if old_avg > 0 else 0
        else:
            trend_30day = 0
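        # Worked example of the split-half trend: values [20, 21, 22, 23] give
        # old_avg = 20.5 and recent_avg = 22.5, so trend_30day is
        # (22.5 - 20.5) / 20.5 * 100 ≈ +9.8%.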

        # Determine trend direction
        primary_trend = trend_7day if abs(trend_7day) > abs(trend_30day) else trend_30day
        if primary_trend > 2:
            trend_direction = "improving"
        elif primary_trend < -2:
            trend_direction = "declining"
        else:
            trend_direction = "stable"

        # Calculate confidence based on data points
        confidence = min(len(values_with_dates) / 10, 1.0)  # Max confidence at 10+ data points

        return PerformanceTrend(
            metric_name=metric_name,
            current_value=round(current_value, 2),
            trend_7day=round(trend_7day, 1),
            trend_30day=round(trend_30day, 1),
            trend_direction=trend_direction,
            confidence=round(confidence, 2)
        )

    def _update_performance_trends(self):
        """Update cached performance trends after new workout"""
        # Clear cached trends to force recalculation
        keys_to_clear = [key for key in self._cache.keys() if key.startswith("performance_trends_")]
        for key in keys_to_clear:
            self.delete(key)

    def get_deterministic_analysis_data(self, activity_id: str) -> Dict[str, Any]:
        """Get all deterministic data for analysis with validation"""
        metrics = self.get_workout_metrics(activity_id)
        training_load = self.get_training_load()
        performance_trends = self.get_performance_trends()

        if not metrics:
            return {"error": "No metrics available for activity"}

        # Generate standardized assessment with safe handling
        try:
            from cycling_metrics import generate_standardized_assessment
            assessment = generate_standardized_assessment(metrics, training_load)
        except Exception as e:
            logger.warning(f"Could not generate standardized assessment: {e}")
            assessment = {"error": "Assessment calculation failed", "workout_classification": "unknown"}

        return {
            "workout_metrics": asdict(metrics),
            "training_load": asdict(training_load) if training_load else None,
            "performance_trends": [asdict(trend) for trend in performance_trends if trend],
            "standardized_assessment": assessment,
            "analysis_timestamp": datetime.now().isoformat()
        }

    def get_ftp_estimates_history(self) -> List[Dict[str, Any]]:
        """Get historical FTP estimates for tracking progress"""
        ftp_history = []

        for record in self.performance_history:
            metrics = WorkoutMetrics(**record['metrics'])
            if metrics.estimated_ftp:
                ftp_history.append({
                    "date": record['date'],
                    "activity_id": record['activity_id'],
                    "estimated_ftp": metrics.estimated_ftp,
                    "workout_type": record['metrics'].get('workout_classification', 'unknown')
                })

        # Sort by date and return recent estimates
        ftp_history.sort(key=lambda x: x['date'], reverse=True)
        return ftp_history[:20]  # Last 20 estimates

    def get_gear_usage_analysis(self) -> Dict[str, Any]:
        """Get single speed gear usage analysis"""
        gear_data = []

        for record in self.performance_history:
            metrics = WorkoutMetrics(**record['metrics'])
            if metrics.estimated_gear_ratio:
                gear_data.append({
                    "date": record['date'],
                    "estimated_ratio": metrics.estimated_gear_ratio,
                    "chainring": metrics.estimated_chainring,
                    "cog": metrics.estimated_cog,
                    "avg_speed": metrics.avg_speed_kmh,
                    "elevation_gain": metrics.elevation_gain_m,
                    "terrain_type": self._classify_terrain(metrics)
                })

        if not gear_data:
            return {"message": "No gear data available"}

        # Analyze gear preferences by terrain
        gear_preferences = {}
        for data in gear_data:
            terrain = data['terrain_type']
            gear = f"{data['chainring']}x{data['cog']}"

            if terrain not in gear_preferences:
                gear_preferences[terrain] = {}
            if gear not in gear_preferences[terrain]:
                gear_preferences[terrain][gear] = 0
            gear_preferences[terrain][gear] += 1

        # Calculate most common gears
        all_gears = {}
        for data in gear_data:
            gear = f"{data['chainring']}x{data['cog']}"
            all_gears[gear] = all_gears.get(gear, 0) + 1

        most_common_gear = max(all_gears.items(), key=lambda x: x[1])

        return {
            "total_workouts_analyzed": len(gear_data),
            "most_common_gear": {
                "gear": most_common_gear[0],
                "usage_count": most_common_gear[1],
                "usage_percentage": round(most_common_gear[1] / len(gear_data) * 100, 1)
            },
            "gear_by_terrain": gear_preferences,
            "gear_recommendations": self._recommend_gears(gear_data)
        }

    def _classify_terrain(self, metrics: WorkoutMetrics) -> str:
        """Classify terrain type from workout metrics"""
        if metrics.distance_km == 0:
            return "unknown"

        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km

        if elevation_per_km > 15:
            return "steep_climbing"
        elif elevation_per_km > 8:
            return "moderate_climbing"
        elif elevation_per_km > 3:
            return "rolling_hills"
        else:
            return "flat_terrain"

    def _recommend_gears(self, gear_data: List[Dict]) -> Dict[str, str]:
        """Recommend optimal gears for different conditions"""
        if not gear_data:
            return {}

        # Group by terrain and find most efficient gears
        terrain_efficiency = {}

        for data in gear_data:
            terrain = data['terrain_type']
            gear = f"{data['chainring']}x{data['cog']}"
            speed = data['avg_speed']

            if terrain not in terrain_efficiency:
                terrain_efficiency[terrain] = {}
            if gear not in terrain_efficiency[terrain]:
                terrain_efficiency[terrain][gear] = []

            terrain_efficiency[terrain][gear].append(speed)

        # Calculate average speeds for each gear/terrain combo
        recommendations = {}
        for terrain, gears in terrain_efficiency.items():
            best_gear = None
            best_avg_speed = 0

            for gear, speeds in gears.items():
                avg_speed = sum(speeds) / len(speeds)
                if avg_speed > best_avg_speed:
                    best_avg_speed = avg_speed
                    best_gear = gear

            if best_gear:
                recommendations[terrain] = best_gear

        return recommendations

    def load_metrics_history(self):
        """Load performance history from file"""
        if self.metrics_file.exists():
            try:
                with open(self.metrics_file, 'r') as f:
                    data = json.load(f)
                self.performance_history = data.get('performance_history', [])
                logger.info(f"Loaded {len(self.performance_history)} workout records")
            except Exception as e:
                logger.error(f"Error loading metrics history: {e}")
                self.performance_history = []
        else:
            self.performance_history = []

    def save_metrics_history(self):
        """Save performance history to file"""
        try:
            # Keep only last 200 workouts to prevent file from growing too large
            self.performance_history = self.performance_history[-200:]

            data = {
                'performance_history': self.performance_history,
                'last_updated': datetime.now().isoformat()
            }

            with open(self.metrics_file, 'w') as f:
                json.dump(data, f, indent=2, default=str)

            logger.debug(f"Saved {len(self.performance_history)} workout records")
        except Exception as e:
            logger.error(f"Error saving metrics history: {e}")

    def get_workout_summary_for_llm(self, activity_id: str) -> Dict[str, Any]:
        """Get structured workout summary optimized for LLM analysis"""
        deterministic_data = self.get_deterministic_analysis_data(activity_id)

        if "error" in deterministic_data:
            return deterministic_data

        # Format data for LLM consumption
        metrics = deterministic_data["workout_metrics"]
        assessment = deterministic_data["standardized_assessment"]
        training_load = deterministic_data.get("training_load")

        summary = {
            "workout_classification": assessment["workout_classification"],
            "intensity_rating": f"{assessment['intensity_rating']}/10",
            "key_metrics": {
                "duration": f"{metrics['duration_minutes']:.0f} minutes",
                "distance": f"{metrics['distance_km']:.1f} km",
                "avg_speed": f"{metrics['avg_speed_kmh']:.1f} km/h",
                "elevation_gain": f"{metrics['elevation_gain_m']:.0f} m"
            },
            "performance_indicators": {
                "efficiency_score": assessment["efficiency_score"],
                "estimated_ftp": metrics.get("estimated_ftp"),
                "intensity_factor": metrics.get("intensity_factor")
            },
            "recovery_guidance": assessment["recovery_recommendation"],
            "training_load_context": {
                "fitness_level": training_load["fitness"] if training_load else None,
                "fatigue_level": training_load["fatigue"] if training_load else None,
                "form": training_load["form"] if training_load else None
            } if training_load else None,
            "single_speed_analysis": {
                "estimated_gear": f"{metrics.get('estimated_chainring', 'N/A')}x{metrics.get('estimated_cog', 'N/A')}",
                "gear_ratio": metrics.get("estimated_gear_ratio")
            } if metrics.get("estimated_gear_ratio") else None
        }

        return summary

# Integration with existing core app
def enhance_core_app_with_metrics():
    """Example of how to integrate metrics tracking with the core app"""
    integration_code = '''
# In core_app.py, replace the cache manager initialization:

from enhanced_cache_manager import MetricsTrackingCache

class CyclingAnalyzerApp:
    def __init__(self, config: Config):
        self.config = config
        self.llm_client = LLMClient(config)
        self.mcp_client = MCPClient(config)

        # Use enhanced cache with metrics tracking
        self.cache_manager = MetricsTrackingCache(
            default_ttl=config.cache_ttl,
            metrics_file="workout_metrics.json"
        )

        self.template_engine = TemplateEngine(config.templates_dir)

    async def _preload_cache(self):
        """Enhanced preloading with metrics calculation"""
        logger.info("Pre-loading cache with metrics calculation...")

        # Set user profile for accurate calculations
        profile = await self.mcp_client.call_tool("user_profile", {})
        if profile:
            # Extract FTP and max HR from profile if available
            ftp = profile.get("ftp") or None
            max_hr = profile.get("maxHR") or None
            self.cache_manager.set_user_profile(ftp=ftp, max_hr=max_hr)

        # Cache recent activities with metrics
        activities = await self.mcp_client.call_tool("get_activities", {"limit": 10})
        if activities:
            self.cache_manager.set("recent_activities", activities)

            # Find and analyze last cycling activity
            cycling_activity = self._find_last_cycling_activity(activities)
            if cycling_activity:
                activity_details = await self.mcp_client.call_tool(
                    "get_activity_details",
                    {"activity_id": cycling_activity["activityId"]}
                )

                # Cache with metrics calculation
                metrics = self.cache_manager.cache_workout_with_metrics(
                    cycling_activity["activityId"],
                    activity_details
                )

                logger.info(f"Calculated metrics for last workout: {metrics.workout_classification}")

    async def analyze_workout_with_metrics(self, activity_id: str = None, **kwargs) -> str:
        """Enhanced analysis using calculated metrics"""
        if not activity_id:
            # Get last cached cycling activity
            activities = self.cache_manager.get("recent_activities", [])
            cycling_activity = self._find_last_cycling_activity(activities)
            activity_id = cycling_activity["activityId"] if cycling_activity else None

        if not activity_id:
            return "No cycling activity found for analysis"

        # Get deterministic analysis data
        analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)

        if "error" in analysis_data:
            return f"Error: {analysis_data['error']}"

        # Use template with deterministic data
        template_name = "workflows/analyze_workout_with_metrics.txt"

        context = {
            "workout_summary": analysis_data,
            "performance_trends": self.cache_manager.get_performance_trends(30),
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)
'''

    return integration_code

@@ -1,579 +0,0 @@
#!/usr/bin/env python3
"""
Enhanced Core Application with Deterministic Metrics
"""

import asyncio
import logging
from pathlib import Path
from typing import Dict, Any, Optional

from config import Config, load_config
from llm_client import LLMClient
from mcp_client import MCPClient
from enhanced_cache_manager import MetricsTrackingCache
from template_engine import TemplateEngine
from cycling_metrics import CyclingMetricsCalculator, generate_standardized_assessment

logger = logging.getLogger(__name__)

class EnhancedCyclingAnalyzerApp:
    """Enhanced application with deterministic metrics and analysis"""

    def __init__(self, config: Config):
        self.config = config
        self.llm_client = LLMClient(config)
        self.mcp_client = MCPClient(config)

        # Use enhanced cache with metrics tracking
        self.cache_manager = MetricsTrackingCache(
            default_ttl=config.cache_ttl,
            metrics_file="workout_metrics.json"
        )

        self.template_engine = TemplateEngine(config.templates_dir)

        # User settings for accurate calculations
        self.user_ftp = None
        self.user_max_hr = None

    async def initialize(self):
        """Initialize all components with metrics support"""
        logger.info("Initializing enhanced application components...")

        await self.llm_client.initialize()
        await self.mcp_client.initialize()
        await self._setup_user_metrics()
        await self._preload_cache_with_metrics()

        logger.info("Enhanced application initialization complete")

    async def cleanup(self):
        """Cleanup all components"""
        # Save metrics before cleanup
        self.cache_manager.save_metrics_history()

        await self.mcp_client.cleanup()
        await self.llm_client.cleanup()

    async def _setup_user_metrics(self):
        """Setup user profile for accurate metric calculations"""
        try:
            # Try to get user profile from MCP
            if await self.mcp_client.has_tool("user_profile"):
                profile = await self.mcp_client.call_tool("user_profile", {})

                # Extract FTP and max HR if available
                self.user_ftp = profile.get("ftp") or profile.get("functionalThresholdPower")
                self.user_max_hr = profile.get("maxHR") or profile.get("maxHeartRate")

            # Also try user settings
            if await self.mcp_client.has_tool("user_settings"):
                settings = await self.mcp_client.call_tool("user_settings", {})
                if not self.user_ftp:
                    self.user_ftp = settings.get("ftp")
                if not self.user_max_hr:
                    self.user_max_hr = settings.get("maxHeartRate")

            logger.info(f"User metrics configured: FTP={self.user_ftp}W, Max HR={self.user_max_hr}bpm")

            # Set up cache manager with user profile
            self.cache_manager.set_user_profile(ftp=self.user_ftp, max_hr=self.user_max_hr)

        except Exception as e:
            logger.warning(f"Could not setup user metrics: {e}")
            # Initialize with defaults
            self.cache_manager.set_user_profile()

    async def _preload_cache_with_metrics(self):
        """Pre-load cache with calculated metrics"""
        logger.info("Pre-loading cache with metrics calculation...")

        try:
            # Cache recent activities
            if await self.mcp_client.has_tool("get_activities"):
                activities = await self.mcp_client.call_tool("get_activities", {"limit": 15})
                self.cache_manager.set("recent_activities", activities)

                # Process cycling activities with metrics
                cycling_count = 0
                for activity in activities:
                    activity_type = activity.get("activityType", {})
                    if isinstance(activity_type, dict):
                        type_key = activity_type.get("typeKey", "").lower()
                    else:
                        type_key = str(activity_type).lower()

                    if "cycling" in type_key or "bike" in type_key:
                        activity_id = activity.get("activityId")
                        if activity_id and cycling_count < 5:  # Limit to 5 recent cycling activities
                            try:
                                # Get detailed activity data
                                if await self.mcp_client.has_tool("get_activity_details"):
                                    details = await self.mcp_client.call_tool(
                                        "get_activity_details",
                                        {"activity_id": str(activity_id)}
                                    )

                                    # Calculate and cache metrics
                                    metrics = self.cache_manager.cache_workout_with_metrics(
                                        str(activity_id), details
                                    )

                                    logger.info(f"Processed activity {activity_id}: {metrics.workout_classification}")
                                    cycling_count += 1

                            except Exception as e:
                                logger.warning(f"Could not process activity {activity_id}: {e}")

                logger.info(f"Processed {cycling_count} cycling activities with metrics")

        except Exception as e:
            logger.error(f"Error preloading cache with metrics: {e}")

    # Enhanced analysis methods

    async def analyze_workout_deterministic(self, activity_id: str = None, **kwargs) -> str:
        """Analyze workout using deterministic metrics"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for analysis"

        # Get deterministic analysis data
        analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)

        if "error" in analysis_data:
            return f"Error: {analysis_data['error']}"

        # Get performance trends
        performance_trends = self.cache_manager.get_performance_trends(30)

        # Use enhanced template
        template_name = "workflows/analyze_workout_with_metrics.txt"

        context = {
            "workout_summary": analysis_data,
            "performance_trends": [
                {
                    "metric_name": trend.metric_name,
                    "current_value": trend.current_value,
                    "trend_direction": trend.trend_direction,
                    "trend_7day": trend.trend_7day
                }
                for trend in performance_trends
            ],
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def estimate_ftp_without_power(self, activity_id: str = None, **kwargs) -> str:
        """Estimate FTP for workouts without power meter"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for FTP estimation"

        # Get workout metrics
        metrics = self.cache_manager.get_workout_metrics(activity_id)
        if not metrics:
            return "No metrics available for FTP estimation"

        # Get FTP estimation history
        ftp_history = self.cache_manager.get_ftp_estimates_history()

        # Calculate additional metrics for FTP estimation
        hr_intensity = 0
        if metrics.avg_hr and self.user_max_hr:
            hr_intensity = metrics.avg_hr / self.user_max_hr
        elif metrics.avg_hr:
            # Estimate max HR if not provided
            estimated_max_hr = 220 - 30  # Assume 30 years old, should be configurable
            hr_intensity = metrics.avg_hr / estimated_max_hr

        # Estimate power from speed
        avg_speed_ms = metrics.avg_speed_kmh / 3.6
        estimated_power_from_speed = (avg_speed_ms ** 2.5) * 3.5

        # Adjust for elevation
        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
        elevation_factor = 1 + (elevation_per_km / 1000) * 0.1
        elevation_adjusted_power = estimated_power_from_speed * elevation_factor

        template_name = "workflows/estimate_ftp_no_power.txt"

        context = {
            "duration_minutes": metrics.duration_minutes,
            "distance_km": metrics.distance_km,
            "avg_speed_kmh": metrics.avg_speed_kmh,
            "elevation_gain_m": metrics.elevation_gain_m,
            "avg_hr": metrics.avg_hr,
            "max_hr": metrics.max_hr,
            "hr_intensity": hr_intensity,
            "estimated_power_from_speed": round(estimated_power_from_speed, 0),
            "elevation_adjusted_power": round(elevation_adjusted_power, 0),
            "estimated_ftp": metrics.estimated_ftp,
            "elevation_per_km": round(elevation_per_km, 1),
            "elevation_factor": elevation_factor,
            "ftp_history": ftp_history[:10],  # Last 10 estimates
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def analyze_single_speed_gears(self, activity_id: str = None, **kwargs) -> str:
        """Analyze single speed gear selection and optimization"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for gear analysis"

        # Get workout metrics
        metrics = self.cache_manager.get_workout_metrics(activity_id)
        if not metrics:
            return "No metrics available for gear analysis"

        # Get gear usage analysis
        gear_analysis = self.cache_manager.get_gear_usage_analysis()

        # Calculate additional gear metrics
        chainrings = [46, 38]
        cogs = [14, 15, 16, 17, 18, 19, 20]
        wheel_circumference = 2.096  # meters

        available_gears = []
        for chainring in chainrings:
            for cog in cogs:
                ratio = chainring / cog
                gear_inches = ratio * 27  # 700c wheel ≈ 27" diameter
                development = ratio * wheel_circumference
                available_gears.append({
                    "chainring": chainring,
                    "cog": cog,
                    "ratio": round(ratio, 2),
                    "gear_inches": round(gear_inches, 1),
                    "development": round(development, 1)
                })

        # Estimate cadence
        if metrics.avg_speed_kmh > 0 and metrics.estimated_gear_ratio:
            speed_ms = metrics.avg_speed_kmh / 3.6
            estimated_cadence = (speed_ms / (metrics.estimated_gear_ratio * wheel_circumference)) * 60
        else:
            estimated_cadence = 85  # Default assumption

        # Classify terrain
        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
        if elevation_per_km > 15:
            terrain_type = "steep_climbing"
        elif elevation_per_km > 8:
            terrain_type = "moderate_climbing"
        elif elevation_per_km > 3:
            terrain_type = "rolling_hills"
        else:
            terrain_type = "flat_terrain"

        template_name = "workflows/single_speed_gear_analysis.txt"

        context = {
            "avg_speed_kmh": metrics.avg_speed_kmh,
            "duration_minutes": metrics.duration_minutes,
            "elevation_gain_m": metrics.elevation_gain_m,
            "terrain_type": terrain_type,
            "estimated_chainring": metrics.estimated_chainring,
            "estimated_cog": metrics.estimated_cog,
            "estimated_gear_ratio": metrics.estimated_gear_ratio,
            "gear_inches": round((metrics.estimated_gear_ratio or 2.5) * 27, 1),
            "development_meters": round((metrics.estimated_gear_ratio or 2.5) * wheel_circumference, 1),
            "available_gears": available_gears,
            "gear_usage_by_terrain": gear_analysis.get("gear_by_terrain", {}),
            "best_flat_gear": "46x16",  # Example, should be calculated
            "best_climbing_gear": "38x20",  # Example, should be calculated
            "most_versatile_gear": gear_analysis.get("most_common_gear", {}).get("gear", "46x17"),
            "efficiency_rating": 7,  # Should be calculated based on speed/effort
            "estimated_cadence": round(estimated_cadence, 0),
            "elevation_per_km": round(elevation_per_km, 1),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def get_training_load_analysis(self, **kwargs) -> str:
        """Analyze training load and recovery status"""
        training_load = self.cache_manager.get_training_load()
        if not training_load:
            return "Insufficient workout history for training load analysis"

        # Get performance trends
        performance_trends = self.cache_manager.get_performance_trends(42)  # 6 weeks

        # Classify training load status
        if training_load.training_stress_balance > 5:
            form_status = "fresh_and_ready"
        elif training_load.training_stress_balance > -5:
            form_status = "maintaining_fitness"
        elif training_load.training_stress_balance > -15:
            form_status = "building_fitness"
        else:
            form_status = "high_fatigue_risk"

        template_name = "workflows/training_load_analysis.txt"

        context = {
            "training_load": {
                "fitness": training_load.fitness,
                "fatigue": training_load.fatigue,
                "form": training_load.form,
                "acute_load": training_load.acute_training_load,
                "chronic_load": training_load.chronic_training_load
            },
            "form_status": form_status,
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "trend_direction": trend.trend_direction,
                    "trend_7day": trend.trend_7day,
                    "trend_30day": trend.trend_30day,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def suggest_next_workout_data_driven(self, **kwargs) -> str:
        """Generate data-driven workout suggestions"""
        # Get training load status
        training_load = self.cache_manager.get_training_load()
        performance_trends = self.cache_manager.get_performance_trends(14)  # 2 weeks

        # Get recent workout pattern
        recent_activities = self.cache_manager.get("recent_activities", [])
        recent_cycling = [act for act in recent_activities
                          if "cycling" in act.get("activityType", {}).get("typeKey", "").lower()]

        # Analyze recent workout pattern
        recent_intensities = []
        recent_durations = []
        recent_types = []

        for activity in recent_cycling[:7]:  # Last 7 cycling activities
            activity_id = str(activity.get("activityId"))
            metrics = self.cache_manager.get_workout_metrics(activity_id)
            if metrics:
                recent_intensities.append(self._rate_intensity(metrics))
                recent_durations.append(metrics.duration_minutes)
                recent_types.append(self._classify_workout(metrics))

        # Calculate training pattern analysis
        avg_intensity = sum(recent_intensities) / len(recent_intensities) if recent_intensities else 5
        avg_duration = sum(recent_durations) / len(recent_durations) if recent_durations else 60

        # Determine workout recommendation based on data
        if training_load and training_load.form < -10:
            recommendation_type = "recovery_focus"
        elif avg_intensity > 7:
            recommendation_type = "endurance_focus"
        elif avg_intensity < 4:
            recommendation_type = "intensity_focus"
        else:
            recommendation_type = "balanced_progression"

        template_name = "workflows/suggest_next_workout_data_driven.txt"

        context = {
            "training_load": training_load,
            "performance_trends": performance_trends,
            "recent_workout_analysis": {
                "avg_intensity": round(avg_intensity, 1),
                "avg_duration": round(avg_duration, 0),
                "workout_types": recent_types,
                "pattern_analysis": self._analyze_workout_pattern(recent_types)
            },
            "recommendation_type": recommendation_type,
            "user_ftp": self.user_ftp,
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    # Utility methods

    def _get_last_cycling_activity_id(self) -> Optional[str]:
        """Get the ID of the most recent cycling activity"""
        activities = self.cache_manager.get("recent_activities", [])
        for activity in activities:
            activity_type = activity.get("activityType", {})
            if isinstance(activity_type, dict):
                type_key = activity_type.get("typeKey", "").lower()
            else:
                type_key = str(activity_type).lower()

            if "cycling" in type_key or "bike" in type_key:
                return str(activity.get("activityId"))
        return None

    def _rate_intensity(self, metrics) -> int:
        """Rate workout intensity 1-10 based on metrics"""
        factors = []

        # Speed factor
        if metrics.avg_speed_kmh > 40:
            factors.append(9)
        elif metrics.avg_speed_kmh > 35:
            factors.append(7)
        elif metrics.avg_speed_kmh > 25:
            factors.append(5)
        else:
            factors.append(3)

        # Duration factor
        duration_intensity = min(metrics.duration_minutes / 60 * 2, 6)
        factors.append(duration_intensity)

        # Elevation factor
        if metrics.distance_km > 0:
            elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
            if elevation_per_km > 15:
                factors.append(8)
            elif elevation_per_km > 10:
                factors.append(6)
            elif elevation_per_km > 5:
                factors.append(4)
            else:
                factors.append(2)

        return min(int(sum(factors) / len(factors)), 10)

    def _classify_workout(self, metrics) -> str:
        """Classify workout type"""
        duration = metrics.duration_minutes
        avg_speed = metrics.avg_speed_kmh
        elevation_gain = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0

        if duration < 30:
            return "short_intensity"
        elif duration > 180:
            return "long_endurance"
        elif elevation_gain > 10:
            return "climbing_focused"
        elif avg_speed > 35:
            return "high_speed"
        elif avg_speed < 20:
            return "recovery_easy"
        else:
            return "moderate_endurance"

    def _analyze_workout_pattern(self, recent_types: list) -> str:
        """Analyze recent workout pattern"""
        if not recent_types:
            return "insufficient_data"

        type_counts = {}
        for workout_type in recent_types:
            type_counts[workout_type] = type_counts.get(workout_type, 0) + 1

        total_workouts = len(recent_types)
        intensity_workouts = sum(1 for t in recent_types if "intensity" in t or "speed" in t)
        endurance_workouts = sum(1 for t in recent_types if "endurance" in t)
        recovery_workouts = sum(1 for t in recent_types if "recovery" in t)

        intensity_ratio = intensity_workouts / total_workouts
        endurance_ratio = endurance_workouts / total_workouts

        if intensity_ratio > 0.5:
            return "high_intensity_bias"
        elif recovery_workouts > total_workouts * 0.4:
            return "recovery_heavy"
        elif endurance_ratio > 0.6:
            return "endurance_focused"
        else:
            return "balanced_training"

    # Compatibility methods for existing interface

    async def analyze_workout(self, analysis_type: str = "deterministic", **kwargs) -> str:
        """Analyze workout with deterministic metrics (enhanced version)"""
        return await self.analyze_workout_deterministic(**kwargs)

    async def suggest_next_workout(self, **kwargs) -> str:
        """Generate data-driven workout suggestion"""
        return await self.suggest_next_workout_data_driven(**kwargs)

    async def enhanced_analysis(self, analysis_type: str, **kwargs) -> str:
        """Perform enhanced analysis based on type"""
        if analysis_type == "ftp_estimation":
            return await self.estimate_ftp_without_power(**kwargs)
        elif analysis_type == "gear_analysis":
            return await self.analyze_single_speed_gears(**kwargs)
        elif analysis_type == "training_load":
            return await self.get_training_load_analysis(**kwargs)
        else:
            # Fallback to deterministic analysis
            return await self.analyze_workout_deterministic(**kwargs)

    # Existing interface compatibility

    async def list_available_tools(self) -> list:
        return await self.mcp_client.list_tools()

    def list_templates(self) -> list:
        return self.template_engine.list_templates()

    def get_cached_data(self, key: str = None) -> Any:
        return self.cache_manager.get(key) if key else self.cache_manager.get_all()

    # New deterministic data access methods

    def get_performance_summary(self) -> Dict[str, Any]:
        """Get comprehensive performance summary"""
        performance_trends = self.cache_manager.get_performance_trends(30)
        training_load = self.cache_manager.get_training_load()
        ftp_history = self.cache_manager.get_ftp_estimates_history()
        gear_analysis = self.cache_manager.get_gear_usage_analysis()

        return {
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "current": trend.current_value,
                    "trend_7d": f"{trend.trend_7day:+.1f}%",
                    "trend_30d": f"{trend.trend_30day:+.1f}%",
                    "direction": trend.trend_direction,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_load": {
                "fitness": training_load.fitness if training_load else None,
                "fatigue": training_load.fatigue if training_load else None,
                "form": training_load.form if training_load else None
            },
            "ftp_estimates": {
                "latest": ftp_history[0]["estimated_ftp"] if ftp_history else None,
                "trend": "improving" if len(ftp_history) > 1 and ftp_history[0]["estimated_ftp"] > ftp_history[1]["estimated_ftp"] else "stable",
                "history_count": len(ftp_history)
            },
            "gear_usage": {
                "most_common": gear_analysis.get("most_common_gear", {}),
                "total_analyzed": gear_analysis.get("total_workouts_analyzed", 0)
            }
        }

    def get_metrics_for_activity(self, activity_id: str) -> Optional[Dict[str, Any]]:
        """Get all calculated metrics for a specific activity"""
        return self.cache_manager.get_deterministic_analysis_data(activity_id)
data/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
"""
Data layer for persistent storage and models
"""
data/cache.py (new file, 361 lines)
@@ -0,0 +1,361 @@
"""
Persistent Cache Manager using SQLite and SQLAlchemy
"""

import time
import logging
from typing import Any, Dict, Optional, List
from sqlalchemy import Column, Integer, String, Float, DateTime, Text, Boolean, desc
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
from datetime import datetime

from .database import Database
# CacheEntry (assumed to be defined in .models with the other ORM models)
# is queried throughout this module, so it must be imported here.
from .models import Base, Workout, Metrics, CacheEntry

logger = logging.getLogger(__name__)

class PersistentCacheManager:
    """Persistent cache using SQLite database with TTL support"""

    def __init__(self, db: Database, default_ttl: int = 300):
        self.db = db
        self.default_ttl = default_ttl
        self.Session = sessionmaker(bind=self.db.engine)

    def set(self, key: str, data: Any, ttl: Optional[int] = None) -> None:
        """Set cache entry with TTL (stored in DB)"""
        ttl = ttl or self.default_ttl
        session = self.Session()
        try:
            cache_entry = session.query(CacheEntry).filter_by(key=key).first()
            if cache_entry:
                cache_entry.data = data
                cache_entry.timestamp = time.time()
                cache_entry.ttl = ttl
            else:
                cache_entry = CacheEntry(key=key, data=data, timestamp=time.time(), ttl=ttl)
                session.add(cache_entry)
            session.commit()
            logger.debug(f"Persisted data for key '{key}' with TTL {ttl}s")
        except Exception as e:
            logger.error(f"Error setting cache for '{key}': {e}")
        finally:
            session.close()

    def get(self, key: str, default: Any = None) -> Any:
        """Get cache entry, return default if expired or missing"""
        session = self.Session()
        try:
            cache_entry = session.query(CacheEntry).filter_by(key=key).first()
            if not cache_entry:
                logger.debug(f"Cache miss for key '{key}'")
                return default

            if time.time() - cache_entry.timestamp > cache_entry.ttl:
                logger.debug(f"Cache expired for key '{key}'")
                session.delete(cache_entry)
                session.commit()
                return default

            logger.debug(f"Cache hit for key '{key}'")
            return cache_entry.data
        except Exception as e:
            logger.error(f"Error getting cache for '{key}': {e}")
            return default
        finally:
            session.close()
|
||||
|
||||
    def has(self, key: str) -> bool:
        """Check if key exists and is not expired.

        Note: a stored value of None is indistinguishable from a miss here.
        """
        return self.get(key) is not None
    def delete(self, key: str) -> bool:
        """Delete cache entry"""
        session = self.Session()
        try:
            cache_entry = session.query(CacheEntry).filter_by(key=key).first()
            if cache_entry:
                session.delete(cache_entry)
                session.commit()
                logger.debug(f"Deleted cache entry for key '{key}'")
                return True
            return False
        except Exception as e:
            logger.error(f"Error deleting cache for '{key}': {e}")
            return False
        finally:
            session.close()

    def clear(self) -> None:
        """Clear all cache entries"""
        session = self.Session()
        try:
            count = session.query(CacheEntry).delete()
            session.commit()
            logger.debug(f"Cleared {count} cache entries")
        except Exception as e:
            logger.error(f"Error clearing cache: {e}")
        finally:
            session.close()

    def cleanup_expired(self) -> int:
        """Remove expired cache entries and return count removed"""
        session = self.Session()
        try:
            expired_count = session.query(CacheEntry).filter(
                time.time() - CacheEntry.timestamp > CacheEntry.ttl
            ).delete()
            session.commit()
            if expired_count:
                logger.debug(f"Cleaned up {expired_count} expired cache entries")
            return expired_count
        except Exception as e:
            logger.error(f"Error cleaning up cache: {e}")
            return 0
        finally:
            session.close()

    def get_all(self) -> Dict[str, Any]:
        """Get all non-expired cache entries"""
        self.cleanup_expired()
        session = self.Session()
        try:
            entries = session.query(CacheEntry).filter(
                time.time() - CacheEntry.timestamp <= CacheEntry.ttl
            ).all()
            return {entry.key: entry.data for entry in entries}
        except Exception as e:
            logger.error(f"Error getting all cache: {e}")
            return {}
        finally:
            session.close()

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics"""
        self.cleanup_expired()
        session = self.Session()
        try:
            total = session.query(CacheEntry).count()
            active = session.query(CacheEntry).filter(
                time.time() - CacheEntry.timestamp <= CacheEntry.ttl
            ).count()
            return {
                "total_entries": total,
                "active_entries": active,
                "expired_entries": total - active
            }
        except Exception as e:
            logger.error(f"Error getting cache stats: {e}")
            return {}
        finally:
            session.close()
    def set_multiple(self, data: Dict[str, Any], ttl: Optional[int] = None) -> None:
        """Set multiple cache entries at once"""
        # Each set() call manages its own session; no extra session is needed here
        for key, value in data.items():
            self.set(key, value, ttl)
    def get_multiple(self, keys: List[str]) -> Dict[str, Any]:
        """Get multiple cache entries at once"""
        return {key: self.get(key) for key in keys}
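    # Usage sketch: basic TTL behaviour of the cache above. The Database
    # wrapper only needs to expose .engine, as assumed by __init__.
    def _cache_demo(self) -> None:
        self.set("user_profile", {"ftp": 250}, ttl=3600)  # keep for an hour
        profile = self.get("user_profile")                # -> {"ftp": 250} until expiry
        logger.info("cache stats: %s", self.get_stats())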
    # Cycling-specific methods (adapted for persistence)
    def cache_user_profile(self, profile_data: Dict[str, Any]) -> None:
        """Cache user profile data"""
        self.set("user_profile", profile_data, ttl=3600)

    def cache_activities(self, activities: List[Dict[str, Any]]) -> None:
        """Cache activities list"""
        self.set("recent_activities", activities, ttl=900)

    def cache_activity_details(self, activity_id: str, details: Dict[str, Any]) -> None:
        """Cache specific activity details"""
        self.set(f"activity_details_{activity_id}", details, ttl=3600)

    def get_user_profile(self) -> Optional[Dict[str, Any]]:
        """Get cached user profile"""
        return self.get("user_profile")

    def get_recent_activities(self) -> List[Dict[str, Any]]:
        """Get cached recent activities"""
        return self.get("recent_activities", [])

    def get_activity_details(self, activity_id: str) -> Optional[Dict[str, Any]]:
        """Get cached activity details"""
        return self.get(f"activity_details_{activity_id}")

    def cache_workout_analysis(self, workout_id: str, analysis: str) -> None:
        """Cache workout analysis results"""
        self.set(f"analysis_{workout_id}", analysis, ttl=86400)

    def get_workout_analysis(self, workout_id: str) -> Optional[str]:
        """Get cached workout analysis"""
        return self.get(f"analysis_{workout_id}")

    # Enhanced metrics tracking (adapted for DB)
    def set_user_profile_metrics(self, ftp: Optional[float] = None, max_hr: Optional[int] = None):
        """Set user profile for accurate calculations"""
        # Store in DB if needed, but for now, keep in memory for calculator
        self.metrics_calculator = CyclingMetricsCalculator(user_ftp=ftp, user_max_hr=max_hr)
        logger.info(f"Metrics calculator configured: FTP={ftp}, Max HR={max_hr}")
    def cache_workout_with_metrics(self, activity_id: str, activity_data: Dict[str, Any]) -> WorkoutMetrics:
        """Cache workout data and calculate comprehensive metrics with validation"""
        if not hasattr(self, 'metrics_calculator') or not self.metrics_calculator:
            self.metrics_calculator = CyclingMetricsCalculator()

        # Validate and normalize input data
        validated_data = self._validate_activity_data(activity_data)

        # Calculate metrics
        metrics = self.metrics_calculator.calculate_workout_metrics(validated_data)

        # Store raw data and metrics in DB
        session = self.Session()
        try:
            # Store raw activity as Workout
            workout = session.query(Workout).filter_by(activity_id=activity_id).first()
            if not workout:
                workout = Workout(activity_id=activity_id)
                session.add(workout)

            # Garmin reports startTimeGmt as an ISO string; parse it before
            # assigning to the DateTime column
            start_time = validated_data.get('startTimeGmt')
            if isinstance(start_time, str):
                workout.date = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
            else:
                workout.date = start_time or datetime.now()
            workout.data_quality = validated_data.get('data_quality', 'complete')
            workout.is_indoor = validated_data.get('is_indoor', False)
            workout.raw_data = json.dumps(activity_data)
            workout.validation_warnings = json.dumps(validated_data.get('validation_warnings', []))

            # Store metrics
            metrics_entry = session.query(Metrics).filter_by(workout_id=activity_id).first()
            if not metrics_entry:
                metrics_entry = Metrics(workout_id=activity_id)
                session.add(metrics_entry)

            # Update metrics fields
            for field in ['avg_speed_kmh', 'avg_hr', 'avg_power', 'estimated_ftp',
                          'duration_minutes', 'distance_km', 'elevation_gain_m',
                          'training_stress_score', 'intensity_factor', 'workout_classification']:
                if hasattr(metrics, field):
                    setattr(metrics_entry, field, getattr(metrics, field))

            session.commit()
            logger.info(f"Persisted workout {activity_id} with calculated metrics")
            return metrics
        except Exception as e:
            session.rollback()
            logger.error(f"Error caching workout {activity_id}: {e}")
            return metrics
        finally:
            session.close()
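    # Usage sketch: persisting one activity and its derived metrics. Field
    # values are placeholders; the payload shape mirrors what
    # _validate_activity_data below expects.
    def _workout_demo(self) -> None:
        activity = {
            "startTimeGmt": "2024-05-01T09:30:00Z",
            "is_indoor": False,
            "summaryDTO": {"duration": 3600, "distance": 30000, "averageHR": 145},
        }
        m = self.cache_workout_with_metrics("12345", activity)
        logger.info("classified=%s TSS=%s", m.workout_classification, m.training_stress_score)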
    def _validate_activity_data(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate and normalize activity data for safe metric calculation (from cache_manager)"""
        # Same as in cache_manager
        if not isinstance(activity_data, dict):
            logger.warning("Invalid activity data - creating minimal structure")
            return {"data_quality": "invalid", "summaryDTO": {}}

        summary_dto = activity_data.get('summaryDTO', {})
        if not isinstance(summary_dto, dict):
            summary_dto = {}

        data_quality = "complete"
        warnings = []

        critical_fields = ['duration', 'distance']
        for field in critical_fields:
            if summary_dto.get(field) is None:
                data_quality = "incomplete"
                warnings.append(f"Missing {field}")
                if field == 'duration':
                    summary_dto['duration'] = 0
                elif field == 'distance':
                    summary_dto['distance'] = 0

        is_indoor = activity_data.get('is_indoor', False)
        if is_indoor:
            if summary_dto.get('averageSpeed') is None and summary_dto.get('averagePower') is not None:
                # Speed stays None here; the metrics calculator estimates it from power
                summary_dto['averageSpeed'] = None
                warnings.append("Indoor activity - speed estimated from power")

            if 'elevationGain' in summary_dto:
                summary_dto['elevationGain'] = 0
                summary_dto['elevationLoss'] = 0
                warnings.append("Indoor activity - elevation set to 0")

        expected_fields = [
            'averageSpeed', 'maxSpeed', 'averageHR', 'maxHR', 'averagePower',
            'maxPower', 'normalizedPower', 'trainingStressScore', 'elevationGain',
            'elevationLoss', 'distance', 'duration'
        ]
        for field in expected_fields:
            if field not in summary_dto:
                summary_dto[field] = None
                if data_quality == "complete":
                    data_quality = "incomplete"
                warnings.append(f"Missing {field}")

        activity_data['summaryDTO'] = summary_dto
        activity_data['data_quality'] = data_quality
        activity_data['validation_warnings'] = warnings

        if warnings:
            logger.debug(f"Activity validation warnings: {', '.join(warnings)}")

        return activity_data
    def get_workout_metrics(self, activity_id: str) -> Optional[WorkoutMetrics]:
        """Get calculated metrics for a workout from DB"""
        session = self.Session()
        try:
            metrics_entry = session.query(Metrics).filter_by(workout_id=activity_id).first()
            if metrics_entry:
                return WorkoutMetrics(
                    avg_speed_kmh=metrics_entry.avg_speed_kmh,
                    avg_hr=metrics_entry.avg_hr,
                    avg_power=metrics_entry.avg_power,
                    estimated_ftp=metrics_entry.estimated_ftp,
                    duration_minutes=metrics_entry.duration_minutes,
                    distance_km=metrics_entry.distance_km,
                    elevation_gain_m=metrics_entry.elevation_gain_m,
                    training_stress_score=metrics_entry.training_stress_score,
                    intensity_factor=metrics_entry.intensity_factor,
                    workout_classification=metrics_entry.workout_classification
                )
            return None
        finally:
            session.close()
    def get_training_load(self, days: int = 42) -> Optional[TrainingLoad]:
        """Calculate current training load metrics from DB"""
        if not hasattr(self, 'metrics_calculator') or not self.metrics_calculator:
            self.metrics_calculator = CyclingMetricsCalculator()

        session = self.Session()
        try:
            cutoff_date = datetime.now() - timedelta(days=days)
            recent_workouts = session.query(Workout).filter(Workout.date >= cutoff_date).all()

            if not recent_workouts:
                return None

            # Reconstruct activity data
            recent_activity_data = []
            for workout in recent_workouts:
                raw_data = json.loads(workout.raw_data) if workout.raw_data else {}
                recent_activity_data.append(raw_data)

            training_load = self.metrics_calculator.calculate_training_load(recent_activity_data)

            # Cache in DB if needed, but for now, return
            return training_load
        except Exception as e:
            logger.error(f"Error getting training load: {e}")
            return None
        finally:
            session.close()

    # Add other methods as needed, adapting from cache_manager
    # Note: Performance history will be handled via DB queries in future steps
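# Usage sketch: configuring the calculator and reading the current load. The
# FTP/max-HR values are placeholders; TrainingLoad carries fitness (CTL),
# fatigue (ATL) and form (TSB) as defined with the metrics calculator.
def training_load_demo(cache: PersistentCacheManager) -> None:
    cache.set_user_profile_metrics(ftp=250, max_hr=185)
    load = cache.get_training_load(days=42)
    if load:
        logger.info("CTL=%.1f ATL=%.1f TSB=%.1f", load.fitness, load.fatigue, load.form)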
data/models.py (new file, 64 lines)
@@ -0,0 +1,64 @@
"""
SQLAlchemy models for persistent data storage
"""

from sqlalchemy import Column, Integer, Float, String, DateTime, Text, Boolean, ForeignKey
# declarative_base moved to sqlalchemy.orm in 2.0 (requirements pin >=2.0.0)
from sqlalchemy.orm import declarative_base, relationship
from datetime import datetime

Base = declarative_base()
class UserProfile(Base):
    """User profile data"""
    __tablename__ = 'user_profiles'

    id = Column(Integer, primary_key=True)
    ftp = Column(Float, nullable=True)
    max_hr = Column(Integer, nullable=True)
    # Add other profile fields as needed

    # Relationship to workouts if needed
    workouts = relationship("Workout", back_populates="user_profile")

class Workout(Base):
    """Workout data from Garmin"""
    __tablename__ = 'workouts'

    id = Column(Integer, primary_key=True)
    activity_id = Column(String, unique=True, index=True)
    date = Column(DateTime, default=datetime.utcnow)
    data_quality = Column(String, default='complete')
    is_indoor = Column(Boolean, default=False)
    # Raw activity data as JSON for flexibility
    raw_data = Column(Text)
    validation_warnings = Column(Text, nullable=True)

    # One-to-one with Metrics (workout_id is unique on the metrics table)
    metrics = relationship("Metrics", back_populates="workout", uselist=False)

    # Relationship to user profile (optional, for future)
    user_profile_id = Column(Integer, ForeignKey('user_profiles.id'), nullable=True)
    user_profile = relationship("UserProfile", back_populates="workouts")
class Metrics(Base):
    """Calculated metrics for workouts"""
    __tablename__ = 'metrics'

    id = Column(Integer, primary_key=True)
    workout_id = Column(String, ForeignKey('workouts.activity_id'), unique=True)

    # Key metrics from Garmin and calculations
    avg_speed_kmh = Column(Float, nullable=True)
    avg_hr = Column(Float, nullable=True)
    avg_power = Column(Float, nullable=True)
    estimated_ftp = Column(Float, nullable=True)
    duration_minutes = Column(Float, nullable=True)
    distance_km = Column(Float, nullable=True)
    elevation_gain_m = Column(Float, nullable=True)
    training_stress_score = Column(Float, nullable=True)
    intensity_factor = Column(Float, nullable=True)
    workout_classification = Column(String, nullable=True)

    # Relationship back to workout
    workout = relationship("Workout", back_populates="metrics")
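# Usage sketch: creating the schema with a local SQLite engine. The URL is a
# placeholder; the project's Database wrapper presumably does the equivalent.
from sqlalchemy import create_engine

def create_schema(url: str = "sqlite:///cycling.db"):
    engine = create_engine(url)
    Base.metadata.create_all(engine)
    return engine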
@@ -18,4 +18,6 @@ garth>=0.4.0

# Development and testing
pytest>=7.0.0
pytest-asyncio>=0.21.0
sqlalchemy>=2.0.0
alembic>=1.13.0