commit 997028f0e1 (parent 415126770b)
2025-09-25 18:42:01 -07:00
17 changed files with 2389 additions and 37 deletions

View File

@@ -8,6 +8,8 @@ import logging
 from pathlib import Path
 import argparse
 import sys
+from rich.console import Console
+from rich.markdown import Markdown

 from config import Config, load_config, create_sample_config
 from core_app import CyclingAnalyzerApp
@@ -24,7 +26,7 @@ class CLI:
         # Setup configuration
         try:
             config = self._setup_config()
-            self.app = CyclingAnalyzerApp(config)
+            self.app = CyclingAnalyzerApp(config, test_mode=args.test)

             # Setup logging
             logging.basicConfig(level=getattr(logging, config.log_level.upper()))
@@ -134,7 +136,8 @@ class CLI:
             print("\n" + "="*50)
             print("WORKOUT ANALYSIS")
             print("="*50)
-            print(result)
+            console = Console()
+            console.print(Markdown(result))

     async def _suggest_next_workout(self):
         """Suggest next workout"""
@@ -148,7 +151,8 @@ class CLI:
             print("\n" + "="*50)
             print("NEXT WORKOUT SUGGESTION")
             print("="*50)
-            print(result)
+            console = Console()
+            console.print(Markdown(result))

     async def _enhanced_analysis(self):
         """Enhanced analysis menu"""
@@ -183,7 +187,8 @@ class CLI:
             print(f"\n{'='*50}")
             print(f"ENHANCED {analysis_type.upper()} ANALYSIS")
             print("="*50)
-            print(result)
+            console = Console()
+            console.print(Markdown(result))

     async def _list_tools(self):
         """List available tools"""
@@ -259,6 +264,7 @@ async def main():
     """CLI entry point"""
     parser = argparse.ArgumentParser(description="Cycling Workout Analyzer")
     parser.add_argument('command', nargs='?', help="Command to execute (e.g., analyze_last)")
+    parser.add_argument('--test', action='store_true', help="Test mode: print rendered prompt without calling LLM")
     args = parser.parse_args()

     cli = CLI()

View File

@@ -19,12 +19,18 @@ logger = logging.getLogger(__name__)
 class CyclingAnalyzerApp:
     """Main application class - orchestrates all components"""

-    def __init__(self, config: Config):
+    def __init__(self, config: Config, test_mode: bool = False):
         self.config = config
+        self.test_mode = test_mode
         self.llm_client = LLMClient(config)
         self.mcp_client = MCPClient(config)
         self.cache_manager = CacheManager()
         self.template_engine = TemplateEngine(config.templates_dir)
+
+        logger.info("DEBUG: Cache contents after init:")
+        for key in ["user_profile", "last_cycling_details"]:
+            data = self.cache_manager.get(key, {})
+            logger.info(f"  {key}: keys={list(data.keys()) if data else 'EMPTY'}, length={len(data) if data else 0}")

     async def initialize(self):
         """Initialize all components"""
@@ -78,32 +84,104 @@ class CyclingAnalyzerApp:
         """Analyze workout using LLM with cached data"""
         template_name = f"workflows/{analysis_type}.txt"

-        # Prepare context data
-        context = {
-            "user_info": self.cache_manager.get("user_profile", {}),
-            "activity_summary": self.cache_manager.get("last_cycling_details", {}),
-            **kwargs
-        }
+        # Prepare enhanced context with data quality assessment
+        context = self._prepare_analysis_context(**kwargs)

         # Load and render template
         logger.info(f"Rendering template {template_name} with context keys: {list(context.keys())}")
         prompt = self.template_engine.render(template_name, **context)

+        if self.test_mode:
+            logger.info("Test mode: Printing rendered prompt instead of calling LLM")
+            print("\n" + "="*60)
+            print("RENDERED PROMPT FOR LLM:")
+            print("="*60)
+            print(prompt)
+            print("="*60 + "\n")
+            return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"
+
         # Call LLM
         return await self.llm_client.generate(prompt)
+
+    def _prepare_analysis_context(self, **kwargs) -> Dict[str, Any]:
+        """Prepare analysis context with data quality assessment"""
+        user_info = self.cache_manager.get("user_profile", {})
+        activity_summary = self.cache_manager.get("last_cycling_details", {})
+
+        logger.info(f"DEBUG: user_info keys: {list(user_info.keys()) if user_info else 'EMPTY'}, length: {len(user_info) if user_info else 0}")
+        logger.info(f"DEBUG: activity_summary keys: {list(activity_summary.keys()) if activity_summary else 'EMPTY'}, length: {len(activity_summary) if activity_summary else 0}")
+
+        # Assess data quality
+        data_quality = self._assess_data_quality(activity_summary)
+        logger.info(f"DEBUG: data_quality: {data_quality}")
+
+        context = {
+            "user_info": user_info,
+            "activity_summary": activity_summary,
+            "data_quality": data_quality,
+            "missing_metrics": data_quality.get("missing", []),
+            **kwargs
+        }
+
+        logger.debug(f"Prepared context with data quality: {data_quality.get('overall', 'N/A')}")
+        return context
+
+    def _assess_data_quality(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Assess quality and completeness of activity data"""
+        summary_dto = activity_data.get('summaryDTO', {})
+        is_indoor = activity_data.get('is_indoor', False)
+
+        missing = []
+        overall = "complete"
+
+        # Key metrics for outdoor cycling
+        outdoor_metrics = ['averageSpeed', 'maxSpeed', 'elevationGain', 'elevationLoss']
+        # Key metrics for indoor cycling
+        indoor_metrics = ['averagePower', 'maxPower', 'averageHR', 'maxHR']
+
+        if is_indoor:
+            expected = indoor_metrics
+            note = "Indoor activity - focus on power and heart rate metrics"
+        else:
+            expected = outdoor_metrics
+            note = "Outdoor activity - full metrics expected"
+
+        for metric in expected:
+            if summary_dto.get(metric) is None:
+                missing.append(metric)
+
+        if missing:
+            overall = "incomplete"
+            note += f" | Missing: {', '.join(missing)}"
+
+        return {
+            "overall": overall,
+            "is_indoor": is_indoor,
+            "missing": missing,
+            "note": note,
+            "available_metrics": [k for k, v in summary_dto.items() if v is not None]
+        }

     async def suggest_next_workout(self, **kwargs) -> str:
         """Generate workout suggestion using MCP tools and LLM"""
         # Use MCP-enabled agent for dynamic tool usage
         template_name = "workflows/suggest_next_workout.txt"

-        context = {
-            "training_rules": kwargs.get("training_rules", ""),
-            **kwargs
-        }
+        # Prepare enhanced context
+        context = self._prepare_analysis_context(**kwargs)
+        context["training_rules"] = kwargs.get("training_rules", "")

         prompt = self.template_engine.render(template_name, **context)

+        if self.test_mode:
+            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
+            print("\n" + "="*60)
+            print("RENDERED PROMPT FOR LLM WITH TOOLS:")
+            print("="*60)
+            print(prompt)
+            print("="*60 + "\n")
+            return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"
+
         # Use MCP-enabled LLM client for this
         return await self.llm_client.generate_with_tools(prompt, self.mcp_client)
@@ -111,13 +189,24 @@ class CyclingAnalyzerApp:
         """Perform enhanced analysis with full MCP tool access"""
         template_name = "workflows/enhanced_analysis.txt"

-        context = {
+        # Prepare enhanced context
+        context = self._prepare_analysis_context(**kwargs)
+        context.update({
             "analysis_type": analysis_type,
             "cached_data": self.cache_manager.get_all(),
-            **kwargs
-        }
+        })

         prompt = self.template_engine.render(template_name, **context)

+        if self.test_mode:
+            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
+            print("\n" + "="*60)
+            print("RENDERED PROMPT FOR ENHANCED ANALYSIS:")
+            print("="*60)
+            print(prompt)
+            print("="*60 + "\n")
+            return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"
+
         return await self.llm_client.generate_with_tools(prompt, self.mcp_client)

     # Utility methods

View File

@@ -319,7 +319,11 @@ class CustomGarthMCP:
         elif tool_name == "get_activity_details":
             activity_id = parameters["activity_id"]
-            return garth.connectapi(f"/activity-service/activity/{activity_id}")
+            raw_data = garth.connectapi(f"/activity-service/activity/{activity_id}")
+
+            # Normalize activity data for consistent field access
+            normalized_data = self._normalize_activity_data(raw_data)
+            return normalized_data

         # Daily metrics - many don't work, so provide fallbacks
         elif tool_name == "daily_steps":
@@ -444,6 +448,49 @@ class CustomGarthMCP:
         else:
             raise ValueError(f"Unknown tool: {tool_name}")

+    def _normalize_activity_data(self, raw_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Normalize activity data to ensure consistent field structure"""
+        if not isinstance(raw_data, dict):
+            logger.warning("Invalid activity data received")
+            return {}
+
+        # Ensure summaryDTO exists
+        summary_dto = raw_data.get('summaryDTO', {})
+        if not isinstance(summary_dto, dict):
+            summary_dto = {}
+
+        # Define expected fields with defaults
+        expected_fields = {
+            'averageSpeed': None,
+            'maxSpeed': None,
+            'averageHR': None,
+            'maxHR': None,
+            'averagePower': None,
+            'maxPower': None,
+            'normalizedPower': None,
+            'trainingStressScore': None,
+            'elevationGain': None,
+            'elevationLoss': None,
+            'distance': None,
+            'duration': None,
+        }
+
+        # Fill missing fields
+        for field, default in expected_fields.items():
+            if field not in summary_dto:
+                summary_dto[field] = default
+                logger.debug(f"Set default for missing field: {field}")
+
+        # Update raw_data with normalized summaryDTO
+        raw_data['summaryDTO'] = summary_dto
+
+        # Add activity type indicator for indoor detection
+        activity_type = raw_data.get('activityType', {}).get('typeKey', '').lower()
+        raw_data['is_indoor'] = 'indoor' in activity_type or 'trainer' in activity_type
+
+        logger.debug(f"Normalized activity data for ID {raw_data.get('activityId', 'unknown')}")
+        return raw_data

     def print_tools(self):
         """Pretty print available tools"""
         print(f"\n{'='*60}")

cycling_metrics.py (new file, 491 lines)

@@ -0,0 +1,491 @@
#!/usr/bin/env python3
"""
Cycling Metrics Calculator - Deterministic metrics for cycling workouts
"""
import math
import logging
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
@dataclass
class WorkoutMetrics:
"""Standardized workout metrics"""
# Basic metrics
duration_minutes: float
distance_km: float
avg_speed_kmh: float
max_speed_kmh: float
elevation_gain_m: float
# Heart rate metrics (if available)
avg_hr: Optional[float] = None
max_hr: Optional[float] = None
hr_zones: Optional[Dict[str, float]] = None # Time in each zone
# Power metrics (if available)
avg_power: Optional[float] = None
max_power: Optional[float] = None
normalized_power: Optional[float] = None
power_zones: Optional[Dict[str, float]] = None
# Calculated metrics
intensity_factor: Optional[float] = None
training_stress_score: Optional[float] = None
estimated_ftp: Optional[float] = None
variability_index: Optional[float] = None
# Single speed specific
estimated_gear_ratio: Optional[float] = None
estimated_chainring: Optional[int] = None
estimated_cog: Optional[int] = None
gear_usage_distribution: Optional[Dict[str, float]] = None
@dataclass
class TrainingLoad:
"""Training load metrics over time"""
acute_training_load: float # 7-day average
chronic_training_load: float # 42-day average
training_stress_balance: float # CTL - ATL
fitness: float # Chronic Training Load
fatigue: float # Acute Training Load
form: float # Training Stress Balance
class CyclingMetricsCalculator:
"""Calculate deterministic cycling metrics"""
def __init__(self, user_ftp: Optional[float] = None, user_max_hr: Optional[int] = None):
self.user_ftp = user_ftp
self.user_max_hr = user_max_hr
# Single speed gear options
self.chainrings = [46, 38] # teeth
self.cogs = [14, 15, 16, 17, 18, 19, 20] # teeth
self.wheel_circumference_m = 2.096 # 700x25c wheel circumference in meters
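# Illustrative note (added): with these constants, a 46x17 combination gives a
# gear ratio of 46/17 ≈ 2.71 and a development of 2.71 * 2.096 ≈ 5.67 m of
# travel per pedal revolution.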
def calculate_workout_metrics(self, activity_data: Dict[str, Any]) -> WorkoutMetrics:
"""Calculate comprehensive metrics for a workout"""
# Extract basic data
duration_seconds = activity_data.get('duration', 0)
duration_minutes = duration_seconds / 60.0
distance_m = activity_data.get('distance', 0)
distance_km = distance_m / 1000.0
avg_speed_ms = activity_data.get('averageSpeed', 0)
avg_speed_kmh = avg_speed_ms * 3.6
max_speed_ms = activity_data.get('maxSpeed', 0)
max_speed_kmh = max_speed_ms * 3.6
elevation_gain = activity_data.get('elevationGain', 0)
# Heart rate data
avg_hr = activity_data.get('averageHR')
max_hr = activity_data.get('maxHR')
# Power data (the normalizer upstream writes 'averagePower'; keep the
# 'avgPower' spelling as a fallback)
avg_power = activity_data.get('averagePower') or activity_data.get('avgPower')
max_power = activity_data.get('maxPower')
# Calculate derived metrics
metrics = WorkoutMetrics(
duration_minutes=duration_minutes,
distance_km=distance_km,
avg_speed_kmh=avg_speed_kmh,
max_speed_kmh=max_speed_kmh,
elevation_gain_m=elevation_gain,
avg_hr=avg_hr,
max_hr=max_hr,
avg_power=avg_power,
max_power=max_power
)
# Calculate advanced metrics if power data available
if avg_power and self.user_ftp:
metrics.intensity_factor = avg_power / self.user_ftp
metrics.training_stress_score = self._calculate_tss(duration_minutes, avg_power, self.user_ftp)
if max_power and avg_power:
metrics.variability_index = max_power / avg_power
# Estimate FTP if no power meter but have HR data
if not self.user_ftp and avg_hr and max_hr:
metrics.estimated_ftp = self._estimate_ftp_from_hr(avg_hr, max_hr, duration_minutes, distance_km, elevation_gain)
# Calculate gear ratios for single speed
if avg_speed_kmh > 0:
gear_analysis = self._analyze_single_speed_gears(avg_speed_kmh, duration_minutes, elevation_gain)
metrics.estimated_gear_ratio = gear_analysis['estimated_ratio']
metrics.estimated_chainring = gear_analysis['estimated_chainring']
metrics.estimated_cog = gear_analysis['estimated_cog']
metrics.gear_usage_distribution = gear_analysis['gear_distribution']
return metrics
def _calculate_tss(self, duration_minutes: float, avg_power: float, ftp: float) -> float:
"""Calculate Training Stress Score"""
if_factor = avg_power / ftp
tss = (duration_minutes * avg_power * if_factor) / (ftp * 60) * 100
return round(tss, 1)
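# Worked example (added for illustration): 60 minutes at an average of 200 W
# with FTP = 250 W gives IF = 0.8 and
# TSS = (60 * 200 * 0.8) / (250 * 60) * 100 = 64.0,
# i.e. the standard TSS formula with average power standing in for normalized power.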
def _estimate_ftp_from_hr(self, avg_hr: float, max_hr: float, duration_minutes: float,
distance_km: float, elevation_gain: float) -> float:
"""Estimate FTP from heart rate and performance data"""
# Basic estimation using heart rate zones and performance
# This is a simplified model - real FTP estimation requires more sophisticated analysis
# Calculate relative intensity from HR
if self.user_max_hr:
hr_intensity = avg_hr / self.user_max_hr
else:
# Estimate max HR using age formula (less accurate)
estimated_max_hr = 220 - 30 # Assuming 30 years old, should be configurable
hr_intensity = avg_hr / estimated_max_hr
# Calculate speed-based power estimate
# This is very rough and assumes flat terrain
avg_speed_ms = (distance_km * 1000) / (duration_minutes * 60)
# Rough power estimation based on speed (watts = speed^3 * factor)
# Adjusted for elevation gain
elevation_factor = 1 + (elevation_gain / distance_km / 1000) * 0.1
estimated_power = (avg_speed_ms ** 2.5) * 3.5 * elevation_factor
# Estimate FTP as power at ~75% max HR
ftp_ratio = 0.75 / hr_intensity if hr_intensity > 0.75 else 1.0
estimated_ftp = estimated_power * ftp_ratio
return round(estimated_ftp, 0)
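# Magnitude check (added): at 30 km/h (8.33 m/s) the speed term alone is
# 8.33**2.5 * 3.5 ≈ 700 W, so absolute values from this heuristic run high;
# treat the result as a relative indicator rather than a calibrated FTP.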
def _analyze_single_speed_gears(self, avg_speed_kmh: float, duration_minutes: float,
elevation_gain: float) -> Dict[str, Any]:
"""Analyze single speed gear usage"""
# Calculate average cadence assumption (80-90 RPM is typical)
assumed_cadence = 85 # RPM
# Calculate required gear ratio for average speed
speed_ms = avg_speed_kmh / 3.6
distance_per_pedal_revolution = speed_ms * 60 / assumed_cadence # meters per revolution
required_gear_ratio = distance_per_pedal_revolution / self.wheel_circumference_m
# Find best matching gear combinations
gear_options = []
for chainring in self.chainrings:
for cog in self.cogs:
ratio = chainring / cog
ratio_error = abs(ratio - required_gear_ratio) / required_gear_ratio
gear_options.append({
'chainring': chainring,
'cog': cog,
'ratio': ratio,
'error': ratio_error
})
# Sort by best match
gear_options.sort(key=lambda x: x['error'])
best_gear = gear_options[0]
# Estimate gear usage distribution based on terrain
gear_distribution = self._estimate_gear_distribution(elevation_gain, duration_minutes, gear_options)
return {
'estimated_ratio': best_gear['ratio'],
'estimated_chainring': best_gear['chainring'],
'estimated_cog': best_gear['cog'],
'gear_distribution': gear_distribution,
'all_options': gear_options[:3] # Top 3 matches
}
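# Worked example (added for illustration): at 25 km/h (6.94 m/s) and the assumed
# 85 RPM, distance per revolution = 6.94 * 60 / 85 ≈ 4.90 m, so the required
# ratio is 4.90 / 2.096 ≈ 2.34. The closest options are 38x16 (2.375, ~1.5%
# error) and 46x20 (2.300, ~1.7% error), so 38x16 is reported as the best match.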
def _estimate_gear_distribution(self, elevation_gain: float, duration_minutes: float,
gear_options: List[Dict]) -> Dict[str, float]:
"""Estimate how much time was spent in each gear"""
# Simplified model based on elevation profile
climbing_factor = elevation_gain / (duration_minutes * 10) # rough climbing intensity
distribution = {}
for gear in gear_options[:4]: # Top 4 gears
gear_name = f"{gear['chainring']}x{gear['cog']}"
if climbing_factor > 2.0: # Lots of climbing
# Favor easier gears (lower ratios)
weight = 1.0 / gear['ratio']
elif climbing_factor < 0.5: # Mostly flat
# Favor harder gears (higher ratios)
weight = gear['ratio']
else:
# Mixed terrain
weight = 1.0
distribution[gear_name] = weight
# Normalize to percentages
total_weight = sum(distribution.values())
if total_weight > 0:
distribution = {k: round(v / total_weight * 100, 1) for k, v in distribution.items()}
return distribution
def calculate_training_load(self, workout_history: List[Dict[str, Any]],
current_date: datetime = None) -> TrainingLoad:
"""Calculate training load metrics"""
if not current_date:
current_date = datetime.now()
# Calculate TSS for each workout
tss_by_date = {}
for workout in workout_history:
workout_date = datetime.fromisoformat(workout.get('startTimeGmt', '').replace('Z', '+00:00'))
metrics = self.calculate_workout_metrics(workout)
tss = metrics.training_stress_score or self._estimate_tss_without_power(metrics)
tss_by_date[workout_date.date()] = tss
# Calculate Acute Training Load (7-day average)
atl_days = 7
atl_total = 0
atl_count = 0
for i in range(atl_days):
date = (current_date - timedelta(days=i)).date()
if date in tss_by_date:
atl_total += tss_by_date[date]
atl_count += 1
atl = atl_total / atl_days if atl_count > 0 else 0
# Calculate Chronic Training Load (42-day average)
ctl_days = 42
ctl_total = 0
ctl_count = 0
for i in range(ctl_days):
date = (current_date - timedelta(days=i)).date()
if date in tss_by_date:
ctl_total += tss_by_date[date]
ctl_count += 1
ctl = ctl_total / ctl_days if ctl_count > 0 else 0
# Training Stress Balance
tsb = ctl - atl
return TrainingLoad(
acute_training_load=round(atl, 1),
chronic_training_load=round(ctl, 1),
training_stress_balance=round(tsb, 1),
fitness=round(ctl, 1),
fatigue=round(atl, 1),
form=round(tsb, 1)
)
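# Note (added): ATL and CTL are computed here as plain 7- and 42-day averages
# of daily TSS, with missing days counting as zero - not the exponentially
# weighted averages some platforms use - so values will differ from those
# tools for the same history.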
def _estimate_tss_without_power(self, metrics: WorkoutMetrics) -> float:
"""Estimate TSS without power data using HR and duration"""
if metrics.avg_hr and self.user_max_hr:
# Use TRIMP method as TSS proxy
hr_ratio = metrics.avg_hr / self.user_max_hr
duration_hours = metrics.duration_minutes / 60
# Simplified TSS estimation
estimated_tss = duration_hours * 60 * (hr_ratio ** 1.92)
return round(estimated_tss, 1)
else:
# Very rough estimation based on duration and intensity
duration_hours = metrics.duration_minutes / 60
speed_factor = min(metrics.avg_speed_kmh / 25, 2.0) # Cap at 2x for high speeds
elevation_factor = 1 + (metrics.elevation_gain_m / (metrics.distance_km * 1000) * 0.1)
estimated_tss = duration_hours * 40 * speed_factor * elevation_factor
return round(estimated_tss, 1)
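# Worked example (added for illustration): avg HR 150 with max HR 185 gives
# hr_ratio ≈ 0.811, so a 1-hour ride estimates TSS = 1 * 60 * 0.811**1.92 ≈ 40
# via the TRIMP-style branch above.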
def get_performance_trends(self, workout_history: List[Dict[str, Any]],
days: int = 30) -> Dict[str, Any]:
"""Calculate performance trends over time"""
cutoff_date = datetime.now() - timedelta(days=days)
recent_workouts = []
for workout in workout_history:
workout_date = datetime.fromisoformat(workout.get('startTimeGmt', '').replace('Z', '+00:00'))
if workout_date >= cutoff_date:
recent_workouts.append(workout)
if not recent_workouts:
return {"error": "No recent workouts found"}
# Calculate metrics for each workout
metrics_list = [self.calculate_workout_metrics(w) for w in recent_workouts]
# Calculate trends
avg_speed_trend = [m.avg_speed_kmh for m in metrics_list]
avg_hr_trend = [m.avg_hr for m in metrics_list if m.avg_hr]
avg_power_trend = [m.avg_power for m in metrics_list if m.avg_power]
return {
"period_days": days,
"total_workouts": len(recent_workouts),
"avg_speed": {
"current": round(sum(avg_speed_trend) / len(avg_speed_trend), 1),
"max": round(max(avg_speed_trend), 1),
"min": round(min(avg_speed_trend), 1),
"trend": "improving" if len(avg_speed_trend) > 1 and avg_speed_trend[-1] > avg_speed_trend[0] else "stable"
},
"avg_heart_rate": {
"current": round(sum(avg_hr_trend) / len(avg_hr_trend), 1) if avg_hr_trend else None,
"trend": "improving" if len(avg_hr_trend) > 1 and avg_hr_trend[-1] < avg_hr_trend[0] else "stable"
} if avg_hr_trend else None,
"power_data_available": len(avg_power_trend) > 0,
"estimated_fitness_change": self._calculate_fitness_change(metrics_list)
}
def _calculate_fitness_change(self, metrics_list: List[WorkoutMetrics]) -> str:
"""Calculate estimated fitness change"""
if len(metrics_list) < 3:
return "insufficient_data"
# Look at speed and HR efficiency
recent_metrics = metrics_list[-3:] # Last 3 workouts
older_metrics = metrics_list[:3] if len(metrics_list) >= 6 else metrics_list[:-3]
if not older_metrics:
return "insufficient_data"
recent_speed_avg = sum(m.avg_speed_kmh for m in recent_metrics) / len(recent_metrics)
older_speed_avg = sum(m.avg_speed_kmh for m in older_metrics) / len(older_metrics)
speed_improvement = (recent_speed_avg - older_speed_avg) / older_speed_avg * 100
if speed_improvement > 5:
return "improving"
elif speed_improvement < -5:
return "declining"
else:
return "stable"
# Deterministic analysis helper
def generate_standardized_assessment(metrics: WorkoutMetrics,
training_load: TrainingLoad = None) -> Dict[str, Any]:
"""Generate standardized, deterministic workout assessment"""
assessment = {
"workout_classification": classify_workout(metrics),
"intensity_rating": rate_intensity(metrics),
"efficiency_score": calculate_efficiency_score(metrics),
"recovery_recommendation": recommend_recovery(metrics, training_load),
"key_metrics_summary": summarize_key_metrics(metrics)
}
return assessment
def classify_workout(metrics: WorkoutMetrics) -> str:
"""Classify workout type based on metrics"""
duration = metrics.duration_minutes
avg_speed = metrics.avg_speed_kmh
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
if duration < 30:
return "short_intensity"
elif duration > 180:
return "long_endurance"
elif elevation_per_km > 10: # >10 m elevation gain per km
return "climbing_focused"
elif avg_speed > 35:
return "high_speed"
elif avg_speed < 20:
return "recovery_easy"
else:
return "moderate_endurance"
def rate_intensity(metrics: WorkoutMetrics) -> int:
"""Rate workout intensity 1-10"""
factors = []
# Speed factor
if metrics.avg_speed_kmh > 40:
factors.append(9)
elif metrics.avg_speed_kmh > 35:
factors.append(7)
elif metrics.avg_speed_kmh > 25:
factors.append(5)
else:
factors.append(3)
# Duration factor
duration_intensity = min(metrics.duration_minutes / 60 * 2, 6)
factors.append(duration_intensity)
# Elevation factor
if metrics.distance_km > 0:
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
if elevation_per_km > 15:
factors.append(8)
elif elevation_per_km > 10:
factors.append(6)
elif elevation_per_km > 5:
factors.append(4)
else:
factors.append(2)
# HR factor (if available)
if metrics.avg_hr and metrics.max_hr:
hr_ratio = metrics.avg_hr / metrics.max_hr
if hr_ratio > 0.85:
factors.append(9)
elif hr_ratio > 0.75:
factors.append(7)
elif hr_ratio > 0.65:
factors.append(5)
else:
factors.append(3)
return min(int(sum(factors) / len(factors)), 10)
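# Worked example (added for illustration): 90 min at 27 km/h with 8 m of gain
# per km and avg/max HR 145/185 (ratio ≈ 0.78) yields factors [5, 3.0, 4, 7],
# an average of 4.75, and an intensity rating of 4.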
def calculate_efficiency_score(metrics: WorkoutMetrics) -> float:
"""Calculate efficiency score (higher = more efficient)"""
# Speed per heart rate beat (if HR available)
if metrics.avg_hr and metrics.avg_hr > 0:
speed_hr_efficiency = metrics.avg_speed_kmh / metrics.avg_hr * 100
return round(speed_hr_efficiency, 2)
else:
# Fallback: speed per elevation gain
if metrics.elevation_gain_m > 0:
speed_elevation_efficiency = metrics.avg_speed_kmh / (metrics.elevation_gain_m / 100)
return round(speed_elevation_efficiency, 2)
else:
return metrics.avg_speed_kmh # Just speed as efficiency
def recommend_recovery(metrics: WorkoutMetrics, training_load: TrainingLoad = None) -> str:
"""Recommend recovery based on workout intensity"""
intensity = rate_intensity(metrics)
if training_load and training_load.training_stress_balance < -10:
return "high_fatigue_rest_recommended"
elif intensity >= 8:
return "24_48_hours_easy"
elif intensity >= 6:
return "24_hours_easy"
elif intensity >= 4:
return "active_recovery_optional"
else:
return "ready_for_next_workout"
def summarize_key_metrics(metrics: WorkoutMetrics) -> Dict[str, str]:
"""Summarize key metrics in human readable format"""
summary = {
"duration": f"{metrics.duration_minutes:.0f} minutes",
"distance": f"{metrics.distance_km:.1f} km",
"avg_speed": f"{metrics.avg_speed_kmh:.1f} km/h",
"elevation_gain": f"{metrics.elevation_gain_m:.0f} m"
}
if metrics.avg_hr:
summary["avg_heart_rate"] = f"{metrics.avg_hr:.0f} bpm"
if metrics.avg_power:
summary["avg_power"] = f"{metrics.avg_power:.0f} W"
if metrics.estimated_ftp:
summary["estimated_ftp"] = f"{metrics.estimated_ftp:.0f} W"
if metrics.estimated_gear_ratio:
summary["estimated_gear"] = f"{metrics.estimated_chainring}x{metrics.estimated_cog} ({metrics.estimated_gear_ratio:.1f} ratio)"
return summary
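# Usage sketch (added; not part of the module - the payload below is a minimal
# flat dict of the fields calculate_workout_metrics() reads):
#
# calc = CyclingMetricsCalculator(user_ftp=250, user_max_hr=185)
# metrics = calc.calculate_workout_metrics({
#     "duration": 3600,       # seconds
#     "distance": 30000,      # meters
#     "averageSpeed": 8.33,   # m/s
#     "maxSpeed": 11.1,
#     "elevationGain": 240,
#     "averageHR": 150,
#     "maxHR": 172,
# })
# print(generate_standardized_assessment(metrics))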

enhanced_cache_manager.py (new file, 595 lines)

@@ -0,0 +1,595 @@
#!/usr/bin/env python3
"""
Enhanced Cache Manager with Metrics Tracking
"""
import json
import logging
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict
from cache_manager import CacheManager
from cycling_metrics import CyclingMetricsCalculator, WorkoutMetrics, TrainingLoad
logger = logging.getLogger(__name__)
@dataclass
class PerformanceTrend:
"""Track performance trends over time"""
metric_name: str
current_value: float
trend_7day: float # % change over 7 days
trend_30day: float # % change over 30 days
trend_direction: str # "improving", "stable", "declining"
confidence: float # 0-1, based on data points available
class MetricsTrackingCache(CacheManager):
"""Enhanced cache that calculates and tracks cycling metrics"""
def __init__(self, default_ttl: int = 300, metrics_file: str = "metrics_history.json"):
super().__init__(default_ttl)
self.metrics_calculator = None
self.metrics_file = Path(metrics_file)
self.performance_history = []
self.load_metrics_history()
def set_user_profile(self, ftp: Optional[float] = None, max_hr: Optional[int] = None):
"""Set user profile for accurate calculations"""
self.metrics_calculator = CyclingMetricsCalculator(user_ftp=ftp, user_max_hr=max_hr)
logger.info(f"Metrics calculator configured: FTP={ftp}, Max HR={max_hr}")
def cache_workout_with_metrics(self, activity_id: str, activity_data: Dict[str, Any]) -> WorkoutMetrics:
"""Cache workout data and calculate comprehensive metrics with validation"""
if not self.metrics_calculator:
# Initialize with defaults if not set
self.metrics_calculator = CyclingMetricsCalculator()
# Validate and normalize input data
validated_data = self._validate_activity_data(activity_data)
# Calculate metrics with safe handling
metrics = self.metrics_calculator.calculate_workout_metrics(validated_data)
# Cache the raw data and calculated metrics
self.set(f"activity_raw_{activity_id}", activity_data, ttl=3600)
self.set(f"activity_metrics_{activity_id}", asdict(metrics), ttl=3600)
# Add to performance history
workout_record = {
"activity_id": activity_id,
"date": validated_data.get('startTimeGmt', datetime.now().isoformat()),
"metrics": asdict(metrics),
"data_quality": validated_data.get('data_quality', 'complete')
}
self.performance_history.append(workout_record)
self.save_metrics_history()
# Update performance trends
self._update_performance_trends()
logger.info(f"Cached workout {activity_id} with calculated metrics (quality: {workout_record['data_quality']})")
return metrics
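# Usage sketch (added; 'activity_details' stands for a normalized Garmin
# activity payload as produced elsewhere in this commit):
#
# cache = MetricsTrackingCache(metrics_file="metrics_history.json")
# cache.set_user_profile(ftp=250, max_hr=185)
# metrics = cache.cache_workout_with_metrics("12345", activity_details)
# cached = cache.get_workout_metrics("12345")  # WorkoutMetrics or None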
def _validate_activity_data(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
"""Validate and normalize activity data for safe metric calculation"""
if not isinstance(activity_data, dict):
logger.warning("Invalid activity data - creating minimal structure")
return {"data_quality": "invalid", "summaryDTO": {}}
summary_dto = activity_data.get('summaryDTO', {})
if not isinstance(summary_dto, dict):
summary_dto = {}
data_quality = "complete"
warnings = []
# Check critical fields
critical_fields = ['duration', 'distance']
for field in critical_fields:
if summary_dto.get(field) is None:
data_quality = "incomplete"
warnings.append(f"Missing {field}")
# Set reasonable defaults
if field == 'duration':
summary_dto['duration'] = 0
elif field == 'distance':
summary_dto['distance'] = 0
# Indoor activity adjustments
is_indoor = activity_data.get('is_indoor', False)
if is_indoor:
# For indoor rides, speed is often None; estimating it from power would need
# rider weight, which we don't have, so keep None and let the calculator handle it
if summary_dto.get('averageSpeed') is None and summary_dto.get('averagePower') is not None:
summary_dto['averageSpeed'] = None # keep None - no reliable estimate available
warnings.append("Indoor activity - no speed data; power metrics available")
# Elevation not applicable for indoor
if 'elevationGain' in summary_dto:
summary_dto['elevationGain'] = 0
summary_dto['elevationLoss'] = 0
warnings.append("Indoor activity - elevation set to 0")
# Ensure all expected fields exist (from custom_garth_mcp normalization)
expected_fields = [
'averageSpeed', 'maxSpeed', 'averageHR', 'maxHR', 'averagePower',
'maxPower', 'normalizedPower', 'trainingStressScore', 'elevationGain',
'elevationLoss', 'distance', 'duration'
]
for field in expected_fields:
if field not in summary_dto:
summary_dto[field] = None
if data_quality == "complete":
data_quality = "incomplete"
warnings.append(f"Missing {field}")
activity_data['summaryDTO'] = summary_dto
activity_data['data_quality'] = data_quality
activity_data['validation_warnings'] = warnings
if warnings:
logger.debug(f"Activity validation warnings: {', '.join(warnings)}")
return activity_data
def get_workout_metrics(self, activity_id: str) -> Optional[WorkoutMetrics]:
"""Get calculated metrics for a workout"""
metrics_data = self.get(f"activity_metrics_{activity_id}")
if metrics_data:
return WorkoutMetrics(**metrics_data)
return None
def get_training_load(self, days: int = 42) -> Optional[TrainingLoad]:
"""Calculate current training load metrics"""
if not self.metrics_calculator:
return None
# Get recent workout history
cutoff_date = datetime.now() - timedelta(days=days)
recent_workouts = []
for record in self.performance_history:
workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
if workout_date >= cutoff_date:
# Reconstruct activity data for training load calculation
activity_data = self.get(f"activity_raw_{record['activity_id']}")
if activity_data:
recent_workouts.append(activity_data)
if not recent_workouts:
return None
training_load = self.metrics_calculator.calculate_training_load(recent_workouts)
# Cache training load
self.set("current_training_load", asdict(training_load), ttl=3600)
return training_load
def get_performance_trends(self, days: int = 30) -> List[PerformanceTrend]:
"""Get performance trends for key metrics"""
trends = self.get(f"performance_trends_{days}d")
if trends:
return [PerformanceTrend(**trend) for trend in trends]
# Calculate if not cached
return self._calculate_performance_trends(days)
def _calculate_performance_trends(self, days: int) -> List[PerformanceTrend]:
"""Calculate performance trends over specified period"""
if not self.performance_history:
return []
cutoff_date = datetime.now() - timedelta(days=days)
recent_metrics = []
for record in self.performance_history:
workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
if workout_date >= cutoff_date:
recent_metrics.append({
'date': workout_date,
'metrics': WorkoutMetrics(**record['metrics'])
})
if len(recent_metrics) < 2:
return []
# Sort by date
recent_metrics.sort(key=lambda x: x['date'])
trends = []
# Calculate trends for key metrics
metrics_to_track = [
('avg_speed_kmh', 'Average Speed'),
('avg_hr', 'Average Heart Rate'),
('avg_power', 'Average Power'),
('estimated_ftp', 'Estimated FTP'),
('training_stress_score', 'Training Stress Score')
]
for metric_attr, metric_name in metrics_to_track:
trend = self._calculate_single_metric_trend(recent_metrics, metric_attr, metric_name, days)
if trend:
trends.append(trend)
# Cache trends
self.set(f"performance_trends_{days}d", [asdict(trend) for trend in trends], ttl=1800)
return trends
def _calculate_single_metric_trend(self, recent_metrics: List[Dict],
metric_attr: str, metric_name: str,
days: int) -> Optional[PerformanceTrend]:
"""Calculate trend for a single metric"""
# Extract values, filtering out None values
values_with_dates = []
for record in recent_metrics:
value = getattr(record['metrics'], metric_attr)
if value is not None:
values_with_dates.append((record['date'], value))
if len(values_with_dates) < 2:
return None
# Calculate current value (average of last 3 workouts)
recent_values = [v for _, v in values_with_dates[-3:]]
current_value = sum(recent_values) / len(recent_values)
# Calculate 7-day trend if we have enough data
week_ago = datetime.now() - timedelta(days=7)
week_values = [v for d, v in values_with_dates if d >= week_ago]
if len(week_values) >= 2:
week_old_avg = sum(week_values[:len(week_values)//2]) / (len(week_values)//2)
week_recent_avg = sum(week_values[len(week_values)//2:]) / (len(week_values) - len(week_values)//2)
trend_7day = ((week_recent_avg - week_old_avg) / week_old_avg * 100) if week_old_avg > 0 else 0
else:
trend_7day = 0
# Calculate 30-day trend
if len(values_with_dates) >= 4:
old_avg = sum(v for _, v in values_with_dates[:len(values_with_dates)//2]) / (len(values_with_dates)//2)
recent_avg = sum(v for _, v in values_with_dates[len(values_with_dates)//2:]) / (len(values_with_dates) - len(values_with_dates)//2)
trend_30day = ((recent_avg - old_avg) / old_avg * 100) if old_avg > 0 else 0
else:
trend_30day = 0
# Determine trend direction
primary_trend = trend_7day if abs(trend_7day) > abs(trend_30day) else trend_30day
if primary_trend > 2:
trend_direction = "improving"
elif primary_trend < -2:
trend_direction = "declining"
else:
trend_direction = "stable"
# Calculate confidence based on data points
confidence = min(len(values_with_dates) / 10, 1.0) # Max confidence at 10+ data points
return PerformanceTrend(
metric_name=metric_name,
current_value=round(current_value, 2),
trend_7day=round(trend_7day, 1),
trend_30day=round(trend_30day, 1),
trend_direction=trend_direction,
confidence=round(confidence, 2)
)
def _update_performance_trends(self):
"""Update cached performance trends after new workout"""
# Clear cached trends to force recalculation
keys_to_clear = [key for key in self._cache.keys() if key.startswith("performance_trends_")]
for key in keys_to_clear:
self.delete(key)
def get_deterministic_analysis_data(self, activity_id: str) -> Dict[str, Any]:
"""Get all deterministic data for analysis with validation"""
metrics = self.get_workout_metrics(activity_id)
training_load = self.get_training_load()
performance_trends = self.get_performance_trends()
if not metrics:
return {"error": "No metrics available for activity"}
# Generate standardized assessment with safe handling
try:
from cycling_metrics import generate_standardized_assessment
assessment = generate_standardized_assessment(metrics, training_load)
except Exception as e:
logger.warning(f"Could not generate standardized assessment: {e}")
assessment = {"error": "Assessment calculation failed", "workout_classification": "unknown"}
return {
"workout_metrics": asdict(metrics),
"training_load": asdict(training_load) if training_load else None,
"performance_trends": [asdict(trend) for trend in performance_trends if trend],
"standardized_assessment": assessment,
"analysis_timestamp": datetime.now().isoformat()
}
def get_ftp_estimates_history(self) -> List[Dict[str, Any]]:
"""Get historical FTP estimates for tracking progress"""
ftp_history = []
for record in self.performance_history:
metrics = WorkoutMetrics(**record['metrics'])
if metrics.estimated_ftp:
ftp_history.append({
"date": record['date'],
"activity_id": record['activity_id'],
"estimated_ftp": metrics.estimated_ftp,
"workout_type": record['metrics'].get('workout_classification', 'unknown')
})
# Sort by date and return recent estimates
ftp_history.sort(key=lambda x: x['date'], reverse=True)
return ftp_history[:20] # Last 20 estimates
def get_gear_usage_analysis(self) -> Dict[str, Any]:
"""Get single speed gear usage analysis"""
gear_data = []
for record in self.performance_history:
metrics = WorkoutMetrics(**record['metrics'])
if metrics.estimated_gear_ratio:
gear_data.append({
"date": record['date'],
"estimated_ratio": metrics.estimated_gear_ratio,
"chainring": metrics.estimated_chainring,
"cog": metrics.estimated_cog,
"avg_speed": metrics.avg_speed_kmh,
"elevation_gain": metrics.elevation_gain_m,
"terrain_type": self._classify_terrain(metrics)
})
if not gear_data:
return {"message": "No gear data available"}
# Analyze gear preferences by terrain
gear_preferences = {}
for data in gear_data:
terrain = data['terrain_type']
gear = f"{data['chainring']}x{data['cog']}"
if terrain not in gear_preferences:
gear_preferences[terrain] = {}
if gear not in gear_preferences[terrain]:
gear_preferences[terrain][gear] = 0
gear_preferences[terrain][gear] += 1
# Calculate most common gears
all_gears = {}
for data in gear_data:
gear = f"{data['chainring']}x{data['cog']}"
all_gears[gear] = all_gears.get(gear, 0) + 1
most_common_gear = max(all_gears.items(), key=lambda x: x[1])
return {
"total_workouts_analyzed": len(gear_data),
"most_common_gear": {
"gear": most_common_gear[0],
"usage_count": most_common_gear[1],
"usage_percentage": round(most_common_gear[1] / len(gear_data) * 100, 1)
},
"gear_by_terrain": gear_preferences,
"gear_recommendations": self._recommend_gears(gear_data)
}
def _classify_terrain(self, metrics: WorkoutMetrics) -> str:
"""Classify terrain type from workout metrics"""
if metrics.distance_km == 0:
return "unknown"
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
if elevation_per_km > 15:
return "steep_climbing"
elif elevation_per_km > 8:
return "moderate_climbing"
elif elevation_per_km > 3:
return "rolling_hills"
else:
return "flat_terrain"
def _recommend_gears(self, gear_data: List[Dict]) -> Dict[str, str]:
"""Recommend optimal gears for different conditions"""
if not gear_data:
return {}
# Group by terrain and find most efficient gears
terrain_efficiency = {}
for data in gear_data:
terrain = data['terrain_type']
gear = f"{data['chainring']}x{data['cog']}"
speed = data['avg_speed']
if terrain not in terrain_efficiency:
terrain_efficiency[terrain] = {}
if gear not in terrain_efficiency[terrain]:
terrain_efficiency[terrain][gear] = []
terrain_efficiency[terrain][gear].append(speed)
# Calculate average speeds for each gear/terrain combo
recommendations = {}
for terrain, gears in terrain_efficiency.items():
best_gear = None
best_avg_speed = 0
for gear, speeds in gears.items():
avg_speed = sum(speeds) / len(speeds)
if avg_speed > best_avg_speed:
best_avg_speed = avg_speed
best_gear = gear
if best_gear:
recommendations[terrain] = best_gear
return recommendations
def load_metrics_history(self):
"""Load performance history from file"""
if self.metrics_file.exists():
try:
with open(self.metrics_file, 'r') as f:
data = json.load(f)
self.performance_history = data.get('performance_history', [])
logger.info(f"Loaded {len(self.performance_history)} workout records")
except Exception as e:
logger.error(f"Error loading metrics history: {e}")
self.performance_history = []
else:
self.performance_history = []
def save_metrics_history(self):
"""Save performance history to file"""
try:
# Keep only last 200 workouts to prevent file from growing too large
self.performance_history = self.performance_history[-200:]
data = {
'performance_history': self.performance_history,
'last_updated': datetime.now().isoformat()
}
with open(self.metrics_file, 'w') as f:
json.dump(data, f, indent=2, default=str)
logger.debug(f"Saved {len(self.performance_history)} workout records")
except Exception as e:
logger.error(f"Error saving metrics history: {e}")
def get_workout_summary_for_llm(self, activity_id: str) -> Dict[str, Any]:
"""Get structured workout summary optimized for LLM analysis"""
deterministic_data = self.get_deterministic_analysis_data(activity_id)
if "error" in deterministic_data:
return deterministic_data
# Format data for LLM consumption
metrics = deterministic_data["workout_metrics"]
assessment = deterministic_data["standardized_assessment"]
training_load = deterministic_data.get("training_load")
summary = {
"workout_classification": assessment["workout_classification"],
"intensity_rating": f"{assessment['intensity_rating']}/10",
"key_metrics": {
"duration": f"{metrics['duration_minutes']:.0f} minutes",
"distance": f"{metrics['distance_km']:.1f} km",
"avg_speed": f"{metrics['avg_speed_kmh']:.1f} km/h",
"elevation_gain": f"{metrics['elevation_gain_m']:.0f} m"
},
"performance_indicators": {
"efficiency_score": assessment["efficiency_score"],
"estimated_ftp": metrics.get("estimated_ftp"),
"intensity_factor": metrics.get("intensity_factor")
},
"recovery_guidance": assessment["recovery_recommendation"],
"training_load_context": {
"fitness_level": training_load["fitness"] if training_load else None,
"fatigue_level": training_load["fatigue"] if training_load else None,
"form": training_load["form"] if training_load else None
} if training_load else None,
"single_speed_analysis": {
"estimated_gear": f"{metrics.get('estimated_chainring', 'N/A')}x{metrics.get('estimated_cog', 'N/A')}",
"gear_ratio": metrics.get("estimated_gear_ratio")
} if metrics.get("estimated_gear_ratio") else None
}
return summary
# Integration with existing core app
def enhance_core_app_with_metrics():
"""Example of how to integrate metrics tracking with the core app"""
integration_code = '''
# In core_app.py, replace the cache manager initialization:
from enhanced_cache_manager import MetricsTrackingCache
class CyclingAnalyzerApp:
def __init__(self, config: Config):
self.config = config
self.llm_client = LLMClient(config)
self.mcp_client = MCPClient(config)
# Use enhanced cache with metrics tracking
self.cache_manager = MetricsTrackingCache(
default_ttl=config.cache_ttl,
metrics_file="workout_metrics.json"
)
self.template_engine = TemplateEngine(config.templates_dir)
async def _preload_cache(self):
"""Enhanced preloading with metrics calculation"""
logger.info("Pre-loading cache with metrics calculation...")
# Set user profile for accurate calculations
profile = await self.mcp_client.call_tool("user_profile", {})
if profile:
# Extract FTP and max HR from profile if available
ftp = profile.get("ftp") or None
max_hr = profile.get("maxHR") or None
self.cache_manager.set_user_profile(ftp=ftp, max_hr=max_hr)
# Cache recent activities with metrics
activities = await self.mcp_client.call_tool("get_activities", {"limit": 10})
if activities:
self.cache_manager.set("recent_activities", activities)
# Find and analyze last cycling activity
cycling_activity = self._find_last_cycling_activity(activities)
if cycling_activity:
activity_details = await self.mcp_client.call_tool(
"get_activity_details",
{"activity_id": cycling_activity["activityId"]}
)
# Cache with metrics calculation
metrics = self.cache_manager.cache_workout_with_metrics(
cycling_activity["activityId"],
activity_details
)
logger.info(f"Calculated metrics for last workout: {metrics.workout_classification}")
async def analyze_workout_with_metrics(self, activity_id: str = None, **kwargs) -> str:
"""Enhanced analysis using calculated metrics"""
if not activity_id:
# Get last cached cycling activity
activities = self.cache_manager.get("recent_activities", [])
cycling_activity = self._find_last_cycling_activity(activities)
activity_id = cycling_activity["activityId"] if cycling_activity else None
if not activity_id:
return "No cycling activity found for analysis"
# Get deterministic analysis data
analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)
if "error" in analysis_data:
return f"Error: {analysis_data['error']}"
# Use template with deterministic data
template_name = "workflows/analyze_workout_with_metrics.txt"
context = {
"workout_summary": analysis_data,
"performance_trends": self.cache_manager.get_performance_trends(30),
"training_rules": kwargs.get("training_rules", ""),
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
'''
return integration_code

enhanced_core_app.py (new file, 579 lines)

@@ -0,0 +1,579 @@
#!/usr/bin/env python3
"""
Enhanced Core Application with Deterministic Metrics
"""
import asyncio
import logging
from pathlib import Path
from typing import Dict, Any, Optional
from config import Config, load_config
from llm_client import LLMClient
from mcp_client import MCPClient
from enhanced_cache_manager import MetricsTrackingCache
from template_engine import TemplateEngine
from cycling_metrics import CyclingMetricsCalculator, generate_standardized_assessment
logger = logging.getLogger(__name__)
class EnhancedCyclingAnalyzerApp:
"""Enhanced application with deterministic metrics and analysis"""
def __init__(self, config: Config):
self.config = config
self.llm_client = LLMClient(config)
self.mcp_client = MCPClient(config)
# Use enhanced cache with metrics tracking
self.cache_manager = MetricsTrackingCache(
default_ttl=config.cache_ttl,
metrics_file="workout_metrics.json"
)
self.template_engine = TemplateEngine(config.templates_dir)
# User settings for accurate calculations
self.user_ftp = None
self.user_max_hr = None
async def initialize(self):
"""Initialize all components with metrics support"""
logger.info("Initializing enhanced application components...")
await self.llm_client.initialize()
await self.mcp_client.initialize()
await self._setup_user_metrics()
await self._preload_cache_with_metrics()
logger.info("Enhanced application initialization complete")
async def cleanup(self):
"""Cleanup all components"""
# Save metrics before cleanup
self.cache_manager.save_metrics_history()
await self.mcp_client.cleanup()
await self.llm_client.cleanup()
async def _setup_user_metrics(self):
"""Setup user profile for accurate metric calculations"""
try:
# Try to get user profile from MCP
if await self.mcp_client.has_tool("user_profile"):
profile = await self.mcp_client.call_tool("user_profile", {})
# Extract FTP and max HR if available
self.user_ftp = profile.get("ftp") or profile.get("functionalThresholdPower")
self.user_max_hr = profile.get("maxHR") or profile.get("maxHeartRate")
# Also try user settings
if await self.mcp_client.has_tool("user_settings"):
settings = await self.mcp_client.call_tool("user_settings", {})
if not self.user_ftp:
self.user_ftp = settings.get("ftp")
if not self.user_max_hr:
self.user_max_hr = settings.get("maxHeartRate")
logger.info(f"User metrics configured: FTP={self.user_ftp}W, Max HR={self.user_max_hr}bpm")
# Set up cache manager with user profile
self.cache_manager.set_user_profile(ftp=self.user_ftp, max_hr=self.user_max_hr)
except Exception as e:
logger.warning(f"Could not setup user metrics: {e}")
# Initialize with defaults
self.cache_manager.set_user_profile()
async def _preload_cache_with_metrics(self):
"""Pre-load cache with calculated metrics"""
logger.info("Pre-loading cache with metrics calculation...")
try:
# Cache recent activities
if await self.mcp_client.has_tool("get_activities"):
activities = await self.mcp_client.call_tool("get_activities", {"limit": 15})
self.cache_manager.set("recent_activities", activities)
# Process cycling activities with metrics
cycling_count = 0
for activity in activities:
activity_type = activity.get("activityType", {})
if isinstance(activity_type, dict):
type_key = activity_type.get("typeKey", "").lower()
else:
type_key = str(activity_type).lower()
if "cycling" in type_key or "bike" in type_key:
activity_id = activity.get("activityId")
if activity_id and cycling_count < 5: # Limit to 5 recent cycling activities
try:
# Get detailed activity data
if await self.mcp_client.has_tool("get_activity_details"):
details = await self.mcp_client.call_tool(
"get_activity_details",
{"activity_id": str(activity_id)}
)
# Calculate and cache metrics
metrics = self.cache_manager.cache_workout_with_metrics(
str(activity_id), details
)
logger.info(f"Processed activity {activity_id}: {metrics.workout_classification}")
cycling_count += 1
except Exception as e:
logger.warning(f"Could not process activity {activity_id}: {e}")
logger.info(f"Processed {cycling_count} cycling activities with metrics")
except Exception as e:
logger.error(f"Error preloading cache with metrics: {e}")
# Enhanced analysis methods
async def analyze_workout_deterministic(self, activity_id: str = None, **kwargs) -> str:
"""Analyze workout using deterministic metrics"""
if not activity_id:
activity_id = self._get_last_cycling_activity_id()
if not activity_id:
return "No cycling activity found for analysis"
# Get deterministic analysis data
analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)
if "error" in analysis_data:
return f"Error: {analysis_data['error']}"
# Get performance trends
performance_trends = self.cache_manager.get_performance_trends(30)
# Use enhanced template
template_name = "workflows/analyze_workout_with_metrics.txt"
context = {
"workout_summary": analysis_data,
"performance_trends": [
{
"metric_name": trend.metric_name,
"current_value": trend.current_value,
"trend_direction": trend.trend_direction,
"trend_7day": trend.trend_7day
}
for trend in performance_trends
],
"training_rules": kwargs.get("training_rules", ""),
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
async def estimate_ftp_without_power(self, activity_id: str = None, **kwargs) -> str:
"""Estimate FTP for workouts without power meter"""
if not activity_id:
activity_id = self._get_last_cycling_activity_id()
if not activity_id:
return "No cycling activity found for FTP estimation"
# Get workout metrics
metrics = self.cache_manager.get_workout_metrics(activity_id)
if not metrics:
return "No metrics available for FTP estimation"
# Get FTP estimation history
ftp_history = self.cache_manager.get_ftp_estimates_history()
# Calculate additional metrics for FTP estimation
hr_intensity = 0
if metrics.avg_hr and self.user_max_hr:
hr_intensity = metrics.avg_hr / self.user_max_hr
elif metrics.avg_hr:
# Estimate max HR if not provided
estimated_max_hr = 220 - 30 # Assume 30 years old, should be configurable
hr_intensity = metrics.avg_hr / estimated_max_hr
# Estimate power from speed
avg_speed_ms = metrics.avg_speed_kmh / 3.6
estimated_power_from_speed = (avg_speed_ms ** 2.5) * 3.5
# Adjust for elevation
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
elevation_factor = 1 + (elevation_per_km / 1000) * 0.1
elevation_adjusted_power = estimated_power_from_speed * elevation_factor
template_name = "workflows/estimate_ftp_no_power.txt"
context = {
"duration_minutes": metrics.duration_minutes,
"distance_km": metrics.distance_km,
"avg_speed_kmh": metrics.avg_speed_kmh,
"elevation_gain_m": metrics.elevation_gain_m,
"avg_hr": metrics.avg_hr,
"max_hr": metrics.max_hr,
"hr_intensity": hr_intensity,
"estimated_power_from_speed": round(estimated_power_from_speed, 0),
"elevation_adjusted_power": round(elevation_adjusted_power, 0),
"estimated_ftp": metrics.estimated_ftp,
"elevation_per_km": round(elevation_per_km, 1),
"elevation_factor": elevation_factor,
"ftp_history": ftp_history[:10], # Last 10 estimates
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
async def analyze_single_speed_gears(self, activity_id: str = None, **kwargs) -> str:
"""Analyze single speed gear selection and optimization"""
if not activity_id:
activity_id = self._get_last_cycling_activity_id()
if not activity_id:
return "No cycling activity found for gear analysis"
# Get workout metrics
metrics = self.cache_manager.get_workout_metrics(activity_id)
if not metrics:
return "No metrics available for gear analysis"
# Get gear usage analysis
gear_analysis = self.cache_manager.get_gear_usage_analysis()
# Calculate additional gear metrics
chainrings = [46, 38]
cogs = [14, 15, 16, 17, 18, 19, 20]
wheel_circumference = 2.096 # meters
available_gears = []
for chainring in chainrings:
for cog in cogs:
ratio = chainring / cog
gear_inches = ratio * 27 # 700c wheel ≈ 27" diameter
development = ratio * wheel_circumference
available_gears.append({
"chainring": chainring,
"cog": cog,
"ratio": round(ratio, 2),
"gear_inches": round(gear_inches, 1),
"development": round(development, 1)
})
# Estimate cadence
if metrics.avg_speed_kmh > 0 and metrics.estimated_gear_ratio:
speed_ms = metrics.avg_speed_kmh / 3.6
estimated_cadence = (speed_ms / (metrics.estimated_gear_ratio * wheel_circumference)) * 60
else:
estimated_cadence = 85 # Default assumption
# Classify terrain
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
if elevation_per_km > 15:
terrain_type = "steep_climbing"
elif elevation_per_km > 8:
terrain_type = "moderate_climbing"
elif elevation_per_km > 3:
terrain_type = "rolling_hills"
else:
terrain_type = "flat_terrain"
template_name = "workflows/single_speed_gear_analysis.txt"
context = {
"avg_speed_kmh": metrics.avg_speed_kmh,
"duration_minutes": metrics.duration_minutes,
"elevation_gain_m": metrics.elevation_gain_m,
"terrain_type": terrain_type,
"estimated_chainring": metrics.estimated_chainring,
"estimated_cog": metrics.estimated_cog,
"estimated_gear_ratio": metrics.estimated_gear_ratio,
"gear_inches": round((metrics.estimated_gear_ratio or 2.5) * 27, 1),
"development_meters": round((metrics.estimated_gear_ratio or 2.5) * wheel_circumference, 1),
"available_gears": available_gears,
"gear_usage_by_terrain": gear_analysis.get("gear_by_terrain", {}),
"best_flat_gear": "46x16", # Example, should be calculated
"best_climbing_gear": "38x20", # Example, should be calculated
"most_versatile_gear": gear_analysis.get("most_common_gear", {}).get("gear", "46x17"),
"efficiency_rating": 7, # Should be calculated based on speed/effort
"estimated_cadence": round(estimated_cadence, 0),
"elevation_per_km": round(elevation_per_km, 1),
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
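# [Sketch] The gear arithmetic above is compact enough to sanity-check by hand.
# Standalone version using the same 700c constants (2.096 m circumference,
# ~27" effective diameter); output comes from these formulas, not measured data.
WHEEL_CIRCUMFERENCE_M = 2.096  # 700c, same constant as above

def gear_stats(chainring: int, cog: int, speed_kmh: float) -> dict:
    ratio = chainring / cog
    development = ratio * WHEEL_CIRCUMFERENCE_M     # meters per pedal stroke
    cadence = (speed_kmh / 3.6) / development * 60  # RPM needed to hold that speed
    return {"ratio": round(ratio, 2), "gear_inches": round(ratio * 27, 1),
            "development_m": round(development, 1), "cadence_rpm": round(cadence)}

print(gear_stats(46, 17, 28.0))
# {'ratio': 2.71, 'gear_inches': 73.1, 'development_m': 5.7, 'cadence_rpm': 82}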
async def get_training_load_analysis(self, **kwargs) -> str:
"""Analyze training load and recovery status"""
training_load = self.cache_manager.get_training_load()
if not training_load:
return "Insufficient workout history for training load analysis"
# Get performance trends
performance_trends = self.cache_manager.get_performance_trends(42) # 6 weeks
# Classify training load status
if training_load.training_stress_balance > 5:
form_status = "fresh_and_ready"
elif training_load.training_stress_balance > -5:
form_status = "maintaining_fitness"
elif training_load.training_stress_balance > -15:
form_status = "building_fitness"
else:
form_status = "high_fatigue_risk"
template_name = "workflows/training_load_analysis.txt"
context = {
"training_load": {
"fitness": training_load.fitness,
"fatigue": training_load.fatigue,
"form": training_load.form,
"acute_load": training_load.acute_training_load,
"chronic_load": training_load.chronic_training_load
},
"form_status": form_status,
"performance_trends": [
{
"metric": trend.metric_name,
"trend_direction": trend.trend_direction,
"trend_7day": trend.trend_7day,
"trend_30day": trend.trend_30day,
"confidence": trend.confidence
}
for trend in performance_trends
],
"training_rules": kwargs.get("training_rules", ""),
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
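# [Sketch] The fitness/fatigue/form numbers come from cache_manager.get_training_load();
# the conventional derivation is the impulse-response model (42-day and 7-day
# exponentially weighted averages of daily training stress). A hedged sketch of that
# calculation, not necessarily the cache manager's exact implementation.
def training_load(daily_tss: list[float]) -> dict:
    ctl = atl = 0.0
    for tss in daily_tss:
        ctl += (tss - ctl) / 42.0  # chronic load: slow-moving "fitness"
        atl += (tss - atl) / 7.0   # acute load: fast-moving "fatigue"
    return {"fitness": round(ctl, 1), "fatigue": round(atl, 1), "form": round(ctl - atl, 1)}

# Two easy weeks followed by a hard week pushes form negative:
print(training_load([40] * 14 + [90] * 7))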
async def suggest_next_workout_data_driven(self, **kwargs) -> str:
"""Generate data-driven workout suggestions"""
# Get training load status
training_load = self.cache_manager.get_training_load()
performance_trends = self.cache_manager.get_performance_trends(14) # 2 weeks
# Get recent workout pattern
recent_activities = self.cache_manager.get("recent_activities", [])
recent_cycling = [act for act in recent_activities
if "cycling" in act.get("activityType", {}).get("typeKey", "").lower()]
# Analyze recent workout pattern
recent_intensities = []
recent_durations = []
recent_types = []
for activity in recent_cycling[:7]: # Last 7 cycling activities
activity_id = str(activity.get("activityId"))
metrics = self.cache_manager.get_workout_metrics(activity_id)
if metrics:
recent_intensities.append(self._rate_intensity(metrics))
recent_durations.append(metrics.duration_minutes)
recent_types.append(self._classify_workout(metrics))
# Calculate training pattern analysis
avg_intensity = sum(recent_intensities) / len(recent_intensities) if recent_intensities else 5
avg_duration = sum(recent_durations) / len(recent_durations) if recent_durations else 60
# Determine workout recommendation based on data
if training_load and training_load.form < -10:
recommendation_type = "recovery_focus"
elif avg_intensity > 7:
recommendation_type = "endurance_focus"
elif avg_intensity < 4:
recommendation_type = "intensity_focus"
else:
recommendation_type = "balanced_progression"
template_name = "workflows/suggest_next_workout_data_driven.txt"
context = {
"training_load": training_load,
"performance_trends": performance_trends,
"recent_workout_analysis": {
"avg_intensity": round(avg_intensity, 1),
"avg_duration": round(avg_duration, 0),
"workout_types": recent_types,
"pattern_analysis": self._analyze_workout_pattern(recent_types)
},
"recommendation_type": recommendation_type,
"user_ftp": self.user_ftp,
"training_rules": kwargs.get("training_rules", ""),
**kwargs
}
prompt = self.template_engine.render(template_name, **context)
return await self.llm_client.generate(prompt)
# Utility methods
def _get_last_cycling_activity_id(self) -> Optional[str]:
"""Get the ID of the most recent cycling activity"""
activities = self.cache_manager.get("recent_activities", [])
for activity in activities:
activity_type = activity.get("activityType", {})
if isinstance(activity_type, dict):
type_key = activity_type.get("typeKey", "").lower()
else:
type_key = str(activity_type).lower()
if "cycling" in type_key or "bike" in type_key:
return str(activity.get("activityId"))
return None
def _rate_intensity(self, metrics) -> int:
"""Rate workout intensity 1-10 based on metrics"""
factors = []
# Speed factor
if metrics.avg_speed_kmh > 40:
factors.append(9)
elif metrics.avg_speed_kmh > 35:
factors.append(7)
elif metrics.avg_speed_kmh > 25:
factors.append(5)
else:
factors.append(3)
# Duration factor
duration_intensity = min(metrics.duration_minutes / 60 * 2, 6)
factors.append(duration_intensity)
# Elevation factor
if metrics.distance_km > 0:
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
if elevation_per_km > 15:
factors.append(8)
elif elevation_per_km > 10:
factors.append(6)
elif elevation_per_km > 5:
factors.append(4)
else:
factors.append(2)
return min(int(sum(factors) / len(factors)), 10)
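# [Worked example] A 90-minute ride at 30 km/h with 8 m/km of climbing scores
# speed = 5, duration = min(90/60 * 2, 6) = 3.0, elevation = 4, so:
print(min(int((5 + 3.0 + 4) / 3), 10))  # -> 4 out of 10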
def _classify_workout(self, metrics) -> str:
"""Classify workout type"""
duration = metrics.duration_minutes
avg_speed = metrics.avg_speed_kmh
elevation_gain = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
if duration < 30:
return "short_intensity"
elif duration > 180:
return "long_endurance"
elif elevation_gain > 10:
return "climbing_focused"
elif avg_speed > 35:
return "high_speed"
elif avg_speed < 20:
return "recovery_easy"
else:
return "moderate_endurance"
def _analyze_workout_pattern(self, recent_types: list) -> str:
"""Analyze recent workout pattern"""
if not recent_types:
return "insufficient_data"
type_counts = {}
for workout_type in recent_types:
type_counts[workout_type] = type_counts.get(workout_type, 0) + 1
total_workouts = len(recent_types)
intensity_workouts = sum(1 for t in recent_types if "intensity" in t or "speed" in t)
endurance_workouts = sum(1 for t in recent_types if "endurance" in t)
recovery_workouts = sum(1 for t in recent_types if "recovery" in t)
intensity_ratio = intensity_workouts / total_workouts
endurance_ratio = endurance_workouts / total_workouts
if intensity_ratio > 0.5:
return "high_intensity_bias"
elif recovery_workouts > total_workouts * 0.4:
return "recovery_heavy"
elif endurance_ratio > 0.6:
return "endurance_focused"
else:
return "balanced_training"
# Compatibility methods for existing interface
async def analyze_workout(self, analysis_type: str = "deterministic", **kwargs) -> str:
"""Analyze workout with deterministic metrics (enhanced version)"""
return await self.analyze_workout_deterministic(**kwargs)
async def suggest_next_workout(self, **kwargs) -> str:
"""Generate data-driven workout suggestion"""
return await self.suggest_next_workout_data_driven(**kwargs)
async def enhanced_analysis(self, analysis_type: str, **kwargs) -> str:
"""Perform enhanced analysis based on type"""
if analysis_type == "ftp_estimation":
return await self.estimate_ftp_without_power(**kwargs)
elif analysis_type == "gear_analysis":
return await self.analyze_single_speed_gears(**kwargs)
elif analysis_type == "training_load":
return await self.get_training_load_analysis(**kwargs)
else:
# Fallback to deterministic analysis
return await self.analyze_workout_deterministic(**kwargs)
# Existing interface compatibility
async def list_available_tools(self) -> list:
return await self.mcp_client.list_tools()
def list_templates(self) -> list:
return self.template_engine.list_templates()
def get_cached_data(self, key: str = None) -> Any:
return self.cache_manager.get(key) if key else self.cache_manager.get_all()
# New deterministic data access methods
def get_performance_summary(self) -> Dict[str, Any]:
"""Get comprehensive performance summary"""
performance_trends = self.cache_manager.get_performance_trends(30)
training_load = self.cache_manager.get_training_load()
ftp_history = self.cache_manager.get_ftp_estimates_history()
gear_analysis = self.cache_manager.get_gear_usage_analysis()
return {
"performance_trends": [
{
"metric": trend.metric_name,
"current": trend.current_value,
"trend_7d": f"{trend.trend_7day:+.1f}%",
"trend_30d": f"{trend.trend_30day:+.1f}%",
"direction": trend.trend_direction,
"confidence": trend.confidence
}
for trend in performance_trends
],
"training_load": {
"fitness": training_load.fitness if training_load else None,
"fatigue": training_load.fatigue if training_load else None,
"form": training_load.form if training_load else None
},
"ftp_estimates": {
"latest": ftp_history[0]["estimated_ftp"] if ftp_history else None,
"trend": "improving" if len(ftp_history) > 1 and ftp_history[0]["estimated_ftp"] > ftp_history[1]["estimated_ftp"] else "stable",
"history_count": len(ftp_history)
},
"gear_usage": {
"most_common": gear_analysis.get("most_common_gear", {}),
"total_analyzed": gear_analysis.get("total_workouts_analyzed", 0)
}
}
def get_metrics_for_activity(self, activity_id: str) -> Optional[Dict[str, Any]]:
"""Get all calculated metrics for a specific activity"""
return self.cache_manager.get_deterministic_analysis_data(activity_id)

View File

@@ -2,6 +2,7 @@
pydantic-ai>=0.0.1
pyyaml>=6.0
aiohttp>=3.8.0
rich>=13.0.0
# LLM dependencies
pydantic>=2.0.0

View File

@@ -76,26 +76,103 @@ class TemplateEngine:
            raise

    def render(self, template_name: str, **kwargs) -> str:
        """Load and render template with variables, supporting conditionals and nested access"""
        content = self.load_template(template_name)

        # Flatten context for safe nested access
        flat_context = self._flatten_context(kwargs)

        # Handle section includes
        content = self._process_includes(content, **flat_context)

        # Process conditionals
        content = self._process_conditionals(content, **flat_context)

        try:
            rendered = content.format(**flat_context)
            logger.debug(f"Rendered template: {template_name}")
            return rendered
        except KeyError as e:
            logger.error(f"Missing variable in template {template_name}: {e}")
            logger.debug(f"Available variables: {list(flat_context.keys())}")
            raise ValueError(f"Missing variable in template {template_name}: {e}")
        except Exception as e:
            logger.error(f"Error rendering template {template_name}: {e}")
            raise
def _flatten_context(self, context: Dict[str, Any]) -> Dict[str, Any]:
"""Flatten nested context with safe access and None handling"""
flat = {}
def flatten_item(key_path: str, value: Any):
if value is None:
flat[key_path] = "N/A"
elif isinstance(value, dict):
for subkey, subvalue in value.items():
new_path = f"{key_path}_{subkey}" if key_path else subkey
flatten_item(new_path, subvalue)
elif isinstance(value, list):
for i, item in enumerate(value[:5]): # Limit list length
flatten_item(f"{key_path}_{i}", item)
else:
# Keep numbers as numbers so format specs like {x:.1f} still render
flat[key_path] = value if isinstance(value, (int, float)) else str(value)
for key, value in context.items():
flatten_item(key, value)
return flat
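# [Sketch] To see which keys templates can actually reference, the flattening rules
# restated standalone: dicts join path segments with underscores, lists are
# index-suffixed and capped at five items, None becomes "N/A". Assumes the
# numeric-preserving tweak above.
def flatten(ctx: dict, prefix: str = "") -> dict:
    flat = {}
    for key, value in ctx.items():
        path = f"{prefix}_{key}" if prefix else key
        if value is None:
            flat[path] = "N/A"
        elif isinstance(value, dict):
            flat.update(flatten(value, path))
        elif isinstance(value, list):
            flat.update(flatten({str(i): v for i, v in enumerate(value[:5])}, path))
        else:
            flat[path] = value if isinstance(value, (int, float)) else str(value)
    return flat

print(flatten({"summary": {"avg_hr": 142, "route": None}, "splits": [5.1, 5.3]}))
# {'summary_avg_hr': 142, 'summary_route': 'N/A', 'splits_0': 5.1, 'splits_1': 5.3}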
def _process_conditionals(self, content: str, **context) -> str:
"""Process {if condition}content{endif} blocks"""
import re
# Find all conditional blocks
conditional_pattern = re.compile(r'\{if\s+([^\}]+)\}(.*?)\{endif\}', re.DOTALL)
def evaluate_condition(condition: str, context: Dict[str, Any]) -> bool:
"""Simple condition evaluator supporting dot and bracket notation"""
# Handle dot and bracket notation by replacing . and [ ] with _ for flat context lookup
flat_condition = condition.replace('.', '_').replace('[', '_').replace(']', '_')
# Handle simple variable checks like 'var' or 'var == True'
if flat_condition in context:
value = context[flat_condition]
if value in ['True', 'true', True]:
return True
if value == 'N/A' or value is None or value == '':
return False
return bool(str(value).lower() in ['true', 'yes', '1'])
# Handle simple equality like 'var == value'
if ' == ' in condition:
var, val = [part.strip() for part in condition.split(' == ', 1)]
flat_var = var.replace('.', '_').replace('[', '_').replace(']', '_')
if flat_var in context:
return str(context[flat_var]).lower() == str(val).lower()
logger.warning(f"Unknown condition: {condition}")
return False
matches = list(conditional_pattern.finditer(content))
if not matches:
return content
# Process from end to start to avoid index shifts
for match in reversed(matches):
condition = match.group(1)
block_content = match.group(2)
if evaluate_condition(condition, context):
replacement = block_content.strip()
else:
replacement = ""
content = content[:match.start()] + replacement + content[match.end():]
return content
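# [Sketch] End to end, the mini-syntax behaves like this condensed version:
# a conditional block is kept only when the flattened flag is truthy, then
# ordinary str.format fills the placeholders. This collapses the engine's
# lookup rules but matches the happy path.
import re

def render_mini(tpl: str, ctx: dict) -> str:
    def repl(m: re.Match) -> str:
        truthy = str(ctx.get(m.group(1).strip(), "")).lower() in ("true", "yes", "1")
        return m.group(2).strip() if truthy else ""
    return re.sub(r"\{if\s+([^\}]+)\}(.*?)\{endif\}", repl, tpl, flags=re.DOTALL).format(**ctx)

print(render_mini("{if is_indoor}[indoor]{endif} Avg HR: {avg_hr} bpm",
                  {"is_indoor": "true", "avg_hr": 142}))
# -> [indoor] Avg HR: 142 bpm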
    def _process_includes(self, content: str, **kwargs) -> str:
        """Process section includes like {activity_summary_section}"""
        import re

View File

@@ -0,0 +1,159 @@
# templates/workflows/training_load_analysis.txt
Analyze training load, fatigue, and recovery status using calculated metrics.
TRAINING LOAD ANALYSIS:
=======================
Current Training Load Status:
- Fitness (Chronic Training Load): {training_load[chronic_load]:.1f}
- Fatigue (Acute Training Load): {training_load[acute_load]:.1f}
- Form (Training Stress Balance): {training_load[form]:+.1f}
- Overall Status: {form_status}
Training Load Interpretation:
- Fitness Level: {% if training_load.fitness > 60 %}High{% elif training_load.fitness > 30 %}Moderate{% else %}Building{% endif %}
- Fatigue Level: {% if training_load.fatigue > 80 %}Very High{% elif training_load.fatigue > 50 %}High{% elif training_load.fatigue > 25 %}Moderate{% else %}Low{% endif %}
- Form Status: {% if training_load.form > 5 %}Fresh{% elif training_load.form > -5 %}Maintaining{% elif training_load.form > -15 %}Building{% else %}Overreached{% endif %}
PERFORMANCE TRENDS (6 weeks):
{% for trend in performance_trends %}
- {trend.metric}: {trend.trend_direction} ({trend.trend_7day:+.1f}% vs 1wk, {trend.trend_30day:+.1f}% vs 1mo)
Confidence: {trend.confidence:.0%}
{% endfor %}
MY TRAINING RULES:
{training_rules}
Based on this training load analysis, provide:
1. **Current Training Status Assessment**:
- Is the form score of {training_load[form]:+.1f} optimal for continued training?
- What does the fitness/fatigue balance indicate about training adaptation?
2. **Fatigue Management**:
- With current fatigue at {training_load[fatigue]:.1f}, what's the risk level?
- How many recovery days are recommended before next hard session?
3. **Performance Trend Analysis**:
- Which metrics show the strongest positive trends?
- Are there concerning negative trends that need addressing?
- How do the trends align with the current training load?
4. **Training Periodization Recommendations**:
- Should training intensity increase, decrease, or maintain?
- What's the optimal training focus for the next 7-14 days?
- When would be the best time for a recovery week?
5. **Risk Assessment**:
- Overtraining risk: {% if training_load.form < -20 %}HIGH{% elif training_load.form < -10 %}MODERATE{% else %}LOW{% endif %}
- Detraining risk: {% if training_load.form > 10 %}MODERATE{% else %}LOW{% endif %}
- Plateau risk: {% if performance_trends|length == 0 %}UNKNOWN{% else %}LOW{% endif %}
6. **Specific Action Plan**: Based on form score of {training_load[form]:+.1f}, recommend:
- Training intensity for next 3 workouts
- Target training stress score range
- Recovery activities and timing
Focus on actionable recommendations based on the calculated training load metrics.
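# [Sketch] For reference, the TSB-to-status buckets this template leans on match the
# cut points used in get_training_load_analysis above:
def form_status(tsb: float) -> str:
    if tsb > 5:
        return "fresh_and_ready"
    if tsb > -5:
        return "maintaining_fitness"
    if tsb > -15:
        return "building_fitness"
    return "high_fatigue_risk"

print(form_status(-8.2))  # -> building_fitness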
---
# templates/workflows/suggest_next_workout_data_driven.txt
Data-driven workout suggestion based on training load and performance trends.
WORKOUT RECOMMENDATION ANALYSIS:
================================
Training Load Context:
{% if training_load %}
- Fitness: {training_load.fitness:.1f}
- Fatigue: {training_load.fatigue:.1f}
- Form: {training_load.form:+.1f}
{% else %}
- Training load data: Not available
{% endif %}
Recent Workout Pattern (last 7 cycling sessions):
- Average Intensity: {recent_workout_analysis[avg_intensity]}/10
- Average Duration: {recent_workout_analysis[avg_duration]:.0f} minutes
- Pattern Analysis: {recent_workout_analysis[pattern_analysis]}
- Workout Types: {recent_workout_analysis[workout_types]}
Performance Trends:
{% for trend in performance_trends %}
- {trend.metric_name}: {trend.trend_direction} ({trend.trend_7day:+.1f}% change)
{% endfor %}
Recommendation Category: {recommendation_type}
MY TRAINING RULES:
{training_rules}
{% if user_ftp %}Current FTP: {user_ftp}W{% endif %}
Based on this comprehensive data analysis, provide:
1. **Next Workout Prescription**:
- Workout Type: [Specify: Recovery, Endurance, Tempo, Threshold, VO2max, Neuromuscular]
- Duration: [Specific minute range]
- Intensity: [Power zones if FTP available, or HR zones/RPE]
- Structure: [Intervals, steady state, fartlek, etc.]
2. **Scientific Rationale**:
- How does this workout address the "{recommendation_type}" need?
- Why is this optimal given form score of {training_load.form if training_load else 'unknown'}?
- How will it impact the current performance trends?
3. **Specific Workout Details**:
{% if user_ftp %}
Power-Based Workout (FTP: {user_ftp}W):
- Zone 1 (Active Recovery): < {(user_ftp * 0.55)|int}W
- Zone 2 (Endurance): {(user_ftp * 0.56)|int}-{(user_ftp * 0.75)|int}W
- Zone 3 (Tempo): {(user_ftp * 0.76)|int}-{(user_ftp * 0.90)|int}W
- Zone 4 (Threshold): {(user_ftp * 0.91)|int}-{(user_ftp * 1.05)|int}W
- Zone 5 (VO2max): {(user_ftp * 1.06)|int}-{(user_ftp * 1.20)|int}W
{% else %}
RPE/HR-Based Workout (no power meter):
- Easy: RPE 3-4, conversational pace
- Moderate: RPE 5-6, can speak in short phrases
- Hard: RPE 7-8, difficult to speak
- Very Hard: RPE 9-10, all-out effort
{% endif %}
4. **Single Speed Considerations** (if applicable):
- Recommended gear ratio for this workout
- Terrain considerations for optimal execution
- Cadence targets and adjustments
5. **Recovery Integration**:
- How much recovery time after this workout?
- What recovery activities are recommended?
- When can the next structured workout occur?
6. **Progressive Overload Strategy**:
- How does this workout build on recent training?
- What metrics should improve from this session?
- Next week's progression plan
7. **Alternative Options**: Provide 2-3 alternative workouts if conditions change:
- Weather-dependent alternatives
- Time-constrained alternatives
- Equipment-limited alternatives
Focus on specific, measurable workout prescriptions based on the data trends and training load analysis.
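# [Sketch] The zone boundaries hard-coded in this template are the usual
# percent-of-FTP cut points; a small helper that derives them for any FTP
# (here a hypothetical 250 W rider):
def power_zones(ftp: int) -> dict:
    pct = {"Z1 recovery": (0.00, 0.55), "Z2 endurance": (0.56, 0.75),
           "Z3 tempo": (0.76, 0.90), "Z4 threshold": (0.91, 1.05),
           "Z5 vo2max": (1.06, 1.20)}
    return {zone: (int(ftp * lo), int(ftp * hi)) for zone, (lo, hi) in pct.items()}

print(power_zones(250))
# {'Z1 recovery': (0, 137), 'Z2 endurance': (140, 187), 'Z3 tempo': (190, 225), ...}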
---
# templates/base/data_sections/workout_data.txt
DETAILED WORKOUT DATA:
Duration: {duration_minutes:.0f} minutes
Distance: {distance_km:.1f} km
Average Speed: {avg_speed_kmh:.1f} km/h (max: {max_speed_kmh:.1f} km/h)
Elevation Gain: {elevation_gain_m:.0f} m
{% if avg_hr %}Average Heart Rate: {avg_hr:.0f} bpm{% endif %}
{% if max_hr %}Maximum Heart Rate: {max_hr:.0f} bpm{% endif %}
{% if avg_power %}Average Power: {avg_power:.0f} W{% endif %}
{% if max_power %}Maximum Power: {max_power:.0f} W{% endif %}

View File

@@ -1,2 +1,43 @@
ACTIVITY SUMMARY:
Activity ID: {activity_summary_activityId}
Activity Name: {activity_summary_activityName}
Activity Type: {activity_summary_activityTypeDTO_typeKey} (Indoor: {activity_summary_is_indoor})
Start Time: {activity_summary_summaryDTO_startTimeLocal}
Duration: {activity_summary_summaryDTO_duration} seconds (N/A if unavailable)
Distance: {activity_summary_summaryDTO_distance} meters (N/A if unavailable)
SPEED DATA:
{if data_quality_is_indoor}
Note: Indoor cycling - speed data typically not available or estimated from power/cadence
{else}
Average Speed: {activity_summary_summaryDTO_averageSpeed} m/s
Max Speed: {activity_summary_summaryDTO_maxSpeed} m/s
{endif}
HEART RATE:
Average HR: {activity_summary_summaryDTO_averageHR} bpm (N/A if unavailable)
Max HR: {activity_summary_summaryDTO_maxHR} bpm (N/A if unavailable)
POWER (if available):
Average Power: {activity_summary_summaryDTO_averagePower} watts
Max Power: {activity_summary_summaryDTO_maxPower} watts
Normalized Power: {activity_summary_summaryDTO_normalizedPower} watts
Training Stress Score: {activity_summary_summaryDTO_trainingStressScore}
ELEVATION DATA:
{if data_quality_is_indoor}
Note: Indoor cycling - elevation data not applicable
{else}
Total Ascent: {activity_summary_summaryDTO_elevationGain} meters
Total Descent: {activity_summary_summaryDTO_elevationLoss} meters
{endif}
GPS DATA:
{if data_quality_is_indoor}
Note: Indoor cycling - GPS data not available
{else}
GPS tracking available - full route data present
{endif}
Note: Missing fields shown as descriptions above. For indoor activities, focus analysis on power, heart rate, and duration metrics.

View File

@@ -1,2 +1,2 @@
USER INFO:
User ID: {user_info_id}

View File

@@ -0,0 +1,146 @@
# templates/workflows/estimate_ftp_no_power.txt
Estimate FTP (Functional Threshold Power) for cycling workouts without a power meter.
WORKOUT DATA FOR FTP ESTIMATION:
================================
Basic Metrics:
- Duration: {duration_minutes} minutes
- Distance: {distance_km} km
- Average Speed: {avg_speed_kmh} km/h
- Elevation Gain: {elevation_gain_m} m
- Average Heart Rate: {avg_hr} bpm
- Max Heart Rate: {max_hr} bpm
Calculated Intensity Factors:
- Heart Rate Intensity: {hr_intensity:.1%} of max HR
- Speed-Power Estimation: {estimated_power_from_speed} watts
- Elevation-Adjusted Power: {elevation_adjusted_power} watts
- Estimated FTP: {estimated_ftp} watts
Historical FTP Estimates:
{% for estimate in ftp_history %}
- {estimate.date}: {estimate.estimated_ftp}W ({estimate.workout_type})
{% endfor %}
ANALYSIS FRAMEWORK:
1. **Speed-Based Power Estimation**:
- Uses relationship between speed and power requirements
- Accounts for air resistance (speed²) and rolling resistance
- Formula: Power ≈ (speed_m/s)^2.5 × 3.5 × elevation_factor
2. **Heart Rate Validation**:
- FTP typically occurs at 75-85% of max heart rate
- Current workout intensity: {hr_intensity:.1%}
- Scaling factor applied based on HR zones
3. **Terrain Adjustment**:
- Elevation gain per km: {elevation_per_km:.1f} m/km
- Power requirement increases ~10W per 1% gradient
- Applied elevation factor: {elevation_factor:.2f}
Please provide:
1. **FTP Estimate Validation**: Is the estimated {estimated_ftp}W reasonable for this workout intensity?
2. **Comparison with History**: How does this estimate compare to previous estimates? Are there trends?
3. **Confidence Assessment**: Rate confidence in this estimate (1-10) based on:
- Heart rate data quality
- Workout duration and intensity
- Terrain consistency
- Historical estimate consistency
4. **Improvement Recommendations**: To get more accurate FTP estimates without power:
- What types of workouts provide better estimates?
- How to improve heart rate data quality?
- When might a power meter become worthwhile?
5. **Training Zone Implications**: Based on estimated FTP of {estimated_ftp}W, what are the training zones?
---
# templates/workflows/single_speed_gear_analysis.txt
Analyze single speed bike gear selection and optimization.
SINGLE SPEED GEAR ANALYSIS:
===========================
Current Ride Analysis:
- Average Speed: {avg_speed_kmh} km/h
- Duration: {duration_minutes} minutes
- Elevation Gain: {elevation_gain_m} m
- Terrain Classification: {terrain_type}
Estimated Gear Usage:
- Primary Gear: {estimated_chainring}t × {estimated_cog}t
- Gear Ratio: {estimated_gear_ratio:.1f}
- Gear Inches: {gear_inches:.1f}"
- Development: {development_meters:.1f}m per pedal stroke
Available Gear Options:
{% for gear in available_gears %}
- {gear.chainring}t × {gear.cog}t = {gear.ratio:.1f} ratio ({gear.gear_inches:.1f}", {gear.development:.1f}m)
{% endfor %}
Gear Usage by Terrain Type:
{% for terrain, usage in gear_usage_by_terrain.items() %}
{terrain}:
{% for gear, percentage in usage.items() %}
- {gear}: {percentage}% of rides
{% endfor %}
{% endfor %}
Optimization Analysis:
- Best gear for flat terrain: {best_flat_gear}
- Best gear for climbing: {best_climbing_gear}
- Most versatile gear: {most_versatile_gear}
- Current efficiency rating: {efficiency_rating}/10
GEAR SELECTION SCIENCE:
1. **Gear Ratio Calculation**:
- Ratio = Chainring teeth ÷ Rear cog teeth
- Higher ratio = harder to pedal, higher top speed
- Lower ratio = easier to pedal, better for climbing
2. **Development (Rollout)**:
- Distance traveled per pedal stroke
- Formula: (Chainring ÷ Cog) × Wheel circumference
- Typical range: 3.5-7.0 meters
3. **Cadence Relationship**:
- Speed = Cadence (RPM) × Development ÷ 60
- Optimal cadence typically 80-100 RPM
- Current estimated cadence: {estimated_cadence} RPM
Please analyze:
1. **Current Gear Efficiency**: How well suited was the {estimated_chainring}×{estimated_cog} gear for this ride's terrain and intensity?
2. **Alternative Gear Performance**: Which of the available gear options would have been better/worse and why?
3. **Terrain-Specific Recommendations**:
- Best gear for steep climbs ({elevation_per_km:.1f}m/km elevation rate)
- Best gear for flat speed sections
- Most versatile single gear for mixed terrain
4. **Cadence Analysis**:
- Was the estimated {estimated_cadence} RPM cadence in optimal range?
- How would different gears affect cadence for this speed?
5. **Future Gear Selection**: Based on your riding patterns, recommend:
- Primary chainring/cog combination
- Backup gear for different conditions
- When gear changes might be worth the effort
6. **Training Implications**: How does single speed riding with this gear ratio affect:
- Power development
- Cadence skills
- Climbing technique
- Overall fitness adaptation
Focus on practical gear optimization based on the calculated ratios and usage patterns.

View File

@@ -0,0 +1,61 @@
# templates/workflows/analyze_workout_with_metrics.txt
Analyze this cycling workout using the calculated metrics and deterministic data provided.
DETERMINISTIC WORKOUT ANALYSIS:
================================
Workout Classification: {workout_summary[workout_classification]}
Intensity Rating: {workout_summary[intensity_rating]}
KEY METRICS:
- Duration: {workout_summary[key_metrics][duration]}
- Distance: {workout_summary[key_metrics][distance]}
- Average Speed: {workout_summary[key_metrics][avg_speed]}
- Elevation Gain: {workout_summary[key_metrics][elevation_gain]}
PERFORMANCE INDICATORS:
- Efficiency Score: {workout_summary[performance_indicators][efficiency_score]}
- Estimated FTP: {workout_summary[performance_indicators][estimated_ftp]} W
- Intensity Factor: {workout_summary[performance_indicators][intensity_factor]}
SINGLE SPEED ANALYSIS:
{% if workout_summary.single_speed_analysis %}
- Estimated Gear Used: {workout_summary[single_speed_analysis][estimated_gear]}
- Gear Ratio: {workout_summary[single_speed_analysis][gear_ratio]:.1f}
{% endif %}
TRAINING LOAD CONTEXT:
{% if workout_summary.training_load_context %}
- Current Fitness Level: {workout_summary[training_load_context][fitness_level]}
- Fatigue Level: {workout_summary[training_load_context][fatigue_level]}
- Training Form: {workout_summary[training_load_context][form]}
{% endif %}
PERFORMANCE TRENDS (30 days):
{% for trend in performance_trends %}
- {trend.metric_name}: {trend.current_value} ({trend.trend_direction}, {trend.trend_7day:+.1f}% vs 7d ago)
{% endfor %}
CALCULATED RECOVERY RECOMMENDATION: {workout_summary[recovery_guidance]}
MY TRAINING RULES:
{training_rules}
Based on this deterministic analysis, provide:
1. **Workout Assessment**: How well did this workout align with the calculated intensity rating and classification?
2. **Performance Analysis**: Comment on the efficiency score and how it compares to the performance trends shown above.
3. **FTP Analysis**: {% if workout_summary.performance_indicators.estimated_ftp %}Evaluate the estimated FTP of {workout_summary[performance_indicators][estimated_ftp]}W. Is this reasonable based on the workout intensity?{% else %}No power data available for FTP estimation.{% endif %}
4. **Single Speed Optimization**: {% if workout_summary.single_speed_analysis %}The analysis suggests you used a {workout_summary[single_speed_analysis][estimated_gear]} gear (ratio {workout_summary[single_speed_analysis][gear_ratio]:.1f}). Is this optimal for the terrain and intensity?{% else %}No single speed analysis available.{% endif %}
5. **Training Load Impact**: {% if workout_summary.training_load_context %}With current fitness at {workout_summary[training_load_context][fitness_level]} and form at {workout_summary[training_load_context][form]}, how does this workout fit your training progression?{% else %}Consider tracking training load for better progression insights.{% endif %}
6. **Specific Improvements**: Based on the deterministic metrics, what 2-3 specific areas could be optimized?
7. **Next Workout Suggestion**: Given the calculated recovery recommendation of "{workout_summary[recovery_guidance]}" and current trends, what should your next workout focus on?
Focus your analysis on the calculated metrics rather than general coaching advice.

View File

@@ -1,5 +1,8 @@
Analyze my most recent cycling workout using the provided data.
DATA QUALITY ASSESSMENT:
{data_quality_note}
{activity_summary_section}
{user_info_section}
@@ -8,4 +11,17 @@ Analyze my most recent cycling workout using the provided data.
{assessment_points}
IMPORTANT ANALYSIS GUIDANCE:
{if data_quality_is_indoor}
This is an indoor cycling activity. Speed, elevation, and GPS data may be missing or estimated. Focus analysis on:
- Power metrics (average power, max power, normalized power)
- Heart rate zones and distribution
- Duration and intensity relative to FTP
- Cadence consistency
- Training stress score (TSS)
Avoid speed-based analysis or comparisons with outdoor rides.
{else}
This is an outdoor cycling activity with full metrics available. Consider all data points including speed, elevation, and route characteristics.
{endif}
Focus on the provided activity details for your analysis, accounting for any missing data as noted above.

View File

@@ -1,5 +1,17 @@
Perform enhanced {analysis_type} analysis using all available data and tools.
DATA QUALITY CONTEXT:
{data_quality_note}
Available cached data: {cached_data}
Use MCP tools as needed to gather additional data for comprehensive analysis. When using tools, be aware of potential data limitations in recent activities.
ANALYSIS GUIDANCE:
{if data_quality.is_indoor}
This is an indoor activity analysis. Focus on power-based metrics and heart rate zones. Speed and elevation data may be limited or estimated.
{else}
This is an outdoor activity analysis. Full metrics including speed, elevation, and route data are available.
{endif}
Account for missing or estimated data when performing the analysis.

View File

@@ -1,10 +1,22 @@
Please suggest my next cycling workout based on my recent training history.
RECENT DATA QUALITY:
{data_quality_note}
{training_rules_section}
Please provide:
1. Analysis of my recent training pattern, noting any data limitations from recent activities
2. Identified gaps or imbalances in my training, considering data quality
3. Specific workout recommendation for my next session
4. Target zones (power, heart rate, duration) - adjust for indoor vs outdoor based on recent data quality
5. Rationale for the recommendation based on recent performance and data availability
GUIDANCE FOR SUGGESTIONS:
{if data_quality.is_indoor}
Recent activities were indoor. Consider recommending outdoor rides if weather permits, or advanced indoor intervals if continuing indoor training.
{else}
Recent activities were outdoor. Balance with indoor options if needed for recovery or weather.
{endif}
Account for missing metrics in recent data when making recommendations.

20
test_template_render.py Normal file
View File

@@ -0,0 +1,20 @@
import asyncio
from config import load_config
from core_app import CyclingAnalyzerApp
async def main():
config = load_config()
app = CyclingAnalyzerApp(config, test_mode=True)
await app.initialize()
activity_data = app.cache_manager.get("last_cycling_details")
print("=== ACTIVITY DATA KEYS ===")
print(list(activity_data.keys()) if activity_data else "No activity data")
print("\nSample data:", dict(list(activity_data.items())[:10]) if activity_data else "No data")
result = await app.analyze_workout("analyze_last_workout")
print("=== TEST RESULT ===")
print(result)
await app.cleanup()
if __name__ == "__main__":
asyncio.run(main())