mirror of
https://github.com/sstent/AICycling_mcp.git
synced 2026-04-29 08:13:38 +00:00
restructured repo
This commit is contained in:
0
core/__init__.py
Normal file
0
core/__init__.py
Normal file
147
core/cache_manager.py
Normal file
147
core/cache_manager.py
Normal file
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cache Manager - Handles caching of MCP responses and other data
|
||||
"""
|
||||
|
||||
import time
|
||||
import logging
|
||||
from typing import Any, Dict, Optional, List
|
||||
from dataclasses import dataclass
|
||||
|
||||
logger = logging.getLogger(__name__)


@dataclass
class CacheEntry:
    """A cached value plus the bookkeeping needed for expiry."""
    data: Any         # cached payload (may legitimately be None)
    timestamp: float  # time.time() when the entry was stored
    ttl: int          # Time to live in seconds


class CacheManager:
    """Manages caching of data with TTL support.

    Expiry is lazy: expired entries are evicted when accessed (get/has)
    or in bulk via cleanup_expired().
    """

    def __init__(self, default_ttl: int = 300):
        """
        Args:
            default_ttl: TTL in seconds applied when set() gets no explicit ttl.
        """
        self.default_ttl = default_ttl
        self._cache: Dict[str, CacheEntry] = {}

    @staticmethod
    def _expired(entry: CacheEntry) -> bool:
        """Return True when the entry has outlived its TTL."""
        return time.time() - entry.timestamp > entry.ttl

    def set(self, key: str, data: Any, ttl: Optional[int] = None) -> None:
        """Set cache entry with TTL (seconds); None means use default_ttl.

        Bug fix: uses an explicit None check instead of `ttl or default`,
        so an intentional ttl=0 ("expire immediately") is honored rather
        than silently replaced by the default.
        """
        if ttl is None:
            ttl = self.default_ttl
        self._cache[key] = CacheEntry(data=data, timestamp=time.time(), ttl=ttl)
        logger.debug("Cached data for key '%s' with TTL %ss", key, ttl)

    def get(self, key: str, default: Any = None) -> Any:
        """Get cache entry, return default if expired or missing."""
        entry = self._cache.get(key)
        if entry is None:
            logger.debug("Cache miss for key '%s'", key)
            return default
        if self._expired(entry):
            # Evict lazily on access.
            logger.debug("Cache expired for key '%s'", key)
            del self._cache[key]
            return default
        logger.debug("Cache hit for key '%s'", key)
        return entry.data

    def has(self, key: str) -> bool:
        """Check if key exists and is not expired.

        Bug fix: checks entry presence directly instead of
        `get(key) is not None`, so a key deliberately cached with a None
        value correctly reports as present.
        """
        entry = self._cache.get(key)
        if entry is None:
            return False
        if self._expired(entry):
            del self._cache[key]
            return False
        return True

    def delete(self, key: str) -> bool:
        """Delete cache entry; return True if something was removed."""
        if key in self._cache:
            del self._cache[key]
            logger.debug("Deleted cache entry for key '%s'", key)
            return True
        return False

    def clear(self) -> None:
        """Clear all cache entries."""
        count = len(self._cache)
        self._cache.clear()
        logger.debug("Cleared %s cache entries", count)

    def cleanup_expired(self) -> int:
        """Remove expired cache entries and return count removed."""
        now = time.time()
        expired_keys = [
            key for key, entry in self._cache.items()
            if now - entry.timestamp > entry.ttl
        ]
        for key in expired_keys:
            del self._cache[key]
        if expired_keys:
            logger.debug("Cleaned up %s expired cache entries", len(expired_keys))
        return len(expired_keys)

    def get_all(self) -> Dict[str, Any]:
        """Get all non-expired cache entries (evicts expired ones first)."""
        self.cleanup_expired()
        return {key: entry.data for key, entry in self._cache.items()}

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics: entry count, keys, rough size estimate."""
        self.cleanup_expired()
        return {
            "total_entries": len(self._cache),
            "keys": list(self._cache.keys()),
            # str() length is a crude proxy for payload size, not real bytes.
            "memory_usage_estimate": sum(
                len(str(entry.data)) for entry in self._cache.values()
            )
        }

    def set_multiple(self, data: Dict[str, Any], ttl: Optional[int] = None) -> None:
        """Set multiple cache entries at once (same ttl for all)."""
        for key, value in data.items():
            self.set(key, value, ttl)

    def get_multiple(self, keys: List[str]) -> Dict[str, Any]:
        """Get multiple cache entries at once; missing/expired map to None."""
        return {key: self.get(key) for key in keys}
|
||||
|
||||
# Specialized cache for common cycling data patterns
class CyclingDataCache(CacheManager):
    """Specialized cache for cycling data with helper methods.

    Thin convenience wrappers over CacheManager using fixed keys and TTLs
    chosen per data volatility: profiles and activity details change rarely
    (1 h), activity lists change often (15 min), analyses are expensive to
    recompute (24 h).
    """

    def cache_user_profile(self, profile_data: Dict[str, Any]) -> None:
        """Cache user profile data under the fixed key "user_profile"."""
        self.set("user_profile", profile_data, ttl=3600)  # 1 hour TTL

    def cache_activities(self, activities: List[Dict[str, Any]]) -> None:
        """Cache activities list under the fixed key "recent_activities"."""
        self.set("recent_activities", activities, ttl=900)  # 15 minutes TTL

    def cache_activity_details(self, activity_id: str, details: Dict[str, Any]) -> None:
        """Cache specific activity details keyed by activity id."""
        self.set(f"activity_details_{activity_id}", details, ttl=3600)

    def get_user_profile(self) -> Optional[Dict[str, Any]]:
        """Get cached user profile, or None when absent/expired."""
        return self.get("user_profile")

    def get_recent_activities(self) -> List[Dict[str, Any]]:
        """Get cached recent activities; empty list when absent/expired."""
        return self.get("recent_activities", [])

    def get_activity_details(self, activity_id: str) -> Optional[Dict[str, Any]]:
        """Get cached activity details, or None when absent/expired."""
        return self.get(f"activity_details_{activity_id}")

    def cache_workout_analysis(self, workout_id: str, analysis: str) -> None:
        """Cache workout analysis results keyed by workout id."""
        self.set(f"analysis_{workout_id}", analysis, ttl=86400)  # 24 hours TTL

    def get_workout_analysis(self, workout_id: str) -> Optional[str]:
        """Get cached workout analysis, or None when absent/expired."""
        return self.get(f"analysis_{workout_id}")
|
||||
252
core/core_app.py
Normal file
252
core/core_app.py
Normal file
@@ -0,0 +1,252 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Core Application - Clean skeleton with separated concerns
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
from ..config import Config, load_config
|
||||
from ..llm.llm_client import LLMClient
|
||||
from ..mcp.mcp_client import MCPClient
|
||||
from .cache_manager import CacheManager
|
||||
from .template_engine import TemplateEngine
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class CyclingAnalyzerApp:
    """Main application class - orchestrates all components.

    Wires together the LLM client, MCP client, response cache and prompt
    template engine, and exposes the high-level analysis entry points.
    """

    def __init__(self, config: Config, test_mode: bool = False):
        """
        Args:
            config: Application configuration (also supplies templates_dir).
            test_mode: When True, analysis methods print the rendered prompt
                instead of calling the LLM.
        """
        self.config = config
        self.test_mode = test_mode
        self.llm_client = LLMClient(config)
        self.mcp_client = MCPClient(config)
        self.cache_manager = CacheManager()
        self.template_engine = TemplateEngine(config.templates_dir)

        # Diagnostic dump of cache state; the cache is only populated later
        # by initialize()/_preload_cache, so this normally reports EMPTY.
        logger.info("DEBUG: Cache contents after init:")
        for key in ["user_profile", "last_cycling_details"]:
            data = self.cache_manager.get(key, {})
            logger.info(f" {key}: keys={list(data.keys()) if data else 'EMPTY'}, length={len(data) if data else 0}")

    async def initialize(self):
        """Initialize all components and pre-populate the cache."""
        logger.info("Initializing application components...")

        await self.llm_client.initialize()
        await self.mcp_client.initialize()
        await self._preload_cache()

        logger.info("Application initialization complete")

    async def cleanup(self):
        """Cleanup all components (reverse of initialization order)."""
        await self.mcp_client.cleanup()
        await self.llm_client.cleanup()

    async def _preload_cache(self):
        """Pre-load and cache common MCP responses."""
        logger.info("Pre-loading cache...")

        # Cache user profile
        if await self.mcp_client.has_tool("user_profile"):
            profile = await self.mcp_client.call_tool("user_profile", {})
            self.cache_manager.set("user_profile", profile)

        # Cache recent activities
        if await self.mcp_client.has_tool("get_activities"):
            activities = await self.mcp_client.call_tool("get_activities", {"limit": 10})
            self.cache_manager.set("recent_activities", activities)

            # Find and cache last cycling activity details. Bug fix: kept
            # inside this guard so `activities` is always bound (a missing
            # get_activities tool no longer risks a NameError); `or []`
            # tolerates a tool call that returns None.
            cycling_activity = self._find_last_cycling_activity(activities or [])
            if cycling_activity and await self.mcp_client.has_tool("get_activity_details"):
                details = await self.mcp_client.call_tool(
                    "get_activity_details",
                    {"activity_id": cycling_activity["activityId"]}
                )
                self.cache_manager.set("last_cycling_details", details)

    def _find_last_cycling_activity(self, activities: list) -> Optional[Dict[str, Any]]:
        """Find the most recent cycling activity from activities list.

        Robustness fix: tolerates records whose "activityType" is None
        instead of raising AttributeError.
        """
        cycling_activities = [
            act for act in activities
            if "cycling" in (act.get("activityType") or {}).get("typeKey", "").lower()
        ]
        if not cycling_activities:
            return None
        # Most recent by GMT start time; records missing it sort as oldest.
        return max(cycling_activities, key=lambda x: x.get("startTimeGmt", 0))

    # Core functionality methods

    def _print_test_prompt(self, header: str, prompt: str) -> str:
        """Print a rendered prompt between banners (test mode) and return a summary.

        Factored out of the three analysis methods that previously
        duplicated this block verbatim.
        """
        banner = "=" * 60
        print("\n" + banner)
        print(header)
        print(banner)
        print(prompt)
        print(banner + "\n")
        return f"TEST MODE: Prompt rendered (length: {len(prompt)} characters)"

    async def analyze_workout(self, analysis_type: str = "last_workout", **kwargs) -> str:
        """Analyze workout using LLM with cached data.

        Args:
            analysis_type: Workflow template name (without extension).
            **kwargs: Extra template variables merged into the context.

        Returns:
            The LLM response text (or a summary string in test mode).
        """
        template_name = f"workflows/{analysis_type}.txt"

        # Prepare enhanced context with data quality assessment
        context = self._prepare_analysis_context(**kwargs)

        # Load and render template
        logger.info(f"Rendering template {template_name} with context keys: {list(context.keys())}")
        prompt = self.template_engine.render(template_name, **context)

        if self.test_mode:
            logger.info("Test mode: Printing rendered prompt instead of calling LLM")
            return self._print_test_prompt("RENDERED PROMPT FOR LLM:", prompt)

        # Call LLM
        return await self.llm_client.generate(prompt)

    def _prepare_analysis_context(self, **kwargs) -> Dict[str, Any]:
        """Prepare analysis context with data quality assessment.

        NOTE(review): assumes the cached values are dicts (as stored by
        _preload_cache); a non-dict payload would break the .keys() logging.
        """
        user_info = self.cache_manager.get("user_profile", {})
        activity_summary = self.cache_manager.get("last_cycling_details", {})

        logger.info(f"DEBUG: user_info keys: {list(user_info.keys()) if user_info else 'EMPTY'}, length: {len(user_info) if user_info else 0}")
        logger.info(f"DEBUG: activity_summary keys: {list(activity_summary.keys()) if activity_summary else 'EMPTY'}, length: {len(activity_summary) if activity_summary else 0}")

        # Assess data quality
        data_quality = self._assess_data_quality(activity_summary)
        logger.info(f"DEBUG: data_quality: {data_quality}")

        context = {
            "user_info": user_info,
            "activity_summary": activity_summary,
            "data_quality": data_quality,
            "missing_metrics": data_quality.get("missing", []),
            **kwargs  # caller-supplied values override the defaults above
        }

        logger.debug(f"Prepared context with data quality: {data_quality.get('overall', 'N/A')}")
        return context

    def _assess_data_quality(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
        """Assess quality and completeness of activity data.

        Returns:
            Dict with "overall" ("complete"/"incomplete"), "is_indoor",
            "missing" metric names, a human-readable "note" and the list of
            "available_metrics" actually present in summaryDTO.
        """
        summary_dto = activity_data.get('summaryDTO', {})
        is_indoor = activity_data.get('is_indoor', False)

        missing = []
        overall = "complete"

        # Key metrics for outdoor cycling
        outdoor_metrics = ['averageSpeed', 'maxSpeed', 'elevationGain', 'elevationLoss']
        # Key metrics for indoor cycling
        indoor_metrics = ['averagePower', 'maxPower', 'averageHR', 'maxHR']

        if is_indoor:
            expected = indoor_metrics
            note = "Indoor activity - focus on power and heart rate metrics"
        else:
            expected = outdoor_metrics
            note = "Outdoor activity - full metrics expected"

        for metric in expected:
            if summary_dto.get(metric) is None:
                missing.append(metric)

        if missing:
            overall = "incomplete"
            note += f" | Missing: {', '.join(missing)}"

        return {
            "overall": overall,
            "is_indoor": is_indoor,
            "missing": missing,
            "note": note,
            "available_metrics": [k for k, v in summary_dto.items() if v is not None]
        }

    async def suggest_next_workout(self, **kwargs) -> str:
        """Generate workout suggestion using MCP tools and LLM."""
        # Use MCP-enabled agent for dynamic tool usage
        template_name = "workflows/suggest_next_workout.txt"

        # Prepare enhanced context
        context = self._prepare_analysis_context(**kwargs)
        context["training_rules"] = kwargs.get("training_rules", "")

        prompt = self.template_engine.render(template_name, **context)

        if self.test_mode:
            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
            return self._print_test_prompt("RENDERED PROMPT FOR LLM WITH TOOLS:", prompt)

        # Use MCP-enabled LLM client for this
        return await self.llm_client.generate_with_tools(prompt, self.mcp_client)

    async def enhanced_analysis(self, analysis_type: str, **kwargs) -> str:
        """Perform enhanced analysis with full MCP tool access."""
        template_name = "workflows/enhanced_analysis.txt"

        # Prepare enhanced context, including a snapshot of all cached data
        context = self._prepare_analysis_context(**kwargs)
        context.update({
            "analysis_type": analysis_type,
            "cached_data": self.cache_manager.get_all(),
        })

        prompt = self.template_engine.render(template_name, **context)

        if self.test_mode:
            logger.info("Test mode: Printing rendered prompt instead of calling LLM with tools")
            return self._print_test_prompt("RENDERED PROMPT FOR ENHANCED ANALYSIS:", prompt)

        return await self.llm_client.generate_with_tools(prompt, self.mcp_client)

    # Utility methods

    async def list_available_tools(self) -> list:
        """Get list of available MCP tools."""
        return await self.mcp_client.list_tools()

    def list_templates(self) -> list:
        """Get list of available templates."""
        return self.template_engine.list_templates()

    def get_cached_data(self, key: Optional[str] = None) -> Any:
        """Get cached data by key, or all non-expired entries when key is None."""
        return self.cache_manager.get(key) if key else self.cache_manager.get_all()
|
||||
|
||||
async def main():
    """Main entry point"""
    logging.basicConfig(level=logging.INFO)

    app = None  # sentinel so cleanup only runs once the app exists
    try:
        config = load_config()
        app = CyclingAnalyzerApp(config)
        await app.initialize()

        # Example usage
        print("Available tools:", len(await app.list_available_tools()))
        print("Available templates:", len(app.list_templates()))

        # Run analysis
        # analysis = await app.analyze_workout("analyze_last_workout",
        #                                      training_rules="Sample rules")
        # print("Analysis:", analysis[:200] + "...")

    except Exception as e:
        logger.error(f"Application error: {e}")
    finally:
        if app is not None:
            await app.cleanup()


if __name__ == "__main__":
    asyncio.run(main())
|
||||
595
core/enhanced_cache_manager.py
Normal file
595
core/enhanced_cache_manager.py
Normal file
@@ -0,0 +1,595 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Enhanced Cache Manager with Metrics Tracking
|
||||
"""
|
||||
|
||||
import json
import logging
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
|
||||
|
||||
from cache_manager import CacheManager
|
||||
from cycling_metrics import CyclingMetricsCalculator, WorkoutMetrics, TrainingLoad
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
class PerformanceTrend:
    """Track performance trends over time for a single metric."""
    metric_name: str      # human-readable label, e.g. "Average Power"
    current_value: float  # average over the most recent workouts
    trend_7day: float     # % change over 7 days
    trend_30day: float    # % change over 30 days
    trend_direction: str  # "improving", "stable", "declining"
    confidence: float     # 0-1, based on data points available
|
||||
|
||||
class MetricsTrackingCache(CacheManager):
|
||||
"""Enhanced cache that calculates and tracks cycling metrics"""
|
||||
|
||||
    def __init__(self, default_ttl: int = 300, metrics_file: str = "metrics_history.json"):
        """Create the cache and load persisted workout history from disk.

        Args:
            default_ttl: Default entry TTL in seconds (forwarded to CacheManager).
            metrics_file: JSON file used to persist performance history.
        """
        super().__init__(default_ttl)
        # Calculator stays unset until set_user_profile() supplies FTP/max HR,
        # or is lazily created with defaults on first use.
        self.metrics_calculator = None
        self.metrics_file = Path(metrics_file)
        self.performance_history = []
        self.load_metrics_history()
|
||||
|
||||
def set_user_profile(self, ftp: Optional[float] = None, max_hr: Optional[int] = None):
|
||||
"""Set user profile for accurate calculations"""
|
||||
self.metrics_calculator = CyclingMetricsCalculator(user_ftp=ftp, user_max_hr=max_hr)
|
||||
logger.info(f"Metrics calculator configured: FTP={ftp}, Max HR={max_hr}")
|
||||
|
||||
    def cache_workout_with_metrics(self, activity_id: str, activity_data: Dict[str, Any]) -> WorkoutMetrics:
        """Cache workout data and calculate comprehensive metrics with validation.

        Validates/normalizes the raw activity, computes metrics, caches both
        (1 h TTL), appends the workout to the persisted performance history
        and invalidates cached trend summaries.
        """
        if not self.metrics_calculator:
            # Initialize with defaults if not set
            self.metrics_calculator = CyclingMetricsCalculator()

        # Validate and normalize input data
        validated_data = self._validate_activity_data(activity_data)

        # Calculate metrics with safe handling
        metrics = self.metrics_calculator.calculate_workout_metrics(validated_data)

        # Cache the raw data and calculated metrics (1 hour TTL)
        self.set(f"activity_raw_{activity_id}", activity_data, ttl=3600)
        self.set(f"activity_metrics_{activity_id}", asdict(metrics), ttl=3600)

        # Add to performance history; falls back to "now" when the activity
        # carries no startTimeGmt.
        workout_record = {
            "activity_id": activity_id,
            "date": validated_data.get('startTimeGmt', datetime.now().isoformat()),
            "metrics": asdict(metrics),
            "data_quality": validated_data.get('data_quality', 'complete')
        }

        self.performance_history.append(workout_record)
        self.save_metrics_history()

        # Update performance trends (drops stale cached trend entries)
        self._update_performance_trends()

        logger.info(f"Cached workout {activity_id} with calculated metrics (quality: {workout_record['data_quality']})")
        return metrics
|
||||
|
||||
    def _validate_activity_data(self, activity_data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate and normalize activity data for safe metric calculation.

        Mutates and returns `activity_data`: guarantees a `summaryDTO` dict
        containing every expected metric key (missing ones set to None), and
        annotates the record with `data_quality` and `validation_warnings`.
        """
        if not isinstance(activity_data, dict):
            # Garbage in: return a minimal, clearly-marked structure instead
            # of raising, so downstream metric code can still run.
            logger.warning("Invalid activity data - creating minimal structure")
            return {"data_quality": "invalid", "summaryDTO": {}}

        summary_dto = activity_data.get('summaryDTO', {})
        if not isinstance(summary_dto, dict):
            summary_dto = {}

        data_quality = "complete"
        warnings = []

        # Check critical fields
        critical_fields = ['duration', 'distance']
        for field in critical_fields:
            if summary_dto.get(field) is None:
                data_quality = "incomplete"
                warnings.append(f"Missing {field}")
                # Set reasonable defaults
                if field == 'duration':
                    summary_dto['duration'] = 0
                elif field == 'distance':
                    summary_dto['distance'] = 0

        # Indoor activity adjustments
        is_indoor = activity_data.get('is_indoor', False)
        if is_indoor:
            # For indoor, speed may be None - estimate from power if available
            if summary_dto.get('averageSpeed') is None and summary_dto.get('averagePower') is not None:
                # Rough estimate: speed = power / (weight * constant), but without weight, use placeholder
                summary_dto['averageSpeed'] = None  # Keep None, let calculator handle
                warnings.append("Indoor activity - speed estimated from power")

            # Elevation not applicable for indoor
            if 'elevationGain' in summary_dto:
                summary_dto['elevationGain'] = 0
                summary_dto['elevationLoss'] = 0
                warnings.append("Indoor activity - elevation set to 0")

        # Ensure all expected fields exist (from custom_garth_mcp normalization)
        expected_fields = [
            'averageSpeed', 'maxSpeed', 'averageHR', 'maxHR', 'averagePower',
            'maxPower', 'normalizedPower', 'trainingStressScore', 'elevationGain',
            'elevationLoss', 'distance', 'duration'
        ]
        for field in expected_fields:
            if field not in summary_dto:
                summary_dto[field] = None
                # Only downgrade once; warnings still record every absent key.
                if data_quality == "complete":
                    data_quality = "incomplete"
                warnings.append(f"Missing {field}")

        activity_data['summaryDTO'] = summary_dto
        activity_data['data_quality'] = data_quality
        activity_data['validation_warnings'] = warnings

        if warnings:
            logger.debug(f"Activity validation warnings: {', '.join(warnings)}")

        return activity_data
|
||||
|
||||
def get_workout_metrics(self, activity_id: str) -> Optional[WorkoutMetrics]:
|
||||
"""Get calculated metrics for a workout"""
|
||||
metrics_data = self.get(f"activity_metrics_{activity_id}")
|
||||
if metrics_data:
|
||||
return WorkoutMetrics(**metrics_data)
|
||||
return None
|
||||
|
||||
def get_training_load(self, days: int = 42) -> Optional[TrainingLoad]:
|
||||
"""Calculate current training load metrics"""
|
||||
if not self.metrics_calculator:
|
||||
return None
|
||||
|
||||
# Get recent workout history
|
||||
cutoff_date = datetime.now() - timedelta(days=days)
|
||||
recent_workouts = []
|
||||
|
||||
for record in self.performance_history:
|
||||
workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
|
||||
if workout_date >= cutoff_date:
|
||||
# Reconstruct activity data for training load calculation
|
||||
activity_data = self.get(f"activity_raw_{record['activity_id']}")
|
||||
if activity_data:
|
||||
recent_workouts.append(activity_data)
|
||||
|
||||
if not recent_workouts:
|
||||
return None
|
||||
|
||||
training_load = self.metrics_calculator.calculate_training_load(recent_workouts)
|
||||
|
||||
# Cache training load
|
||||
self.set("current_training_load", asdict(training_load), ttl=3600)
|
||||
|
||||
return training_load
|
||||
|
||||
def get_performance_trends(self, days: int = 30) -> List[PerformanceTrend]:
|
||||
"""Get performance trends for key metrics"""
|
||||
trends = self.get(f"performance_trends_{days}d")
|
||||
if trends:
|
||||
return [PerformanceTrend(**trend) for trend in trends]
|
||||
|
||||
# Calculate if not cached
|
||||
return self._calculate_performance_trends(days)
|
||||
|
||||
def _calculate_performance_trends(self, days: int) -> List[PerformanceTrend]:
|
||||
"""Calculate performance trends over specified period"""
|
||||
if not self.performance_history:
|
||||
return []
|
||||
|
||||
cutoff_date = datetime.now() - timedelta(days=days)
|
||||
recent_metrics = []
|
||||
|
||||
for record in self.performance_history:
|
||||
workout_date = datetime.fromisoformat(record['date'].replace('Z', '+00:00'))
|
||||
if workout_date >= cutoff_date:
|
||||
recent_metrics.append({
|
||||
'date': workout_date,
|
||||
'metrics': WorkoutMetrics(**record['metrics'])
|
||||
})
|
||||
|
||||
if len(recent_metrics) < 2:
|
||||
return []
|
||||
|
||||
# Sort by date
|
||||
recent_metrics.sort(key=lambda x: x['date'])
|
||||
|
||||
trends = []
|
||||
|
||||
# Calculate trends for key metrics
|
||||
metrics_to_track = [
|
||||
('avg_speed_kmh', 'Average Speed'),
|
||||
('avg_hr', 'Average Heart Rate'),
|
||||
('avg_power', 'Average Power'),
|
||||
('estimated_ftp', 'Estimated FTP'),
|
||||
('training_stress_score', 'Training Stress Score')
|
||||
]
|
||||
|
||||
for metric_attr, metric_name in metrics_to_track:
|
||||
trend = self._calculate_single_metric_trend(recent_metrics, metric_attr, metric_name, days)
|
||||
if trend:
|
||||
trends.append(trend)
|
||||
|
||||
# Cache trends
|
||||
self.set(f"performance_trends_{days}d", [asdict(trend) for trend in trends], ttl=1800)
|
||||
|
||||
return trends
|
||||
|
||||
    def _calculate_single_metric_trend(self, recent_metrics: List[Dict],
                                      metric_attr: str, metric_name: str,
                                      days: int) -> Optional[PerformanceTrend]:
        """Calculate trend for a single metric.

        `recent_metrics` must be sorted oldest-first; each entry holds 'date'
        (naive datetime — aware values would break the week_ago comparison)
        and 'metrics' (WorkoutMetrics). Returns None when fewer than two
        non-None samples exist for `metric_attr`.
        """
        # Extract values, filtering out None values
        values_with_dates = []
        for record in recent_metrics:
            value = getattr(record['metrics'], metric_attr)
            if value is not None:
                values_with_dates.append((record['date'], value))

        if len(values_with_dates) < 2:
            return None

        # Calculate current value (average of last 3 workouts)
        recent_values = [v for _, v in values_with_dates[-3:]]
        current_value = sum(recent_values) / len(recent_values)

        # Calculate 7-day trend if we have enough data: compare the average
        # of the older half of this week's samples against the newer half.
        week_ago = datetime.now() - timedelta(days=7)
        week_values = [v for d, v in values_with_dates if d >= week_ago]

        if len(week_values) >= 2:
            week_old_avg = sum(week_values[:len(week_values)//2]) / (len(week_values)//2)
            week_recent_avg = sum(week_values[len(week_values)//2:]) / (len(week_values) - len(week_values)//2)
            trend_7day = ((week_recent_avg - week_old_avg) / week_old_avg * 100) if week_old_avg > 0 else 0
        else:
            trend_7day = 0

        # Calculate 30-day trend (same half-vs-half comparison over the full window)
        if len(values_with_dates) >= 4:
            old_avg = sum(v for _, v in values_with_dates[:len(values_with_dates)//2]) / (len(values_with_dates)//2)
            recent_avg = sum(v for _, v in values_with_dates[len(values_with_dates)//2:]) / (len(values_with_dates) - len(values_with_dates)//2)
            trend_30day = ((recent_avg - old_avg) / old_avg * 100) if old_avg > 0 else 0
        else:
            trend_30day = 0

        # Determine trend direction from whichever window moved more;
        # changes within +/-2% are treated as noise ("stable").
        primary_trend = trend_7day if abs(trend_7day) > abs(trend_30day) else trend_30day
        if primary_trend > 2:
            trend_direction = "improving"
        elif primary_trend < -2:
            trend_direction = "declining"
        else:
            trend_direction = "stable"

        # Calculate confidence based on data points
        confidence = min(len(values_with_dates) / 10, 1.0)  # Max confidence at 10+ data points

        return PerformanceTrend(
            metric_name=metric_name,
            current_value=round(current_value, 2),
            trend_7day=round(trend_7day, 1),
            trend_30day=round(trend_30day, 1),
            trend_direction=trend_direction,
            confidence=round(confidence, 2)
        )
|
||||
|
||||
def _update_performance_trends(self):
|
||||
"""Update cached performance trends after new workout"""
|
||||
# Clear cached trends to force recalculation
|
||||
keys_to_clear = [key for key in self._cache.keys() if key.startswith("performance_trends_")]
|
||||
for key in keys_to_clear:
|
||||
self.delete(key)
|
||||
|
||||
    def get_deterministic_analysis_data(self, activity_id: str) -> Dict[str, Any]:
        """Get all deterministic data for analysis with validation.

        Bundles cached workout metrics, current training load, performance
        trends and a standardized assessment into one dict for the analysis
        layer. Returns an error dict when no metrics are cached for the
        activity.
        """
        metrics = self.get_workout_metrics(activity_id)
        training_load = self.get_training_load()
        performance_trends = self.get_performance_trends()

        if not metrics:
            return {"error": "No metrics available for activity"}

        # Generate standardized assessment with safe handling: an assessment
        # failure degrades to a placeholder instead of aborting the analysis.
        try:
            from cycling_metrics import generate_standardized_assessment
            assessment = generate_standardized_assessment(metrics, training_load)
        except Exception as e:
            logger.warning(f"Could not generate standardized assessment: {e}")
            assessment = {"error": "Assessment calculation failed", "workout_classification": "unknown"}

        return {
            "workout_metrics": asdict(metrics),
            "training_load": asdict(training_load) if training_load else None,
            "performance_trends": [asdict(trend) for trend in performance_trends if trend],
            "standardized_assessment": assessment,
            "analysis_timestamp": datetime.now().isoformat()
        }
|
||||
|
||||
def get_ftp_estimates_history(self) -> List[Dict[str, Any]]:
|
||||
"""Get historical FTP estimates for tracking progress"""
|
||||
ftp_history = []
|
||||
|
||||
for record in self.performance_history:
|
||||
metrics = WorkoutMetrics(**record['metrics'])
|
||||
if metrics.estimated_ftp:
|
||||
ftp_history.append({
|
||||
"date": record['date'],
|
||||
"activity_id": record['activity_id'],
|
||||
"estimated_ftp": metrics.estimated_ftp,
|
||||
"workout_type": record['metrics'].get('workout_classification', 'unknown')
|
||||
})
|
||||
|
||||
# Sort by date and return recent estimates
|
||||
ftp_history.sort(key=lambda x: x['date'], reverse=True)
|
||||
return ftp_history[:20] # Last 20 estimates
|
||||
|
||||
    def get_gear_usage_analysis(self) -> Dict[str, Any]:
        """Get single speed gear usage analysis.

        Summarizes which estimated chainring/cog combinations were used,
        overall and per terrain type, from the stored performance history.
        """
        gear_data = []

        # Collect one sample per workout that has an estimated gear ratio
        for record in self.performance_history:
            metrics = WorkoutMetrics(**record['metrics'])
            if metrics.estimated_gear_ratio:
                gear_data.append({
                    "date": record['date'],
                    "estimated_ratio": metrics.estimated_gear_ratio,
                    "chainring": metrics.estimated_chainring,
                    "cog": metrics.estimated_cog,
                    "avg_speed": metrics.avg_speed_kmh,
                    "elevation_gain": metrics.elevation_gain_m,
                    "terrain_type": self._classify_terrain(metrics)
                })

        if not gear_data:
            return {"message": "No gear data available"}

        # Analyze gear preferences by terrain: terrain -> gear -> usage count
        gear_preferences = {}
        for data in gear_data:
            terrain = data['terrain_type']
            gear = f"{data['chainring']}x{data['cog']}"

            if terrain not in gear_preferences:
                gear_preferences[terrain] = {}
            if gear not in gear_preferences[terrain]:
                gear_preferences[terrain][gear] = 0
            gear_preferences[terrain][gear] += 1

        # Calculate most common gears across all terrains
        all_gears = {}
        for data in gear_data:
            gear = f"{data['chainring']}x{data['cog']}"
            all_gears[gear] = all_gears.get(gear, 0) + 1

        most_common_gear = max(all_gears.items(), key=lambda x: x[1])

        return {
            "total_workouts_analyzed": len(gear_data),
            "most_common_gear": {
                "gear": most_common_gear[0],
                "usage_count": most_common_gear[1],
                "usage_percentage": round(most_common_gear[1] / len(gear_data) * 100, 1)
            },
            "gear_by_terrain": gear_preferences,
            "gear_recommendations": self._recommend_gears(gear_data)
        }
|
||||
|
||||
def _classify_terrain(self, metrics: WorkoutMetrics) -> str:
|
||||
"""Classify terrain type from workout metrics"""
|
||||
if metrics.distance_km == 0:
|
||||
return "unknown"
|
||||
|
||||
elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
|
||||
|
||||
if elevation_per_km > 15:
|
||||
return "steep_climbing"
|
||||
elif elevation_per_km > 8:
|
||||
return "moderate_climbing"
|
||||
elif elevation_per_km > 3:
|
||||
return "rolling_hills"
|
||||
else:
|
||||
return "flat_terrain"
|
||||
|
||||
def _recommend_gears(self, gear_data: List[Dict]) -> Dict[str, str]:
    """Recommend the fastest observed gear for each terrain type.

    For every terrain, the gear whose recorded rides have the highest
    mean speed wins; terrains whose best mean speed is not strictly
    positive are omitted. Returns terrain type -> "chainring x cog".
    """
    if not gear_data:
        return {}

    # terrain -> gear label -> observed average speeds
    speed_samples: Dict[str, Dict[str, list]] = {}
    for entry in gear_data:
        terrain_bucket = speed_samples.setdefault(entry['terrain_type'], {})
        gear_label = f"{entry['chainring']}x{entry['cog']}"
        terrain_bucket.setdefault(gear_label, []).append(entry['avg_speed'])

    recommendations: Dict[str, str] = {}
    for terrain, gears in speed_samples.items():
        mean_speeds = {gear: sum(samples) / len(samples)
                       for gear, samples in gears.items()}
        # max keeps the first maximum in insertion order, matching the
        # original strict-greater-than scan.
        best_gear = max(mean_speeds, key=mean_speeds.get)
        if mean_speeds[best_gear] > 0:
            recommendations[terrain] = best_gear

    return recommendations
|
||||
|
||||
def load_metrics_history(self):
    """Load performance history from file.

    Initializes ``self.performance_history`` to an empty list when the
    file is missing or unreadable, so startup never fails on bad state.
    """
    # No file yet: start fresh without touching the filesystem further.
    if not self.metrics_file.exists():
        self.performance_history = []
        return

    try:
        with open(self.metrics_file, 'r') as handle:
            payload = json.load(handle)
        self.performance_history = payload.get('performance_history', [])
        logger.info(f"Loaded {len(self.performance_history)} workout records")
    except Exception as e:
        # A corrupt or unreadable file must not break startup.
        logger.error(f"Error loading metrics history: {e}")
        self.performance_history = []
|
||||
|
||||
def save_metrics_history(self):
    """Save performance history to file.

    Trims the history to the most recent 200 workouts before writing,
    and logs (rather than raises) on any failure.
    """
    try:
        # Keep only last 200 workouts to prevent file from growing too large
        self.performance_history = self.performance_history[-200:]

        payload = {
            'performance_history': self.performance_history,
            'last_updated': datetime.now().isoformat(),
        }

        # default=str stringifies anything json cannot serialize natively.
        with open(self.metrics_file, 'w') as handle:
            json.dump(payload, handle, indent=2, default=str)

        logger.debug(f"Saved {len(self.performance_history)} workout records")
    except Exception as e:
        logger.error(f"Error saving metrics history: {e}")
|
||||
|
||||
def get_workout_summary_for_llm(self, activity_id: str) -> Dict[str, Any]:
    """Get structured workout summary optimized for LLM analysis.

    Returns the error dict unchanged when the deterministic data lookup
    fails, so callers can surface the failure directly.
    """
    analysis = self.get_deterministic_analysis_data(activity_id)
    if "error" in analysis:
        return analysis

    metrics = analysis["workout_metrics"]
    assessment = analysis["standardized_assessment"]
    training_load = analysis.get("training_load")

    # Optional sections are None when their source data is absent.
    load_context = None
    if training_load:
        load_context = {
            "fitness_level": training_load["fitness"],
            "fatigue_level": training_load["fatigue"],
            "form": training_load["form"],
        }

    gear_summary = None
    if metrics.get("estimated_gear_ratio"):
        gear_summary = {
            "estimated_gear": f"{metrics.get('estimated_chainring', 'N/A')}x{metrics.get('estimated_cog', 'N/A')}",
            "gear_ratio": metrics.get("estimated_gear_ratio"),
        }

    return {
        "workout_classification": assessment["workout_classification"],
        "intensity_rating": f"{assessment['intensity_rating']}/10",
        "key_metrics": {
            "duration": f"{metrics['duration_minutes']:.0f} minutes",
            "distance": f"{metrics['distance_km']:.1f} km",
            "avg_speed": f"{metrics['avg_speed_kmh']:.1f} km/h",
            "elevation_gain": f"{metrics['elevation_gain_m']:.0f} m",
        },
        "performance_indicators": {
            "efficiency_score": assessment["efficiency_score"],
            "estimated_ftp": metrics.get("estimated_ftp"),
            "intensity_factor": metrics.get("intensity_factor"),
        },
        "recovery_guidance": assessment["recovery_recommendation"],
        "training_load_context": load_context,
        "single_speed_analysis": gear_summary,
    }
|
||||
|
||||
# Integration with existing core app
def enhance_core_app_with_metrics() -> str:
    """Return an illustrative code snippet showing how to integrate metrics tracking.

    The returned string is documentation-as-text for developers; it is never
    executed here. NOTE(review): indentation inside the snippet was
    reconstructed from a mangled source — confirm against the original file.
    """
    integration_code = '''
# In core_app.py, replace the cache manager initialization:

from enhanced_cache_manager import MetricsTrackingCache

class CyclingAnalyzerApp:
    def __init__(self, config: Config):
        self.config = config
        self.llm_client = LLMClient(config)
        self.mcp_client = MCPClient(config)

        # Use enhanced cache with metrics tracking
        self.cache_manager = MetricsTrackingCache(
            default_ttl=config.cache_ttl,
            metrics_file="workout_metrics.json"
        )

        self.template_engine = TemplateEngine(config.templates_dir)

    async def _preload_cache(self):
        """Enhanced preloading with metrics calculation"""
        logger.info("Pre-loading cache with metrics calculation...")

        # Set user profile for accurate calculations
        profile = await self.mcp_client.call_tool("user_profile", {})
        if profile:
            # Extract FTP and max HR from profile if available
            ftp = profile.get("ftp") or None
            max_hr = profile.get("maxHR") or None
            self.cache_manager.set_user_profile(ftp=ftp, max_hr=max_hr)

        # Cache recent activities with metrics
        activities = await self.mcp_client.call_tool("get_activities", {"limit": 10})
        if activities:
            self.cache_manager.set("recent_activities", activities)

            # Find and analyze last cycling activity
            cycling_activity = self._find_last_cycling_activity(activities)
            if cycling_activity:
                activity_details = await self.mcp_client.call_tool(
                    "get_activity_details",
                    {"activity_id": cycling_activity["activityId"]}
                )

                # Cache with metrics calculation
                metrics = self.cache_manager.cache_workout_with_metrics(
                    cycling_activity["activityId"],
                    activity_details
                )

                logger.info(f"Calculated metrics for last workout: {metrics.workout_classification}")

    async def analyze_workout_with_metrics(self, activity_id: str = None, **kwargs) -> str:
        """Enhanced analysis using calculated metrics"""
        if not activity_id:
            # Get last cached cycling activity
            activities = self.cache_manager.get("recent_activities", [])
            cycling_activity = self._find_last_cycling_activity(activities)
            activity_id = cycling_activity["activityId"] if cycling_activity else None

        if not activity_id:
            return "No cycling activity found for analysis"

        # Get deterministic analysis data
        analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)

        if "error" in analysis_data:
            return f"Error: {analysis_data['error']}"

        # Use template with deterministic data
        template_name = "workflows/analyze_workout_with_metrics.txt"

        context = {
            "workout_summary": analysis_data,
            "performance_trends": self.cache_manager.get_performance_trends(30),
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)
'''
    return integration_code
|
||||
|
||||
579
core/enhanced_core_app.py
Normal file
579
core/enhanced_core_app.py
Normal file
@@ -0,0 +1,579 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Enhanced Core Application with Deterministic Metrics
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
from config import Config, load_config
|
||||
from llm_client import LLMClient
|
||||
from mcp_client import MCPClient
|
||||
from enhanced_cache_manager import MetricsTrackingCache
|
||||
from template_engine import TemplateEngine
|
||||
from cycling_metrics import CyclingMetricsCalculator, generate_standardized_assessment
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class EnhancedCyclingAnalyzerApp:
    """Enhanced application with deterministic metrics and analysis.

    Orchestrates the LLM client, MCP client, metrics-tracking cache and
    template engine; all analysis entry points render a template and send
    it to the LLM.
    """

    def __init__(self, config: Config):
        self.config = config
        self.llm_client = LLMClient(config)
        self.mcp_client = MCPClient(config)

        # Use enhanced cache with metrics tracking
        self.cache_manager = MetricsTrackingCache(
            default_ttl=config.cache_ttl,
            metrics_file="workout_metrics.json"
        )

        self.template_engine = TemplateEngine(config.templates_dir)

        # User settings for accurate calculations (populated in
        # _setup_user_metrics; None until then).
        self.user_ftp = None
        self.user_max_hr = None

    async def initialize(self):
        """Initialize all components with metrics support"""
        logger.info("Initializing enhanced application components...")

        await self.llm_client.initialize()
        await self.mcp_client.initialize()
        await self._setup_user_metrics()
        await self._preload_cache_with_metrics()

        logger.info("Enhanced application initialization complete")

    async def cleanup(self):
        """Cleanup all components"""
        # Save metrics before cleanup so history survives the shutdown.
        self.cache_manager.save_metrics_history()

        await self.mcp_client.cleanup()
        await self.llm_client.cleanup()

    async def _setup_user_metrics(self):
        """Setup user profile for accurate metric calculations"""
        try:
            # Try to get user profile from MCP
            if await self.mcp_client.has_tool("user_profile"):
                profile = await self.mcp_client.call_tool("user_profile", {})

                # Extract FTP and max HR if available (field names vary by source)
                self.user_ftp = profile.get("ftp") or profile.get("functionalThresholdPower")
                self.user_max_hr = profile.get("maxHR") or profile.get("maxHeartRate")

            # Also try user settings as a fallback source
            if await self.mcp_client.has_tool("user_settings"):
                settings = await self.mcp_client.call_tool("user_settings", {})
                if not self.user_ftp:
                    self.user_ftp = settings.get("ftp")
                if not self.user_max_hr:
                    self.user_max_hr = settings.get("maxHeartRate")

            logger.info(f"User metrics configured: FTP={self.user_ftp}W, Max HR={self.user_max_hr}bpm")

            # Set up cache manager with user profile
            self.cache_manager.set_user_profile(ftp=self.user_ftp, max_hr=self.user_max_hr)

        except Exception as e:
            logger.warning(f"Could not setup user metrics: {e}")
            # Initialize with defaults
            self.cache_manager.set_user_profile()

    async def _preload_cache_with_metrics(self):
        """Pre-load cache with calculated metrics"""
        logger.info("Pre-loading cache with metrics calculation...")

        try:
            # Cache recent activities
            if await self.mcp_client.has_tool("get_activities"):
                activities = await self.mcp_client.call_tool("get_activities", {"limit": 15})
                self.cache_manager.set("recent_activities", activities)

                # Process cycling activities with metrics
                cycling_count = 0
                for activity in activities:
                    type_key = self._activity_type_key(activity)

                    if "cycling" in type_key or "bike" in type_key:
                        activity_id = activity.get("activityId")
                        if activity_id and cycling_count < 5:  # Limit to 5 recent cycling activities
                            try:
                                # Get detailed activity data
                                if await self.mcp_client.has_tool("get_activity_details"):
                                    details = await self.mcp_client.call_tool(
                                        "get_activity_details",
                                        {"activity_id": str(activity_id)}
                                    )

                                    # Calculate and cache metrics
                                    metrics = self.cache_manager.cache_workout_with_metrics(
                                        str(activity_id), details
                                    )

                                    logger.info(f"Processed activity {activity_id}: {metrics.workout_classification}")
                                    cycling_count += 1

                            except Exception as e:
                                # Best-effort preload: one bad activity must not stop the rest.
                                logger.warning(f"Could not process activity {activity_id}: {e}")

                logger.info(f"Processed {cycling_count} cycling activities with metrics")

        except Exception as e:
            logger.error(f"Error preloading cache with metrics: {e}")

    # Enhanced analysis methods

    async def analyze_workout_deterministic(self, activity_id: str = None, **kwargs) -> str:
        """Analyze workout using deterministic metrics"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for analysis"

        # Get deterministic analysis data
        analysis_data = self.cache_manager.get_workout_summary_for_llm(activity_id)

        if "error" in analysis_data:
            return f"Error: {analysis_data['error']}"

        # Get performance trends
        performance_trends = self.cache_manager.get_performance_trends(30)

        # Use enhanced template
        template_name = "workflows/analyze_workout_with_metrics.txt"

        context = {
            "workout_summary": analysis_data,
            "performance_trends": [
                {
                    "metric_name": trend.metric_name,
                    "current_value": trend.current_value,
                    "trend_direction": trend.trend_direction,
                    "trend_7day": trend.trend_7day
                }
                for trend in performance_trends
            ],
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def estimate_ftp_without_power(self, activity_id: str = None, **kwargs) -> str:
        """Estimate FTP for workouts without power meter"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for FTP estimation"

        # Get workout metrics
        metrics = self.cache_manager.get_workout_metrics(activity_id)
        if not metrics:
            return "No metrics available for FTP estimation"

        # Get FTP estimation history
        ftp_history = self.cache_manager.get_ftp_estimates_history()

        # Calculate additional metrics for FTP estimation
        hr_intensity = 0
        if metrics.avg_hr and self.user_max_hr:
            hr_intensity = metrics.avg_hr / self.user_max_hr
        elif metrics.avg_hr:
            # Estimate max HR if not provided
            estimated_max_hr = 220 - 30  # Assume 30 years old, should be configurable
            hr_intensity = metrics.avg_hr / estimated_max_hr

        # Estimate power from speed
        # NOTE(review): heuristic power model — constants appear empirical; verify source.
        avg_speed_ms = metrics.avg_speed_kmh / 3.6
        estimated_power_from_speed = (avg_speed_ms ** 2.5) * 3.5

        # Adjust for elevation
        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
        elevation_factor = 1 + (elevation_per_km / 1000) * 0.1
        elevation_adjusted_power = estimated_power_from_speed * elevation_factor

        template_name = "workflows/estimate_ftp_no_power.txt"

        context = {
            "duration_minutes": metrics.duration_minutes,
            "distance_km": metrics.distance_km,
            "avg_speed_kmh": metrics.avg_speed_kmh,
            "elevation_gain_m": metrics.elevation_gain_m,
            "avg_hr": metrics.avg_hr,
            "max_hr": metrics.max_hr,
            "hr_intensity": hr_intensity,
            "estimated_power_from_speed": round(estimated_power_from_speed, 0),
            "elevation_adjusted_power": round(elevation_adjusted_power, 0),
            "estimated_ftp": metrics.estimated_ftp,
            "elevation_per_km": round(elevation_per_km, 1),
            "elevation_factor": elevation_factor,
            "ftp_history": ftp_history[:10],  # Last 10 estimates
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def analyze_single_speed_gears(self, activity_id: str = None, **kwargs) -> str:
        """Analyze single speed gear selection and optimization"""
        if not activity_id:
            activity_id = self._get_last_cycling_activity_id()

        if not activity_id:
            return "No cycling activity found for gear analysis"

        # Get workout metrics
        metrics = self.cache_manager.get_workout_metrics(activity_id)
        if not metrics:
            return "No metrics available for gear analysis"

        # Get gear usage analysis
        gear_analysis = self.cache_manager.get_gear_usage_analysis()

        # Calculate additional gear metrics
        # NOTE(review): hard-coded drivetrain options — presumably the user's
        # setup; confirm and consider moving to config.
        chainrings = [46, 38]
        cogs = [14, 15, 16, 17, 18, 19, 20]
        wheel_circumference = 2.096  # meters

        available_gears = []
        for chainring in chainrings:
            for cog in cogs:
                ratio = chainring / cog
                gear_inches = ratio * 27  # 700c wheel ≈ 27" diameter
                development = ratio * wheel_circumference
                available_gears.append({
                    "chainring": chainring,
                    "cog": cog,
                    "ratio": round(ratio, 2),
                    "gear_inches": round(gear_inches, 1),
                    "development": round(development, 1)
                })

        # Estimate cadence
        if metrics.avg_speed_kmh > 0 and metrics.estimated_gear_ratio:
            speed_ms = metrics.avg_speed_kmh / 3.6
            estimated_cadence = (speed_ms / (metrics.estimated_gear_ratio * wheel_circumference)) * 60
        else:
            estimated_cadence = 85  # Default assumption

        # Classify terrain
        elevation_per_km = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0
        if elevation_per_km > 15:
            terrain_type = "steep_climbing"
        elif elevation_per_km > 8:
            terrain_type = "moderate_climbing"
        elif elevation_per_km > 3:
            terrain_type = "rolling_hills"
        else:
            terrain_type = "flat_terrain"

        template_name = "workflows/single_speed_gear_analysis.txt"

        context = {
            "avg_speed_kmh": metrics.avg_speed_kmh,
            "duration_minutes": metrics.duration_minutes,
            "elevation_gain_m": metrics.elevation_gain_m,
            "terrain_type": terrain_type,
            "estimated_chainring": metrics.estimated_chainring,
            "estimated_cog": metrics.estimated_cog,
            "estimated_gear_ratio": metrics.estimated_gear_ratio,
            "gear_inches": round((metrics.estimated_gear_ratio or 2.5) * 27, 1),
            "development_meters": round((metrics.estimated_gear_ratio or 2.5) * wheel_circumference, 1),
            "available_gears": available_gears,
            "gear_usage_by_terrain": gear_analysis.get("gear_by_terrain", {}),
            "best_flat_gear": "46x16",  # Example, should be calculated
            "best_climbing_gear": "38x20",  # Example, should be calculated
            "most_versatile_gear": gear_analysis.get("most_common_gear", {}).get("gear", "46x17"),
            "efficiency_rating": 7,  # Should be calculated based on speed/effort
            "estimated_cadence": round(estimated_cadence, 0),
            "elevation_per_km": round(elevation_per_km, 1),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def get_training_load_analysis(self, **kwargs) -> str:
        """Analyze training load and recovery status"""
        training_load = self.cache_manager.get_training_load()
        if not training_load:
            return "Insufficient workout history for training load analysis"

        # Get performance trends
        performance_trends = self.cache_manager.get_performance_trends(42)  # 6 weeks

        # Classify training load status from the stress balance (TSB)
        if training_load.training_stress_balance > 5:
            form_status = "fresh_and_ready"
        elif training_load.training_stress_balance > -5:
            form_status = "maintaining_fitness"
        elif training_load.training_stress_balance > -15:
            form_status = "building_fitness"
        else:
            form_status = "high_fatigue_risk"

        template_name = "workflows/training_load_analysis.txt"

        context = {
            "training_load": {
                "fitness": training_load.fitness,
                "fatigue": training_load.fatigue,
                "form": training_load.form,
                "acute_load": training_load.acute_training_load,
                "chronic_load": training_load.chronic_training_load
            },
            "form_status": form_status,
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "trend_direction": trend.trend_direction,
                    "trend_7day": trend.trend_7day,
                    "trend_30day": trend.trend_30day,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    async def suggest_next_workout_data_driven(self, **kwargs) -> str:
        """Generate data-driven workout suggestions"""
        # Get training load status
        training_load = self.cache_manager.get_training_load()
        performance_trends = self.cache_manager.get_performance_trends(14)  # 2 weeks

        # Get recent workout pattern. Uses _activity_type_key so that
        # string-typed "activityType" entries no longer raise AttributeError
        # (consistent with _get_last_cycling_activity_id).
        recent_activities = self.cache_manager.get("recent_activities", [])
        recent_cycling = [act for act in recent_activities
                          if "cycling" in self._activity_type_key(act)]

        # Analyze recent workout pattern
        recent_intensities = []
        recent_durations = []
        recent_types = []

        for activity in recent_cycling[:7]:  # Last 7 cycling activities
            activity_id = str(activity.get("activityId"))
            metrics = self.cache_manager.get_workout_metrics(activity_id)
            if metrics:
                recent_intensities.append(self._rate_intensity(metrics))
                recent_durations.append(metrics.duration_minutes)
                recent_types.append(self._classify_workout(metrics))

        # Calculate training pattern analysis (neutral defaults when no data)
        avg_intensity = sum(recent_intensities) / len(recent_intensities) if recent_intensities else 5
        avg_duration = sum(recent_durations) / len(recent_durations) if recent_durations else 60

        # Determine workout recommendation based on data
        if training_load and training_load.form < -10:
            recommendation_type = "recovery_focus"
        elif avg_intensity > 7:
            recommendation_type = "endurance_focus"
        elif avg_intensity < 4:
            recommendation_type = "intensity_focus"
        else:
            recommendation_type = "balanced_progression"

        template_name = "workflows/suggest_next_workout_data_driven.txt"

        context = {
            "training_load": training_load,
            "performance_trends": performance_trends,
            "recent_workout_analysis": {
                "avg_intensity": round(avg_intensity, 1),
                "avg_duration": round(avg_duration, 0),
                "workout_types": recent_types,
                "pattern_analysis": self._analyze_workout_pattern(recent_types)
            },
            "recommendation_type": recommendation_type,
            "user_ftp": self.user_ftp,
            "training_rules": kwargs.get("training_rules", ""),
            **kwargs
        }

        prompt = self.template_engine.render(template_name, **context)
        return await self.llm_client.generate(prompt)

    # Utility methods

    @staticmethod
    def _activity_type_key(activity) -> str:
        """Return the lowercase activity-type key, tolerating dict or string forms."""
        activity_type = activity.get("activityType", {})
        if isinstance(activity_type, dict):
            return activity_type.get("typeKey", "").lower()
        return str(activity_type).lower()

    def _get_last_cycling_activity_id(self) -> Optional[str]:
        """Get the ID of the most recent cycling activity"""
        activities = self.cache_manager.get("recent_activities", [])
        for activity in activities:
            type_key = self._activity_type_key(activity)
            if "cycling" in type_key or "bike" in type_key:
                return str(activity.get("activityId"))
        return None

    def _rate_intensity(self, metrics) -> int:
        """Rate workout intensity 1-10 based on metrics"""
        factors = []

        # Speed factor
        if metrics.avg_speed_kmh > 40:
            factors.append(9)
        elif metrics.avg_speed_kmh > 35:
            factors.append(7)
        elif metrics.avg_speed_kmh > 25:
            factors.append(5)
        else:
            factors.append(3)

        # Duration factor (2 points per hour, capped at 6)
        duration_intensity = min(metrics.duration_minutes / 60 * 2, 6)
        factors.append(duration_intensity)

        # Elevation factor (skipped entirely for zero-distance workouts)
        if metrics.distance_km > 0:
            elevation_per_km = metrics.elevation_gain_m / metrics.distance_km
            if elevation_per_km > 15:
                factors.append(8)
            elif elevation_per_km > 10:
                factors.append(6)
            elif elevation_per_km > 5:
                factors.append(4)
            else:
                factors.append(2)

        return min(int(sum(factors) / len(factors)), 10)

    def _classify_workout(self, metrics) -> str:
        """Classify workout type"""
        duration = metrics.duration_minutes
        avg_speed = metrics.avg_speed_kmh
        elevation_gain = metrics.elevation_gain_m / metrics.distance_km if metrics.distance_km > 0 else 0

        if duration < 30:
            return "short_intensity"
        elif duration > 180:
            return "long_endurance"
        elif elevation_gain > 10:
            return "climbing_focused"
        elif avg_speed > 35:
            return "high_speed"
        elif avg_speed < 20:
            return "recovery_easy"
        else:
            return "moderate_endurance"

    def _analyze_workout_pattern(self, recent_types: list) -> str:
        """Analyze recent workout pattern"""
        if not recent_types:
            return "insufficient_data"

        total_workouts = len(recent_types)
        intensity_workouts = sum(1 for t in recent_types if "intensity" in t or "speed" in t)
        endurance_workouts = sum(1 for t in recent_types if "endurance" in t)
        recovery_workouts = sum(1 for t in recent_types if "recovery" in t)

        intensity_ratio = intensity_workouts / total_workouts
        endurance_ratio = endurance_workouts / total_workouts

        if intensity_ratio > 0.5:
            return "high_intensity_bias"
        elif recovery_workouts > total_workouts * 0.4:
            return "recovery_heavy"
        elif endurance_ratio > 0.6:
            return "endurance_focused"
        else:
            return "balanced_training"

    # Compatibility methods for existing interface

    async def analyze_workout(self, analysis_type: str = "deterministic", **kwargs) -> str:
        """Analyze workout with deterministic metrics (enhanced version)"""
        return await self.analyze_workout_deterministic(**kwargs)

    async def suggest_next_workout(self, **kwargs) -> str:
        """Generate data-driven workout suggestion"""
        return await self.suggest_next_workout_data_driven(**kwargs)

    async def enhanced_analysis(self, analysis_type: str, **kwargs) -> str:
        """Perform enhanced analysis based on type"""
        if analysis_type == "ftp_estimation":
            return await self.estimate_ftp_without_power(**kwargs)
        elif analysis_type == "gear_analysis":
            return await self.analyze_single_speed_gears(**kwargs)
        elif analysis_type == "training_load":
            return await self.get_training_load_analysis(**kwargs)
        else:
            # Fallback to deterministic analysis
            return await self.analyze_workout_deterministic(**kwargs)

    # Existing interface compatibility

    async def list_available_tools(self) -> list:
        return await self.mcp_client.list_tools()

    def list_templates(self) -> list:
        return self.template_engine.list_templates()

    def get_cached_data(self, key: str = None) -> Any:
        return self.cache_manager.get(key) if key else self.cache_manager.get_all()

    # New deterministic data access methods

    def get_performance_summary(self) -> Dict[str, Any]:
        """Get comprehensive performance summary"""
        performance_trends = self.cache_manager.get_performance_trends(30)
        training_load = self.cache_manager.get_training_load()
        ftp_history = self.cache_manager.get_ftp_estimates_history()
        gear_analysis = self.cache_manager.get_gear_usage_analysis()

        return {
            "performance_trends": [
                {
                    "metric": trend.metric_name,
                    "current": trend.current_value,
                    "trend_7d": f"{trend.trend_7day:+.1f}%",
                    "trend_30d": f"{trend.trend_30day:+.1f}%",
                    "direction": trend.trend_direction,
                    "confidence": trend.confidence
                }
                for trend in performance_trends
            ],
            "training_load": {
                "fitness": training_load.fitness if training_load else None,
                "fatigue": training_load.fatigue if training_load else None,
                "form": training_load.form if training_load else None
            },
            "ftp_estimates": {
                # NOTE(review): assumes ftp_history is newest-first — confirm.
                "latest": ftp_history[0]["estimated_ftp"] if ftp_history else None,
                "trend": "improving" if len(ftp_history) > 1 and ftp_history[0]["estimated_ftp"] > ftp_history[1]["estimated_ftp"] else "stable",
                "history_count": len(ftp_history)
            },
            "gear_usage": {
                "most_common": gear_analysis.get("most_common_gear", {}),
                "total_analyzed": gear_analysis.get("total_workouts_analyzed", 0)
            }
        }

    def get_metrics_for_activity(self, activity_id: str) -> Optional[Dict[str, Any]]:
        """Get all calculated metrics for a specific activity"""
        return self.cache_manager.get_deterministic_analysis_data(activity_id)
|
||||
303
core/template_engine.py
Normal file
303
core/template_engine.py
Normal file
@@ -0,0 +1,303 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Template Engine - Simplified template loading and rendering
|
||||
"""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class TemplateEngine:
|
||||
"""Simple template engine for prompt management"""
|
||||
|
||||
def __init__(self, templates_dir: str):
    """Create the engine rooted at *templates_dir*, creating it if needed."""
    root = Path(templates_dir)
    root.mkdir(exist_ok=True)
    self.templates_dir = root

    # Make sure the expected subdirectory layout is present.
    self._ensure_structure()
|
||||
|
||||
def _ensure_structure(self):
    """Ensure basic template directory structure exists"""
    # Sub-trees the engine expects: workflow prompts plus reusable bases.
    for subdir in ("workflows",
                   "base/system_prompts",
                   "base/data_sections",
                   "base/analysis_frameworks"):
        (self.templates_dir / subdir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def list_templates(self) -> List[str]:
    """List all available templates"""
    # Every .txt under the root (recursively), as paths relative to it.
    found = [str(path.relative_to(self.templates_dir))
             for path in self.templates_dir.rglob("*.txt")]
    return sorted(found)
|
||||
|
||||
def template_exists(self, template_name: str) -> bool:
|
||||
"""Check if template exists"""
|
||||
template_path = self._resolve_template_path(template_name)
|
||||
return template_path.exists() if template_path else False
|
||||
|
||||
def _resolve_template_path(self, template_name: str) -> Path:
|
||||
"""Resolve template name to full path"""
|
||||
# Handle different template name formats
|
||||
if template_name.endswith('.txt'):
|
||||
template_path = self.templates_dir / template_name
|
||||
else:
|
||||
template_path = self.templates_dir / f"{template_name}.txt"
|
||||
|
||||
return template_path
|
||||
|
||||
def load_template(self, template_name: str) -> str:
|
||||
"""Load raw template content"""
|
||||
template_path = self._resolve_template_path(template_name)
|
||||
|
||||
if not template_path.exists():
|
||||
raise FileNotFoundError(f"Template not found: {template_name}")
|
||||
|
||||
try:
|
||||
with open(template_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
logger.debug(f"Loaded template: {template_name}")
|
||||
return content
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading template {template_name}: {e}")
|
||||
raise
|
||||
|
||||
def render(self, template_name: str, **kwargs) -> str:
|
||||
"""Load and render template with variables, supporting conditionals and nested access"""
|
||||
content = self.load_template(template_name)
|
||||
|
||||
# Flatten context for safe nested access
|
||||
flat_context = self._flatten_context(kwargs)
|
||||
|
||||
# Handle section includes
|
||||
content = self._process_includes(content, **flat_context)
|
||||
|
||||
# Process conditionals
|
||||
content = self._process_conditionals(content, **flat_context)
|
||||
|
||||
try:
|
||||
rendered = content.format(**flat_context)
|
||||
logger.debug(f"Rendered template: {template_name}")
|
||||
return rendered
|
||||
|
||||
except KeyError as e:
|
||||
logger.error(f"Missing variable in template {template_name}: {e}")
|
||||
logger.debug(f"Available variables: {list(flat_context.keys())}")
|
||||
raise ValueError(f"Missing variable in template {template_name}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error rendering template {template_name}: {e}")
|
||||
raise
|
||||
|
||||
def _flatten_context(self, context: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Flatten nested context with safe access and None handling"""
|
||||
flat = {}
|
||||
|
||||
def flatten_item(key_path: str, value: Any):
|
||||
if value is None:
|
||||
flat[key_path] = "N/A"
|
||||
elif isinstance(value, dict):
|
||||
for subkey, subvalue in value.items():
|
||||
new_path = f"{key_path}_{subkey}" if key_path else subkey
|
||||
flatten_item(new_path, subvalue)
|
||||
elif isinstance(value, list):
|
||||
for i, item in enumerate(value[:5]): # Limit list length
|
||||
flatten_item(f"{key_path}_{i}", item)
|
||||
else:
|
||||
flat[key_path] = str(value)
|
||||
|
||||
for key, value in context.items():
|
||||
flatten_item(key, value)
|
||||
|
||||
return flat
|
||||
|
||||
def _process_conditionals(self, content: str, **context) -> str:
|
||||
"""Process {if condition}content{endif} blocks"""
|
||||
import re
|
||||
|
||||
# Find all conditional blocks
|
||||
conditional_pattern = re.compile(r'\{if\s+([^\}]+)\}(.*?)\{endif\}', re.DOTALL)
|
||||
|
||||
def evaluate_condition(condition: str, context: Dict[str, Any]) -> bool:
|
||||
"""Simple condition evaluator supporting dot and bracket notation"""
|
||||
# Handle dot and bracket notation by replacing . and [ ] with _ for flat context lookup
|
||||
flat_condition = condition.replace('.', '_').replace('[', '_').replace(']', '_')
|
||||
|
||||
# Handle simple variable checks like 'var' or 'var == True'
|
||||
if flat_condition in context:
|
||||
value = context[flat_condition]
|
||||
if value in ['True', 'true', True]:
|
||||
return True
|
||||
if value == 'N/A' or value is None or value == '':
|
||||
return False
|
||||
return bool(str(value).lower() in ['true', 'yes', '1'])
|
||||
|
||||
# Handle simple equality like 'var == value'
|
||||
if ' == ' in condition:
|
||||
var, val = [part.strip() for part in condition.split(' == ', 1)]
|
||||
flat_var = var.replace('.', '_').replace('[', '_').replace(']', '_')
|
||||
if flat_var in context:
|
||||
return str(context[flat_var]).lower() == str(val).lower()
|
||||
|
||||
logger.warning(f"Unknown condition: {condition}")
|
||||
return False
|
||||
|
||||
matches = list(conditional_pattern.finditer(content))
|
||||
if not matches:
|
||||
return content
|
||||
|
||||
# Process from end to start to avoid index shifts
|
||||
for match in reversed(matches):
|
||||
condition = match.group(1)
|
||||
block_content = match.group(2)
|
||||
|
||||
if evaluate_condition(condition, context):
|
||||
replacement = block_content.strip()
|
||||
else:
|
||||
replacement = ""
|
||||
|
||||
content = content[:match.start()] + replacement + content[match.end():]
|
||||
|
||||
return content
|
||||
|
||||
def _process_includes(self, content: str, **kwargs) -> str:
|
||||
"""Process section includes like {activity_summary_section}"""
|
||||
import re
|
||||
|
||||
# Define section mappings
|
||||
section_mappings = {
|
||||
'activity_summary_section': 'base/data_sections/activity_summary.txt',
|
||||
'user_info_section': 'base/data_sections/user_info.txt',
|
||||
'training_rules_section': 'base/data_sections/training_rules.txt',
|
||||
'workout_data_section': 'base/data_sections/workout_data.txt',
|
||||
'assessment_points': 'base/analysis_frameworks/assessment_points.txt',
|
||||
'performance_analysis': 'base/analysis_frameworks/performance_analysis.txt',
|
||||
}
|
||||
|
||||
# Find and replace section placeholders
|
||||
section_pattern = re.compile(r'\{(\w+_section|\w+_points|\w+_analysis)\}')
|
||||
|
||||
for match in section_pattern.finditer(content):
|
||||
placeholder = match.group(0)
|
||||
section_name = match.group(1)
|
||||
|
||||
if section_name in section_mappings:
|
||||
section_file = section_mappings[section_name]
|
||||
try:
|
||||
section_content = self.load_template(section_file)
|
||||
# Render section with same kwargs
|
||||
# Recursively render the section content
|
||||
section_rendered = self.render(section_file, **kwargs)
|
||||
content = content.replace(placeholder, section_rendered)
|
||||
except (FileNotFoundError, KeyError, ValueError) as e:
|
||||
logger.warning(f"Could not process section {section_name}: {e}")
|
||||
# Replace with empty string if section fails
|
||||
content = content.replace(placeholder, "")
|
||||
|
||||
return content
|
||||
|
||||
def create_template(self, template_name: str, content: str) -> None:
|
||||
"""Create a new template file"""
|
||||
template_path = self._resolve_template_path(template_name)
|
||||
template_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
with open(template_path, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
|
||||
logger.info(f"Created template: {template_name}")
|
||||
|
||||
def get_template_info(self, template_name: str) -> Dict[str, Any]:
|
||||
"""Get information about a template"""
|
||||
if not self.template_exists(template_name):
|
||||
return {"exists": False}
|
||||
|
||||
template_path = self._resolve_template_path(template_name)
|
||||
content = self.load_template(template_name)
|
||||
|
||||
# Extract variables used in template
|
||||
import re
|
||||
variables = set(re.findall(r'\{(\w+)\}', content))
|
||||
|
||||
return {
|
||||
"exists": True,
|
||||
"path": str(template_path),
|
||||
"size": len(content),
|
||||
"variables": sorted(list(variables)),
|
||||
"line_count": len(content.splitlines())
|
||||
}
|
||||
|
||||
# Utility functions for template management
|
||||
def create_default_templates(templates_dir: str) -> None:
    """Write the stock template files shipped with the application.

    Templates that already exist on disk are skipped, so user
    customizations are never overwritten.
    """
    engine = TemplateEngine(templates_dir)

    # Mapping of template name -> default content.
    defaults = {
        "base/system_prompts/main_agent.txt":
            "You are an expert cycling coach with access to comprehensive Garmin Connect data.\n"
            "You analyze cycling workouts, provide performance insights, and give actionable training recommendations.\n"
            "Use the available tools to gather detailed workout data and provide comprehensive analysis.",

        "base/system_prompts/no_tools_analysis.txt":
            "You are an expert cycling coach. Perform comprehensive analysis using the provided data.\n"
            "Do not use any tools - all relevant data is included in the prompt.",

        "base/data_sections/activity_summary.txt":
            "ACTIVITY SUMMARY:\n{activity_summary}",

        "base/data_sections/user_info.txt":
            "USER INFO:\n{user_info}",

        "base/data_sections/training_rules.txt":
            "My training rules and goals:\n{training_rules}",

        "base/analysis_frameworks/assessment_points.txt":
            "Please provide:\n"
            "1. Overall assessment of the workout\n"
            "2. How well it aligns with my rules and goals\n"
            "3. Areas for improvement\n"
            "4. Specific feedback on power, heart rate, duration, and intensity\n"
            "5. Recovery recommendations\n"
            "6. Comparison with typical performance metrics",

        "workflows/analyze_last_workout.txt":
            "Analyze my most recent cycling workout using the provided data.\n\n"
            "{activity_summary_section}\n\n"
            "{user_info_section}\n\n"
            "{training_rules_section}\n\n"
            "{assessment_points}\n\n"
            "Focus on the provided activity details for your analysis.",

        "workflows/suggest_next_workout.txt":
            "Please suggest my next cycling workout based on my recent training history.\n\n"
            "{training_rules_section}\n\n"
            "Please provide:\n"
            "1. Analysis of my recent training pattern\n"
            "2. Identified gaps or imbalances in my training\n"
            "3. Specific workout recommendation for my next session\n"
            "4. Target zones (power, heart rate, duration)\n"
            "5. Rationale for the recommendation based on recent performance",

        "workflows/enhanced_analysis.txt":
            "Perform enhanced {analysis_type} analysis using all available data and tools.\n\n"
            "Available cached data: {cached_data}\n\n"
            "Use MCP tools as needed to gather additional data for comprehensive analysis.",
    }

    for name, body in defaults.items():
        # Guard clause: never clobber an existing (possibly customized) file.
        if engine.template_exists(name):
            continue
        engine.create_template(name, body)
        logger.info(f"Created default template: {name}")
|
||||
Reference in New Issue
Block a user