2025-09-12 07:32:32 -07:00
parent 45a62e7c3b
commit 7c7dcb5b10
29 changed files with 2493 additions and 394 deletions


@@ -43,12 +43,12 @@ class AIService:
         response = await self._make_ai_request(prompt)
         return self._parse_workout_analysis(response)

-    async def generate_plan(self, rules: List[Dict], goals: Dict[str, Any]) -> Dict[str, Any]:
-        """Generate a training plan using AI."""
+    async def generate_plan(self, rules_text: str, goals: Dict[str, Any]) -> Dict[str, Any]:
+        """Generate a training plan using AI with plaintext rules as per design spec."""
         prompt_template = await self.prompt_manager.get_active_prompt("plan_generation")
         context = {
-            "rules": rules,
+            "rules_text": rules_text, # Use plaintext rules directly
             "goals": goals,
             "current_fitness_level": goals.get("fitness_level", "intermediate")
         }
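For orientation, a minimal caller sketch for the reworked plaintext-rules signature; the ai_service instance, the rules text, and the goals values are illustrative assumptions, not part of this commit:

    # Hypothetical usage of the new signature; ai_service is an assumed AIService instance.
    rules_text = "Max 4 rides per week; at least one rest day after hard intervals."
    goals = {"fitness_level": "intermediate"}
    plan = await ai_service.generate_plan(rules_text=rules_text, goals=goals)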
@@ -57,13 +57,80 @@ class AIService:
         response = await self._make_ai_request(prompt)
         return self._parse_plan_response(response)

+    async def generate_training_plan(self, rules_text: str, goals: Dict[str, Any], preferred_routes: List[int]) -> Dict[str, Any]:
+        """Generate a training plan using AI with plaintext rules as per design specification."""
+        prompt_template = await self.prompt_manager.get_active_prompt("training_plan_generation")
+        if not prompt_template:
+            # Fallback to general plan generation prompt
+            prompt_template = await self.prompt_manager.get_active_prompt("plan_generation")
+        context = {
+            "rules_text": rules_text, # Use plaintext rules directly without parsing
+            "goals": goals,
+            "preferred_routes": preferred_routes,
+            "current_fitness_level": goals.get("fitness_level", "intermediate")
+        }
+        prompt = prompt_template.format(**context)
+        response = await self._make_ai_request(prompt)
+        return self._parse_plan_response(response)
+
     async def parse_rules_from_natural_language(self, natural_language: str) -> Dict[str, Any]:
         """Parse natural language rules into structured format."""
         prompt_template = await self.prompt_manager.get_active_prompt("rule_parsing")
         prompt = prompt_template.format(user_rules=natural_language)
         response = await self._make_ai_request(prompt)
-        return self._parse_rules_response(response)
+        parsed_rules = self._parse_rules_response(response)
+        # Add confidence scoring to the parsed rules
+        parsed_rules = self._add_confidence_scoring(parsed_rules)
+        return parsed_rules
+
+    def _add_confidence_scoring(self, parsed_rules: Dict[str, Any]) -> Dict[str, Any]:
+        """Add confidence scoring to parsed rules based on parsing quality."""
+        confidence_score = self._calculate_confidence_score(parsed_rules)
+        # Add confidence score to the parsed rules
+        if isinstance(parsed_rules, dict):
+            parsed_rules["_confidence"] = confidence_score
+            parsed_rules["_parsing_quality"] = self._get_parsing_quality(confidence_score)
+        return parsed_rules
+
+    def _calculate_confidence_score(self, parsed_rules: Dict[str, Any]) -> float:
+        """Calculate confidence score based on parsing quality."""
+        if not isinstance(parsed_rules, dict):
+            return 0.5  # Default confidence for non-dict responses
+        score = 0.0
+        # Score based on presence of key cycling training rule fields
+        key_fields = {
+            "max_rides_per_week": 0.3,
+            "min_rest_between_hard": 0.2,
+            "max_duration_hours": 0.2,
+            "weather_constraints": 0.3,
+            "intensity_limits": 0.2,
+            "schedule_constraints": 0.2
+        }
+        for field, weight in key_fields.items():
+            if parsed_rules.get(field) is not None:
+                score += weight
+        return min(score, 1.0)
+
+    def _get_parsing_quality(self, confidence_score: float) -> str:
+        """Get parsing quality description based on confidence score."""
+        if confidence_score >= 0.8:
+            return "excellent"
+        elif confidence_score >= 0.6:
+            return "good"
+        elif confidence_score >= 0.4:
+            return "fair"
+        else:
+            return "poor"
+
     async def evolve_plan(self, evolution_context: Dict[str, Any]) -> Dict[str, Any]:
         """Evolve a training plan using AI based on workout analysis."""