diff --git a/CL_implementation_guide.md b/CL_implementation_guide.md
new file mode 100644
index 0000000..1ea9149
--- /dev/null
+++ b/CL_implementation_guide.md
@@ -0,0 +1,1565 @@
# Junior Developer Implementation Guide
## AI Cycling Coach - Critical Features Implementation

This guide provides step-by-step instructions to implement the missing core features identified in the codebase evaluation.

---

## 🎯 **Implementation Phases Overview**

| Phase | Focus | Duration | Difficulty |
|-------|-------|----------|------------|
| **Phase 1** | Backend Core APIs | 2-3 weeks | Medium |
| **Phase 2** | Frontend Core Features | 3-4 weeks | Medium |
| **Phase 3** | Integration & Testing | 1-2 weeks | Easy-Medium |
| **Phase 4** | Polish & Production | 1-2 weeks | Easy |

---

# Phase 1: Backend Core APIs Implementation

## Step 1.1: Plan Generation Endpoint

### **File:** `backend/app/routes/plan.py`

**Add this endpoint to the existing router:**

```python
from datetime import datetime

from app.schemas.plan import PlanGenerationRequest, PlanGenerationResponse
from app.services.ai_service import AIService, AIServiceError

@router.post("/generate", response_model=PlanGenerationResponse)
async def generate_plan(
    request: PlanGenerationRequest,
    db: AsyncSession = Depends(get_db)
):
    """Generate a new training plan using AI based on rules and goals."""
    try:
        # Fetch rules from database
        rules_query = select(Rule).where(Rule.id.in_(request.rule_ids))
        result = await db.execute(rules_query)
        rules = result.scalars().all()

        if len(rules) != len(request.rule_ids):
            raise HTTPException(status_code=404, detail="One or more rules not found")

        # Get plaintext rules
        rule_texts = [rule.rule_text for rule in rules]

        # Initialize AI service
        ai_service = AIService(db)

        # Generate plan
        plan_data = await ai_service.generate_plan(rule_texts, request.goals.dict())

        # Create plan record
        db_plan = Plan(
            jsonb_plan=plan_data,
            version=1,
            parent_plan_id=None
        )
        db.add(db_plan)
        await db.commit()
        await db.refresh(db_plan)

        return PlanGenerationResponse(
            plan=db_plan,
            generation_metadata={
                "rules_used": len(rules),
                "goals": request.goals.dict(),
                "generated_at": datetime.utcnow().isoformat()
            }
        )

    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 404 above) pass through unchanged
        raise
    except AIServiceError as e:
        raise HTTPException(status_code=503, detail=f"AI service error: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Plan generation failed: {str(e)}")
```

### **File:** `backend/app/schemas/plan.py`

**Add these new schemas:**

```python
from typing import Dict, List, Optional, Any
from pydantic import BaseModel, Field
from uuid import UUID

class TrainingGoals(BaseModel):
    """Training goals for plan generation."""
    primary_goal: str = Field(..., description="Primary training goal")
    target_weekly_hours: int = Field(..., ge=3, le=20, description="Target hours per week")
    fitness_level: str = Field(..., description="Current fitness level")
    event_date: Optional[str] = Field(None, description="Target event date (YYYY-MM-DD)")
    preferred_routes: List[int] = Field(default=[], description="Preferred route IDs")
    avoid_days: List[str] = Field(default=[], description="Days to avoid training")

class PlanGenerationRequest(BaseModel):
    """Request schema for plan generation."""
    rule_ids: List[UUID] = Field(..., description="Rule set IDs to apply")
    goals: TrainingGoals = Field(..., description="Training goals")
    duration_weeks: int = Field(4, ge=1, le=20, description="Plan duration in weeks")
    user_preferences: Optional[Dict[str, Any]] = Field(None, description="Additional preferences")

class PlanGenerationResponse(BaseModel):
    """Response schema for plan generation."""
    plan: Plan = Field(..., description="Generated training plan")
    generation_metadata: Dict[str, Any] = Field(..., description="Generation metadata")

    class Config:
        orm_mode = True
```
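
To sanity-check the request contract before wiring up the frontend, you can build a payload with the schemas above. This is only a quick sketch: the goal values and the two rule UUIDs are illustrative placeholders, not real data.

```python
# Sketch: build and inspect a plan-generation payload using the schemas above.
# The UUIDs and goal values are illustrative placeholders.
from uuid import uuid4

from app.schemas.plan import PlanGenerationRequest, TrainingGoals

payload = PlanGenerationRequest(
    rule_ids=[uuid4(), uuid4()],
    goals=TrainingGoals(
        primary_goal="Build endurance for a spring gran fondo",
        target_weekly_hours=8,
        fitness_level="intermediate",
        event_date="2025-05-18",
        avoid_days=["Monday"],
    ),
    duration_weeks=8,
)

# This is the JSON body a client would POST to the /generate endpoint.
print(payload.json(indent=2))
```

The response wraps the stored plan plus `generation_metadata`, so the frontend can show which rules and goals produced the plan.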
weeks") + user_preferences: Optional[Dict[str, Any]] = Field(None, description="Additional preferences") + +class PlanGenerationResponse(BaseModel): + """Response schema for plan generation.""" + plan: Plan = Field(..., description="Generated training plan") + generation_metadata: Dict[str, Any] = Field(..., description="Generation metadata") + + class Config: + orm_mode = True +``` + +--- + + + + + +**Add these endpoints after the existing routes:** + +```python +from app.schemas.rule import NaturalLanguageRuleRequest, ParsedRuleResponse + +@router.post("/parse-natural-language", response_model=ParsedRuleResponse) +async def parse_natural_language_rules( + request: NaturalLanguageRuleRequest, + db: AsyncSession = Depends(get_db) +): + """Parse natural language text into structured training rules.""" + try: + # Initialize AI service + ai_service = AIService(db) + + # Parse rules using AI + parsed_data = await ai_service.parse_rules_from_natural_language( + request.natural_language_text + ) + + # Validate parsed rules + validation_result = _validate_parsed_rules(parsed_data) + + return ParsedRuleResponse( + parsed_rules=parsed_data, + confidence_score=parsed_data.get("confidence", 0.0), + suggestions=validation_result.get("suggestions", []), + validation_errors=validation_result.get("errors", []), + rule_name=request.rule_name + ) + + except AIServiceError as e: + raise HTTPException(status_code=503, detail=f"AI parsing failed: {str(e)}") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Rule parsing failed: {str(e)}") + +@router.post("/validate-rules") +async def validate_rule_consistency( + rules_data: Dict[str, Any], + db: AsyncSession = Depends(get_db) +): + """Validate rule consistency and detect conflicts.""" + validation_result = _validate_parsed_rules(rules_data) + return { + "is_valid": len(validation_result.get("errors", [])) == 0, + "errors": validation_result.get("errors", []), + "warnings": validation_result.get("warnings", []), + "suggestions": validation_result.get("suggestions", []) + } + +def _validate_parsed_rules(parsed_rules: Dict[str, Any]) -> Dict[str, List[str]]: + """Validate parsed rules for consistency and completeness.""" + errors = [] + warnings = [] + suggestions = [] + + # Check for required fields + required_fields = ["max_rides_per_week", "min_rest_between_hard"] + for field in required_fields: + if field not in parsed_rules: + errors.append(f"Missing required field: {field}") + + # Validate numeric ranges + max_rides = parsed_rules.get("max_rides_per_week", 0) + if max_rides > 7: + errors.append("Maximum rides per week cannot exceed 7") + elif max_rides < 1: + errors.append("Must have at least 1 ride per week") + + # Check for conflicts + max_hours = parsed_rules.get("max_duration_hours", 0) + if max_rides and max_hours: + avg_duration = max_hours / max_rides + if avg_duration > 5: + warnings.append("Very long average ride duration detected") + elif avg_duration < 0.5: + warnings.append("Very short average ride duration detected") + + # Provide suggestions + if "weather_constraints" not in parsed_rules: + suggestions.append("Consider adding weather constraints for outdoor rides") + + return { + "errors": errors, + "warnings": warnings, + "suggestions": suggestions + } +``` + +### **File:** `backend/app/schemas/rule.py` + +**Replace the existing content with:** + +```python +from pydantic import BaseModel, Field, validator +from typing import Optional, Dict, Any, List + +class NaturalLanguageRuleRequest(BaseModel): + """Request schema 

---

## Step 1.3: AI Service Enhancements

### **File:** `backend/app/services/ai_service.py`

**Add these enhanced methods:**

```python
async def parse_rules_from_natural_language(self, natural_language: str) -> Dict[str, Any]:
    """Enhanced natural language rule parsing with better prompts."""
    prompt_template = await self.prompt_manager.get_active_prompt("rule_parsing")

    if not prompt_template:
        # Fallback prompt if none exists in database
        prompt_template = """
        Parse the following natural language training rules into structured JSON format.

        Input: "{user_rules}"

        Required output format:
        {{
            "max_rides_per_week": <number>,
            "min_rest_between_hard": <number>,
            "max_duration_hours": <number>,
            "intensity_limits": {{
                "max_zone_5_minutes_per_week": <number>,
                "max_consecutive_hard_days": <number>
            }},
            "weather_constraints": {{
                "min_temperature": <number>,
                "max_wind_speed": <number>,
                "no_rain": <true/false>
            }},
            "schedule_constraints": {{
                "preferred_days": [],
                "avoid_days": []
            }},
            "confidence": <0.0-1.0>
        }}

        Extract specific numbers and constraints. If information is missing, omit the field.
        """

    prompt = prompt_template.format(user_rules=natural_language)
    response = await self._make_ai_request(prompt)
    parsed_data = self._parse_rules_response(response)

    # Add confidence scoring
    if "confidence" not in parsed_data:
        parsed_data["confidence"] = self._calculate_parsing_confidence(
            natural_language, parsed_data
        )

    return parsed_data

def _calculate_parsing_confidence(self, input_text: str, parsed_data: Dict) -> float:
    """Calculate confidence score for rule parsing."""
    confidence = 0.5  # Base confidence

    # Increase confidence for explicit numbers
    import re
    numbers = re.findall(r'\d+', input_text)
    if len(numbers) >= 2:
        confidence += 0.2

    # Increase confidence for key training terms
    training_terms = ['rides', 'hours', 'week', 'rest', 'recovery', 'training']
    found_terms = sum(1 for term in training_terms if term in input_text.lower())
    confidence += min(0.3, found_terms * 0.05)

    # Decrease confidence if parsed data is sparse
    if len(parsed_data) < 3:
        confidence -= 0.2

    return max(0.0, min(1.0, confidence))
```
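
For reference, here is the kind of structured output the parser is aiming for, and how the fallback confidence heuristic above would score it. Both the input sentence and the parsed values are illustrative, not real model output:

```python
# Illustrative parsed-rules payload for one input sentence.
# Input: "Max 4 rides per week, 8 hours total, and 2 rest days after hard sessions."
example_parsed_rules = {
    "max_rides_per_week": 4,
    "max_duration_hours": 8,
    "min_rest_between_hard": 2,
    "confidence": 0.9,
}

# How _calculate_parsing_confidence would score that input if the AI response
# omitted "confidence":
#   0.5  base confidence
# + 0.2  at least two explicit numbers in the text (4, 8, 2)
# + 0.2  four matching training terms ('rides', 'hours', 'week', 'rest') -> min(0.3, 4 * 0.05)
# (no sparsity penalty: the parsed dict already has 3+ fields)
# = 0.9, already within the 0.0-1.0 clamp
```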
+ """ + + prompt = prompt_template.format(user_rules=natural_language) + response = await self._make_ai_request(prompt) + parsed_data = self._parse_rules_response(response) + + # Add confidence scoring + if "confidence" not in parsed_data: + parsed_data["confidence"] = self._calculate_parsing_confidence( + natural_language, parsed_data + ) + + return parsed_data + +def _calculate_parsing_confidence(self, input_text: str, parsed_data: Dict) -> float: + """Calculate confidence score for rule parsing.""" + confidence = 0.5 # Base confidence + + # Increase confidence for explicit numbers + import re + numbers = re.findall(r'\d+', input_text) + if len(numbers) >= 2: + confidence += 0.2 + + # Increase confidence for key training terms + training_terms = ['rides', 'hours', 'week', 'rest', 'recovery', 'training'] + found_terms = sum(1 for term in training_terms if term in input_text.lower()) + confidence += min(0.3, found_terms * 0.05) + + # Decrease confidence if parsed data is sparse + if len(parsed_data) < 3: + confidence -= 0.2 + + return max(0.0, min(1.0, confidence)) +``` + +--- + +# Phase 2: Frontend Core Features Implementation + +## Step 2.1: Simplified Rules Management + +### **File:** `frontend/src/pages/Rules.jsx` + +**Replace with simplified plaintext rules interface:** + +```jsx +import React, { useState, useEffect } from 'react'; +import { toast } from 'react-toastify'; +import RuleEditor from '../components/rules/RuleEditor'; +import RulePreview from '../components/rules/RulePreview'; +import RulesList from '../components/rules/RulesList'; +import { useAuth } from '../context/AuthContext'; +import * as ruleService from '../services/ruleService'; + +const Rules = () => { + const { apiKey } = useAuth(); + const [activeTab, setActiveTab] = useState('list'); + const [rules, setRules] = useState([]); + const [selectedRule, setSelectedRule] = useState(null); + const [naturalLanguageText, setNaturalLanguageText] = useState(''); + const [parsedRules, setParsedRules] = useState(null); + const [isLoading, setIsLoading] = useState(false); + + useEffect(() => { + loadRules(); + }, []); + + const loadRules = async () => { + try { + const response = await ruleService.getRules(); + setRules(response.data); + } catch (error) { + console.error('Failed to load rules:', error); + toast.error('Failed to load rules'); + } + }; + + const handleParseRules = async (parsedData) => { + setParsedRules(parsedData); + setActiveTab('preview'); + }; + + const handleSaveRules = async (ruleName, finalRules) => { + setIsLoading(true); + try { + const ruleData = { + name: ruleName, + jsonb_rules: finalRules, + user_defined: true, + version: 1 + }; + + await ruleService.createRule(ruleData); + toast.success('Rules saved successfully!'); + + // Reset form and reload rules + setNaturalLanguageText(''); + setParsedRules(null); + setActiveTab('list'); + await loadRules(); + } catch (error) { + console.error('Failed to save rules:', error); + toast.error('Failed to save rules'); + } finally { + setIsLoading(false); + } + }; + + const handleEditRule = (rule) => { + setSelectedRule(rule); + setNaturalLanguageText(rule.description || ''); + setParsedRules(rule.jsonb_rules); + setActiveTab('edit'); + }; + + return ( +
    <div>
      <div>
        <h1>Training Rules</h1>
        <p>
          Define your training constraints and preferences using natural language
        </p>
      </div>

      {/* Tab Navigation */}
      <div>
        {/* buttons that call setActiveTab('list' | 'create' | 'preview' | 'edit') */}
      </div>

      {/* Tab Content */}
      {activeTab === 'list' && (
        <RulesList
          rules={rules}
          onEdit={handleEditRule}
          onDelete={async (ruleId) => {
            try {
              await ruleService.deleteRule(ruleId);
              toast.success('Rule deleted');
              await loadRules();
            } catch (error) {
              toast.error('Failed to delete rule');
            }
          }}
        />
      )}

      {isEditing ? (
        <div>
          <input
            type="text"
            value={ruleName}
            onChange={(e) => setRuleName(e.target.value)}
            className="w-full p-3 border border-gray-300 rounded-lg"
          />