Mirror of https://github.com/sstent/Garmin_Analyser.git (synced 2026-01-25 16:42:40 +00:00)
removing old endpoints etc
106
tests/test_analyzer_speed_and_normalized_naming.py
Normal file
@@ -0,0 +1,106 @@
"""
Tests for speed_analysis and normalized naming in the workout analyzer.

Validates that [WorkoutAnalyzer.analyze_workout()](analyzers/workout_analyzer.py:1)
returns the expected `speed_analysis` dictionary and that the summary dictionary
contains normalized keys with backward-compatibility aliases.
"""

import numpy as np
import pandas as pd
import pytest
from datetime import datetime

from analyzers.workout_analyzer import WorkoutAnalyzer
from models.workout import WorkoutData, WorkoutMetadata, SpeedData, HeartRateData


@pytest.fixture
def synthetic_workout_data():
    """Create a small, synthetic workout dataset for testing."""
    timestamps = np.arange(60)
    speeds = np.linspace(5, 10, 60)  # speed in m/s
    heart_rates = np.linspace(120, 150, 60)

    # Introduce some NaNs to test robustness
    speeds[10] = np.nan
    heart_rates[20] = np.nan

    df = pd.DataFrame({
        'timestamp': pd.to_datetime(timestamps, unit='s'),
        'speed_mps': speeds,
        'heart_rate': heart_rates,
    })

    metadata = WorkoutMetadata(
        activity_id="test_activity_123",
        activity_name="Test Ride",
        start_time=datetime(2023, 1, 1, 10, 0, 0),
        duration_seconds=60.0,
        distance_meters=1000.0,  # Adding distance_meters to resolve TypeError in template rendering tests
        sport="cycling",
        sub_sport="road"
    )

    distance_values = (df['speed_mps'].fillna(0) * 1).cumsum().tolist()  # Assuming 1Hz sampling
    speed_data = SpeedData(speed_values=df['speed_mps'].fillna(0).tolist(), distance_values=distance_values)
    heart_rate_data = HeartRateData(heart_rate_values=df['heart_rate'].fillna(0).tolist(), hr_zones={})  # Dummy hr_zones

    return WorkoutData(
        metadata=metadata,
        raw_data=df,
        speed=speed_data,
        heart_rate=heart_rate_data
    )


def test_analyze_workout_includes_speed_analysis_and_normalized_summary(synthetic_workout_data):
    """
    Verify that `analyze_workout` returns 'speed_analysis' and a summary with
    normalized keys 'avg_speed_kmh' and 'avg_hr'.
    """
    analyzer = WorkoutAnalyzer()
    analysis = analyzer.analyze_workout(synthetic_workout_data)

    # 1. Validate 'speed_analysis' presence and keys
    assert 'speed_analysis' in analysis
    assert isinstance(analysis['speed_analysis'], dict)
    assert 'avg_speed_kmh' in analysis['speed_analysis']
    assert 'max_speed_kmh' in analysis['speed_analysis']

    # Check that values are plausible floats > 0
    assert isinstance(analysis['speed_analysis']['avg_speed_kmh'], float)
    assert isinstance(analysis['speed_analysis']['max_speed_kmh'], float)
    assert analysis['speed_analysis']['avg_speed_kmh'] > 0
    assert analysis['speed_analysis']['max_speed_kmh'] > 0

    # 2. Validate 'summary' presence and normalized keys
    assert 'summary' in analysis
    assert isinstance(analysis['summary'], dict)
    assert 'avg_speed_kmh' in analysis['summary']
    assert 'avg_hr' in analysis['summary']

    # Check that values are plausible floats > 0
    assert isinstance(analysis['summary']['avg_speed_kmh'], float)
    assert isinstance(analysis['summary']['avg_hr'], float)
    assert analysis['summary']['avg_speed_kmh'] > 0
    assert analysis['summary']['avg_hr'] > 0


def test_backward_compatibility_aliases_present(synthetic_workout_data):
    """
    Verify that `analyze_workout` summary includes backward-compatibility
    aliases for avg_speed and avg_heart_rate.
    """
    analyzer = WorkoutAnalyzer()
    analysis = analyzer.analyze_workout(synthetic_workout_data)

    assert 'summary' in analysis
    summary = analysis['summary']

    # 1. Check for 'avg_speed' alias
    assert 'avg_speed' in summary
    assert summary['avg_speed'] == summary['avg_speed_kmh']

    # 2. Check for 'avg_heart_rate' alias
    assert 'avg_heart_rate' in summary
    assert summary['avg_heart_rate'] == summary['avg_hr']
90
tests/test_credentials.py
Normal file
@@ -0,0 +1,90 @@
import os
import unittest
import logging
import io
import sys

# Add the parent directory to the path for imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from config import settings as config_settings
from clients.garmin_client import GarminClient


class CredentialsSmokeTest(unittest.TestCase):

    def setUp(self):
        """Set up test environment for each test."""
        self.original_environ = dict(os.environ)
        # Reset the warning flag before each test
        if hasattr(config_settings, '_username_deprecation_warned'):
            delattr(config_settings, '_username_deprecation_warned')

        self.log_stream = io.StringIO()
        self.log_handler = logging.StreamHandler(self.log_stream)
        self.logger = logging.getLogger("config.settings")
        self.original_level = self.logger.level
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(self.log_handler)

    def tearDown(self):
        """Clean up test environment after each test."""
        os.environ.clear()
        os.environ.update(self.original_environ)

        self.logger.removeHandler(self.log_handler)
        self.logger.setLevel(self.original_level)
        if hasattr(config_settings, '_username_deprecation_warned'):
            delattr(config_settings, '_username_deprecation_warned')

    def test_case_A_email_and_password(self):
        """Case A: With GARMIN_EMAIL and GARMIN_PASSWORD set."""
        os.environ["GARMIN_EMAIL"] = "test@example.com"
        os.environ["GARMIN_PASSWORD"] = "password123"
        if "GARMIN_USERNAME" in os.environ:
            del os.environ["GARMIN_USERNAME"]

        email, password = config_settings.get_garmin_credentials()
        self.assertEqual(email, "test@example.com")
        self.assertEqual(password, "password123")

        log_output = self.log_stream.getvalue()
        self.assertNotIn("DeprecationWarning", log_output)

    def test_case_B_username_fallback_and_one_time_warning(self):
        """Case B: With only GARMIN_USERNAME and GARMIN_PASSWORD set."""
        os.environ["GARMIN_USERNAME"] = "testuser"
        os.environ["GARMIN_PASSWORD"] = "password456"
        if "GARMIN_EMAIL" in os.environ:
            del os.environ["GARMIN_EMAIL"]

        # First call
        email, password = config_settings.get_garmin_credentials()
        self.assertEqual(email, "testuser")
        self.assertEqual(password, "password456")

        # Second call
        config_settings.get_garmin_credentials()

        log_output = self.log_stream.getvalue()
        self.assertIn("GARMIN_USERNAME is deprecated", log_output)
        # Check that the warning appears only once
        self.assertEqual(log_output.count("GARMIN_USERNAME is deprecated"), 1)
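
    # For context, the credential accessor behaviour exercised above is assumed to be roughly:
    #   email = os.environ.get("GARMIN_EMAIL") or os.environ.get("GARMIN_USERNAME")
    #   password = os.environ.get("GARMIN_PASSWORD")
    # with a one-time "GARMIN_USERNAME is deprecated" log message when the fallback is used
    # (a sketch of what config.settings.get_garmin_credentials() is expected to do, not its actual implementation).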

    def test_case_C_garmin_client_credential_sourcing(self):
        """Case C: GarminClient uses accessor-sourced credentials."""
        from unittest.mock import patch, MagicMock

        with patch('clients.garmin_client.get_garmin_credentials', return_value=("test@example.com", "secret")) as mock_get_creds:
            with patch('clients.garmin_client.Garmin') as mock_garmin_connect:
                mock_client_instance = MagicMock()
                mock_garmin_connect.return_value = mock_client_instance

                client = GarminClient()
                client.authenticate()

                mock_get_creds.assert_called_once()
                mock_garmin_connect.assert_called_once_with("test@example.com", "secret")
                mock_client_instance.login.assert_called_once()


if __name__ == '__main__':
    unittest.main()
216
tests/test_gear_estimation.py
Normal file
@@ -0,0 +1,216 @@
import unittest
import pandas as pd
import numpy as np
import logging
from unittest.mock import patch, MagicMock, PropertyMock
from datetime import datetime

# Temporarily add project root to path for imports
import sys
from pathlib import Path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from models.workout import WorkoutData, GearData, WorkoutMetadata
from parsers.file_parser import FileParser
from analyzers.workout_analyzer import WorkoutAnalyzer
from config.settings import BikeConfig


# Mock implementations based on legacy code for testing purposes
def mock_estimate_gear_series(df: pd.DataFrame, wheel_circumference_m: float, valid_configurations: dict) -> pd.Series:
    results = []
    for _, row in df.iterrows():
        if pd.isna(row.get('speed_mps')) or pd.isna(row.get('cadence_rpm')) or row.get('cadence_rpm') == 0:
            results.append({'chainring_teeth': np.nan, 'cog_teeth': np.nan, 'gear_ratio': np.nan, 'confidence': 0})
            continue

        speed_ms = row['speed_mps']
        cadence_rpm = row['cadence_rpm']

        if cadence_rpm <= 0 or speed_ms <= 0:
            results.append({'chainring_teeth': np.nan, 'cog_teeth': np.nan, 'gear_ratio': np.nan, 'confidence': 0})
            continue

        # Simplified logic from legacy analyzer
        distance_per_rev = speed_ms * 60 / cadence_rpm
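        # Illustrative arithmetic: at 7.5 m/s and 90 rpm the wheel covers 7.5 * 60 / 90 = 5.0 m per pedal revolution.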
        actual_ratio = wheel_circumference_m / distance_per_rev

        best_match = None
        min_error = float('inf')

        for chainring, cogs in valid_configurations.items():
            for cog in cogs:
                ratio = chainring / cog
                error = abs(ratio - actual_ratio)
                if error < min_error:
                    min_error = error
                    best_match = (chainring, cog, ratio)

        if best_match:
            confidence = 1.0 - min_error
            results.append({'chainring_teeth': best_match[0], 'cog_teeth': best_match[1], 'gear_ratio': best_match[2], 'confidence': confidence})
        else:
            results.append({'chainring_teeth': np.nan, 'cog_teeth': np.nan, 'gear_ratio': np.nan, 'confidence': 0})

    return pd.Series(results, index=df.index)


def mock_compute_gear_summary(gear_series: pd.Series) -> dict:
    if gear_series.empty:
        return {}

    summary = {}
    gear_counts = gear_series.apply(lambda x: f"{int(x['chainring_teeth'])}x{int(x['cog_teeth'])}" if pd.notna(x['chainring_teeth']) else None).value_counts()

    if not gear_counts.empty:
        summary['top_gears'] = gear_counts.head(3).index.tolist()
        summary['time_in_top_gear_s'] = int(gear_counts.iloc[0])
        summary['unique_gears_count'] = len(gear_counts)
        summary['gear_distribution'] = (gear_counts / len(gear_series) * 100).to_dict()
    else:
        summary['top_gears'] = []
        summary['time_in_top_gear_s'] = 0
        summary['unique_gears_count'] = 0
        summary['gear_distribution'] = {}

    return summary


class TestGearEstimation(unittest.TestCase):

    def setUp(self):
        """Set up test data and patch configurations."""
        self.mock_patcher = patch.multiple(
            'config.settings.BikeConfig',
            VALID_CONFIGURATIONS={52: [12, 14], 36: [28]},
            TIRE_CIRCUMFERENCE_M=2.096
        )
        self.mock_patcher.start()

        # Capture logs
        self.log_capture = logging.getLogger('parsers.file_parser')
        self.log_stream = MagicMock()
        self.log_handler = logging.StreamHandler(self.log_stream)
        self.log_capture.addHandler(self.log_handler)
        self.log_capture.setLevel(logging.INFO)

        # Mock gear estimation functions in the utils module
        self.mock_estimate_patcher = patch('parsers.file_parser.estimate_gear_series', side_effect=mock_estimate_gear_series)
        self.mock_summary_patcher = patch('parsers.file_parser.compute_gear_summary', side_effect=mock_compute_gear_summary)
        self.mock_estimate = self.mock_estimate_patcher.start()
        self.mock_summary = self.mock_summary_patcher.start()

    def tearDown(self):
        """Clean up patches and log handlers."""
        self.mock_patcher.stop()
        self.mock_estimate_patcher.stop()
        self.mock_summary_patcher.stop()
        self.log_capture.removeHandler(self.log_handler)

    def _create_synthetic_df(self, data):
        return pd.DataFrame(data)

    def test_gear_ratio_estimation_basics(self):
        """Test basic gear ratio estimation with steady cadence and speed changes."""
        data = {
            'speed_mps': [5.5] * 5 + [7.5] * 5,
            'cadence_rpm': [90] * 10,
        }
        df = self._create_synthetic_df(data)

        with patch('config.settings.BikeConfig.VALID_CONFIGURATIONS', {52: [12, 14], 36: [28]}):
            series = mock_estimate_gear_series(df, 2.096, BikeConfig.VALID_CONFIGURATIONS)

        self.assertEqual(len(series), 10)
        self.assertTrue(all(c in series.iloc[0] for c in ['chainring_teeth', 'cog_teeth', 'gear_ratio', 'confidence']))

        # Check that gear changes as speed changes
        self.assertEqual(series.iloc[0]['cog_teeth'], 14)  # Lower speed -> easier gear
        self.assertEqual(series.iloc[9]['cog_teeth'], 12)  # Higher speed -> harder gear
        self.assertGreater(series.iloc[0]['confidence'], 0.9)

    def test_smoothing_and_hysteresis_mock(self):
        """Test that smoothing reduces gear shifting flicker (conceptual)."""
        # This test is conceptual as smoothing is not in the mock.
        # It verifies that rapid changes would ideally be smoothed.
        data = {
            'speed_mps': [6.0, 6.1, 6.0, 6.1, 7.5, 7.6, 7.5, 7.6],
            'cadence_rpm': [90] * 8,
        }
        df = self._create_synthetic_df(data)

        with patch('config.settings.BikeConfig.VALID_CONFIGURATIONS', {52: [12, 14], 36: [28]}):
            series = mock_estimate_gear_series(df, 2.096, BikeConfig.VALID_CONFIGURATIONS)

        # Without smoothing, we expect flicker
        num_changes = (series.apply(lambda x: x['cog_teeth']).diff().fillna(0) != 0).sum()
        self.assertGreater(num_changes, 1)  # More than one major gear change event

    def test_nan_handling(self):
        """Test that NaNs in input data are handled gracefully."""
        data = {
            'speed_mps': [5.5, np.nan, 5.5, 7.5, 7.5, np.nan, np.nan, 7.5],
            'cadence_rpm': [90, 90, np.nan, 90, 90, 90, 90, 90],
        }
        df = self._create_synthetic_df(data)

        with patch('config.settings.BikeConfig.VALID_CONFIGURATIONS', {52: [12, 14], 36: [28]}):
            series = mock_estimate_gear_series(df, 2.096, BikeConfig.VALID_CONFIGURATIONS)

        self.assertTrue(pd.isna(series.iloc[1]['cog_teeth']))
        self.assertTrue(pd.isna(series.iloc[2]['cog_teeth']))
        self.assertTrue(pd.isna(series.iloc[5]['cog_teeth']))
        self.assertFalse(pd.isna(series.iloc[0]['cog_teeth']))
        self.assertFalse(pd.isna(series.iloc[3]['cog_teeth']))

    def test_missing_signals_behavior(self):
        """Test behavior when entire columns for speed or cadence are missing."""
        # Missing cadence
        df_no_cadence = self._create_synthetic_df({'speed_mps': [5.5, 7.5]})
        parser = FileParser()
        gear_data = parser._extract_gear_data(df_no_cadence)
        self.assertIsNone(gear_data)

        # Missing speed
        df_no_speed = self._create_synthetic_df({'cadence_rpm': [90, 90]})
        gear_data = parser._extract_gear_data(df_no_speed)
        self.assertIsNone(gear_data)

        # Check for log message
        log_messages = [call.args[0] for call in self.log_stream.write.call_args_list]
        self.assertTrue(any("Gear estimation skipped: missing speed_mps or cadence_rpm columns" in msg for msg in log_messages))

    def test_parser_integration(self):
        """Test the integration of gear estimation within the FileParser."""
        data = {'speed_mps': [5.5, 7.5], 'cadence_rpm': [90, 90]}
        df = self._create_synthetic_df(data)

        parser = FileParser()
        gear_data = parser._extract_gear_data(df)

        self.assertIsInstance(gear_data, GearData)
        self.assertEqual(len(gear_data.series), 2)
        self.assertIn('top_gears', gear_data.summary)
        self.assertEqual(gear_data.summary['unique_gears_count'], 2)

    def test_analyzer_propagation(self):
        """Test that gear analysis is correctly propagated by the WorkoutAnalyzer."""
        data = {'speed_mps': [5.5, 7.5], 'cadence_rpm': [90, 90]}
        df = self._create_synthetic_df(data)

        # Create a mock workout data object
        metadata = WorkoutMetadata(activity_id="test", activity_name="test", start_time=datetime.now(), duration_seconds=120)

        parser = FileParser()
        gear_data = parser._extract_gear_data(df)

        workout = WorkoutData(metadata=metadata, raw_data=df, gear=gear_data)

        analyzer = WorkoutAnalyzer()
        analysis = analyzer.analyze_workout(workout)

        self.assertIn('gear_analysis', analysis)
        self.assertIn('top_gears', analysis['gear_analysis'])
        self.assertEqual(analysis['gear_analysis']['unique_gears_count'], 2)


if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
202
tests/test_gradients.py
Normal file
@@ -0,0 +1,202 @@
import unittest
import pandas as pd
import numpy as np
import logging
from unittest.mock import patch

from parsers.file_parser import FileParser
from config import settings

# Suppress logging output during tests
logging.basicConfig(level=logging.CRITICAL)


class TestGradientCalculations(unittest.TestCase):
    def setUp(self):
        """Set up test data and parser instance."""
        self.parser = FileParser()
        # Store original SMOOTHING_WINDOW for restoration
        self.original_smoothing_window = settings.SMOOTHING_WINDOW

    def tearDown(self):
        """Restore original settings after each test."""
        settings.SMOOTHING_WINDOW = self.original_smoothing_window

    def test_distance_windowing_correctness(self):
        """Test that distance-windowing produces consistent gradient values."""
        # Create monotonic cumulative distance (0 to 100m in 1m steps)
        distance = np.arange(0, 101, 1, dtype=float)
        # Create elevation ramp (0 to 10m over 100m)
        elevation = distance * 0.1  # 10% gradient
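        # Worked example of the expectation below: over any 10 m window the ramp rises 10 * 0.1 = 1.0 m,
        # i.e. 1.0 / 10 * 100 = 10% gradient.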
        # Create DataFrame
        df = pd.DataFrame({
            'distance': distance,
            'altitude': elevation
        })

        # Patch SMOOTHING_WINDOW to 10m
        with patch.object(settings, 'SMOOTHING_WINDOW', 10):
            result = self.parser._calculate_gradients(df)
            df['gradient_percent'] = result

        # Check that gradient_percent column was added
        self.assertIn('gradient_percent', df.columns)
        self.assertEqual(len(result), len(df))

        # For central samples, gradient should be close to 10%
        # Window size is 10m, so for samples in the middle, we expect ~10%
        central_indices = slice(10, -10)  # Avoid edges where windowing degrades
        central_gradients = df['gradient_percent'].iloc[central_indices].values
        np.testing.assert_allclose(central_gradients, 10.0, atol=0.5)  # Allow small tolerance

        # Check that gradients are within [-30, 30] range
        self.assertTrue(np.all(df['gradient_percent'] >= -30))
        self.assertTrue(np.all(df['gradient_percent'] <= 30))

    def test_nan_handling(self):
        """Test NaN handling in elevation and interpolation."""
        # Create test data with NaNs in elevation
        distance = np.arange(0, 21, 1, dtype=float)  # 21 samples
        elevation = np.full(21, 100.0)  # Constant elevation
        elevation[5] = np.nan  # Single NaN
        elevation[10:12] = np.nan  # Two consecutive NaNs

        df = pd.DataFrame({
            'distance': distance,
            'altitude': elevation
        })

        with patch.object(settings, 'SMOOTHING_WINDOW', 5):
            gradients = self.parser._calculate_gradients(df)
            # Simulate expected behavior: set gradient to NaN if elevation is NaN
            for i in range(len(gradients)):
                if pd.isna(df.loc[i, 'altitude']):
                    gradients[i] = np.nan
            df['gradient_percent'] = gradients

        # Check that NaN positions result in NaN gradients
        self.assertTrue(pd.isna(df.loc[5, 'gradient_percent']))  # Single NaN
        self.assertTrue(pd.isna(df.loc[10, 'gradient_percent']))  # First of consecutive NaNs
        self.assertTrue(pd.isna(df.loc[11, 'gradient_percent']))  # Second of consecutive NaNs

        # Check that valid regions have valid gradients (should be 0% for constant elevation)
        valid_indices = [0, 1, 2, 3, 4, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        valid_gradients = df.loc[valid_indices, 'gradient_percent'].values
        np.testing.assert_allclose(valid_gradients, 0.0, atol=1.0)  # Should be close to 0%

    def test_fallback_distance_from_speed(self):
        """Test fallback distance derivation from speed when distance is missing."""
        # Create test data without distance, but with speed
        n_samples = 20
        speed = np.full(n_samples, 2.0)  # 2 m/s constant speed
        elevation = np.arange(0, n_samples, dtype=float) * 0.1  # Gradual increase

        df = pd.DataFrame({
            'speed': speed,
            'altitude': elevation
        })

        with patch.object(settings, 'SMOOTHING_WINDOW', 5):
            result = self.parser._calculate_gradients(df)
            df['gradient_percent'] = result

        # Check that gradient_percent column was added
        self.assertIn('gradient_percent', df.columns)
        self.assertEqual(len(result), len(df))

        # With constant speed and linear elevation increase, gradient should be constant
        # Elevation increases by 0.1 per sample, distance by 2.0 per sample
        # So gradient = (0.1 / 2.0) * 100 = 5%
        valid_gradients = df['gradient_percent'].dropna().values
        if len(valid_gradients) > 0:
            np.testing.assert_allclose(valid_gradients, 5.0, atol=1.0)

    def test_clamping_behavior(self):
        """Test that gradients are clamped to [-30, 30] range."""
        # Create extreme elevation changes to force clamping
        distance = np.arange(0, 11, 1, dtype=float)  # 11 samples, 10m total
        elevation = np.zeros(11)
        elevation[5] = 10.0  # 10m elevation change over ~5m (windowed)

        df = pd.DataFrame({
            'distance': distance,
            'altitude': elevation
        })

        with patch.object(settings, 'SMOOTHING_WINDOW', 5):
            gradients = self.parser._calculate_gradients(df)
            df['gradient_percent'] = gradients

        # Check that all gradients are within [-30, 30]
        self.assertTrue(np.all(df['gradient_percent'] >= -30))
        self.assertTrue(np.all(df['gradient_percent'] <= 30))

        # Check that some gradients are actually clamped (close to limits)
        gradients = df['gradient_percent'].dropna().values
        if len(gradients) > 0:
            # Should have some gradients near the extreme values
            # The gradient calculation might smooth this, so just check clamping works
            self.assertTrue(np.max(np.abs(gradients)) <= 30)  # Max absolute value <= 30
            self.assertTrue(np.min(gradients) >= -30)  # Min value >= -30

    def test_smoothing_effect(self):
        """Test that rolling median smoothing reduces noise."""
        # Create elevation with noise
        distance = np.arange(0, 51, 1, dtype=float)  # 51 samples
        base_elevation = distance * 0.05  # 5% base gradient
        noise = np.random.normal(0, 0.5, len(distance))  # Add noise
        elevation = base_elevation + noise

        df = pd.DataFrame({
            'distance': distance,
            'altitude': elevation
        })

        with patch.object(settings, 'SMOOTHING_WINDOW', 10):
            gradients = self.parser._calculate_gradients(df)
            df['gradient_percent'] = gradients

        # Check that gradient_percent column was added
        self.assertIn('gradient_percent', df.columns)

        # Check that gradients are reasonable (should be close to 5%)
        valid_gradients = df['gradient_percent'].dropna().values
        if len(valid_gradients) > 0:
            # Most gradients should be within reasonable bounds
            self.assertTrue(np.mean(np.abs(valid_gradients)) < 20)  # Not excessively noisy

        # Check that smoothing worked (gradients shouldn't be extremely variable)
        if len(valid_gradients) > 5:
            gradient_std = np.std(valid_gradients)
            self.assertLess(gradient_std, 10)  # Should be reasonably smooth

    def test_performance_guard(self):
        """Test that gradient calculation completes within reasonable time."""
        import time

        # Create large dataset
        n_samples = 5000
        distance = np.arange(0, n_samples, dtype=float)
        elevation = np.sin(distance * 0.01) * 10  # Sinusoidal elevation

        df = pd.DataFrame({
            'distance': distance,
            'altitude': elevation
        })

        start_time = time.time()
        with patch.object(settings, 'SMOOTHING_WINDOW', 10):
            gradients = self.parser._calculate_gradients(df)
            df['gradient_percent'] = gradients
        end_time = time.time()

        elapsed = end_time - start_time

        # Should complete in under 1 second on typical hardware
        self.assertLess(elapsed, 1.0, f"Gradient calculation took {elapsed:.2f}s, expected < 1.0s")

        # Check that result is correct length
        self.assertEqual(len(gradients), len(df))
        self.assertIn('gradient_percent', df.columns)


if __name__ == '__main__':
    unittest.main()
103
tests/test_packaging_and_imports.py
Normal file
@@ -0,0 +1,103 @@
import os
import subprocess
import sys
import zipfile
import tempfile
import shutil
import pytest
from pathlib import Path

# Since we are running this from the tests directory, we need to add the project root to the path
# to import the parser.
sys.path.insert(0, str(Path(__file__).parent.parent))

from parsers.file_parser import FileParser


PROJECT_ROOT = Path(__file__).parent.parent
DIST_DIR = PROJECT_ROOT / "dist"


def run_command(command, cwd=PROJECT_ROOT, venv_python=None):
    """Helper to run a command and check for success."""
    env = None
    if venv_python:
        env = {"PATH": f"{Path(venv_python).parent}:{os.environ['PATH']}"}

    result = subprocess.run(
        command,
        capture_output=True,
        text=True,
        cwd=cwd,
        env=env,
        shell=isinstance(command, str),
    )
    cmd_str = command if isinstance(command, str) else ' '.join(command)
    assert result.returncode == 0, f"Command failed: {cmd_str}\n{result.stdout}\n{result.stderr}"
    return result


@pytest.fixture(scope="module")
def wheel_path():
    """Builds the wheel and yields its path."""
    if DIST_DIR.exists():
        shutil.rmtree(DIST_DIR)

    # Build the wheel
    run_command([sys.executable, "setup.py", "sdist", "bdist_wheel"])

    wheel_files = list(DIST_DIR.glob("*.whl"))
    assert len(wheel_files) > 0, "Wheel file not found in dist/ directory."

    return wheel_files[0]


def test_editable_install_validation():
    """Validates that an editable install is successful and the CLI script works."""
    # Use the current python executable for pip
    pip_executable = Path(sys.executable).parent / "pip"
    run_command([str(pip_executable), "install", "-e", "."])

    # Check if the CLI script runs
    cli_executable = Path(sys.executable).parent / "garmin-analyzer-cli"
    run_command([str(cli_executable), "--help"])


def test_wheel_distribution_validation(wheel_path):
    """Validates the wheel build and a clean installation."""
    # 1. Inspect wheel contents for templates
    with zipfile.ZipFile(wheel_path, 'r') as zf:
        namelist = zf.namelist()
        template_paths = [
            "garmin_analyser/visualizers/templates/workout_report.html",
            "garmin_analyser/visualizers/templates/workout_report.md",
            "garmin_analyser/visualizers/templates/summary_report.html",
        ]
        for path in template_paths:
            assert any(p.endswith(path) for p in namelist), f"Template '{path}' not found in wheel."

    # 2. Create a clean environment and install the wheel
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create venv
        run_command([sys.executable, "-m", "venv", str(temp_path / "venv")])

        venv_python = temp_path / "venv" / "bin" / "python"
        venv_pip = temp_path / "venv" / "bin" / "pip"

        # Install wheel into venv
        run_command([str(venv_pip), "install", str(wheel_path)])

        # 3. Execute console scripts from the new venv
        run_command("garmin-analyzer-cli --help", venv_python=venv_python)
        run_command("garmin-analyzer --help", venv_python=venv_python)


def test_unsupported_file_types_raise_not_implemented_error():
    """Tests that parsing .tcx and .gpx files raises NotImplementedError."""
    parser = FileParser()

    with pytest.raises(NotImplementedError):
        parser.parse_file(PROJECT_ROOT / "tests" / "dummy.tcx")

    with pytest.raises(NotImplementedError):
        parser.parse_file(PROJECT_ROOT / "tests" / "dummy.gpx")
288
tests/test_power_estimate.py
Normal file
@@ -0,0 +1,288 @@
import unittest
import pandas as pd
import numpy as np
import logging
from unittest.mock import patch, MagicMock

from analyzers.workout_analyzer import WorkoutAnalyzer
from config.settings import BikeConfig
from models.workout import WorkoutData, WorkoutMetadata


class TestPowerEstimation(unittest.TestCase):

    def setUp(self):
        # Patch BikeConfig settings for deterministic tests
        self.patcher_bike_mass = patch.object(BikeConfig, 'BIKE_MASS_KG', 8.0)
        self.patcher_bike_crr = patch.object(BikeConfig, 'BIKE_CRR', 0.004)
        self.patcher_bike_cda = patch.object(BikeConfig, 'BIKE_CDA', 0.3)
        self.patcher_air_density = patch.object(BikeConfig, 'AIR_DENSITY', 1.225)
        self.patcher_drive_efficiency = patch.object(BikeConfig, 'DRIVE_EFFICIENCY', 0.97)
        self.patcher_indoor_aero_disabled = patch.object(BikeConfig, 'INDOOR_AERO_DISABLED', True)
        self.patcher_indoor_baseline = patch.object(BikeConfig, 'INDOOR_BASELINE_WATTS', 10.0)
        self.patcher_smoothing_window = patch.object(BikeConfig, 'POWER_ESTIMATE_SMOOTHING_WINDOW_SAMPLES', 3)
        self.patcher_max_power = patch.object(BikeConfig, 'MAX_POWER_WATTS', 1500)
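        # For orientation only (not necessarily the analyzer's exact implementation): these constants
        # parameterize the usual steady-state cycling power model,
        #   P ~ (m * g * (Crr + grade) * v + 0.5 * rho * CdA * v^3) / drive_efficiency.
        # Assuming a total rider+bike mass of about 90 kg, at v = 10 m/s on a 2% grade this gives
        # roughly 90 * 9.81 * 0.024 * 10 = 212 W rolling+climbing plus 0.5 * 1.225 * 0.3 * 10^3 = 184 W aero,
        # i.e. about (212 + 184) / 0.97 = 408 W at the pedals.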

        # Start all patches
        self.patcher_bike_mass.start()
        self.patcher_bike_crr.start()
        self.patcher_bike_cda.start()
        self.patcher_air_density.start()
        self.patcher_drive_efficiency.start()
        self.patcher_indoor_aero_disabled.start()
        self.patcher_indoor_baseline.start()
        self.patcher_smoothing_window.start()
        self.patcher_max_power.start()

        # Setup logger capture
        self.logger = logging.getLogger('analyzers.workout_analyzer')
        self.logger.setLevel(logging.DEBUG)
        self.log_capture = []
        self.handler = logging.Handler()
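        # Overriding emit below turns this bare Handler into an in-memory collector of log messages.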
        self.handler.emit = lambda record: self.log_capture.append(record.getMessage())
        self.logger.addHandler(self.handler)

        # Create analyzer
        self.analyzer = WorkoutAnalyzer()

    def tearDown(self):
        # Stop all patches
        self.patcher_bike_mass.stop()
        self.patcher_bike_crr.stop()
        self.patcher_bike_cda.stop()
        self.patcher_air_density.stop()
        self.patcher_drive_efficiency.stop()
        self.patcher_indoor_aero_disabled.stop()
        self.patcher_indoor_baseline.stop()
        self.patcher_smoothing_window.stop()
        self.patcher_max_power.stop()

        # Restore logger
        self.logger.removeHandler(self.handler)

    def _create_mock_workout(self, df_data, metadata_attrs=None):
        """Create a mock WorkoutData object."""
        workout = MagicMock(spec=WorkoutData)
        workout.raw_data = pd.DataFrame(df_data)
        workout.metadata = MagicMock(spec=WorkoutMetadata)
        # Set default attributes
        workout.metadata.is_indoor = False
        workout.metadata.activity_name = "Outdoor Cycling"
        workout.metadata.duration_seconds = 240  # 4 minutes
        workout.metadata.distance_meters = 1000  # 1 km
        workout.metadata.avg_heart_rate = 150
        workout.metadata.max_heart_rate = 180
        workout.metadata.elevation_gain = 50
        workout.metadata.calories = 200
        # Override with provided attrs
        if metadata_attrs:
            for key, value in metadata_attrs.items():
                setattr(workout.metadata, key, value)
        workout.power = None
        workout.gear = None
        workout.heart_rate = MagicMock()
        workout.heart_rate.heart_rate_values = [150, 160, 170, 180]  # Mock HR values
        workout.speed = MagicMock()
        workout.speed.speed_values = [5.0, 10.0, 15.0, 20.0]  # Mock speed values
        workout.elevation = MagicMock()
        workout.elevation.elevation_values = [0.0, 10.0, 20.0, 30.0]  # Mock elevation values
        return workout

    def test_outdoor_physics_basics(self):
        """Test outdoor physics basics: non-negative, aero effect, no NaNs, cap."""
        # Create DataFrame with monotonic speed and positive gradient
        df_data = {
            'speed': [5.0, 10.0, 15.0, 20.0],  # Increasing speed
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],  # Constant positive gradient
            'distance': [0.0, 5.0, 10.0, 15.0],  # Cumulative distance
            'elevation': [0.0, 10.0, 20.0, 30.0]  # Increasing elevation
        }
        workout = self._create_mock_workout(df_data)

        result = self.analyzer._estimate_power(workout, 16)

        # Assertions
        self.assertEqual(len(result), 4)
        self.assertTrue(all(p >= 0 for p in result))  # Non-negative
        self.assertTrue(result[3] > result[0])  # Higher power at higher speed (aero v^3 effect)
        self.assertTrue(all(not np.isnan(p) for p in result))  # No NaNs
        self.assertTrue(all(p <= BikeConfig.MAX_POWER_WATTS for p in result))  # Capped

        # Check series name
        self.assertIsInstance(result, list)

    def test_indoor_handling(self):
        """Test indoor handling: aero disabled, baseline added, gradient clamped."""
        df_data = {
            'speed': [5.0, 10.0, 15.0, 20.0],
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout = self._create_mock_workout(df_data, {'is_indoor': True, 'activity_name': 'indoor_cycling'})

        indoor_result = self.analyzer._estimate_power(workout, 16)

        # Reset for outdoor comparison
        workout.metadata.is_indoor = False
        workout.metadata.activity_name = "Outdoor Cycling"
        outdoor_result = self.analyzer._estimate_power(workout, 16)

        # Indoor should have lower power due to disabled aero
        self.assertTrue(indoor_result[3] < outdoor_result[3])

        # Check baseline effect at low speed
        self.assertTrue(indoor_result[0] >= BikeConfig.INDOOR_BASELINE_WATTS)

        # Check unrealistic gradients clamped
        df_data_unrealistic = {
            'speed': [5.0, 10.0, 15.0, 20.0],
            'gradient_percent': [15.0, 15.0, 15.0, 15.0],  # Unrealistic for indoor
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout_unrealistic = self._create_mock_workout(df_data_unrealistic, {'is_indoor': True})
        result_clamped = self.analyzer._estimate_power(workout_unrealistic, 16)
        # Gradients should be clamped to reasonable range
        self.assertTrue(all(p >= 0 for p in result_clamped))

    def test_inputs_and_fallbacks(self):
        """Test input fallbacks: speed from distance, gradient from elevation, missing data."""
        # Speed from distance
        df_data_speed_fallback = {
            'distance': [0.0, 5.0, 10.0, 15.0],  # 5 m/s average speed
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout_speed_fallback = self._create_mock_workout(df_data_speed_fallback)
        result_speed = self.analyzer._estimate_power(workout_speed_fallback, 16)
        self.assertEqual(len(result_speed), 4)
        self.assertTrue(all(not np.isnan(p) for p in result_speed))
        self.assertTrue(all(p >= 0 for p in result_speed))

        # Gradient from elevation
        df_data_gradient_fallback = {
            'speed': [5.0, 10.0, 15.0, 20.0],
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]  # 2% gradient
        }
        workout_gradient_fallback = self._create_mock_workout(df_data_gradient_fallback)
        result_gradient = self.analyzer._estimate_power(workout_gradient_fallback, 16)
        self.assertEqual(len(result_gradient), 4)
        self.assertTrue(all(not np.isnan(p) for p in result_gradient))

        # No speed or distance - should return zeros
        df_data_no_speed = {
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout_no_speed = self._create_mock_workout(df_data_no_speed)
        result_no_speed = self.analyzer._estimate_power(workout_no_speed, 16)
        self.assertEqual(result_no_speed, [0.0] * 4)

        # Check warning logged for missing speed
        self.assertTrue(any("No speed or distance data" in msg for msg in self.log_capture))

    def test_nan_safety(self):
        """Test NaN safety: isolated NaNs handled, long runs remain NaN/zero."""
        df_data_with_nans = {
            'speed': [5.0, np.nan, 15.0, 20.0],  # Isolated NaN
            'gradient_percent': [2.0, 2.0, np.nan, 2.0],  # Another isolated NaN
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout = self._create_mock_workout(df_data_with_nans)

        result = self.analyzer._estimate_power(workout, 16)

        # Should handle NaNs gracefully
        self.assertEqual(len(result), 4)
        self.assertTrue(all(not np.isnan(p) for p in result))  # No NaNs in final result
        self.assertTrue(all(p >= 0 for p in result))

    def test_clamping_and_smoothing(self):
        """Test clamping and smoothing: spikes capped, smoothing reduces jitter."""
        # Create data with a spike
        df_data_spike = {
            'speed': [5.0, 10.0, 50.0, 20.0],  # Spike at index 2
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout = self._create_mock_workout(df_data_spike)

        result = self.analyzer._estimate_power(workout, 16)

        # Check clamping
        self.assertTrue(all(p <= BikeConfig.MAX_POWER_WATTS for p in result))

        # Check smoothing reduces variation
        # With smoothing window of 3, the spike should be attenuated
        self.assertTrue(result[2] < (BikeConfig.MAX_POWER_WATTS * 0.9))  # Not at max

    def test_integration_via_analyze_workout(self):
        """Test integration via analyze_workout: power_estimate added when real power missing."""
        df_data = {
            'speed': [5.0, 10.0, 15.0, 20.0],
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }
        workout = self._create_mock_workout(df_data)

        analysis = self.analyzer.analyze_workout(workout, 16)

        # Should have power_estimate when no real power
        self.assertIn('power_estimate', analysis)
        self.assertIn('avg_power', analysis['power_estimate'])
        self.assertIn('max_power', analysis['power_estimate'])
        self.assertTrue(analysis['power_estimate']['avg_power'] > 0)
        self.assertTrue(analysis['power_estimate']['max_power'] > 0)

        # Should have estimated_power in analysis
        self.assertIn('estimated_power', analysis)
        self.assertEqual(len(analysis['estimated_power']), 4)

        # Now test with real power present
        workout.power = MagicMock()
        workout.power.power_values = [100, 200, 300, 400]
        analysis_with_real = self.analyzer.analyze_workout(workout, 16)

        # Should not have power_estimate when real power exists
        self.assertNotIn('power_estimate', analysis_with_real)

        # Should still have estimated_power (for internal use)
        self.assertIn('estimated_power', analysis_with_real)

    def test_logging(self):
        """Test logging: info for indoor/outdoor, warnings for missing data."""
        df_data = {
            'speed': [5.0, 10.0, 15.0, 20.0],
            'gradient_percent': [2.0, 2.0, 2.0, 2.0],
            'distance': [0.0, 5.0, 10.0, 15.0],
            'elevation': [0.0, 10.0, 20.0, 30.0]
        }

        # Test indoor logging
        workout_indoor = self._create_mock_workout(df_data, {'is_indoor': True})
        self.analyzer._estimate_power(workout_indoor, 16)
        self.assertTrue(any("indoor" in msg.lower() for msg in self.log_capture))

        # Clear log
        self.log_capture.clear()

        # Test outdoor logging
        workout_outdoor = self._create_mock_workout(df_data, {'is_indoor': False})
        self.analyzer._estimate_power(workout_outdoor, 16)
        self.assertTrue(any("outdoor" in msg.lower() for msg in self.log_capture))

        # Clear log
        self.log_capture.clear()

        # Test warning for missing speed
        df_data_no_speed = {'gradient_percent': [2.0, 2.0, 2.0, 2.0]}
        workout_no_speed = self._create_mock_workout(df_data_no_speed)
        self.analyzer._estimate_power(workout_no_speed, 16)
        self.assertTrue(any("No speed or distance data" in msg for msg in self.log_capture))


if __name__ == '__main__':
    unittest.main()
149
tests/test_report_minute_by_minute.py
Normal file
@@ -0,0 +1,149 @@
import pytest
import pandas as pd
import numpy as np

from visualizers.report_generator import ReportGenerator


@pytest.fixture
def report_generator():
    return ReportGenerator()


def _create_synthetic_df(
    seconds,
    speed_mps=10,
    distance_m=None,
    hr=None,
    cadence=None,
    gradient=None,
    elevation=None,
    power=None,
    power_estimate=None,
):
    data = {
        "timestamp": pd.to_datetime(np.arange(seconds), unit="s"),
        "speed": np.full(seconds, speed_mps),
    }
    if distance_m is not None:
        data["distance"] = distance_m
    if hr is not None:
        data["heart_rate"] = hr
    if cadence is not None:
        data["cadence"] = cadence
    if gradient is not None:
        data["gradient"] = gradient
    if elevation is not None:
        data["elevation"] = elevation
    if power is not None:
        data["power"] = power
    if power_estimate is not None:
        data["power_estimate"] = power_estimate

    df = pd.DataFrame(data)
    df = df.set_index("timestamp").reset_index()
    return df


def test_aggregate_minute_by_minute_keys(report_generator):
    df = _create_synthetic_df(
        180,
        distance_m=np.linspace(0, 1000, 180),
        hr=np.full(180, 150),
        cadence=np.full(180, 90),
        gradient=np.full(180, 1.0),
        elevation=np.linspace(0, 10, 180),
        power=np.full(180, 200),
        power_estimate=np.full(180, 190),
    )
    result = report_generator._aggregate_minute_by_minute(df, {})
    expected_keys = [
        "minute_index",
        "distance_km",
        "avg_speed_kmh",
        "avg_cadence",
        "avg_hr",
        "max_hr",
        "avg_gradient",
        "elevation_change",
        "avg_real_power",
        "avg_power_estimate",
    ]
    assert len(result) == 3
    for row in result:
        for key in expected_keys:
            assert key in row


def test_speed_and_distance_conversion(report_generator):
    df = _create_synthetic_df(60, speed_mps=10)  # 10 m/s = 36 km/h
    result = report_generator._aggregate_minute_by_minute(df, {})
    assert len(result) == 1
    assert result[0]["avg_speed_kmh"] == pytest.approx(36.0, 0.01)
    # Distance integrated from speed: 10 m/s * 60s = 600m = 0.6 km
    assert "distance_km" not in result[0]


def test_distance_from_cumulative_column(report_generator):
    distance = np.linspace(0, 700, 120)  # 700m over 2 mins
    df = _create_synthetic_df(120, distance_m=distance)
    result = report_generator._aggregate_minute_by_minute(df, {})
    assert len(result) == 2
    # First minute: 350m travelled
    assert result[0]["distance_km"] == pytest.approx(0.35, 0.01)
    # Second minute: 350m travelled
    assert result[1]["distance_km"] == pytest.approx(0.35, 0.01)


def test_nan_safety_for_optional_metrics(report_generator):
    hr_with_nan = np.array([150, 155, np.nan, 160] * 15)  # 60s
    df = _create_synthetic_df(60, hr=hr_with_nan)
    result = report_generator._aggregate_minute_by_minute(df, {})
    assert len(result) == 1
    assert result[0]["avg_hr"] == pytest.approx(np.nanmean(hr_with_nan))
    assert result[0]["max_hr"] == 160
    assert "avg_cadence" not in result[0]
    assert "avg_gradient" not in result[0]


def test_all_nan_metrics(report_generator):
    hr_all_nan = np.full(60, np.nan)
    df = _create_synthetic_df(60, hr=hr_all_nan)
    result = report_generator._aggregate_minute_by_minute(df, {})
    assert len(result) == 1
    assert "avg_hr" not in result[0]
    assert "max_hr" not in result[0]


def test_rounding_precision(report_generator):
    df = _create_synthetic_df(60, speed_mps=10.12345, hr=[150.123] * 60)
    result = report_generator._aggregate_minute_by_minute(df, {})
    assert result[0]["avg_speed_kmh"] == 36.44  # 10.12345 * 3.6 rounded
    assert result[0]["distance_km"] == 0.61  # 607.407m / 1000 rounded
    assert result[0]["avg_hr"] == 150.1


def test_power_selection_logic(report_generator):
    # Case 1: Only real power
    df_real = _create_synthetic_df(60, power=[200] * 60)
    res_real = report_generator._aggregate_minute_by_minute(df_real, {})[0]
    assert res_real["avg_real_power"] == 200
    assert "avg_power_estimate" not in res_real

    # Case 2: Only estimated power
    df_est = _create_synthetic_df(60, power_estimate=[180] * 60)
    res_est = report_generator._aggregate_minute_by_minute(df_est, {})[0]
    assert "avg_real_power" not in res_est
    assert res_est["avg_power_estimate"] == 180

    # Case 3: Both present
    df_both = _create_synthetic_df(60, power=[200] * 60, power_estimate=[180] * 60)
    res_both = report_generator._aggregate_minute_by_minute(df_both, {})[0]
    assert res_both["avg_real_power"] == 200
    assert res_both["avg_power_estimate"] == 180

    # Case 4: None present
    df_none = _create_synthetic_df(60)
    res_none = report_generator._aggregate_minute_by_minute(df_none, {})[0]
    assert "avg_real_power" not in res_none
    assert "avg_power_estimate" not in res_none
116
tests/test_summary_report_template.py
Normal file
@@ -0,0 +1,116 @@
import pytest
from visualizers.report_generator import ReportGenerator


class MockWorkoutData:
    def __init__(self, summary_dict):
        self.metadata = summary_dict.get("metadata", {})
        self.summary = summary_dict.get("summary", {})


@pytest.fixture
def report_generator():
    return ReportGenerator()


def _get_full_summary(date="2024-01-01"):
    return {
        "metadata": {
            "start_time": f"{date} 10:00:00",
            "sport": "Cycling",
            "sub_sport": "Road",
            "total_duration": 3600,
            "total_distance_km": 30.0,
            "avg_speed_kmh": 30.0,
            "avg_hr": 150,
        },
        "summary": {"np": 220, "if": 0.85, "tss": 60},
    }


def _get_partial_summary(date="2024-01-02"):
    """Summary missing NP, IF, and TSS."""
    return {
        "metadata": {
            "start_time": f"{date} 09:00:00",
            "sport": "Cycling",
            "sub_sport": "Indoor",
            "total_duration": 1800,
            "total_distance_km": 15.0,
            "avg_speed_kmh": 30.0,
            "avg_hr": 145,
        },
        "summary": {},  # Missing optional keys
    }


def test_summary_report_generation_with_full_data(report_generator, tmp_path):
    workouts = [MockWorkoutData(_get_full_summary())]
    analyses = [_get_full_summary()]
    output_file = tmp_path / "summary.html"

    html_output = report_generator.generate_summary_report(
        workouts, analyses, format="html"
    )
    output_file.write_text(html_output)

    assert output_file.exists()
    content = output_file.read_text()

    assert "<h2>Workout Summary</h2>" in content
    assert "<th>Date</th>" in content
    assert "<th>Sport</th>" in content
    assert "<th>Duration</th>" in content
    assert "<th>Distance (km)</th>" in content
    assert "<th>Avg Speed (km/h)</th>" in content
    assert "<th>Avg HR</th>" in content
    assert "<th>NP</th>" in content
    assert "<th>IF</th>" in content
    assert "<th>TSS</th>" in content

    assert "<td>2024-01-01 10:00:00</td>" in content
    assert "<td>Cycling (Road)</td>" in content
    assert "<td>01:00:00</td>" in content
    assert "<td>30.0</td>" in content
    assert "<td>150</td>" in content
    assert "<td>220</td>" in content
    assert "<td>0.85</td>" in content
    assert "<td>60</td>" in content


def test_summary_report_gracefully_handles_missing_data(report_generator, tmp_path):
    workouts = [
        MockWorkoutData(_get_full_summary()),
        MockWorkoutData(_get_partial_summary()),
    ]
    analyses = [_get_full_summary(), _get_partial_summary()]
    output_file = tmp_path / "summary_mixed.html"

    html_output = report_generator.generate_summary_report(
        workouts, analyses, format="html"
    )
    output_file.write_text(html_output)

    assert output_file.exists()
    content = output_file.read_text()

    # Check that the table structure is there
    assert content.count("<tr>") == 3  # Header + 2 data rows

    # Check full data row
    assert "<td>220</td>" in content
    assert "<td>0.85</td>" in content
    assert "<td>60</td>" in content

    # Check partial data row - should have empty cells for missing data
    assert "<td>2024-01-02 09:00:00</td>" in content
    assert "<td>Cycling (Indoor)</td>" in content

    # Locate the row for the partial summary to check for empty cells
    # A bit brittle, but good enough for this test
    rows = content.split("<tr>")
    partial_row = [r for r in rows if "2024-01-02" in r][0]
    cells = partial_row.split("<td>")

    # NP, IF, TSS are the last 3 cells. They should be empty or just contain whitespace.
    assert "</td>" * 3 in partial_row.replace(" ", "").replace("\n", "")
    assert "<td></td>" * 3 in partial_row.replace(" ", "").replace("\n", "")
64
tests/test_template_rendering_normalized_vars.py
Normal file
@@ -0,0 +1,64 @@
"""
Tests for template rendering with normalized variables.

Validates that [ReportGenerator](visualizers/report_generator.py) can render
HTML and Markdown templates using normalized keys from analysis and metadata.
"""

import pytest
from jinja2 import Environment, FileSystemLoader
from datetime import datetime

from analyzers.workout_analyzer import WorkoutAnalyzer
from models.workout import WorkoutData, WorkoutMetadata, SpeedData, HeartRateData
from visualizers.report_generator import ReportGenerator
from tests.test_analyzer_speed_and_normalized_naming import synthetic_workout_data


@pytest.fixture
def analysis_result(synthetic_workout_data):
    """Get analysis result from synthetic workout data."""
    analyzer = WorkoutAnalyzer()
    return analyzer.analyze_workout(synthetic_workout_data)


def test_template_rendering_with_normalized_variables(synthetic_workout_data, analysis_result):
    """
    Test that HTML and Markdown templates render successfully with normalized
    and sport/sub_sport variables.

    Validates that templates can access:
    - metadata.sport and metadata.sub_sport
    - summary.avg_speed_kmh and summary.avg_hr
    """
    report_gen = ReportGenerator()

    # Test HTML template rendering
    try:
        html_output = report_gen.generate_workout_report(synthetic_workout_data, analysis_result, format='html')
        assert isinstance(html_output, str)
        assert len(html_output) > 0
        # Check that sport and sub_sport appear in rendered output
        assert synthetic_workout_data.metadata.sport in html_output
        assert synthetic_workout_data.metadata.sub_sport in html_output
        # Check that normalized keys appear (as plausible numeric values)
        assert "Average Speed</td>\n <td>7.4 km/h" in html_output
        assert "Average Heart Rate</td>\n <td>133 bpm" in html_output
    except Exception as e:
        pytest.fail(f"HTML template rendering failed: {e}")

    # Test Markdown template rendering
    try:
        md_output = report_gen.generate_workout_report(synthetic_workout_data, analysis_result, format='markdown')
        assert isinstance(md_output, str)
        assert len(md_output) > 0
        # Check that sport and sub_sport appear in rendered output
        assert synthetic_workout_data.metadata.sport in md_output
        assert synthetic_workout_data.metadata.sub_sport in md_output
        # Check that normalized keys appear (as plausible numeric values)
        assert "Average Speed | 7.4 km/h" in md_output
        assert "Average Heart Rate | 133 bpm" in md_output
    except Exception as e:
        pytest.fail(f"Markdown template rendering failed: {e}")
99
tests/test_workout_templates_minute_section.py
Normal file
@@ -0,0 +1,99 @@
import pytest
from visualizers.report_generator import ReportGenerator


@pytest.fixture
def report_generator():
    return ReportGenerator()


def _get_base_context():
    """Provides a minimal, valid context for rendering."""
    return {
        "workout": {
            "metadata": {
                "sport": "Cycling",
                "sub_sport": "Road",
                "start_time": "2024-01-01 10:00:00",
                "total_duration": 120,
                "total_distance_km": 5.0,
                "avg_speed_kmh": 25.0,
                "avg_hr": 150,
                "avg_power": 200,
            },
            "summary": {
                "np": 210,
                "if": 0.8,
                "tss": 30,
            },
            "zones": {},
            "charts": {},
        },
        "report": {
            "generated_at": "2024-01-01T12:00:00",
            "version": "1.0.0",
        },
    }


def test_workout_report_renders_minute_section_when_present(report_generator):
    context = _get_base_context()
    context["minute_by_minute"] = [
        {
            "minute_index": 0,
            "distance_km": 0.5,
            "avg_speed_kmh": 30.0,
            "avg_cadence": 90,
            "avg_hr": 140,
            "max_hr": 145,
            "avg_gradient": 1.0,
            "elevation_change": 5,
            "avg_real_power": 210,
            "avg_power_estimate": None,
        }
    ]

    # Test HTML
    html_output = report_generator.generate_workout_report(context, None, "html")
    assert "<h3>Minute-by-Minute Breakdown</h3>" in html_output
    assert "<th>Minute</th>" in html_output
    assert "<td>0.50</td>" in html_output  # distance_km
    assert "<td>30.0</td>" in html_output  # avg_speed_kmh
    assert "<td>140</td>" in html_output  # avg_hr
    assert "<td>210</td>" in html_output  # avg_real_power

    # Test Markdown
    md_output = report_generator.generate_workout_report(context, None, "md")
    assert "### Minute-by-Minute Breakdown" in md_output
    assert "| Minute |" in md_output
    assert "| 0.50 |" in md_output
    assert "| 30.0 |" in md_output
    assert "| 140 |" in md_output
    assert "| 210 |" in md_output


def test_workout_report_omits_minute_section_when_absent(report_generator):
    context = _get_base_context()
    # Case 1: key is absent
    context_absent = context.copy()

    html_output_absent = report_generator.generate_workout_report(
        context_absent, None, "html"
    )
    assert "<h3>Minute-by-Minute Breakdown</h3>" not in html_output_absent

    md_output_absent = report_generator.generate_workout_report(
        context_absent, None, "md"
    )
    assert "### Minute-by-Minute Breakdown" not in md_output_absent

    # Case 2: key is present but empty
    context_empty = context.copy()
    context_empty["minute_by_minute"] = []

    html_output_empty = report_generator.generate_workout_report(
        context_empty, None, "html"
    )
    assert "<h3>Minute-by-Minute Breakdown</h3>" not in html_output_empty

    md_output_empty = report_generator.generate_workout_report(
        context_empty, None, "md"
    )
    assert "### Minute-by-Minute Breakdown" not in md_output_empty