mirror of https://github.com/sstent/FitTrack_ReportGenerator.git, synced 2026-01-26 17:12:28 +00:00
sync
@@ -1,86 +1,64 @@
 from datetime import datetime
 from unittest.mock import patch, MagicMock
 import shutil
 import pytest
 import time
 from fastapi.testclient import TestClient
 from api.main import app
 import os
 import zipfile
 import io
 from uuid import uuid4

 client = TestClient(app)

 # Performance Goal SC-002: Analysis of a typical 2-hour workout file MUST complete in under 30 seconds.
 # Performance Goal SC-004: Processing a batch of 100 workout files concurrently without generating errors or significant performance degradation.


 # Helper to create a dummy FIT file for testing
-def create_dummy_fit_file(file_path, duration_minutes=120):
-    # This is a very basic placeholder. A real dummy FIT file would be more complex.
-    # For actual performance testing, use a realistic 2-hour FIT file.
-    with open(file_path, "w") as f:
-        f.write(f"Dummy FIT file for {duration_minutes} minutes\n")
-        for i in range(duration_minutes * 60):
-            f.write(f"Time: {i}, Power: {200 + (i % 50)}, HR: {120 + (i % 20)}\n")
+def create_dummy_fit_file(file_path, source_file_path="/home/sstent/Projects/FitTrack_ReportGenerator/activity_207928738.fit"):
+    shutil.copy(source_file_path, file_path)


 # Helper to create a dummy ZIP file with multiple workout files
 def create_dummy_zip_file(num_files=100):
     zip_buffer = io.BytesIO()
     with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zf:
         for i in range(num_files):
             file_content = f"Dummy workout file {i}\n".encode('utf-8')
             zf.writestr(f"workout_{i}.fit", file_content)
     zip_buffer.seek(0)
     return zip_buffer
-@patch("src.db.models.WorkoutAnalysis")
-@patch("src.db.session.get_db")
-@patch("src.core.file_parser.FitParser")
-@patch("src.core.workout_analyzer.WorkoutAnalyzer")
-@patch("src.core.report_generator.ReportGenerator")
-def test_performance_single_workout_analysis(
-    mock_report_generator_cls,
-    mock_workout_analyzer_cls,
-    mock_fit_parser_cls,
-    mock_get_db,
-    mock_workout_analysis_cls,
-    tmp_path,
-):
-    # Mock database session
-    mock_db_session = MagicMock()
-    mock_get_db.return_value = mock_db_session
-    mock_db_session.add.return_value = None
-    mock_db_session.commit.return_value = None
-    mock_db_session.refresh.return_value = None
-    mock_db_session.query.return_value.filter.return_value.first.return_value = MagicMock(id=uuid4(), ftp_value=250.0)


+def test_performance_single_workout_analysis(tmp_path):
     # SC-002: Analysis of a typical 2-hour workout file MUST complete in under 30 seconds.
     dummy_fit_file_path = tmp_path / "2_hour_workout.fit"
-    create_dummy_fit_file(dummy_fit_file_path, duration_minutes=120)
+    create_dummy_fit_file(dummy_fit_file_path)

     start_time = time.time()
     with open(dummy_fit_file_path, "rb") as f:
         response = client.post(
             "/api/analyze/workout",
             files={"file": ("2_hour_workout.fit", f, "application/octet-stream")},
-            data={
-                "ftp_value": 250.0
-            }
+            data={"user_id": str(uuid4()), "ftp_value": 250.0},
         )
     end_time = time.time()
     elapsed_time = end_time - start_time

     assert response.status_code == 200
-    assert elapsed_time < 30, f"Single workout analysis took {elapsed_time:.2f} seconds, exceeding 30 seconds."
+    assert elapsed_time < 30, (
+        f"Single workout analysis took {elapsed_time:.2f} seconds, exceeding 30 seconds."
+    )
     print(f"Single workout analysis completed in {elapsed_time:.2f} seconds.")

 # This test is conceptual. True concurrent batch processing performance testing
 # would require a more sophisticated setup (e.g., using a load testing tool like Locust).
 # This test only checks the sequential processing time of a batch.
 def test_performance_batch_analysis_sequential(tmp_path):
     # SC-004: Processing a batch of 100 workout files concurrently without generating errors or significant performance degradation.
     # This test simulates processing 100 files sequentially within the batch endpoint.
     # For true concurrency testing, external load testing tools are recommended.
     num_files = 10
     dummy_zip_content = create_dummy_zip_file(num_files)

     start_time = time.time()
     response = client.post(
         "/api/analyze/batch",
         files={"zip_file": ("batch_workouts.zip", dummy_zip_content.getvalue(), "application/zip")},
         data={
             "ftp_value": 250.0
         }
     )
     end_time = time.time()
     elapsed_time = end_time - start_time

     assert response.status_code == 200
     response_json = response.json()
     assert response_json["status"] != "failed"
     assert response_json["total_files"] == num_files

     # Define a reasonable threshold for sequential processing. This will vary greatly.
     # For 100 files, if each takes ~1 second, then 100 seconds is a rough estimate.
     # This threshold needs to be adjusted based on actual system performance and file complexity.
     expected_max_time = num_files * 5  # e.g., 5 seconds per file as a very rough estimate
     assert elapsed_time < expected_max_time, f"Batch analysis of {num_files} files took {elapsed_time:.2f} seconds, exceeding {expected_max_time} seconds."
     print(f"Batch analysis of {num_files} files completed in {elapsed_time:.2f} seconds.")
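
The copied fixture in this commit lives at an absolute path under /home/sstent, so the performance tests will only run on that one machine. One possible variation, a sketch rather than anything the repository does, is to resolve the fixture from an environment variable (FITTRACK_SAMPLE_FIT is an assumed name) and skip cleanly when it is missing:

import os
import shutil

import pytest


def create_dummy_fit_file(file_path, source_file_path=None):
    # Sketch only: FITTRACK_SAMPLE_FIT is an assumed variable name, not part of the repo.
    source_file_path = source_file_path or os.environ.get("FITTRACK_SAMPLE_FIT")
    if not source_file_path or not os.path.exists(source_file_path):
        pytest.skip("Set FITTRACK_SAMPLE_FIT to a real 2-hour FIT file to run performance tests.")
    shutil.copy(source_file_path, file_path)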
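create_dummy_zip_file still packs plain-text entries named workout_{i}.fit, so if the batch endpoint genuinely parses FIT data, every entry will fail to parse and the assert response_json["status"] != "failed" check only holds if the API tolerates per-file errors. A more representative batch fixture, sketched here under the assumption that the same real FIT sample is available, could reuse its bytes for every entry:

import io
import zipfile


def create_realistic_zip_file(source_file_path, num_files=100):
    # Sketch only: duplicates one real FIT payload under different names so the
    # batch endpoint exercises actual parsing instead of rejecting dummy text.
    with open(source_file_path, "rb") as f:
        fit_bytes = f.read()
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zf:
        for i in range(num_files):
            zf.writestr(f"workout_{i}.fit", fit_bytes)
    zip_buffer.seek(0)
    return zip_buffer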
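As the comments in the test note, test_performance_batch_analysis_sequential only times one sequential request, and real SC-004 concurrency measurement belongs in a load-testing tool such as Locust. A rough in-process approximation, sketched here with an assumed test name and worker count and reusing the module-level client and create_dummy_fit_file, is to fire several single-workout requests from worker threads:

import concurrent.futures
import time
from uuid import uuid4


def _analyze_once(fit_path):
    # Posts one workout to the API; reuses the module-level TestClient.
    with open(fit_path, "rb") as f:
        return client.post(
            "/api/analyze/workout",
            files={"file": (fit_path.name, f, "application/octet-stream")},
            data={"user_id": str(uuid4()), "ftp_value": 250.0},
        )


def test_performance_concurrent_workout_analysis(tmp_path):
    num_requests = 10  # scaled down from the 100-file SC-004 target
    fit_path = tmp_path / "workout.fit"
    create_dummy_fit_file(fit_path)

    start_time = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_requests) as pool:
        responses = list(pool.map(_analyze_once, [fit_path] * num_requests))
    elapsed_time = time.time() - start_time

    assert all(r.status_code == 200 for r in responses)
    print(f"{num_requests} concurrent analyses finished in {elapsed_time:.2f} seconds.")

Numbers from an in-process TestClient are only indicative; a Locust run against a deployed instance remains the better way to judge SC-004.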