diff --git a/FitnessSync/backend/.coverage b/FitnessSync/backend/.coverage new file mode 100644 index 0000000..68f392d Binary files /dev/null and b/FitnessSync/backend/.coverage differ diff --git a/FitnessSync/backend/README.md b/FitnessSync/backend/README.md index c5c896c..f594bf2 100644 --- a/FitnessSync/backend/README.md +++ b/FitnessSync/backend/README.md
@@ -118,29 +118,56 @@ docker-compose up --build
 backend/
 ├── main.py
 ├── src/
-│   ├── models/
-│   │   ├── __init__.py
-│   │   ├── config.py
-│   │   ├── weight_record.py
-│   │   ├── activity.py
-│   │   ├── health_metric.py
-│   │   ├── sync_log.py
-│   │   └── api_token.py
-│   ├── services/
-│   │   ├── __init__.py
-│   │   ├── fitbit_client.py
-│   │   ├── garmin_client.py
-│   │   ├── postgresql_manager.py
-│   │   └── sync_app.py
 │   ├── api/
 │   │   ├── __init__.py
-│   │   ├── auth.py
-│   │   ├── sync.py
-│   │   ├── setup.py
-│   │   └── metrics.py
+│   │   ├── activities.py
+│   │   ├── auth.py            # Refactored from setup.py
+│   │   ├── bike_setups.py
+│   │   ├── config_routes.py   # Refactored from setup.py
+│   │   ├── logs.py
+│   │   ├── metrics.py
+│   │   ├── scheduling.py
+│   │   ├── status.py
+│   │   └── sync.py
+│   ├── models/
+│   │   ├── __init__.py
+│   │   ├── activity.py
+│   │   ├── activity_state.py
+│   │   ├── api_token.py
+│   │   ├── auth_status.py
+│   │   ├── base.py
+│   │   ├── config.py
+│   │   ├── health_metric.py
+│   │   ├── health_state.py
+│   │   ├── job.py
+│   │   ├── scheduled_job.py
+│   │   ├── sync_log.py
+│   │   └── weight_record.py
+│   ├── routers/
+│   │   ├── __init__.py
+│   │   └── web.py
+│   ├── services/
+│   │   ├── garmin/
+│   │   │   ├── auth.py
+│   │   │   ├── client.py
+│   │   │   └── data.py
+│   │   ├── sync/
+│   │   │   ├── activity.py
+│   │   │   ├── health.py
+│   │   │   ├── utils.py
+│   │   │   └── weight.py
+│   │   ├── __init__.py
+│   │   ├── fitbit_client.py
+│   │   ├── garth_helper.py
+│   │   ├── job_manager.py
+│   │   ├── postgresql_manager.py
+│   │   ├── scheduler.py
+│   │   └── sync_app.py
 │   └── utils/
 │       ├── __init__.py
-│       └── helpers.py
+│       ├── config.py
+│       ├── helpers.py
+│       └── logging_config.py
 ├── templates/
 │   ├── index.html
 │   └── setup.html
diff --git a/FitnessSync/backend/__pycache__/main.cpython-311.pyc b/FitnessSync/backend/__pycache__/main.cpython-311.pyc index c59b2c8..e8b58cb 100644 Binary files a/FitnessSync/backend/__pycache__/main.cpython-311.pyc and b/FitnessSync/backend/__pycache__/main.cpython-311.pyc differ diff --git a/FitnessSync/backend/__pycache__/main.cpython-313.pyc b/FitnessSync/backend/__pycache__/main.cpython-313.pyc index 9a72b9f..45ac203 100644 Binary files a/FitnessSync/backend/__pycache__/main.cpython-313.pyc and b/FitnessSync/backend/__pycache__/main.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/1e157f880117_create_jobs_table.py b/FitnessSync/backend/alembic/versions/1e157f880117_create_jobs_table.py new file mode 100644 index 0000000..9b85557 --- /dev/null +++ b/FitnessSync/backend/alembic/versions/1e157f880117_create_jobs_table.py
@@ -0,0 +1,46 @@
+"""create_jobs_table
+
+Revision ID: 1e157f880117
+Revises: bd21a0528865
+Create Date: 2026-01-03 18:45:18.109625
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = '1e157f880117'
+down_revision: Union[str, None] = 'bd21a0528865'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust!
### + op.create_table('jobs', + sa.Column('id', sa.String(), nullable=False), + sa.Column('operation', sa.String(), nullable=False), + sa.Column('status', sa.String(), nullable=False), + sa.Column('start_time', sa.DateTime(timezone=True), nullable=False), + sa.Column('end_time', sa.DateTime(timezone=True), nullable=True), + sa.Column('progress', sa.Integer(), nullable=True), + sa.Column('message', sa.Text(), nullable=True), + sa.Column('result', sa.JSON(), nullable=True), + sa.Column('cancel_requested', sa.Boolean(), nullable=True), + sa.Column('paused', sa.Boolean(), nullable=True), + sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), + sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_index(op.f('ix_jobs_id'), 'jobs', ['id'], unique=False) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f('ix_jobs_id'), table_name='jobs') + op.drop_table('jobs') + # ### end Alembic commands ### diff --git a/FitnessSync/backend/alembic/versions/73e349ef1d88_add_bike_setup_to_activity.py b/FitnessSync/backend/alembic/versions/73e349ef1d88_add_bike_setup_to_activity.py new file mode 100644 index 0000000..d339d49 --- /dev/null +++ b/FitnessSync/backend/alembic/versions/73e349ef1d88_add_bike_setup_to_activity.py @@ -0,0 +1,32 @@ +"""add bike setup to activity + +Revision ID: 73e349ef1d88 +Revises: 95af0e911216 +Create Date: 2026-01-07 13:47:24.670293 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = '73e349ef1d88' +down_revision: Union[str, None] = '95af0e911216' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('activities', sa.Column('bike_setup_id', sa.Integer(), nullable=True)) + op.create_foreign_key(None, 'activities', 'bike_setups', ['bike_setup_id'], ['id']) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint(None, 'activities', type_='foreignkey') + op.drop_column('activities', 'bike_setup_id') + # ### end Alembic commands ### diff --git a/FitnessSync/backend/alembic/versions/85c60ed462bf_add_state_tables.py b/FitnessSync/backend/alembic/versions/85c60ed462bf_add_state_tables.py new file mode 100644 index 0000000..f15f860 --- /dev/null +++ b/FitnessSync/backend/alembic/versions/85c60ed462bf_add_state_tables.py @@ -0,0 +1,53 @@ +"""Add state tables + +Revision ID: 85c60ed462bf +Revises: b5a6d7ef97a5 +Create Date: 2026-01-01 17:01:04.348349 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = '85c60ed462bf' +down_revision: Union[str, None] = 'b5a6d7ef97a5' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('garmin_activity_state', + sa.Column('garmin_activity_id', sa.String(), nullable=False), + sa.Column('activity_name', sa.String(), nullable=True), + sa.Column('activity_type', sa.String(), nullable=True), + sa.Column('start_time', sa.DateTime(), nullable=True), + sa.Column('sync_status', sa.String(), nullable=True), + sa.Column('last_seen', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), + sa.PrimaryKeyConstraint('garmin_activity_id') + ) + op.create_index(op.f('ix_garmin_activity_state_garmin_activity_id'), 'garmin_activity_state', ['garmin_activity_id'], unique=False) + op.create_table('health_sync_state', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('date', sa.Date(), nullable=False), + sa.Column('metric_type', sa.String(), nullable=False), + sa.Column('source', sa.String(), nullable=False), + sa.Column('sync_status', sa.String(), nullable=True), + sa.Column('last_seen', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('date', 'metric_type', 'source', name='uq_health_state') + ) + op.create_index(op.f('ix_health_sync_state_id'), 'health_sync_state', ['id'], unique=False) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f('ix_health_sync_state_id'), table_name='health_sync_state') + op.drop_table('health_sync_state') + op.drop_index(op.f('ix_garmin_activity_state_garmin_activity_id'), table_name='garmin_activity_state') + op.drop_table('garmin_activity_state') + # ### end Alembic commands ### diff --git a/FitnessSync/backend/alembic/versions/95af0e911216_add_bike_setups_table.py b/FitnessSync/backend/alembic/versions/95af0e911216_add_bike_setups_table.py new file mode 100644 index 0000000..267209d --- /dev/null +++ b/FitnessSync/backend/alembic/versions/95af0e911216_add_bike_setups_table.py @@ -0,0 +1,41 @@ +"""add bike setups table + +Revision ID: 95af0e911216 +Revises: 1e157f880117 +Create Date: 2026-01-07 11:46:19.649500 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = '95af0e911216' +down_revision: Union[str, None] = '1e157f880117' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('bike_setups', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('frame', sa.String(), nullable=False), + sa.Column('chainring', sa.Integer(), nullable=False), + sa.Column('rear_cog', sa.Integer(), nullable=False), + sa.Column('name', sa.String(), nullable=True), + sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), + sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_index(op.f('ix_bike_setups_id'), 'bike_setups', ['id'], unique=False) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index(op.f('ix_bike_setups_id'), table_name='bike_setups') + op.drop_table('bike_setups') + # ### end Alembic commands ### diff --git a/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-311.pyc b/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-311.pyc new file mode 100644 index 0000000..e18edf2 Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-311.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-313.pyc new file mode 100644 index 0000000..afbe5cf Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/1e157f880117_create_jobs_table.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-311.pyc b/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-311.pyc new file mode 100644 index 0000000..d53f262 Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-311.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-313.pyc new file mode 100644 index 0000000..dfe9e77 Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/73e349ef1d88_add_bike_setup_to_activity.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-311.pyc b/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-311.pyc new file mode 100644 index 0000000..52e954d Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-311.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-313.pyc new file mode 100644 index 0000000..52520cc Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/85c60ed462bf_add_state_tables.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-311.pyc b/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-311.pyc new file mode 100644 index 0000000..f74392b Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-311.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-313.pyc new file mode 100644 index 0000000..2c0efa5 Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/95af0e911216_add_bike_setups_table.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/b5a6d7ef97a5_add_fitbit_redirect_uri.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/b5a6d7ef97a5_add_fitbit_redirect_uri.cpython-313.pyc new file mode 100644 index 0000000..e83e7c2 Binary files 
/dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/b5a6d7ef97a5_add_fitbit_redirect_uri.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-311.pyc b/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-311.pyc new file mode 100644 index 0000000..3af1388 Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-311.pyc differ diff --git a/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-313.pyc b/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-313.pyc new file mode 100644 index 0000000..93bf42e Binary files /dev/null and b/FitnessSync/backend/alembic/versions/__pycache__/bd21a0528865_expand_activity_schema_metrics.cpython-313.pyc differ diff --git a/FitnessSync/backend/alembic/versions/bd21a0528865_expand_activity_schema_metrics.py b/FitnessSync/backend/alembic/versions/bd21a0528865_expand_activity_schema_metrics.py new file mode 100644 index 0000000..6404a12 --- /dev/null +++ b/FitnessSync/backend/alembic/versions/bd21a0528865_expand_activity_schema_metrics.py @@ -0,0 +1,64 @@ +"""expand_activity_schema_metrics + +Revision ID: bd21a0528865 +Revises: 85c60ed462bf +Create Date: 2026-01-01 22:53:14.358635 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = 'bd21a0528865' +down_revision: Union[str, None] = '85c60ed462bf' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('activities', sa.Column('distance', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('calories', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('avg_hr', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('max_hr', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('avg_speed', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('max_speed', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('elevation_gain', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('elevation_loss', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('avg_cadence', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('max_cadence', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('steps', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('aerobic_te', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('anaerobic_te', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('avg_power', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('max_power', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('norm_power', sa.Integer(), nullable=True)) + op.add_column('activities', sa.Column('tss', sa.Float(), nullable=True)) + op.add_column('activities', sa.Column('vo2_max', sa.Float(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
###
+    op.drop_column('activities', 'vo2_max')
+    op.drop_column('activities', 'tss')
+    op.drop_column('activities', 'norm_power')
+    op.drop_column('activities', 'max_power')
+    op.drop_column('activities', 'avg_power')
+    op.drop_column('activities', 'anaerobic_te')
+    op.drop_column('activities', 'aerobic_te')
+    op.drop_column('activities', 'steps')
+    op.drop_column('activities', 'max_cadence')
+    op.drop_column('activities', 'avg_cadence')
+    op.drop_column('activities', 'elevation_loss')
+    op.drop_column('activities', 'elevation_gain')
+    op.drop_column('activities', 'max_speed')
+    op.drop_column('activities', 'avg_speed')
+    op.drop_column('activities', 'max_hr')
+    op.drop_column('activities', 'avg_hr')
+    op.drop_column('activities', 'calories')
+    op.drop_column('activities', 'distance')
+    # ### end Alembic commands ###
diff --git a/FitnessSync/garth_reference.md b/FitnessSync/backend/docs/references/garth_reference.md similarity index 100% rename from FitnessSync/garth_reference.md rename to FitnessSync/backend/docs/references/garth_reference.md diff --git a/FitnessSync/backend/main.py b/FitnessSync/backend/main.py index 4bbc604..0151e8a 100644 --- a/FitnessSync/backend/main.py +++ b/FitnessSync/backend/main.py
@@ -18,7 +18,7 @@ async def lifespan(app: FastAPI):
     alembic_cfg = Config("alembic.ini")
     database_url = os.getenv("DATABASE_URL")
-    if database_url:
+    if database_url and not os.getenv("TESTING"):
         alembic_cfg.set_main_option("sqlalchemy.url", database_url)
         try:
             command.upgrade(alembic_cfg, "head")
@@ -28,9 +28,22 @@
     else:
         logger.warning("DATABASE_URL not set, skipping migrations.")
 
+    # Start Scheduler
+    try:
+        from src.services.scheduler import scheduler
+        scheduler.start()
+        logger.info("Scheduler started.")
+    except Exception as e:
+        logger.error(f"Failed to start scheduler: {e}")
+
     yield
     logger.info("--- Application Shutting Down ---")
+    try:
+        from src.services.scheduler import scheduler
+        scheduler.stop()
+    except Exception:
+        pass
 
 app = FastAPI(lifespan=lifespan)
@@ -50,25 +63,26 @@ async def log_requests(request: Request, call_next):
 app.mount("/static", StaticFiles(directory="../static"), name="static")
 templates = Jinja2Templates(directory="templates")
 
-from src.api import status, sync, setup, logs, metrics, activities
+from src.api import status, sync, auth, logs, metrics, activities, scheduling, config_routes
 
 app.include_router(status.router, prefix="/api")
 app.include_router(sync.router, prefix="/api")
-app.include_router(setup.router, prefix="/api")
+app.include_router(auth.router, prefix="/api")
+app.include_router(config_routes.router, prefix="/api")
 app.include_router(logs.router, prefix="/api")
 app.include_router(metrics.router, prefix="/api")
+
 app.include_router(activities.router, prefix="/api")
+app.include_router(scheduling.router, prefix="/api")
+
+from src.api import bike_setups
+app.include_router(bike_setups.router)
 
-@app.get("/")
-async def read_root(request: Request):
-    return templates.TemplateResponse("index.html", {"request": request})
 
-@app.get("/activities")
-async def activities_page(request: Request):
-    return templates.TemplateResponse("activities.html", {"request": request})
+
+from src.routers import web
+
+app.include_router(web.router)
+
 
-@app.get("/setup")
-async def setup_page(request: Request):
-    return templates.TemplateResponse("setup.html", {"request": request})
diff --git a/FitnessSync/backend/requirements.txt b/FitnessSync/backend/requirements.txt index
7392245..d98fd38 100644 --- a/FitnessSync/backend/requirements.txt +++ b/FitnessSync/backend/requirements.txt @@ -14,4 +14,4 @@ httpx==0.25.2 aiofiles==23.2.1 pytest==7.4.3 pytest-asyncio==0.21.1 -alembic==1.13.1 \ No newline at end of file +alembic==1.13.1 diff --git a/FitnessSync/backend/src/api/__pycache__/activities.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/activities.cpython-311.pyc index b35dacc..5062520 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/activities.cpython-311.pyc and b/FitnessSync/backend/src/api/__pycache__/activities.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/activities.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/activities.cpython-313.pyc index 4e01e90..fb99fa5 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/activities.cpython-313.pyc and b/FitnessSync/backend/src/api/__pycache__/activities.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/auth.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/auth.cpython-311.pyc new file mode 100644 index 0000000..26a7645 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/auth.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/auth.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/auth.cpython-313.pyc new file mode 100644 index 0000000..8e56a00 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/auth.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-311.pyc new file mode 100644 index 0000000..1f91ad2 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-313.pyc new file mode 100644 index 0000000..50d2df2 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/bike_setups.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-311.pyc new file mode 100644 index 0000000..142f054 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-313.pyc new file mode 100644 index 0000000..94763d0 Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/config_routes.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/metrics.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/metrics.cpython-311.pyc index f8caab0..5cc30c9 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/metrics.cpython-311.pyc and b/FitnessSync/backend/src/api/__pycache__/metrics.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/metrics.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/metrics.cpython-313.pyc index 951e789..4806595 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/metrics.cpython-313.pyc and b/FitnessSync/backend/src/api/__pycache__/metrics.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-311.pyc new file mode 100644 index 0000000..52e01a6 
Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-313.pyc new file mode 100644 index 0000000..6b5cbeb Binary files /dev/null and b/FitnessSync/backend/src/api/__pycache__/scheduling.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/status.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/status.cpython-311.pyc index 1856b51..2f56181 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/status.cpython-311.pyc and b/FitnessSync/backend/src/api/__pycache__/status.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/status.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/status.cpython-313.pyc index 7b175df..d149b9a 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/status.cpython-313.pyc and b/FitnessSync/backend/src/api/__pycache__/status.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/sync.cpython-311.pyc b/FitnessSync/backend/src/api/__pycache__/sync.cpython-311.pyc index 6d78321..e6f5c06 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/sync.cpython-311.pyc and b/FitnessSync/backend/src/api/__pycache__/sync.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/api/__pycache__/sync.cpython-313.pyc b/FitnessSync/backend/src/api/__pycache__/sync.cpython-313.pyc index e55386d..7874dde 100644 Binary files a/FitnessSync/backend/src/api/__pycache__/sync.cpython-313.pyc and b/FitnessSync/backend/src/api/__pycache__/sync.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/api/activities.py b/FitnessSync/backend/src/api/activities.py index 7ed0aef..b233064 100644 --- a/FitnessSync/backend/src/api/activities.py +++ b/FitnessSync/backend/src/api/activities.py @@ -1,4 +1,4 @@ -from fastapi import APIRouter, Query, Response, HTTPException, Depends +from fastapi import APIRouter, Query, Response, HTTPException, Depends, BackgroundTasks from pydantic import BaseModel from typing import List, Optional, Dict, Any from sqlalchemy import func @@ -8,6 +8,15 @@ from ..services.postgresql_manager import PostgreSQLManager from sqlalchemy.orm import Session from ..utils.config import config +# New Sync Imports +from ..services.job_manager import job_manager +from ..models.activity_state import GarminActivityState +import fitdecode +import io +import xml.etree.ElementTree as ET +from datetime import datetime + + router = APIRouter() logger = logging.getLogger(__name__) @@ -17,6 +26,13 @@ def get_db(): with db_manager.get_db_session() as session: yield session +class BikeSetupInfo(BaseModel): + id: int + frame: str + chainring: int + rear_cog: int + name: Optional[str] = None + class ActivityResponse(BaseModel): id: Optional[int] = None garmin_activity_id: Optional[str] = None @@ -28,6 +44,28 @@ class ActivityResponse(BaseModel): file_type: Optional[str] = None download_status: Optional[str] = None downloaded_at: Optional[str] = None + bike_setup: Optional[BikeSetupInfo] = None + +class ActivityDetailResponse(ActivityResponse): + distance: Optional[float] = None + calories: Optional[float] = None + avg_hr: Optional[int] = None + max_hr: Optional[int] = None + avg_speed: Optional[float] = None + max_speed: Optional[float] = None + elevation_gain: Optional[float] = None + elevation_loss: Optional[float] = None + avg_cadence: Optional[int] = None + max_cadence: Optional[int] = None + steps: 
Optional[int] = None
+    aerobic_te: Optional[float] = None
+    anaerobic_te: Optional[float] = None
+    avg_power: Optional[int] = None
+    max_power: Optional[int] = None
+    norm_power: Optional[int] = None
+    tss: Optional[float] = None
+    vo2_max: Optional[float] = None
+
 @router.get("/activities/list", response_model=List[ActivityResponse])
 async def list_activities(
@@ -36,28 +74,56 @@
     db: Session = Depends(get_db)
 ):
     """
-    Return metadata for all downloaded/available activities.
+    Return metadata for all scanned activities, indicating download status.
     """
     try:
         logger.info(f"Listing activities with limit={limit}, offset={offset}")
 
-        # Query the database for activities
-        activities = db.query(Activity).offset(offset).limit(limit).all()
+        # Query GarminActivityState (all known activities)
+        # Left join with Activity to get file status
+
+        results = (
+            db.query(GarminActivityState, Activity)
+            .outerjoin(Activity, GarminActivityState.garmin_activity_id == Activity.garmin_activity_id)
+            .order_by(GarminActivityState.start_time.desc())
+            .offset(offset)
+            .limit(limit)
+            .all()
+        )
 
-        # Convert SQLAlchemy objects to Pydantic models
         activity_responses = []
-        for activity in activities:
+        for state, activity in results:
+            # State metadata comes from the Garth scan; the Activity row comes from
+            # the parsed file, so Activity data is richer whenever a download exists.
+
+            is_downloaded = (
+                activity is not None and
+                activity.download_status == 'downloaded' and
+                activity.file_content is not None
+            )
+
+            download_status = 'downloaded' if is_downloaded else 'pending'
+            # (state.sync_status tracks the same thing: 'new' vs 'synced', where 'synced' implies downloaded.)
+
+            # Construct response
             activity_responses.append(
                 ActivityResponse(
-                    id=activity.id,
-                    garmin_activity_id=activity.garmin_activity_id,
-                    activity_name=activity.activity_name,
-                    activity_type=activity.activity_type,
-                    start_time=activity.start_time.isoformat() if activity.start_time else None,
-                    duration=activity.duration,
-                    file_type=activity.file_type,
-                    download_status=activity.download_status,
-                    downloaded_at=activity.downloaded_at.isoformat() if activity.downloaded_at else None
+                    id=activity.id if activity else None,
+                    garmin_activity_id=state.garmin_activity_id,
+                    activity_name=state.activity_name,
+                    activity_type=state.activity_type,
+                    start_time=state.start_time.isoformat() if state.start_time else None,
+                    duration=activity.duration if activity else None,  # duration comes from the parsed file; the scan does not fetch it
+                    file_type=activity.file_type if activity else None,
+                    download_status=download_status,
+                    downloaded_at=activity.downloaded_at.isoformat() if (activity and activity.downloaded_at) else None,
+                    bike_setup=BikeSetupInfo(
+                        id=activity.bike_setup.id,
+                        frame=activity.bike_setup.frame,
+                        chainring=activity.bike_setup.chainring,
+                        rear_cog=activity.bike_setup.rear_cog,
+                        name=activity.bike_setup.name
+                    ) if (activity and activity.bike_setup) else None
                 )
             )
@@ -117,7 +187,14 @@ async def query_activities(
                     duration=activity.duration,
                     file_type=activity.file_type,
                     download_status=activity.download_status,
-                    downloaded_at=activity.downloaded_at.isoformat() if activity.downloaded_at else None
+                    downloaded_at=activity.downloaded_at.isoformat() if activity.downloaded_at else None,
+                    bike_setup=BikeSetupInfo(
+                        id=activity.bike_setup.id,
+                        frame=activity.bike_setup.frame,
+                        chainring=activity.bike_setup.chainring,
+                        rear_cog=activity.bike_setup.rear_cog,
+                        name=activity.bike_setup.name
+                    ) if activity.bike_setup else None
                 )
             )
@@ -172,6 +249,79 @@ async def download_activity(activity_id: str, db: Session = Depends(get_db)):
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error downloading activity: {str(e)}")
 
+@router.get("/activities/{activity_id}/details", response_model=ActivityDetailResponse)
+async def get_activity_details(activity_id: str, db: Session = Depends(get_db)):
+    """
+    Get full details for a specific activity.
+    """
+    try:
+        activity = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first()
+        if not activity:
+            raise HTTPException(status_code=404, detail="Activity not found")
+
+        # Fallback: extract a summary from the file when key DB fields are missing
+        overrides = {}
+        if activity.file_content and (activity.distance is None or activity.elevation_gain is None or activity.avg_hr is None):
+            try:
+                if activity.file_type == 'fit':
+                    overrides = _extract_summary_from_fit(activity.file_content)
+                elif activity.file_type == 'tcx':
+                    # overrides = _extract_summary_from_tcx(activity.file_content)  # Optional TODO
+                    pass
+            except Exception as e:
+                logger.warning(f"Failed to extract summary from file: {e}")
+
+        # Prefer the stored DB value; fall back to the file-derived override
+        def val(attr, key):
+            v = getattr(activity, attr)
+            if v is not None: return v
+            return overrides.get(key)
+
+        return ActivityDetailResponse(
+            id=activity.id,
+            garmin_activity_id=activity.garmin_activity_id,
+            activity_name=activity.activity_name,
+            activity_type=activity.activity_type,
+            start_time=activity.start_time.isoformat() if activity.start_time else None,
+            duration=val('duration', 'total_timer_time'),
+            file_type=activity.file_type,
+            download_status=activity.download_status,
+            downloaded_at=activity.downloaded_at.isoformat() if activity.downloaded_at else None,
+            # Extended metrics
+            distance=val('distance', 'total_distance'),
+            calories=val('calories', 'total_calories'),
+            avg_hr=val('avg_hr', 'avg_heart_rate'),
+            max_hr=val('max_hr', 'max_heart_rate'),
+            avg_speed=val('avg_speed', 'enhanced_avg_speed'),  # fallback to avg_speed is handled in the extractor
+            max_speed=val('max_speed', 'enhanced_max_speed'),
+            elevation_gain=val('elevation_gain', 'total_ascent'),
+            elevation_loss=val('elevation_loss', 'total_descent'),
+            avg_cadence=val('avg_cadence', 'avg_cadence'),
+            max_cadence=val('max_cadence', 'max_cadence'),
+            steps=activity.steps,  # the session message rarely carries a step count
+            aerobic_te=val('aerobic_te', 'total_training_effect'),
+            anaerobic_te=val('anaerobic_te', 'total_anaerobic_training_effect'),
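+            # Power-derived fields below (avg/max power, normalized power, TSS) are typically only present when the source file carries power data.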
+            avg_power=val('avg_power', 'avg_power'),
+            max_power=val('max_power', 'max_power'),
+            norm_power=val('norm_power', 'normalized_power'),
+            tss=val('tss', 'training_stress_score'),
+            vo2_max=activity.vo2_max,  # not part of the simple session message; use the stored value if any
+            bike_setup=BikeSetupInfo(
+                id=activity.bike_setup.id,
+                frame=activity.bike_setup.frame,
+                chainring=activity.bike_setup.chainring,
+                rear_cog=activity.bike_setup.rear_cog,
+                name=activity.bike_setup.name
+            ) if activity.bike_setup else None
+        )
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting activity details: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 # Import necessary auth dependencies
 from ..models.api_token import APIToken
 import garth
@@ -238,4 +387,423 @@
         raise
     except Exception as e:
         logger.error(f"Error in redownload_activity_endpoint: {e}")
-        raise HTTPException(status_code=500, detail=f"Error processing redownload: {str(e)}")
\ No newline at end of file
+        raise HTTPException(status_code=500, detail=f"Error processing redownload: {str(e)}")
+
+# New Sync Endpoints
+
+def run_scan_job(job_id: str, days_back: int, db_session_factory):
+    """Background task wrapper for scan"""
+    try:
+        from ..services.garmin.client import GarminClient
+        from ..services.sync_app import SyncApp
+    except Exception as e:
+        logger.error(f"Import error in background job: {e}")
+        job_manager.fail_job(job_id, f"Import error: {str(e)}")
+        return
+
+    try:
+        with db_session_factory() as db:
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client)
+
+            job_manager.update_job(job_id, status="running", progress=0)
+            sync_app.scan_activities(days_back=days_back)
+            job_manager.complete_job(job_id)
+
+    except Exception as e:
+        logger.error(f"Scan job failed: {e}")
+        job_manager.fail_job(job_id, str(e))
+
+def run_sync_job(job_id: str, limit: int, db_session_factory):
+    """Background task wrapper for sync pending"""
+    try:
+        from ..services.garmin.client import GarminClient
+        from ..services.sync_app import SyncApp
+    except Exception as e:
+        logger.error(f"Import error in background job: {e}")
+        job_manager.fail_job(job_id, f"Import error: {str(e)}")
+        return
+
+    with db_session_factory() as db:
+        try:
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client)
+
+            # sync_pending_activities handles job updates
+            sync_app.sync_pending_activities(limit=limit, job_id=job_id)
+
+        except Exception as e:
+            logger.error(f"Sync job failed: {e}")
+            job_manager.fail_job(job_id, str(e))
+
+
+@router.post("/activities/sync/scan")
+async def scan_activities_trigger(
+    background_tasks: BackgroundTasks,
+    days_back: int = Query(30, description="Number of days to scan back for new activities")
+):
+    """Trigger background scan of metadata"""
+    job_id = job_manager.create_job("scan_activities")
+
+    # The background task needs its own DB session factory
+    db_manager = PostgreSQLManager(config.DATABASE_URL)
+    # (the wrapper opens it as a context manager)
+
+    background_tasks.add_task(run_scan_job, job_id, days_back, db_manager.get_db_session)
+    return {"job_id": job_id, "status": "started"}
+
+@router.post("/activities/sync/pending")
+async def sync_pending_trigger(
+    background_tasks: BackgroundTasks,
+    limit: Optional[int] = Query(None, description="Limit number of activities to sync")
+):
+    """Trigger background sync of pending activities"""
+    job_id = job_manager.create_job("sync_pending_activities")
+
+    db_manager = PostgreSQLManager(config.DATABASE_URL)
+    background_tasks.add_task(run_sync_job, job_id, limit, db_manager.get_db_session)
+    return {"job_id": job_id, "status": "started"}
+
+@router.get("/activities/sync/status")
+async def get_sync_status_summary(db: Session = Depends(get_db)):
+    """Get counts of activities by sync status"""
+    try:
+        stats = db.query(
+            GarminActivityState.sync_status,
+            func.count(GarminActivityState.garmin_activity_id)
+        ).group_by(GarminActivityState.sync_status).all()
+
+        return {s[0]: s[1] for s in stats}
+    except Exception as e:
+        logger.error(f"Error getting sync status: {e}")
+        return {}
+
+
+def _extract_points_from_fit(file_content: bytes) -> List[List[float]]:
+    """
+    Extract [lon, lat] points from a FIT file content.
+    Returns a list of [lon, lat].
+    """
+    points = []
+    try:
+        with io.BytesIO(file_content) as f:
+            with fitdecode.FitReader(f) as fit:
+                for frame in fit:
+                    if frame.frame_type == fitdecode.FIT_FRAME_DATA and frame.name == 'record':
+                        # Check for position_lat and position_long
+                        # Garmin stores lat/long as semicircles. Convert to degrees: semicircle * (180 / 2^31)
+                        if frame.has_field('position_lat') and frame.has_field('position_long'):
+                            lat_sc = frame.get_value('position_lat')
+                            lon_sc = frame.get_value('position_long')
+
+                            if lat_sc is not None and lon_sc is not None:
+                                lat = lat_sc * (180.0 / 2**31)
+                                lon = lon_sc * (180.0 / 2**31)
+                                points.append([lon, lat])
+    except Exception as e:
+        logger.error(f"Error parsing FIT file: {e}")
+        # Return what we have or empty
+    return points
+
+def _extract_points_from_tcx(file_content: bytes) -> List[List[float]]:
+    """
+    Extract [lon, lat] points from a TCX file content.
+    """
+    points = []
+    try:
+        # TCX is XML
+        # Namespace usually exists
+        root = ET.fromstring(file_content)
+        # Stdlib ElementTree makes namespaced lookups awkward, so rather than handling
+        # the TrainingCenterDatabase namespace explicitly, we match tags by suffix.
+
+        for trkpt in root.iter():
+            if trkpt.tag.endswith('Trackpoint'):
+                lat = None
+                lon = None
+                for child in trkpt.iter():
+                    if child.tag.endswith('LatitudeDegrees'):
+                        try: lat = float(child.text)
+                        except (TypeError, ValueError): pass
+                    elif child.tag.endswith('LongitudeDegrees'):
+                        try: lon = float(child.text)
+                        except (TypeError, ValueError): pass
+
+                if lat is not None and lon is not None:
+                    points.append([lon, lat])
+
+    except Exception as e:
+        logger.error(f"Error parsing TCX file: {e}")
+    return points
+
+@router.get("/activities/{activity_id}/geojson")
+async def get_activity_geojson(activity_id: str, db: Session = Depends(get_db)):
+    """
+    Return GeoJSON LineString for the activity track.
+    """
+    try:
+        activity = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first()
+        if not activity or not activity.file_content:
+            raise HTTPException(status_code=404, detail="Activity or file content not found")
+
+        points = []
+        if activity.file_type == 'fit':
+            points = _extract_points_from_fit(activity.file_content)
+        elif activity.file_type == 'tcx':
+            points = _extract_points_from_tcx(activity.file_content)
+        else:
+            # Unknown file type; the format could be sniffed here instead.
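+            # (Hypothetical improvement: a FIT payload is identifiable by the ".FIT" signature at bytes 8-11 of its header.)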
+            # For now, just log a warning and fall through to an empty result.
+            logger.warning(f"Unsupported file type for map: {activity.file_type}")
+
+        if not points:
+            return {"type": "FeatureCollection", "features": []}
+
+        return {
+            "type": "FeatureCollection",
+            "features": [{
+                "type": "Feature",
+                "properties": {
+                    "color": "red"
+                },
+                "geometry": {
+                    "type": "LineString",
+                    "coordinates": points
+                }
+            }]
+        }
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error generating GeoJSON: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+def _extract_streams_from_fit(file_content: bytes) -> Dict[str, List[Any]]:
+    streams = {
+        "time": [],
+        "heart_rate": [],
+        "power": [],
+        "altitude": [],
+        "speed": [],
+        "cadence": []
+    }
+    try:
+        start_time = None
+        with io.BytesIO(file_content) as f:
+            with fitdecode.FitReader(f) as fit:
+                for frame in fit:
+                    if frame.frame_type == fitdecode.FIT_FRAME_DATA and frame.name == 'record':
+                        timestamp = frame.get_value('timestamp')
+                        if not start_time and timestamp:
+                            start_time = timestamp
+
+                        if timestamp and start_time:
+                            # Relative time in seconds
+                            t = (timestamp - start_time).total_seconds()
+
+                            # Helper to safely get value with fallback
+                            def get_val(frame, keys):
+                                for k in keys:
+                                    if frame.has_field(k):
+                                        return frame.get_value(k)
+                                return None
+
+                            streams["time"].append(t)
+                            streams["heart_rate"].append(get_val(frame, ['heart_rate']))
+                            streams["power"].append(get_val(frame, ['power']))
+                            streams["altitude"].append(get_val(frame, ['enhanced_altitude', 'altitude']))
+                            streams["speed"].append(get_val(frame, ['enhanced_speed', 'speed']))  # m/s (enhanced is also m/s)
+                            streams["cadence"].append(get_val(frame, ['cadence']))
+    except Exception as e:
+        logger.error(f"Error extracting streams from FIT: {e}")
+    return streams
+
+def _extract_summary_from_fit(file_content: bytes) -> Dict[str, Any]:
+    summary = {}
+    try:
+        with io.BytesIO(file_content) as f:
+            with fitdecode.FitReader(f) as fit:
+                for frame in fit:
+                    if frame.frame_type == fitdecode.FIT_FRAME_DATA and frame.name == 'session':
+                        # Prefer enhanced fields
+                        def get(keys):
+                            for k in keys:
+                                if frame.has_field(k): return frame.get_value(k)
+                            return None
+
+                        summary['total_distance'] = get(['total_distance'])
+                        summary['total_timer_time'] = get(['total_timer_time', 'total_elapsed_time'])
+                        summary['total_calories'] = get(['total_calories'])
+                        summary['avg_heart_rate'] = get(['avg_heart_rate'])
+                        summary['max_heart_rate'] = get(['max_heart_rate'])
+                        summary['avg_cadence'] = get(['avg_cadence'])
+                        summary['max_cadence'] = get(['max_cadence'])
+                        summary['avg_power'] = get(['avg_power'])
+                        summary['max_power'] = get(['max_power'])
+                        summary['total_ascent'] = get(['total_ascent'])
+                        summary['total_descent'] = get(['total_descent'])
+                        summary['enhanced_avg_speed'] = get(['enhanced_avg_speed', 'avg_speed'])
+                        summary['enhanced_max_speed'] = get(['enhanced_max_speed', 'max_speed'])
+                        summary['normalized_power'] = get(['normalized_power'])
+                        summary['training_stress_score'] = get(['training_stress_score'])
+                        summary['total_training_effect'] = get(['total_training_effect'])
+                        summary['total_anaerobic_training_effect'] = get(['total_anaerobic_training_effect'])
+
+                        # Stop after the first session message (usually only one per file, or the first is the summary).
+                        # FIT files can contain multiple sessions (multisport); we take the first for now.
+                        break
+    except Exception as e:
+        logger.error(f"Error extracting summary from FIT: {e}")
+    return summary
+
+def _extract_streams_from_tcx(file_content: bytes) -> Dict[str, List[Any]]:
+    streams = {
+        "time": [],
+        "heart_rate": [],
+        "power": [],
+        "altitude": [],
+        "speed": [],
+        "cadence": []
+    }
+    try:
+        root = ET.fromstring(file_content)
+        # Match tags by suffix to sidestep XML namespace handling
+        start_time = None
+
+        for trkpt in root.iter():
+            if trkpt.tag.endswith('Trackpoint'):
+                timestamp_str = None
+                hr = None
+                pwr = None
+                alt = None
+                cad = None
+                spd = None
+
+                for child in trkpt.iter():
+                    if child.tag.endswith('Time'):
+                        timestamp_str = child.text
+                    elif child.tag.endswith('AltitudeMeters'):
+                        try: alt = float(child.text)
+                        except (TypeError, ValueError): pass
+                    elif child.tag.endswith('HeartRateBpm'):
+                        for val in child:
+                            if val.tag.endswith('Value'):
+                                try: hr = int(val.text)
+                                except (TypeError, ValueError): pass
+                    elif child.tag.endswith('Cadence'):  # Standard TCX cadence
+                        try: cad = int(child.text)
+                        except (TypeError, ValueError): pass
+                    elif child.tag.endswith('Extensions'):
+                        # TPX extensions for speed/power
+                        for ext in child.iter():
+                            if ext.tag.endswith('Speed'):
+                                try: spd = float(ext.text)
+                                except (TypeError, ValueError): pass
+                            elif ext.tag.endswith('Watts'):
+                                try: pwr = int(ext.text)
+                                except (TypeError, ValueError): pass
+
+                if timestamp_str:
+                    try:
+                        # TCX timestamps are ISO 8601 (usually with a trailing 'Z')
+                        ts = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
+                        if not start_time:
+                            start_time = ts
+
+                        streams["time"].append((ts - start_time).total_seconds())
+                        streams["heart_rate"].append(hr)
+                        streams["power"].append(pwr)
+                        streams["altitude"].append(alt)
+                        streams["speed"].append(spd)
+                        streams["cadence"].append(cad)
+                    except ValueError: pass
+
+    except Exception as e:
+        logger.error(f"Error extracting streams from TCX: {e}")
+    return streams
+
+
+@router.get("/activities/{activity_id}/streams")
+async def get_activity_streams(activity_id: str, db: Session = Depends(get_db)):
+    """
+    Return time series data for charts.
+    """
+    try:
+        activity = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first()
+        if not activity or not activity.file_content:
+            raise HTTPException(status_code=404, detail="Activity or file content not found")
+
+        streams = {}
+        if activity.file_type == 'fit':
+            streams = _extract_streams_from_fit(activity.file_content)
+        elif activity.file_type == 'tcx':
+            streams = _extract_streams_from_tcx(activity.file_content)
+        else:
+            logger.warning(f"Unsupported file type for streams: {activity.file_type}")
+
+        return streams
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting streams: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+@router.get("/activities/{activity_id}/navigation")
+async def get_activity_navigation(activity_id: str, db: Session = Depends(get_db)):
+    """
+    Return next/prev activity IDs.
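+    Includes both global neighbors and neighbors of the same activity type.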
+ """ + try: + current = db.query(Activity).filter(Activity.garmin_activity_id == activity_id).first() + if not current: + raise HTTPException(status_code=404, detail="Activity not found") + + # Global Prev (Older) + prev_act = ( + db.query(Activity) + .filter(Activity.start_time < current.start_time) + .order_by(Activity.start_time.desc()) + .first() + ) + + # Global Next (Newer) + next_act = ( + db.query(Activity) + .filter(Activity.start_time > current.start_time) + .order_by(Activity.start_time.asc()) + .first() + ) + + # Same Type Prev + prev_type_act = ( + db.query(Activity) + .filter(Activity.start_time < current.start_time) + .filter(Activity.activity_type == current.activity_type) + .order_by(Activity.start_time.desc()) + .first() + ) + + # Same Type Next + next_type_act = ( + db.query(Activity) + .filter(Activity.start_time > current.start_time) + .filter(Activity.activity_type == current.activity_type) + .order_by(Activity.start_time.asc()) + .first() + ) + + return { + "prev_id": prev_act.garmin_activity_id if prev_act else None, + "next_id": next_act.garmin_activity_id if next_act else None, + "prev_type_id": prev_type_act.garmin_activity_id if prev_type_act else None, + "next_type_id": next_type_act.garmin_activity_id if next_type_act else None + } + + except Exception as e: + logger.error(f"Error getting navigation: {e}") + raise HTTPException(status_code=500, detail=str(e)) \ No newline at end of file diff --git a/FitnessSync/backend/src/api/setup.py b/FitnessSync/backend/src/api/auth.py similarity index 51% rename from FitnessSync/backend/src/api/setup.py rename to FitnessSync/backend/src/api/auth.py index 2ec0b13..811b7cd 100644 --- a/FitnessSync/backend/src/api/setup.py +++ b/FitnessSync/backend/src/api/auth.py @@ -5,14 +5,17 @@ from typing import Optional from sqlalchemy.orm import Session import logging import traceback -import requests -import base64 +import json +from datetime import datetime, timedelta from ..services.garmin.client import GarminClient from ..services.fitbit_client import FitbitClient from ..services.postgresql_manager import PostgreSQLManager from ..utils.config import config +from ..models.api_token import APIToken +from ..models.config import Configuration from garth.exc import GarthException +import garth router = APIRouter() logger = logging.getLogger(__name__) @@ -39,11 +42,6 @@ class FitbitCallback(BaseModel): class GarminMFARequest(BaseModel): verification_code: str -from datetime import datetime, timedelta -from ..models.api_token import APIToken -from ..models.config import Configuration -import json - class GarminAuthStatus(BaseModel): token_stored: bool authenticated: bool @@ -80,8 +78,8 @@ def get_auth_status(db: Session = Depends(get_db)): authenticated=has_oauth1 and has_oauth2, garth_oauth1_token_exists=has_oauth1, garth_oauth2_token_exists=has_oauth2, - mfa_state_exists=False, # We don't store persistent MFA state in DB other than tokens - last_used=garmin_token.expires_at, # Using expires_at as proxy or null + mfa_state_exists=False, + last_used=garmin_token.expires_at, updated_at=garmin_token.updated_at ) else: @@ -96,7 +94,7 @@ def get_auth_status(db: Session = Depends(get_db)): if fitbit_token: response.fitbit = FitbitAuthStatus( authenticated=True, - client_id="Stored", # We don't store client_id in APIToken explicitly but could parse from file if needed + client_id="Stored", token_expires_at=fitbit_token.expires_at, last_login=fitbit_token.updated_at ) @@ -119,7 +117,6 @@ def clear_garmin_credentials(db: Session = 
Depends(get_db)): @router.post("/setup/garmin") def save_garmin_credentials(credentials: GarminCredentials, db: Session = Depends(get_db)): - # Re-acquire logger to ensure correct config after startup logger = logging.getLogger(__name__) logger.info(f"Received Garmin credentials for user: {credentials.username}") @@ -129,7 +126,7 @@ def save_garmin_credentials(credentials: GarminCredentials, db: Session = Depend status = garmin_client.login(db) if status == "mfa_required": - return JSONResponse(status_code=202, content={"status": "mfa_required", "message": "MFA code required.", "session_id": "session"}) # Added dummy session_id for frontend compat + return JSONResponse(status_code=202, content={"status": "mfa_required", "message": "MFA code required.", "session_id": "session"}) elif status == "error": logger.error("Garmin login returned 'error' status.") raise HTTPException(status_code=401, detail="Login failed. Check username/password.") @@ -146,11 +143,6 @@ def complete_garmin_mfa(mfa_request: GarminMFARequest, db: Session = Depends(get logger.info(f"Received MFA verification code: {'*' * len(mfa_request.verification_code)}") try: - # We need to reuse the client that was just used for login. - # In a real clustered app this would need shared state (Redis). - # For this single-instance app, we rely on Global Garth state or re-instantiation logic. - # But wait, handle_mfa logic in auth.py was loading from file/global. - # Let's ensure we are instantiating correctly. garmin_client = GarminClient() success = garmin_client.handle_mfa(db, mfa_request.verification_code) @@ -161,8 +153,6 @@ def complete_garmin_mfa(mfa_request: GarminMFARequest, db: Session = Depends(get except Exception as e: logger.error(f"MFA verification failed with exception: {e}", exc_info=True) - print("DEBUG: MFA verification failed. Traceback below:", flush=True) - traceback.print_exc() raise HTTPException(status_code=500, detail=f"MFA verification failed: {str(e)}") @router.post("/setup/garmin/test-token") @@ -177,180 +167,43 @@ def test_garmin_token(db: Session = Depends(get_db)): logger.warning("Test Token: No 'garmin' token record found in database.") return JSONResponse(status_code=400, content={"status": "error", "message": "No valid tokens found. Please login first."}) - logger.debug(f"Test Token: Token record found. ID: {token.id}, Updated: {token.updated_at}") - if not token.garth_oauth1_token: logger.warning("Test Token: garth_oauth1_token is empty or None.") return JSONResponse(status_code=400, content={"status": "error", "message": "No valid tokens found. Please login first."}) - logger.debug(f"Test Token: OAuth1 Token length: {len(token.garth_oauth1_token)}") - logger.debug(f"Test Token: OAuth2 Token length: {len(token.garth_oauth2_token) if token.garth_oauth2_token else 'None'}") - - import garth - # Manually load tokens into garth global state try: oauth1_data = json.loads(token.garth_oauth1_token) if token.garth_oauth1_token else None oauth2_data = json.loads(token.garth_oauth2_token) if token.garth_oauth2_token else None - if not isinstance(oauth1_data, dict) or not isinstance(oauth2_data, dict): - logger.error(f"Test Token: Parsed tokens are not dictionaries. OAuth1: {type(oauth1_data)}, OAuth2: {type(oauth2_data)}") - return JSONResponse(status_code=500, content={"status": "error", "message": "Stored tokens are invalid (not dictionaries)."}) - - logger.debug(f"Test Token: Parsed tokens. 
OAuth1 keys: {list(oauth1_data.keys())}, OAuth2 keys: {list(oauth2_data.keys())}") - - # Instantiate objects using the garth classes from garth.auth_tokens import OAuth1Token, OAuth2Token garth.client.oauth1_token = OAuth1Token(**oauth1_data) garth.client.oauth2_token = OAuth2Token(**oauth2_data) - logger.debug("Test Token: Tokens loaded into garth.client.") - except json.JSONDecodeError as e: - logger.error(f"Test Token: Failed to decode JSON tokens: {e}") - return JSONResponse(status_code=500, content={"status": "error", "message": "Stored tokens are corrupted."}) + except Exception as e: + logger.error(f"Test Token: Failed to decode/load tokens: {e}") + return JSONResponse(status_code=500, content={"status": "error", "message": "Stored tokens are invalid."}) - # Now test connection try: - logger.debug(f"Test Token: garth.client type: {type(garth.client)}") - logger.debug("Test Token: Attempting to fetch UserProfile...") - - # Using direct connectapi call as it was proven to work in debug script - # and avoids potential issues with UserProfile.get default args in this context profile = garth.client.connectapi("/userprofile-service/socialProfile") - - # success = True display_name = profile.get('fullName') or profile.get('displayName') logger.info(f"Test Token: Success! Connected as {display_name}") return {"status": "success", "message": f"Token valid! Connected as: {display_name}"} except GarthException as e: - logger.warning(f"Test Token: GarthException during profile fetch: {e}") + logger.warning(f"Test Token: GarthException: {e}") return JSONResponse(status_code=401, content={"status": "error", "message": "Token expired or invalid."}) except Exception as e: - # Capture missing token errors that might be wrapped - logger.warning(f"Test Token: Exception during profile fetch: {e}") + logger.warning(f"Test Token: Exception: {e}") if "OAuth1 token is required" in str(e): - return JSONResponse(status_code=400, content={"status": "error", "message": "No valid tokens found. 
Please login first."}) + return JSONResponse(status_code=400, content={"status": "error", "message": "No valid tokens found."}) return JSONResponse(status_code=500, content={"status": "error", "message": f"Connection test failed: {str(e)}"}) except Exception as e: logger.error(f"Test token failed with unexpected error: {e}", exc_info=True) return JSONResponse(status_code=500, content={"status": "error", "message": str(e)}) -@router.post("/setup/load-consul-config") -def load_consul_config(db: Session = Depends(get_db)): - logger = logging.getLogger(__name__) - logger.info("Attempting to load configuration from Consul...") - try: - # User defined Consul URL - consul_host = "consul.service.dc1.consul" - consul_port = "8500" - app_prefix = "fitbit-garmin-sync/" - consul_url = f"http://{consul_host}:{consul_port}/v1/kv/{app_prefix}?recurse=true" - - logger.debug(f"Connecting to Consul at: {consul_url}") - - response = requests.get(consul_url, timeout=5) - if response.status_code == 404: - logger.warning(f"No configuration found in Consul under '{app_prefix}'") - raise HTTPException(status_code=404, detail="No configuration found in Consul") - response.raise_for_status() - - data = response.json() - - config_map = {} - - # Helper to decode Consul values - def decode_consul_value(val): - if not val: return None - try: - return base64.b64decode(val).decode('utf-8') - except Exception as e: - logger.warning(f"Failed to decode value: {e}") - return None - - # Pass 1: Load all raw keys - for item in data: - key = item['Key'].replace(app_prefix, '') - value = decode_consul_value(item.get('Value')) - if value: - config_map[key] = value - - # Pass 2: Check for special 'config' key (JSON blob) - # The user URL ended in /config/edit, suggesting a single config file pattern - if 'config' in config_map: - try: - json_config = json.loads(config_map['config']) - logger.debug("Found 'config' key with JSON content, merging...") - # Merge JSON config, preferring explicit keys if collision (or vice versa? 
Let's say JSON overrides) - config_map.update(json_config) - except json.JSONDecodeError: - logger.warning("'config' key found but is not valid JSON, ignoring as blob.") - - logger.debug(f"Resolved configuration keys: {list(config_map.keys())}") - - # Look for standard keys - username = config_map.get('garmin_username') or config_map.get('USERNAME') - password = config_map.get('garmin_password') or config_map.get('PASSWORD') - is_china = str(config_map.get('is_china', 'false')).lower() == 'true' - - # If missing, try nested 'garmin' object (common in config.json structure) - if not username and isinstance(config_map.get('garmin'), dict): - logger.debug("Found nested 'garmin' config object.") - garmin_conf = config_map['garmin'] - username = garmin_conf.get('username') - password = garmin_conf.get('password') - if 'is_china' in garmin_conf: - is_china = str(garmin_conf.get('is_china')).lower() == 'true' - - if not username or not password: - logger.error("Consul config resolved but missing 'garmin_username' or 'garmin_password'") - raise HTTPException(status_code=400, detail="Consul config missing credentials") - - # Extract Fitbit credentials - fitbit_client_id = config_map.get('fitbit_client_id') - fitbit_client_secret = config_map.get('fitbit_client_secret') - fitbit_redirect_uri = config_map.get('fitbit_redirect_uri') - - if isinstance(config_map.get('fitbit'), dict): - logger.debug("Found nested 'fitbit' config object.") - fitbit_conf = config_map['fitbit'] - fitbit_client_id = fitbit_conf.get('client_id') - fitbit_client_secret = fitbit_conf.get('client_secret') - - logger.info("Consul config loaded successfully. Returning to frontend.") - - return { - "status": "success", - "message": "Configuration loaded from Consul", - "garmin": { - "username": username, - "password": password, - "is_china": is_china - }, - "fitbit": { - "client_id": fitbit_client_id, - "client_secret": fitbit_client_secret, - "redirect_uri": fitbit_redirect_uri - } - } - - except requests.exceptions.RequestException as e: - logger.error(f"Failed to connect to Consul: {e}") - raise HTTPException(status_code=502, detail=f"Failed to connect to Consul: {str(e)}") - except HTTPException: - raise - except Exception as e: - logger.error(f"Error loading from Consul: {e}", exc_info=True) - raise HTTPException(status_code=500, detail=f"Internal error loading config: {str(e)}") - @router.post("/setup/fitbit") def save_fitbit_credentials(credentials: FitbitCredentials, db: Session = Depends(get_db)): - """ - Saves Fitbit credentials to the Configuration table and returns the authorization URL. 
- """ logger = logging.getLogger(__name__) - logger.info("Received Fitbit credentials to save.") - try: - # Check if config exists config_entry = db.query(Configuration).first() if not config_entry: config_entry = Configuration() @@ -361,54 +214,27 @@ def save_fitbit_credentials(credentials: FitbitCredentials, db: Session = Depend config_entry.fitbit_redirect_uri = credentials.redirect_uri db.commit() - # Generate Auth URL - redirect_uri = credentials.redirect_uri - if not redirect_uri: - redirect_uri = None - + redirect_uri = credentials.redirect_uri or None fitbit_client = FitbitClient(credentials.client_id, credentials.client_secret, redirect_uri=redirect_uri) - auth_url = fitbit_client.get_authorization_url(redirect_uri) - return { - "status": "success", - "message": "Credentials saved.", - "auth_url": auth_url - } - + return {"status": "success", "message": "Credentials saved.", "auth_url": auth_url} except Exception as e: logger.error(f"Error saving Fitbit credentials: {e}", exc_info=True) raise HTTPException(status_code=500, detail=f"Failed to save credentials: {str(e)}") @router.post("/setup/fitbit/callback") def fitbit_callback(callback_data: FitbitCallback, db: Session = Depends(get_db)): - """ - Exchanges the authorization code for tokens and saves them. - """ logger = logging.getLogger(__name__) - logger.info("Received Fitbit callback code.") - try: - # Retrieve credentials config_entry = db.query(Configuration).first() - - if not config_entry or not config_entry.fitbit_client_id or not config_entry.fitbit_client_secret: - raise HTTPException(status_code=400, detail="Configuration not found or missing Fitbit credentials. Please save them first.") + if not config_entry or not config_entry.fitbit_client_id: + raise HTTPException(status_code=400, detail="Configuration missing Fitbit credentials.") - client_id = config_entry.fitbit_client_id - client_secret = config_entry.fitbit_client_secret - - # Must match the one used in get_authorization_url - redirect_uri = config_entry.fitbit_redirect_uri - if not redirect_uri: - redirect_uri = None - - fitbit_client = FitbitClient(client_id, client_secret, redirect_uri=redirect_uri) - + redirect_uri = config_entry.fitbit_redirect_uri or None + fitbit_client = FitbitClient(config_entry.fitbit_client_id, config_entry.fitbit_client_secret, redirect_uri=redirect_uri) token_data = fitbit_client.exchange_code_for_token(callback_data.code, redirect_uri) - # Save to APIToken - # Check if exists token_entry = db.query(APIToken).filter_by(token_type='fitbit').first() if not token_entry: token_entry = APIToken(token_type='fitbit') @@ -416,62 +242,36 @@ def fitbit_callback(callback_data: FitbitCallback, db: Session = Depends(get_db) token_entry.access_token = token_data.get('access_token') token_entry.refresh_token = token_data.get('refresh_token') + if token_data.get('expires_in'): + token_entry.expires_at = datetime.now() + timedelta(seconds=token_data.get('expires_in')) - # Handle expires_in (seconds) -> expires_at (datetime) - expires_in = token_data.get('expires_in') - if expires_in: - token_entry.expires_at = datetime.now() + timedelta(seconds=expires_in) - - # Save other metadata if available (user_id, scope) - if 'scope' in token_data: - token_entry.scopes = str(token_data['scope']) # JSON or string list - db.commit() - - return { - "status": "success", - "message": "Fitbit authentication successful. 
Tokens saved.", - "user_id": token_data.get('user_id') - } + return {"status": "success", "message": "Fitbit authentication successful.", "user_id": token_data.get('user_id')} except HTTPException: raise except Exception as e: logger.error(f"Error in Fitbit callback: {e}", exc_info=True) - # Often oauth errors are concise, return detail raise HTTPException(status_code=500, detail=f"Authentication failed: {str(e)}") @router.post("/setup/fitbit/test-token") def test_fitbit_token(db: Session = Depends(get_db)): - """Tests if the stored Fitbit token is valid by fetching user profile.""" logger = logging.getLogger(__name__) - logger.info("Received request to test Fitbit token.") - try: - # Retrieve tokens and credentials token = db.query(APIToken).filter_by(token_type='fitbit').first() config_entry = db.query(Configuration).first() if not token or not token.access_token: - return JSONResponse(status_code=400, content={"status": "error", "message": "No Fitbit token found. Please authenticate first."}) + return JSONResponse(status_code=400, content={"status": "error", "message": "No Fitbit token found."}) - if not config_entry or not config_entry.fitbit_client_id or not config_entry.fitbit_client_secret: - return JSONResponse(status_code=400, content={"status": "error", "message": "Fitbit credentials missing."}) - - # Instantiate client with tokens - # Note: fitbit library handles token refresh automatically if refresh_token is provided and valid fitbit_client = FitbitClient( config_entry.fitbit_client_id, config_entry.fitbit_client_secret, access_token=token.access_token, refresh_token=token.refresh_token, - redirect_uri=config_entry.fitbit_redirect_uri # Optional but good practice + redirect_uri=config_entry.fitbit_redirect_uri ) - # Test call - if not fitbit_client.fitbit: - return JSONResponse(status_code=500, content={"status": "error", "message": "Failed to initialize Fitbit client."}) - profile = fitbit_client.fitbit.user_profile_get() user = profile.get('user', {}) display_name = user.get('displayName') or user.get('fullName') @@ -479,13 +279,9 @@ def test_fitbit_token(db: Session = Depends(get_db)): return { "status": "success", "message": f"Token valid! 
Connected as: {display_name}",
-        "user": {
-            "displayName": display_name,
-            "avatar": user.get('avatar')
-        }
+        "user": {"displayName": display_name, "avatar": user.get('avatar')}
     }
     except Exception as e:
         logger.error(f"Test Fitbit token failed: {e}", exc_info=True)
         return JSONResponse(status_code=401, content={"status": "error", "message": f"Token invalid or expired: {str(e)}"})
diff --git a/FitnessSync/backend/src/api/bike_setups.py b/FitnessSync/backend/src/api/bike_setups.py
new file mode 100644
index 0000000..dd880a2
--- /dev/null
+++ b/FitnessSync/backend/src/api/bike_setups.py
@@ -0,0 +1,110 @@
+from fastapi import APIRouter, Depends, HTTPException, status
+from sqlalchemy.orm import Session
+from pydantic import BaseModel
+from typing import List, Optional
+from datetime import datetime
+import logging
+
+from ..models.bike_setup import BikeSetup
+from ..services.postgresql_manager import PostgreSQLManager
+from ..utils.config import config
+
+logger = logging.getLogger(__name__)
+
+# get_db is duplicated here rather than imported from a shared module,
+# to avoid circular imports until the dependency is extracted.
+def get_db():
+    db_manager = PostgreSQLManager(config.DATABASE_URL)
+    with db_manager.get_db_session() as session:
+        yield session
+
+class BikeSetupCreate(BaseModel):
+    frame: str
+    chainring: int
+    rear_cog: int
+    name: Optional[str] = None
+
+class BikeSetupUpdate(BaseModel):
+    frame: Optional[str] = None
+    chainring: Optional[int] = None
+    rear_cog: Optional[int] = None
+    name: Optional[str] = None
+
+class BikeSetupRead(BaseModel):
+    id: int
+    frame: str
+    chainring: int
+    rear_cog: int
+    name: Optional[str] = None
+    created_at: Optional[datetime]
+    updated_at: Optional[datetime]
+
+    class Config:
+        from_attributes = True
+
+router = APIRouter(prefix="/api/bike-setups", tags=["bike-setups"])
+
+@router.get("/", response_model=List[BikeSetupRead])
+def get_bike_setups(db: Session = Depends(get_db)):
+    """List all bike setups."""
+    return db.query(BikeSetup).all()
+
+@router.post("/", response_model=BikeSetupRead, status_code=status.HTTP_201_CREATED)
+def create_bike_setup(setup: BikeSetupCreate, db: Session = Depends(get_db)):
+    """Create a new bike setup."""
+    new_setup = BikeSetup(
+        frame=setup.frame,
+        chainring=setup.chainring,
+        rear_cog=setup.rear_cog,
+        name=setup.name
+    )
+    db.add(new_setup)
+    db.commit()
+    db.refresh(new_setup)
+    return new_setup
+
+@router.get("/{setup_id}", response_model=BikeSetupRead)
+def get_bike_setup(setup_id: int, db: Session = Depends(get_db)):
+    """Get a specific bike setup."""
+    setup = db.query(BikeSetup).filter(BikeSetup.id == setup_id).first()
+    if not setup:
+        raise HTTPException(status_code=404, detail="Bike setup not found")
+    return setup
+
+@router.put("/{setup_id}", response_model=BikeSetupRead)
+def update_bike_setup(setup_id: int, setup_data: BikeSetupUpdate, db: Session = Depends(get_db)):
+    """Update a bike setup."""
+    setup = db.query(BikeSetup).filter(BikeSetup.id == setup_id).first()
+    if not setup:
+        raise HTTPException(status_code=404, detail="Bike setup not found")
+
+    if setup_data.frame is not None:
+        setup.frame = setup_data.frame
+    if setup_data.chainring is not None:
+        setup.chainring = setup_data.chainring
+    if setup_data.rear_cog is not None:
+        setup.rear_cog = setup_data.rear_cog
+    if setup_data.name is not None:
+        setup.name =
setup_data.name + + db.commit() + db.refresh(setup) + return setup + +@router.delete("/{setup_id}", status_code=status.HTTP_204_NO_CONTENT) +def delete_bike_setup(setup_id: int, db: Session = Depends(get_db)): + """Delete a bike setup.""" + setup = db.query(BikeSetup).filter(BikeSetup.id == setup_id).first() + if not setup: + raise HTTPException(status_code=404, detail="Bike setup not found") + + db.delete(setup) + db.commit() + +@router.post("/match-all", status_code=status.HTTP_200_OK) +def trigger_matching(db: Session = Depends(get_db)): + """Trigger bike matching for all applicable activities.""" + from ..services.bike_matching import run_matching_for_all + run_matching_for_all(db) + return {"status": "success", "message": "Matching process completed."} diff --git a/FitnessSync/backend/src/api/config_routes.py b/FitnessSync/backend/src/api/config_routes.py new file mode 100644 index 0000000..faa18e2 --- /dev/null +++ b/FitnessSync/backend/src/api/config_routes.py @@ -0,0 +1,121 @@ +from fastapi import APIRouter, Depends, HTTPException +from sqlalchemy.orm import Session +import logging +import requests +import base64 +import json + +from ..services.postgresql_manager import PostgreSQLManager +from ..utils.config import config + +router = APIRouter() +logger = logging.getLogger(__name__) + +def get_db(): + db_manager = PostgreSQLManager(config.DATABASE_URL) + with db_manager.get_db_session() as session: + yield session + +@router.post("/setup/load-consul-config") +def load_consul_config(db: Session = Depends(get_db)): + logger = logging.getLogger(__name__) + logger.info("Attempting to load configuration from Consul...") + try: + # User defined Consul URL + consul_host = "consul.service.dc1.consul" + consul_port = "8500" + app_prefix = "fitbit-garmin-sync/" + consul_url = f"http://{consul_host}:{consul_port}/v1/kv/{app_prefix}?recurse=true" + + logger.debug(f"Connecting to Consul at: {consul_url}") + + response = requests.get(consul_url, timeout=5) + if response.status_code == 404: + logger.warning(f"No configuration found in Consul under '{app_prefix}'") + raise HTTPException(status_code=404, detail="No configuration found in Consul") + response.raise_for_status() + + data = response.json() + + config_map = {} + + # Helper to decode Consul values + def decode_consul_value(val): + if not val: return None + try: + return base64.b64decode(val).decode('utf-8') + except Exception as e: + logger.warning(f"Failed to decode value: {e}") + return None + + # Pass 1: Load all raw keys + for item in data: + key = item['Key'].replace(app_prefix, '') + value = decode_consul_value(item.get('Value')) + if value: + config_map[key] = value + + # Pass 2: Check for special 'config' key (JSON blob) + if 'config' in config_map: + try: + json_config = json.loads(config_map['config']) + logger.debug("Found 'config' key with JSON content, merging...") + config_map.update(json_config) + except json.JSONDecodeError: + logger.warning("'config' key found but is not valid JSON, ignoring as blob.") + + logger.debug(f"Resolved configuration keys: {list(config_map.keys())}") + + # Look for standard keys + username = config_map.get('garmin_username') or config_map.get('USERNAME') + password = config_map.get('garmin_password') or config_map.get('PASSWORD') + is_china = str(config_map.get('is_china', 'false')).lower() == 'true' + + if not username and isinstance(config_map.get('garmin'), dict): + logger.debug("Found nested 'garmin' config object.") + garmin_conf = config_map['garmin'] + username = 
garmin_conf.get('username') + password = garmin_conf.get('password') + if 'is_china' in garmin_conf: + is_china = str(garmin_conf.get('is_china')).lower() == 'true' + + if not username or not password: + logger.error("Consul config resolved but missing 'garmin_username' or 'garmin_password'") + raise HTTPException(status_code=400, detail="Consul config missing credentials") + + # Extract Fitbit credentials + fitbit_client_id = config_map.get('fitbit_client_id') + fitbit_client_secret = config_map.get('fitbit_client_secret') + fitbit_redirect_uri = config_map.get('fitbit_redirect_uri') + + if isinstance(config_map.get('fitbit'), dict): + logger.debug("Found nested 'fitbit' config object.") + fitbit_conf = config_map['fitbit'] + fitbit_client_id = fitbit_conf.get('client_id') + fitbit_client_secret = fitbit_conf.get('client_secret') + + logger.info("Consul config loaded successfully. Returning to frontend.") + + return { + "status": "success", + "message": "Configuration loaded from Consul", + "garmin": { + "username": username, + "password": password, + "is_china": is_china + }, + "fitbit": { + "client_id": fitbit_client_id, + "client_secret": fitbit_client_secret, + "redirect_uri": fitbit_redirect_uri + } + } + + except requests.exceptions.RequestException as e: + logger.error(f"Failed to connect to Consul: {e}") + raise HTTPException(status_code=502, detail=f"Failed to connect to Consul: {str(e)}") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error loading from Consul: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Internal error loading config: {str(e)}") diff --git a/FitnessSync/backend/src/api/metrics.py b/FitnessSync/backend/src/api/metrics.py index 856e735..80ad212 100644 --- a/FitnessSync/backend/src/api/metrics.py +++ b/FitnessSync/backend/src/api/metrics.py @@ -1,9 +1,11 @@ -from fastapi import APIRouter, Query, HTTPException, Depends +from fastapi import APIRouter, Query, HTTPException, Depends, BackgroundTasks from pydantic import BaseModel from typing import List, Optional, Dict, Any from sqlalchemy import func from ..models.health_metric import HealthMetric +from ..models.weight_record import WeightRecord import logging +import json from ..services.postgresql_manager import PostgreSQLManager from sqlalchemy.orm import Session from ..utils.config import config @@ -79,21 +81,62 @@ async def query_metrics( metric_type: Optional[str] = Query(None), start_date: Optional[str] = Query(None), end_date: Optional[str] = Query(None), - limit: int = Query(100, ge=1, le=1000), + source: Optional[str] = Query(None), + limit: int = Query(100, ge=1, le=10000), db: Session = Depends(get_db) ): """ Query health metrics with filters. 
""" try: - logger.info(f"Querying metrics - type: {metric_type}, start: {start_date}, end: {end_date}, limit: {limit}") + logger.info(f"Querying metrics - type: {metric_type}, source: {source}, start: {start_date}, end: {end_date}, limit: {limit}") - # Start building the query + # Special handling for Fitbit Weight queries -> Use WeightRecord table + if source == 'fitbit' and metric_type == 'weight': + query = db.query(WeightRecord) + + if start_date: + from datetime import datetime + start_dt = datetime.fromisoformat(start_date) + query = query.filter(WeightRecord.date >= start_dt) + + if end_date: + from datetime import datetime + end_dt = datetime.fromisoformat(end_date) + query = query.filter(WeightRecord.date <= end_dt) + + query = query.order_by(WeightRecord.date.desc()) + query = query.limit(limit) + + weight_records = query.all() + + metric_responses = [] + for wr in weight_records: + metric_responses.append( + HealthMetricResponse( + id=wr.id, + metric_type='weight', + metric_value=wr.weight, + unit=wr.unit, + timestamp=wr.timestamp.isoformat() if wr.timestamp else "", + date=wr.date.isoformat() if wr.date else "", + source='fitbit', + detailed_data={'fitbit_id': wr.fitbit_id, 'bmi': wr.bmi} + ) + ) + + logger.info(f"Returning {len(metric_responses)} Fitbit weight records from WeightRecord table") + return metric_responses + + # Default: Start building the query on HealthMetric query = db.query(HealthMetric) # Apply filters based on parameters if metric_type: query = query.filter(HealthMetric.metric_type == metric_type) + + if source: + query = query.filter(HealthMetric.source == source) if start_date: from datetime import datetime @@ -105,6 +148,9 @@ async def query_metrics( end_dt = datetime.fromisoformat(end_date) query = query.filter(HealthMetric.date <= end_dt.date()) + # Sort by Date Descending + query = query.order_by(HealthMetric.date.desc()) + # Apply limit query = query.limit(limit) @@ -123,7 +169,7 @@ async def query_metrics( timestamp=metric.timestamp.isoformat() if metric.timestamp else "", date=metric.date.isoformat() if metric.date else "", source=metric.source, - detailed_data=metric.detailed_data + detailed_data=json.loads(metric.detailed_data) if metric.detailed_data else None ) ) @@ -133,6 +179,24 @@ async def query_metrics( logger.error(f"Error in query_metrics: {str(e)}") raise HTTPException(status_code=500, detail=f"Error querying metrics: {str(e)}") + +# run_fitbit_sync_job moved to tasks.definitions + + +# ... 
+ +@router.post("/metrics/sync/fitbit") +async def sync_fitbit_trigger( + background_tasks: BackgroundTasks, + days_back: int = Query(30, description="Number of days to sync back") +): + """Trigger background sync of Fitbit metrics""" + job_id = job_manager.create_job("sync_fitbit_metrics") + + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_fitbit_sync_job, job_id, days_back, db_manager.get_db_session) + return {"job_id": job_id, "status": "started"} + @router.get("/health-data/summary", response_model=HealthDataSummary) async def get_health_summary( start_date: Optional[str] = Query(None), @@ -220,9 +284,57 @@ async def get_health_summary( total_sleep_hours=round(total_sleep_hours, 2), avg_calories=round(avg_calories, 2) ) - logger.info(f"Returning health summary: steps={total_steps}, avg_hr={avg_heart_rate}, sleep_hours={total_sleep_hours}, avg_calories={avg_calories}") return summary except Exception as e: logger.error(f"Error in get_health_summary: {str(e)}") - raise HTTPException(status_code=500, detail=f"Error getting health summary: {str(e)}") \ No newline at end of file + raise HTTPException(status_code=500, detail=f"Error getting health summary: {str(e)}") + +# New Sync Endpoints + +from ..services.job_manager import job_manager +from ..models.health_state import HealthSyncState +from ..utils.config import config +from ..services.postgresql_manager import PostgreSQLManager +from ..tasks.definitions import run_health_scan_job, run_health_sync_job, run_fitbit_sync_job + +# Removed inline run_health_scan_job and run_health_sync_job + + +# Definitions moved to tasks/definitions.py + + +@router.post("/metrics/sync/scan") +async def scan_health_trigger(background_tasks: BackgroundTasks): + """Trigger background scan of health gaps""" + job_id = job_manager.create_job("scan_health_metrics") + + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_health_scan_job, job_id, db_manager.get_db_session) + return {"job_id": job_id, "status": "started"} + +@router.post("/metrics/sync/pending") +async def sync_pending_health_trigger( + background_tasks: BackgroundTasks, + limit: Optional[int] = Query(None, description="Limit number of days/metrics to sync") +): + """Trigger background sync of pending health metrics""" + job_id = job_manager.create_job("sync_pending_health_metrics") + + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_health_sync_job, job_id, limit, db_manager.get_db_session) + return {"job_id": job_id, "status": "started"} + +@router.get("/metrics/sync/status") +async def get_health_sync_status_summary(db: Session = Depends(get_db)): + """Get counts of health metrics by sync status""" + try: + stats = db.query( + HealthSyncState.sync_status, + func.count(HealthSyncState.id) + ).group_by(HealthSyncState.sync_status).all() + + return {s[0]: s[1] for s in stats} + except Exception as e: + logger.error(f"Error getting health sync status: {e}") + return {} \ No newline at end of file diff --git a/FitnessSync/backend/src/api/scheduling.py b/FitnessSync/backend/src/api/scheduling.py new file mode 100644 index 0000000..0b4c0e8 --- /dev/null +++ b/FitnessSync/backend/src/api/scheduling.py @@ -0,0 +1,131 @@ + +from fastapi import APIRouter, HTTPException, Depends +from pydantic import BaseModel +from sqlalchemy.orm import Session +from typing import List, Optional +from datetime import datetime, timedelta +import json +import logging + +from ..models.scheduled_job import ScheduledJob 
+from ..services.postgresql_manager import PostgreSQLManager
+from ..utils.config import config
+from ..services.scheduler import scheduler
+
+router = APIRouter()
+logger = logging.getLogger(__name__)
+
+def get_db():
+    db_manager = PostgreSQLManager(config.DATABASE_URL)
+    with db_manager.get_db_session() as session:
+        yield session
+
+class ScheduledJobResponse(BaseModel):
+    id: int
+    job_type: str
+    name: str
+    interval_minutes: int
+    enabled: bool
+    last_run: Optional[datetime]
+    next_run: Optional[datetime]
+    params: Optional[str]
+
+    class Config:
+        from_attributes = True
+
+class JobUpdateRequest(BaseModel):
+    interval_minutes: Optional[int] = None
+    enabled: Optional[bool] = None
+    params: Optional[dict] = None
+
+@router.get("/scheduling/jobs", response_model=List[ScheduledJobResponse])
+def list_scheduled_jobs(db: Session = Depends(get_db)):
+    """List all scheduled jobs."""
+    jobs = db.query(ScheduledJob).order_by(ScheduledJob.id).all()
+    return jobs
+
+@router.put("/scheduling/jobs/{job_id}", response_model=ScheduledJobResponse)
+def update_scheduled_job(job_id: int, request: JobUpdateRequest, db: Session = Depends(get_db)):
+    """Update a scheduled job's interval or enabled status."""
+    job = db.query(ScheduledJob).filter(ScheduledJob.id == job_id).first()
+    if not job:
+        raise HTTPException(status_code=404, detail="Job not found")
+
+    if request.interval_minutes is not None:
+        if request.interval_minutes < 1:
+            raise HTTPException(status_code=400, detail="Interval must be at least 1 minute")
+        job.interval_minutes = request.interval_minutes
+
+        # Recalculate next_run from last_run + new interval so a shortened
+        # interval takes effect sooner. If the job has never run, leave
+        # next_run alone: a null next_run on an enabled job is picked up
+        # immediately by the scheduler.
+        if job.last_run:
+            job.next_run = job.last_run + timedelta(minutes=job.interval_minutes)
+
+    if request.enabled is not None:
+        job.enabled = request.enabled
+        if job.enabled and job.next_run is None:
+            # If re-enabling and no next run, set to now
+            job.next_run = datetime.now()
+
+    if request.params is not None:
+        job.params = json.dumps(request.params)
+
+    db.commit()
+    db.refresh(job)
+    return job
+
+class JobCreateRequest(BaseModel):
+    job_type: str
+    name: str
+    interval_minutes: int
+    params: Optional[dict] = {}
+    enabled: Optional[bool] = True
+
+@router.post("/scheduling/jobs", response_model=ScheduledJobResponse)
+def create_scheduled_job(request: JobCreateRequest, db: Session = Depends(get_db)):
+    """Create a new scheduled job."""
+    # Validate job_type against the scheduler's task registry
+    if request.job_type not in scheduler.TASK_MAP:
+        raise HTTPException(status_code=400, detail=f"Invalid job_type. Must be one of: {list(scheduler.TASK_MAP.keys())}")
+
+    new_job = ScheduledJob(
+        job_type=request.job_type,
+        name=request.name,
+        interval_minutes=request.interval_minutes,
+        params=json.dumps(request.params) if request.params else "{}",
+        enabled=request.enabled,
+        next_run=datetime.now() if request.enabled else None
+    )
+
+    try:
+        db.add(new_job)
+        db.commit()
+        db.refresh(new_job)
+        return new_job
+    except Exception as e:
+        db.rollback()
+        logger.error(f"Failed to create job: {e}")
+        # job_type is intentionally not unique: multiple jobs of the same type
+        # with different params (e.g. sync 10 days vs 30 days) are allowed.
+        raise HTTPException(status_code=400, detail=f"Failed to create job: {str(e)}")
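A hedged usage sketch for the create endpoint above. Host and port are assumptions, and 'fitbit_weight_sync' is the example job_type taken from the ScheduledJob model comment; valid values are ultimately the keys of scheduler.TASK_MAP.

import requests

payload = {
    "job_type": "fitbit_weight_sync",  # must be a key of scheduler.TASK_MAP
    "name": "Nightly Fitbit weight sync",
    "interval_minutes": 1440,
    "params": {"days_back": 30},       # persisted as a JSON string in ScheduledJob.params
    "enabled": True,
}
resp = requests.post("http://localhost:8000/scheduling/jobs", json=payload, timeout=10)
print(resp.status_code, resp.json())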
+
+@router.delete("/scheduling/jobs/{job_id}", status_code=204)
+def delete_scheduled_job(job_id: int, db: Session = Depends(get_db)):
+    """Delete a scheduled job."""
+    job = db.query(ScheduledJob).filter(ScheduledJob.id == job_id).first()
+    if not job:
+        raise HTTPException(status_code=404, detail="Job not found")
+
+    db.delete(job)
+    db.commit()
+    return None
diff --git a/FitnessSync/backend/src/api/status.py b/FitnessSync/backend/src/api/status.py
index 236842d..cbc5737 100644
--- a/FitnessSync/backend/src/api/status.py
+++ b/FitnessSync/backend/src/api/status.py
@@ -1,4 +1,4 @@
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, BackgroundTasks, HTTPException
 from pydantic import BaseModel
 from typing import List, Optional, Dict, Any
 from sqlalchemy.orm import Session
@@ -6,6 +6,7 @@
 from ..services.postgresql_manager import PostgreSQLManager
 from ..utils.config import config
 from ..models.activity import Activity
 from ..models.sync_log import SyncLog
+from ..services.job_manager import job_manager
 from datetime import datetime
 import json
@@ -30,11 +31,28 @@ class SyncLogResponse(BaseModel):
     class Config:
         from_attributes = True
+
+class JobStatusResponse(BaseModel):
+    id: str
+    operation: str
+    status: str
+    message: Optional[str] = None
+    start_time: datetime
+    progress: int = 0
+    cancel_requested: bool = False
+    paused: bool = False
+    completed_at: Optional[datetime] = None
+    duration_s: Optional[float] = None
+    result: Optional[Dict[str, Any]] = None
+
 class StatusResponse(BaseModel):
     total_activities: int
     downloaded_activities: int
     recent_logs: List[SyncLogResponse]
     last_sync_stats: Optional[List[Dict[str, Any]]] = None
+    active_jobs: List[Dict[str, Any]] = []
 
 @router.get("/status", response_model=StatusResponse)
 def get_status(db: Session = Depends(get_db)):
@@ -79,5 +97,65 @@
         total_activities=total_activities,
         downloaded_activities=downloaded_activities,
         recent_logs=recent_logs,
-        last_sync_stats=last_sync_stats if last_sync_stats else []
-    )
\ No newline at end of file
+        last_sync_stats=last_sync_stats if last_sync_stats else [],
+        active_jobs=job_manager.get_active_jobs()
+    )
+
+@router.get("/jobs/history", response_model=Dict[str, Any])
+def get_job_history(page: int = 1, limit: int = 10):
+    """Get history of completed jobs with pagination."""
+    if page < 1: page = 1
+    offset = (page - 1) * limit
+    return job_manager.get_job_history(limit=limit, offset=offset)
+
+@router.post("/jobs/{job_id}/pause")
+def pause_job(job_id: str):
+    if job_manager.request_pause(job_id):
+        return {"status": "paused", "message": f"Pause requested for job {job_id}"}
+    raise HTTPException(status_code=404, detail="Job not found or cannot be paused")
+
+@router.post("/jobs/{job_id}/resume")
+def resume_job(job_id: str):
+    if job_manager.resume_job(job_id):
+        return {"status": "resumed", "message": f"Job {job_id} resumed"}
+    raise HTTPException(status_code=404, detail="Job not found or cannot be resumed")
+
+@router.post("/jobs/{job_id}/cancel")
+def cancel_job(job_id: str):
+    if job_manager.request_cancel(job_id):
+        return
{"status": "cancelling", "message": f"Cancellation requested for job {job_id}"} + raise HTTPException(status_code=404, detail="Job not found") + +import time + +def run_test_job(job_id: str): + """Simulate a long running job with pause support.""" + try: + total_steps = 20 + i = 0 + while i < total_steps: + if job_manager.should_cancel(job_id): + job_manager.update_job(job_id, status="cancelled", message="Cancelled by user") + return + + if job_manager.should_pause(job_id): + time.sleep(1) + continue # Skip progress update + + # Normal work + progress = int(((i + 1) / total_steps) * 100) + job_manager.update_job(job_id, status="running", progress=progress, message=f"Processing... {i+1}/{total_steps}") + time.sleep(1) + i += 1 + + job_manager.complete_job(job_id) + except Exception as e: + job_manager.fail_job(job_id, str(e)) + +@router.post("/status/test-job") +def trigger_test_job(background_tasks: BackgroundTasks): + """Trigger a test job for queue verification.""" + job_id = job_manager.create_job("Test Job (5s)") + # Use run_serialized to enforce global lock + background_tasks.add_task(job_manager.run_serialized, job_id, run_test_job) + return {"job_id": job_id, "status": "started", "message": "Test job started"} \ No newline at end of file diff --git a/FitnessSync/backend/src/api/sync.py b/FitnessSync/backend/src/api/sync.py index 7f11030..448364b 100644 --- a/FitnessSync/backend/src/api/sync.py +++ b/FitnessSync/backend/src/api/sync.py @@ -1,4 +1,4 @@ -from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks +from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, Query from pydantic import BaseModel from typing import Optional, List, Dict, Any from datetime import datetime, timedelta @@ -15,6 +15,7 @@ import garth import time from garth.auth_tokens import OAuth1Token, OAuth2Token from ..services.fitbit_client import FitbitClient +from fitbit import exceptions from ..models.weight_record import WeightRecord from ..models.config import Configuration from enum import Enum @@ -28,11 +29,21 @@ class SyncActivityRequest(BaseModel): class SyncMetricsRequest(BaseModel): days_back: int = 30 +class UploadWeightRequest(BaseModel): + limit: int = 50 + class SyncResponse(BaseModel): status: str message: str job_id: Optional[str] = None +class WeightComparisonResponse(BaseModel): + fitbit_total: int + garmin_total: int + missing_in_garmin: int + missing_dates: List[str] + message: str + class FitbitSyncScope(str, Enum): LAST_30_DAYS = "30d" ALL_HISTORY = "all" @@ -53,66 +64,27 @@ def get_db(): with db_manager.get_db_session() as session: yield session -def _load_and_verify_garth_session(db: Session): - """Helper to load token from DB and verify session with Garmin.""" - logger.info("Loading and verifying Garmin session...") - token_record = db.query(APIToken).filter_by(token_type='garmin').first() - if not (token_record and token_record.garth_oauth1_token and token_record.garth_oauth2_token): - raise HTTPException(status_code=401, detail="Garmin token not found.") - - try: - oauth1_dict = json.loads(token_record.garth_oauth1_token) - oauth2_dict = json.loads(token_record.garth_oauth2_token) - - domain = oauth1_dict.get('domain') - if domain: - garth.configure(domain=domain) - - garth.client.oauth1_token = OAuth1Token(**oauth1_dict) - garth.client.oauth2_token = OAuth2Token(**oauth2_dict) - - garth.UserProfile.get() - logger.info("Garth session verified.") - except Exception as e: - logger.error(f"Garth session verification failed: {e}", exc_info=True) - raise 
HTTPException(status_code=401, detail=f"Failed to authenticate with Garmin: {e}") - -def run_activity_sync_task(job_id: str, days_back: int): - logger.info(f"Starting background activity sync task {job_id}") - db_manager = PostgreSQLManager(config.DATABASE_URL) - with db_manager.get_db_session() as session: - try: - _load_and_verify_garth_session(session) - garmin_client = GarminClient() - sync_app = SyncApp(db_session=session, garmin_client=garmin_client) - sync_app.sync_activities(days_back=days_back, job_id=job_id) - except Exception as e: - logger.error(f"Background task failed: {e}") - job_manager.update_job(job_id, status="failed", message=str(e)) - -def run_metrics_sync_task(job_id: str, days_back: int): - logger.info(f"Starting background metrics sync task {job_id}") - db_manager = PostgreSQLManager(config.DATABASE_URL) - with db_manager.get_db_session() as session: - try: - _load_and_verify_garth_session(session) - garmin_client = GarminClient() - sync_app = SyncApp(db_session=session, garmin_client=garmin_client) - sync_app.sync_health_metrics(days_back=days_back, job_id=job_id) - except Exception as e: - logger.error(f"Background task failed: {e}") - job_manager.update_job(job_id, status="failed", message=str(e)) +from ..services.garth_helper import load_and_verify_garth_session +from ..tasks.definitions import ( + run_activity_sync_task, + run_metrics_sync_task, + run_health_scan_job, + run_fitbit_sync_job, + run_garmin_upload_job, + run_health_sync_job +) @router.post("/sync/activities", response_model=SyncResponse) def sync_activities(request: SyncActivityRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)): # Verify auth first before starting task try: - _load_and_verify_garth_session(db) + load_and_verify_garth_session(db) except Exception as e: raise HTTPException(status_code=401, detail=f"Garmin auth failed: {str(e)}") job_id = job_manager.create_job("Activity Sync") - background_tasks.add_task(run_activity_sync_task, job_id, request.days_back) + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_activity_sync_task, job_id, request.days_back, db_manager.get_db_session) return SyncResponse( status="started", @@ -123,12 +95,13 @@ def sync_activities(request: SyncActivityRequest, background_tasks: BackgroundTa @router.post("/sync/metrics", response_model=SyncResponse) def sync_metrics(request: SyncMetricsRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)): try: - _load_and_verify_garth_session(db) + load_and_verify_garth_session(db) except Exception as e: raise HTTPException(status_code=401, detail=f"Garmin auth failed: {str(e)}") job_id = job_manager.create_job("Health Metrics Sync") - background_tasks.add_task(run_metrics_sync_task, job_id, request.days_back) + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_metrics_sync_task, job_id, request.days_back, db_manager.get_db_session) return SyncResponse( status="started", @@ -136,6 +109,22 @@ def sync_metrics(request: SyncMetricsRequest, background_tasks: BackgroundTasks, job_id=job_id ) +@router.post("/metrics/sync/scan", response_model=SyncResponse) +async def scan_health_trigger( + background_tasks: BackgroundTasks, + days_back: int = Query(30, description="Number of days to scan back") +): + """Trigger background scan of health gaps""" + job_id = job_manager.create_job("scan_health_metrics") + + db_manager = PostgreSQLManager(config.DATABASE_URL) + background_tasks.add_task(run_health_scan_job, job_id, 
days_back, db_manager.get_db_session)
+    return SyncResponse(
+        status="started",
+        message="Health metrics scan started in background",
+        job_id=job_id
+    )
+
 @router.post("/sync/fitbit/weight", response_model=SyncResponse)
 def sync_fitbit_weight(request: WeightSyncRequest, db: Session = Depends(get_db)):
     # Keep functionality for now, ideally also background
@@ -161,13 +150,37 @@ def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session):
         raise HTTPException(status_code=400, detail="Fitbit credentials missing.")
 
     # 2. Init Client
+    # Callback invoked by the fitbit library whenever it refreshes the OAuth2
+    # token; persists the new values on the APIToken row ('token') loaded
+    # above. The token dict carries access_token, refresh_token and expires_at.
+    def refresh_cb(token_dict):
+        logger.info("Fitbit token refreshed via callback")
+        try:
+            token.access_token = token_dict.get('access_token')
+            token.refresh_token = token_dict.get('refresh_token')
+            token.expires_at = datetime.fromtimestamp(token_dict.get('expires_at')) if token_dict.get('expires_at') else None
+
+            db.commit()
+            logger.info("New Fitbit token saved to DB")
+        except Exception as e:
+            logger.error(f"Failed to save refreshed token: {e}")
+
     try:
         fitbit_client = FitbitClient(
             config_entry.fitbit_client_id,
             config_entry.fitbit_client_secret,
             access_token=token.access_token,
             refresh_token=token.refresh_token,
-            redirect_uri=config_entry.fitbit_redirect_uri
+            redirect_uri=config_entry.fitbit_redirect_uri,
+            refresh_cb=refresh_cb
         )
     except Exception as e:
         logger.error(f"Failed to initialize Fitbit client: {e}")
@@ -245,6 +258,7 @@ def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session):
             # Structure: {'bmi': 23.5, 'date': '2023-01-01', 'logId': 12345, 'time': '23:59:59', 'weight': 70.5, 'source': 'API'}
             fitbit_id = str(log.get('logId'))
             weight_val = log.get('weight')
+            bmi_val = log.get('bmi')
             date_str = log.get('date')
             time_str = log.get('time')
@@ -252,11 +266,15 @@ def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session):
             dt_str = f"{date_str} {time_str}"
             timestamp = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')
 
             # Check exist
             existing = db.query(WeightRecord).filter_by(fitbit_id=fitbit_id).first()
             if existing:
-                if abs(existing.weight - weight_val) > 0.01: # Check for update
+                # Check for update (weight changed or BMI missing)
+                if abs(existing.weight - weight_val) > 0.01 or existing.bmi is None:
                     existing.weight = weight_val
+                    existing.bmi = bmi_val
+                    existing.unit = 'kg' # Force unit update too
                     existing.date = timestamp
                     existing.timestamp = timestamp
                     existing.sync_status = 'unsynced' # Mark for Garmin sync if we implement that direction
@@ -265,6 +283,7 @@ def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session):
                 new_record = WeightRecord(
                     fitbit_id=fitbit_id,
                     weight=weight_val,
+                    bmi=bmi_val,
                     unit='kg',
                     date=timestamp,
                     timestamp=timestamp,
@@ -291,11 +310,7 @@ def sync_fitbit_weight_impl(request: WeightSyncRequest, db: Session):
         job_id=f"fitbit-weight-sync-{datetime.now().strftime('%Y%m%d%H%M%S')}"
     )
 
-class WeightComparisonResponse(BaseModel):
-    fitbit_total: int
-    garmin_total: int
-    missing_in_garmin: int
-    message: str
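The refresh_cb wiring above relies on python-fitbit's token-refresh hook, which the FitbitClient wrapper appears to forward: the library calls the callback with the full OAuth2 token dict whenever it refreshes. A standalone sketch of the same pattern; the session and row plumbing are placeholders:

from datetime import datetime

def make_refresh_cb(db_session, token_row):
    """Build a callback that persists refreshed tokens onto an APIToken row."""
    def refresh_cb(token_dict):
        # python-fitbit passes access_token, refresh_token and expires_at (epoch seconds)
        token_row.access_token = token_dict.get("access_token")
        token_row.refresh_token = token_dict.get("refresh_token")
        if token_dict.get("expires_at"):
            token_row.expires_at = datetime.fromtimestamp(token_dict["expires_at"])
        db_session.commit()
    return refresh_cb

# fitbit_client = FitbitClient(client_id, client_secret,
#                              access_token=row.access_token,
#                              refresh_token=row.refresh_token,
#                              refresh_cb=make_refresh_cb(session, row))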
+
 @router.post("/sync/compare-weight", response_model=WeightComparisonResponse)
 def compare_weight_records(db: Session = Depends(get_db)):
@@ -318,15 +333,24 @@ def compare_weight_records(db: Session = Depends(get_db)):
     garmin_date_set = {d[0].date() for d in garmin_dates if d[0]}
 
     # 3. Compare
-    missing_dates = fitbit_date_set - garmin_date_set
+    missing_dates_set = fitbit_date_set - garmin_date_set
+    missing_dates_list = sorted([d.isoformat() for d in missing_dates_set], reverse=True)
 
     return WeightComparisonResponse(
         fitbit_total=len(fitbit_date_set),
         garmin_total=len(garmin_date_set),
-        missing_in_garmin=len(missing_dates),
-        message=f"Comparison Complete. Fitbit has {len(fitbit_date_set)} unique days, Garmin has {len(garmin_date_set)}. {len(missing_dates)} days from Fitbit are missing in Garmin."
+        missing_in_garmin=len(missing_dates_set),
+        missing_dates=missing_dates_list,
+        message=f"Comparison Complete. Fitbit has {len(fitbit_date_set)} unique days, Garmin has {len(garmin_date_set)}. {len(missing_dates_set)} days from Fitbit are missing in Garmin."
     )
 
+@router.post("/sync/upload-weight")
+def upload_weight_to_garmin(request: UploadWeightRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
+    """Upload pending weight records to Garmin as a background job."""
+    limit = request.limit
+    job_id = job_manager.create_job("garmin_weight_upload")
+
+    db_manager = PostgreSQLManager(config.DATABASE_URL)
+    background_tasks.add_task(run_garmin_upload_job, job_id, limit, db_manager.get_db_session)
+    return {"job_id": job_id, "status": "started"}
+
 @router.get("/jobs/active", response_model=List[JobStatusResponse])
 def get_active_jobs():
     return job_manager.get_active_jobs()
@@ -336,3 +360,11 @@ def stop_job(job_id: str):
     if job_manager.request_cancel(job_id):
         return {"status": "cancelled", "message": f"Cancellation requested for job {job_id}"}
     raise HTTPException(status_code=404, detail="Job not found")
+
+@router.get("/jobs/{job_id}", response_model=JobStatusResponse)
+def get_job_status(job_id: str):
+    """Get status of a specific job."""
+    job = job_manager.get_job(job_id)
+    if not job:
+        raise HTTPException(status_code=404, detail="Job not found")
+    return job
diff --git a/FitnessSync/backend/src/models/__init__.py b/FitnessSync/backend/src/models/__init__.py
index 903e51a..722be08 100644
--- a/FitnessSync/backend/src/models/__init__.py
+++ b/FitnessSync/backend/src/models/__init__.py
@@ -6,5 +6,10 @@
 from .api_token import APIToken
 from .auth_status import AuthStatus
 from .weight_record import WeightRecord
 from .activity import Activity
+from .job import Job
 from .health_metric import HealthMetric
-from .sync_log import SyncLog
\ No newline at end of file
+from .sync_log import SyncLog
+from .activity_state import GarminActivityState
+from .health_state import HealthSyncState
+from .scheduled_job import ScheduledJob
+from .bike_setup import BikeSetup
\ No newline at end of file
diff --git a/FitnessSync/backend/src/models/__pycache__/__init__.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/__init__.cpython-311.pyc
index f97f5ea..ee1a4dc 100644
Binary files a/FitnessSync/backend/src/models/__pycache__/__init__.cpython-311.pyc and b/FitnessSync/backend/src/models/__pycache__/__init__.cpython-311.pyc differ
diff --git a/FitnessSync/backend/src/models/__pycache__/__init__.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/__init__.cpython-313.pyc
index f9c5d8e..4fb45ed 100644
Binary files a/FitnessSync/backend/src/models/__pycache__/__init__.cpython-313.pyc and b/FitnessSync/backend/src/models/__pycache__/__init__.cpython-313.pyc differ
diff --git a/FitnessSync/backend/src/models/__pycache__/activity.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/activity.cpython-311.pyc
index 0a4e68b..e1a0996 100644
Binary
files a/FitnessSync/backend/src/models/__pycache__/activity.cpython-311.pyc and b/FitnessSync/backend/src/models/__pycache__/activity.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/activity.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/activity.cpython-313.pyc index f57e032..461488d 100644 Binary files a/FitnessSync/backend/src/models/__pycache__/activity.cpython-313.pyc and b/FitnessSync/backend/src/models/__pycache__/activity.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-311.pyc new file mode 100644 index 0000000..6ec0ddf Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-313.pyc new file mode 100644 index 0000000..dd6d4ae Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/activity_state.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-311.pyc new file mode 100644 index 0000000..21b958f Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-313.pyc new file mode 100644 index 0000000..dd9e613 Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/bike_setup.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/health_state.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/health_state.cpython-311.pyc new file mode 100644 index 0000000..232f348 Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/health_state.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/health_state.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/health_state.cpython-313.pyc new file mode 100644 index 0000000..1fb0a2a Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/health_state.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/job.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/job.cpython-311.pyc new file mode 100644 index 0000000..dc2d865 Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/job.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/job.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/job.cpython-313.pyc new file mode 100644 index 0000000..06a2e16 Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/job.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-311.pyc new file mode 100644 index 0000000..d044123 Binary files /dev/null and b/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-313.pyc new file mode 100644 index 0000000..131906b Binary files /dev/null and 
b/FitnessSync/backend/src/models/__pycache__/scheduled_job.cpython-313.pyc differ
diff --git a/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-311.pyc b/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-311.pyc
index d6f859d..362bb82 100644
Binary files a/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-311.pyc and b/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-311.pyc differ
diff --git a/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-313.pyc b/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-313.pyc
index 671b8a3..ab74471 100644
Binary files a/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-313.pyc and b/FitnessSync/backend/src/models/__pycache__/weight_record.cpython-313.pyc differ
diff --git a/FitnessSync/backend/src/models/activity.py b/FitnessSync/backend/src/models/activity.py
index 3bd260a..7fcf8fb 100644
--- a/FitnessSync/backend/src/models/activity.py
+++ b/FitnessSync/backend/src/models/activity.py
@@ -1,4 +1,5 @@
-from sqlalchemy import Column, Integer, String, DateTime, Text, LargeBinary
+from sqlalchemy import Column, Integer, String, DateTime, Text, LargeBinary, Float, ForeignKey
+from sqlalchemy.orm import relationship
 from sqlalchemy.sql import func
 from ..models import Base
@@ -11,9 +12,34 @@ class Activity(Base):
     activity_type = Column(String, nullable=True)  # Type of activity (e.g., 'running', 'cycling')
     start_time = Column(DateTime, nullable=True)  # Start time of the activity
     duration = Column(Integer, nullable=True)  # Duration in seconds
+
+    # Extended Metrics
+    distance = Column(Float, nullable=True)  # meters
+    calories = Column(Float, nullable=True)  # kcal
+    avg_hr = Column(Integer, nullable=True)  # bpm
+    max_hr = Column(Integer, nullable=True)  # bpm
+    avg_speed = Column(Float, nullable=True)  # m/s
+    max_speed = Column(Float, nullable=True)  # m/s
+    elevation_gain = Column(Float, nullable=True)  # meters
+    elevation_loss = Column(Float, nullable=True)  # meters
+    avg_cadence = Column(Integer, nullable=True)  # rpm/spm
+    max_cadence = Column(Integer, nullable=True)  # rpm/spm
+    steps = Column(Integer, nullable=True)
+    aerobic_te = Column(Float, nullable=True)  # 0-5
+    anaerobic_te = Column(Float, nullable=True)  # 0-5
+    avg_power = Column(Integer, nullable=True)  # watts
+    max_power = Column(Integer, nullable=True)  # watts
+    norm_power = Column(Integer, nullable=True)  # watts
+    tss = Column(Float, nullable=True)  # Training Stress Score
+    vo2_max = Column(Float, nullable=True)  # ml/kg/min
+
     file_content = Column(LargeBinary, nullable=True)  # Activity file content stored in database (base64 encoded)
     file_type = Column(String, nullable=True)  # File type (.fit, .gpx, .tcx, etc.)
     download_status = Column(String, default='pending')  # 'pending', 'downloaded', 'failed'
     downloaded_at = Column(DateTime, nullable=True)  # When downloaded
     created_at = Column(DateTime(timezone=True), server_default=func.now())
-    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
\ No newline at end of file
+    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
+
+    bike_setup_id = Column(Integer, ForeignKey("bike_setups.id"), nullable=True)
+    bike_setup = relationship("BikeSetup")
\ No newline at end of file
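The power columns line up with the standard Coggan training-stress definitions (IF = NP / FTP; TSS = duration_s * NP * IF / (FTP * 3600) * 100). A small sketch of how the tss field could be derived from these columns; FTP is a rider-level input that the Activity model does not store, so it is an assumed parameter here:

def compute_tss(duration_s: int, norm_power: int, ftp: int) -> float:
    """Coggan Training Stress Score from normalized power; ftp is in watts."""
    if not (duration_s and norm_power and ftp):
        return 0.0
    intensity_factor = norm_power / ftp
    return (duration_s * norm_power * intensity_factor) / (ftp * 3600) * 100

# Example: a 1 h ride at NP 200 W with FTP 250 W -> IF 0.8, TSS ~64.0
print(compute_tss(3600, 200, 250))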
diff --git a/FitnessSync/backend/src/models/activity_state.py b/FitnessSync/backend/src/models/activity_state.py
new file mode 100644
index 0000000..c40d2c2
--- /dev/null
+++ b/FitnessSync/backend/src/models/activity_state.py
@@ -0,0 +1,12 @@
+from sqlalchemy import Column, Integer, String, DateTime, func
+from ..models import Base
+
+class GarminActivityState(Base):
+    __tablename__ = "garmin_activity_state"
+
+    garmin_activity_id = Column(String, primary_key=True, index=True)
+    activity_name = Column(String, nullable=True)
+    activity_type = Column(String, nullable=True)
+    start_time = Column(DateTime, nullable=True)
+    sync_status = Column(String, default='new')  # 'new', 'updated', 'synced'
+    last_seen = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
diff --git a/FitnessSync/backend/src/models/bike_setup.py b/FitnessSync/backend/src/models/bike_setup.py
new file mode 100644
index 0000000..f426ca6
--- /dev/null
+++ b/FitnessSync/backend/src/models/bike_setup.py
@@ -0,0 +1,15 @@
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy.sql import func
+from .base import Base
+
+class BikeSetup(Base):
+    __tablename__ = "bike_setups"
+
+    id = Column(Integer, primary_key=True, index=True)
+    frame = Column(String, nullable=False)
+    chainring = Column(Integer, nullable=False)
+    rear_cog = Column(Integer, nullable=False)
+    name = Column(String, nullable=True)  # Optional, can be derived or user-set
+
+    created_at = Column(DateTime(timezone=True), server_default=func.now())
+    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
diff --git a/FitnessSync/backend/src/models/health_state.py b/FitnessSync/backend/src/models/health_state.py
new file mode 100644
index 0000000..5d08735
--- /dev/null
+++ b/FitnessSync/backend/src/models/health_state.py
@@ -0,0 +1,16 @@
+from sqlalchemy import Column, Integer, String, DateTime, Date, func, UniqueConstraint
+from ..models import Base
+
+class HealthSyncState(Base):
+    __tablename__ = "health_sync_state"
+
+    id = Column(Integer, primary_key=True, index=True)
+    date = Column(Date, nullable=False)
+    metric_type = Column(String, nullable=False)  # 'steps', 'weight', 'sleep', etc.
+ source = Column(String, nullable=False) #'garmin', 'fitbit' + sync_status = Column(String, default='new') # 'new', 'updated', 'synced' + last_seen = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now()) + + __table_args__ = ( + UniqueConstraint('date', 'metric_type', 'source', name='uq_health_state'), + ) diff --git a/FitnessSync/backend/src/models/job.py b/FitnessSync/backend/src/models/job.py new file mode 100644 index 0000000..4a9935d --- /dev/null +++ b/FitnessSync/backend/src/models/job.py @@ -0,0 +1,19 @@ +from sqlalchemy import Column, Integer, String, DateTime, Text, Boolean, JSON, func +from .base import Base + +class Job(Base): + __tablename__ = 'jobs' + + id = Column(String, primary_key=True, index=True) + operation = Column(String, nullable=False) + status = Column(String, nullable=False, default='running') + start_time = Column(DateTime(timezone=True), nullable=False) + end_time = Column(DateTime(timezone=True), nullable=True) + progress = Column(Integer, default=0) + message = Column(Text, nullable=True) + result = Column(JSON, nullable=True) + cancel_requested = Column(Boolean, default=False) + paused = Column(Boolean, default=False) + + created_at = Column(DateTime(timezone=True), server_default=func.now()) + updated_at = Column(DateTime(timezone=True), onupdate=func.now()) diff --git a/FitnessSync/backend/src/models/scheduled_job.py b/FitnessSync/backend/src/models/scheduled_job.py new file mode 100644 index 0000000..f33f090 --- /dev/null +++ b/FitnessSync/backend/src/models/scheduled_job.py @@ -0,0 +1,20 @@ + +from sqlalchemy import Column, Integer, String, DateTime, Boolean, Text +from sqlalchemy.sql import func +from .base import Base + +class ScheduledJob(Base): + __tablename__ = 'scheduled_jobs' + + id = Column(Integer, primary_key=True, index=True) + job_type = Column(String, nullable=False) # e.g. 
'fitbit_weight_sync' + name = Column(String, nullable=False) + interval_minutes = Column(Integer, nullable=False, default=60) + params = Column(Text, nullable=True) # JSON string + enabled = Column(Boolean, default=True) + + last_run = Column(DateTime(timezone=True), nullable=True) + next_run = Column(DateTime(timezone=True), nullable=True) + + created_at = Column(DateTime(timezone=True), server_default=func.now()) + updated_at = Column(DateTime(timezone=True), onupdate=func.now()) diff --git a/FitnessSync/backend/src/models/weight_record.py b/FitnessSync/backend/src/models/weight_record.py index 9f43d79..2b8a0b2 100644 --- a/FitnessSync/backend/src/models/weight_record.py +++ b/FitnessSync/backend/src/models/weight_record.py @@ -8,6 +8,7 @@ class WeightRecord(Base): id = Column(Integer, primary_key=True, index=True) fitbit_id = Column(String, unique=True, nullable=False) # Original Fitbit ID to prevent duplicates weight = Column(Float, nullable=False) # Weight value + bmi = Column(Float, nullable=True) # BMI value unit = Column(String, nullable=False) # Unit (e.g., 'kg', 'lbs') date = Column(DateTime, nullable=False) # Date of measurement timestamp = Column(DateTime, nullable=False) # Exact timestamp diff --git a/FitnessSync/backend/src/routers/__init__.py b/FitnessSync/backend/src/routers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-311.pyc b/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..6dc326c Binary files /dev/null and b/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-313.pyc b/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000..4602444 Binary files /dev/null and b/FitnessSync/backend/src/routers/__pycache__/__init__.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/routers/__pycache__/web.cpython-311.pyc b/FitnessSync/backend/src/routers/__pycache__/web.cpython-311.pyc new file mode 100644 index 0000000..12d223e Binary files /dev/null and b/FitnessSync/backend/src/routers/__pycache__/web.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/routers/__pycache__/web.cpython-313.pyc b/FitnessSync/backend/src/routers/__pycache__/web.cpython-313.pyc new file mode 100644 index 0000000..2427de4 Binary files /dev/null and b/FitnessSync/backend/src/routers/__pycache__/web.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/routers/web.py b/FitnessSync/backend/src/routers/web.py new file mode 100644 index 0000000..adbc245 --- /dev/null +++ b/FitnessSync/backend/src/routers/web.py @@ -0,0 +1,34 @@ +from fastapi import APIRouter, Request +from fastapi.templating import Jinja2Templates + +router = APIRouter() +templates = Jinja2Templates(directory="templates") + +@router.get("/") +async def read_root(request: Request): + return templates.TemplateResponse("index.html", {"request": request}) + +@router.get("/activities") +async def activities_page(request: Request): + return templates.TemplateResponse("activities.html", {"request": request}) + +@router.get("/setup") +async def setup_page(request: Request): + return templates.TemplateResponse("setup.html", {"request": request}) + +@router.get("/garmin-health") +async def garmin_health_page(request: Request): + return templates.TemplateResponse("garmin_health.html", {"request": request}) + +@router.get("/fitbit-health") +async def 
fitbit_health_page(request: Request): + return templates.TemplateResponse("fitbit_health.html", {"request": request}) + +@router.get("/bike-setups") +async def bike_setups_page(request: Request): + return templates.TemplateResponse("bike_setups.html", {"request": request}) + +@router.get("/activity/{activity_id}") +async def activity_view_page(request: Request, activity_id: str): + return templates.TemplateResponse("activity_view.html", {"request": request, "activity_id": activity_id}) + diff --git a/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-311.pyc new file mode 100644 index 0000000..f511889 Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-313.pyc new file mode 100644 index 0000000..ca91540 Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/bike_matching.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-311.pyc index 91b8433..230c099 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-311.pyc and b/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-313.pyc index 3c978af..2638d1f 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-313.pyc and b/FitnessSync/backend/src/services/__pycache__/fitbit_client.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-311.pyc new file mode 100644 index 0000000..1865bb1 Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-313.pyc new file mode 100644 index 0000000..c515ece Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/garth_helper.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-311.pyc index 4d51c5d..492e064 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-311.pyc and b/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-313.pyc index fcfa5dd..9c0da88 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-313.pyc and b/FitnessSync/backend/src/services/__pycache__/job_manager.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-311.pyc index 209aacd..b823330 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-311.pyc and 
b/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-313.pyc index 888b4ff..4eab779 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-313.pyc and b/FitnessSync/backend/src/services/__pycache__/postgresql_manager.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-311.pyc new file mode 100644 index 0000000..3f5aa05 Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-313.pyc new file mode 100644 index 0000000..48a0fd6 Binary files /dev/null and b/FitnessSync/backend/src/services/__pycache__/scheduler.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-311.pyc b/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-311.pyc index 3436ad2..ce28f09 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-311.pyc and b/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-313.pyc b/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-313.pyc index 30d2b4f..2cb3aa8 100644 Binary files a/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-313.pyc and b/FitnessSync/backend/src/services/__pycache__/sync_app.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/bike_matching.py b/FitnessSync/backend/src/services/bike_matching.py new file mode 100644 index 0000000..77f8958 --- /dev/null +++ b/FitnessSync/backend/src/services/bike_matching.py @@ -0,0 +1,129 @@ +import logging +from typing import List, Optional +from sqlalchemy.orm import Session +from ..models.activity import Activity +from ..models.bike_setup import BikeSetup + +logger = logging.getLogger(__name__) + +# Constants +WHEEL_CIRCUMFERENCE_M = 2.1 # Approx 700x23c/28c generic +TOLERANCE_PERCENT = 0.15 + +def calculate_observed_ratio(speed_mps: float, cadence_rpm: float) -> float: + """ + Calculate gear ratio from speed and cadence. + Speed = (Cadence * Ratio * Circumference) / 60 + Ratio = (Speed * 60) / (Cadence * Circumference) + """ + if not cadence_rpm or cadence_rpm == 0: + return 0.0 + return (speed_mps * 60) / (cadence_rpm * WHEEL_CIRCUMFERENCE_M) + +def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetup]: + """ + Match an activity to a bike setup based on gear ratio. + """ + if not activity.activity_type: + return None + + type_lower = activity.activity_type.lower() + + # Generic "cycling" check covers most (cycling, gravel_cycling, indoor_cycling) + # But explicitly: 'road_biking', 'mountain_biking', 'gravel_cycling', 'cycling' + # User asked for "all types of cycling". + # We essentially want to filter OUT known non-cycling stuff if it doesn't match keys. + # But safer to be inclusive of keywords. 
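As an editor's aside before the type filter continues below: a quick numeric check of calculate_observed_ratio above. This is a standalone sketch; the 34 km/h at 90 rpm ride and the 50x17 gear are illustrative values, not taken from the codebase.

```python
# Re-derive the ratio with the same 2.1 m circumference and 15% tolerance
# that bike_matching.py hard-codes.
WHEEL_CIRCUMFERENCE_M = 2.1
TOLERANCE_PERCENT = 0.15

def observed_ratio(speed_mps: float, cadence_rpm: float) -> float:
    # Ratio = (Speed * 60) / (Cadence * Circumference)
    return (speed_mps * 60) / (cadence_rpm * WHEEL_CIRCUMFERENCE_M)

ratio = observed_ratio(34 / 3.6, 90)  # 34 km/h at 90 rpm -> ~3.00
mechanical = 50 / 17                  # 50t chainring, 17t cog -> ~2.94
assert abs(ratio - mechanical) / mechanical <= TOLERANCE_PERCENT
```

A plausible road gear lands well inside the 15% window, which mostly has to absorb the generic wheel-circumference guess and cadence noise.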
+ + is_cycling = ( + 'cycling' in type_lower or + 'road_biking' in type_lower or + 'mountain_biking' in type_lower or + 'mtb' in type_lower or + 'cyclocross' in type_lower + ) + + if not is_cycling: + # Not cycling + return None + + if 'indoor' in type_lower: + # Indoor cycling - ignore + return None + + if not activity.avg_speed or not activity.avg_cadence: + # Not enough data + return None + + observed_ratio = calculate_observed_ratio(activity.avg_speed, activity.avg_cadence) + if observed_ratio == 0: + return None + + setups = db.query(BikeSetup).all() + if not setups: + return None + + best_match = None + min_diff = float('inf') + + for setup in setups: + if not setup.chainring or not setup.rear_cog: + continue + + mechanical_ratio = setup.chainring / setup.rear_cog + diff = abs(observed_ratio - mechanical_ratio) + + # Check tolerance + # e.g., if ratio match is within 15% + if diff / mechanical_ratio <= TOLERANCE_PERCENT: + if diff < min_diff: + min_diff = diff + best_match = setup + + return best_match + +def process_activity_matching(db: Session, activity_id: int): + """ + Process matching for a specific activity and save result. + """ + activity = db.query(Activity).filter(Activity.id == activity_id).first() + if not activity: + return + + match = match_activity_to_bike(db, activity) + if match: + activity.bike_setup_id = match.id + logger.info(f"Matched Activity {activity.id} to Setup {match.frame} (Found Ratio: {calculate_observed_ratio(activity.avg_speed, activity.avg_cadence):.2f})") + else: + # Implicitly "Generic" if None, but user requested explicit default logic. + generic = db.query(BikeSetup).filter(BikeSetup.name == "GenericBike").first() + if generic: + activity.bike_setup_id = generic.id + else: + activity.bike_setup_id = None # Truly unknown + + db.commit() + +def run_matching_for_all(db: Session): + """ + Run matching for all activities that don't have a setup. 
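+    Selection mirrors match_activity_to_bike: cycling-like activity types
+    are matched with case-insensitive LIKE filters and '%indoor%' is
+    excluded.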
+ """ + from sqlalchemy import or_ + + activities = db.query(Activity).filter( + Activity.bike_setup_id == None, + or_( + Activity.activity_type.ilike('%cycling%'), + Activity.activity_type.ilike('%road_biking%'), + Activity.activity_type.ilike('%mountain%'), # catch mountain_biking + Activity.activity_type.ilike('%mtb%'), + Activity.activity_type.ilike('%cyclocross%') + ), + Activity.activity_type.notilike('%indoor%') + ).all() + + count = 0 + for act in activities: + process_activity_matching(db, act.id) + count += 1 + logger.info(f"Ran matching for {count} activities.") diff --git a/FitnessSync/backend/src/services/fitbit_client.py b/FitnessSync/backend/src/services/fitbit_client.py index fbfffec..e53c5ed 100644 --- a/FitnessSync/backend/src/services/fitbit_client.py +++ b/FitnessSync/backend/src/services/fitbit_client.py @@ -9,7 +9,7 @@ from ..utils.helpers import setup_logger logger = setup_logger(__name__) class FitbitClient: - def __init__(self, client_id: str, client_secret: str, access_token: str = None, refresh_token: str = None, redirect_uri: str = None): + def __init__(self, client_id: str, client_secret: str, access_token: str = None, refresh_token: str = None, redirect_uri: str = None, refresh_cb = None): self.client_id = client_id self.client_secret = client_secret self.access_token = access_token @@ -26,7 +26,9 @@ class FitbitClient: access_token=access_token, refresh_token=refresh_token, redirect_uri=redirect_uri, - timeout=10 + refresh_cb=refresh_cb, + timeout=10, + system='METRIC' ) def get_authorization_url(self, redirect_uri: str = None) -> str: @@ -41,7 +43,8 @@ class FitbitClient: self.client_id, self.client_secret, redirect_uri=redirect_uri, - timeout=10 + timeout=10, + system='METRIC' ) # The example calls self.fitbit.client.authorize_token_url() @@ -61,11 +64,12 @@ class FitbitClient: """Exchange authorization code for access and refresh tokens.""" # If redirect_uri is provided here, ensure we are using a client configured with it if redirect_uri and redirect_uri != self.redirect_uri: - self.fitbit = fitbit.Fitbit( + self.fitbit = fitbit.Fitbit( self.client_id, self.client_secret, redirect_uri=redirect_uri, - timeout=10 + timeout=10, + system='METRIC' ) logger.info(f"Exchanging authorization code for tokens") diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-311.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-311.pyc index 568b0a5..6bc30d0 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-311.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-313.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-313.pyc index 9bddaa6..5402b85 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-313.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/auth.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-311.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-311.pyc index 761cd8f..86a5981 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-311.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-313.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-313.pyc index 
3d04eef..6742d58 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-313.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/client.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-311.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-311.pyc index 2f12568..37f6874 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-311.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-313.pyc b/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-313.pyc index 618bfca..eb0e4c7 100644 Binary files a/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-313.pyc and b/FitnessSync/backend/src/services/garmin/__pycache__/data.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/garmin/auth.py b/FitnessSync/backend/src/services/garmin/auth.py index 4326360..87167cc 100644 --- a/FitnessSync/backend/src/services/garmin/auth.py +++ b/FitnessSync/backend/src/services/garmin/auth.py @@ -42,14 +42,19 @@ class AuthMixin: logger.debug("garth.login successful.") logger.debug("Attempting to save tokens to database...") -        # If successful, garth still populates the global client? -        # The return signature is tokens, but let's assume global client is also updated as usual. -        # However, with return_on_mfa=True, result might be the tokens tuple. -        # Let's inspect result structure if not MFA. -        # To be safe, we can use global client or extract from result if it's tokens. -        # But existing code uses global client. Let's trust it for now unless issues arise. + +        # Sync tokens to the garminconnect client instance if it exists +        if hasattr(self, 'client') and hasattr(self.client, 'garth'): +            logger.debug("Syncing tokens to internal garminconnect client...") +            try: +                self.client.garth.oauth1_token = garth.client.oauth1_token +                self.client.garth.oauth2_token = garth.client.oauth2_token +                logger.debug("Internal client tokens synced.") +            except Exception as sync_e: +                logger.error(f"Failed to sync tokens to internal client: {sync_e}") +        self.update_tokens(db, garth.client.oauth1_token, garth.client.oauth2_token) logger.debug("Tokens saved successfully.") self.is_connected = True logger.info("Login flow completed successfully.") diff --git a/FitnessSync/backend/src/services/garmin/client.py b/FitnessSync/backend/src/services/garmin/client.py index 5b7ba4b..b10711e 100644 --- a/FitnessSync/backend/src/services/garmin/client.py +++ b/FitnessSync/backend/src/services/garmin/client.py @@ -1,7 +1,12 @@ import garth import logging +import json +import dataclasses +from sqlalchemy.orm import Session from .auth import AuthMixin from .data import DataMixin +import garminconnect +from ...models.api_token import APIToken logger = logging.getLogger(__name__) @@ -10,9 +15,11 @@ class GarminClient(AuthMixin, DataMixin): self.username = username self.password = password self.is_china = is_china -        self.garmin_client = None self.is_connected = False +        # Initialize garminconnect client +        self.client = garminconnect.Garmin(email=username, password=password, is_cn=is_china) + if is_china: garth.configure(domain="garmin.cn") @@ -24,8 +31,8 @@ class GarminClient(AuthMixin, DataMixin): def check_connection(self) -> bool: """Check if the connection to Garmin is still valid.""" try: -            profile = 
self.garmin_client.get_full_name() if self.garmin_client else None - return profile is not None + # We can check if we have a display name or valid session + return self.client.get_full_name() is not None except: self.is_connected = False return False @@ -33,7 +40,65 @@ class GarminClient(AuthMixin, DataMixin): def get_profile_info(self): """Get user profile information.""" if not self.is_connected: - self.login() + # Attempt to reload tokens first if not connected + # This requires a db session which we don't have here usually. + # Ideally expected to be connected via SyncApp. + pass + if self.is_connected: - return garth.UserProfile.get() + try: + return self.client.get_user_profile() + except Exception as e: + logger.error(f"Error fetching profile: {e}") + return None return None + + def load_tokens(self, db: Session): + """Load tokens from DB and populate the garminconnect client.""" + logger.info("Attempting to load Garmin tokens from DB...") + print("DEBUG: Entering load_tokens...", flush=True) + token_record = db.query(APIToken).filter_by(token_type='garmin').first() + + if not token_record: + logger.warning("No tokens found in DB.") + print("DEBUG: No tokens found in DB.", flush=True) + return False + + try: + if not token_record.garth_oauth1_token or not token_record.garth_oauth2_token: + logger.warning("Tokens record exists but fields are empty.") + print("DEBUG: Token fields are empty.", flush=True) + return False + + logger.info("Found tokens in DB, loading into garth session...") + + # Helper for JSON loading + def load_json(data): + if isinstance(data, str): + return json.loads(data) + return data + + oauth1 = load_json(token_record.garth_oauth1_token) + oauth2 = load_json(token_record.garth_oauth2_token) + + # Populate garth session inside garminconnect client + # garminconnect exposes its internal garth client via self.client.garth + self.client.garth.oauth1_token = garth.auth_tokens.OAuth1Token(**oauth1) + self.client.garth.oauth2_token = garth.auth_tokens.OAuth2Token(**oauth2) + + # Also configure the global garth client just in case + garth.client.configure( + oauth1_token=self.client.garth.oauth1_token, + oauth2_token=self.client.garth.oauth2_token, + domain="garmin.cn" if self.is_china else "garmin.com" + ) + + self.is_connected = True + logger.info("Tokens loaded successfully. Client authenticated.") + print("DEBUG: Tokens loaded successfully via load_tokens.", flush=True) + return True + + except Exception as e: + logger.error(f"Failed to load tokens: {e}", exc_info=True) + print(f"DEBUG: Exception in load_tokens: {e}", flush=True) + return False diff --git a/FitnessSync/backend/src/services/garmin/data.py b/FitnessSync/backend/src/services/garmin/data.py index ba5ecf9..1d5bbf2 100644 --- a/FitnessSync/backend/src/services/garmin/data.py +++ b/FitnessSync/backend/src/services/garmin/data.py @@ -20,10 +20,11 @@ class DataMixin: """Fetch activity list from Garmin Connect.""" logger.info(f"Fetching activities from {start_date} to {end_date}") try: - return garth.client.connectapi( - "/activitylist-service/activities/search/activities", - params={"startDate": start_date, "endDate": end_date, "limit": limit} - ) + # garminconnect expects start and limit in get_activities_by_date + # It actually iterates internally. + # Actually get_activities_by_date takes (startdate, enddate, activitytype=None) + # If activitytype is None, it fetches all. 
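+            # Each returned item is a plain dict; scan_activities later reads
+            # 'activityId', 'activityName', 'startTimeLocal' and
+            # activityType['typeKey'] from these entries.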
+ return self.client.get_activities_by_date(start_date, end_date) except Exception as e: logger.error(f"Error fetching activities from Garmin: {e}") raise @@ -34,150 +35,350 @@ class DataMixin: 'file_type' can be 'tcx', 'gpx', 'fit', or 'original'. """ logger.info(f"Downloading activity {activity_id} as {file_type}") + print(f"DEBUG: GarminClient.download_activity {activity_id} type={file_type}", flush=True) try: - path = f"/download-service/export/{file_type}/activity/{activity_id}" - data = garth.client.download(path) + # Map file_type to garminconnect format constants if needed, or it might take strings. + # The library uses specific constants usually but accepts strings in some versions. + # Based on docs: Garmin.ActivityDownloadFormat.TCX etc. + # We can try passing the string first as the library often handles it or we map it. + + from garminconnect import Garmin + + fmt_map = { + 'tcx': Garmin.ActivityDownloadFormat.TCX, + 'gpx': Garmin.ActivityDownloadFormat.GPX, + # 'fit': Garmin.ActivityDownloadFormat.FIT, # Not supported effectively? + 'original': Garmin.ActivityDownloadFormat.ORIGINAL, + 'csv': Garmin.ActivityDownloadFormat.CSV, + } + + # FIT not in ActivityDownloadFormat enum in this version + # Use ORIGINAL which is usually a ZIP containing the FIT file + if file_type == 'fit': + dl_fmt = Garmin.ActivityDownloadFormat.ORIGINAL + else: + dl_fmt = fmt_map.get(file_type) + + if not dl_fmt: + logger.error(f"Unknown file type: {file_type}") + print(f"DEBUG: Unknown file type {file_type}", flush=True) + return None + + data = self.client.download_activity(activity_id, dl_fmt=dl_fmt) if not data: + print("DEBUG: Download returned empty data", flush=True) return None # Validation: Check for HTML error pages masquerading as files - # HTML error pages often start with Optional[Dict[str, Any]]: + """Fetch weight history for a date range.""" + # Using the relative path supported by garth/connectapi + url = f"/weight-service/weight/dateRange?startDate={start_date}&endDate={end_date}" + logger.info(f"Fetching weight history from {start_date} to {end_date}") + try: + # self.client is GarminConnect, which has .garth property exposing the garth client + # .connectapi is a method on the garth client + return self.client.garth.connectapi(url) + except Exception as e: + logger.error(f"Error fetching weight history: {e}") + return None + + def upload_metric_weight(self, timestamp: datetime, weight_kg: float, bmi: float = None) -> bool: + """ + Upload weight and optional BMI to Garmin Connect. + timestamp: datetime object + weight_kg: float + bmi: float (optional) + """ + date_str = timestamp.strftime('%Y-%m-%d') + # Timestamp formatted as needed by Garmin (e.g. '2023-01-01T08:00:00') - library might handle just YYYY-MM-DD but timestamp is better for conflicts + # Using add_body_composition from garminconnect + # Signature: add_body_composition(self, timestamp: str | None, weight: float, ..., bmi: float | None = None) + + # NOTE: garminconnect library expects timestamp in specific format or None? + # Based on inspection, it takes timestamp string. 
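A brief editor's sketch of the upstream shape implied here: the BMI forwarded into this method has to be derived somewhere from weight and height. The helper and the height input below are assumptions for illustration, not part of this codebase.

```python
# Hypothetical helper: build the arguments upload_metric_weight passes on.
# BMI = kg / m^2; the timestamp format matches the strftime call below.
from datetime import datetime

def weight_args(ts: datetime, weight_kg: float, height_m: float) -> dict:
    return {
        "timestamp": ts.strftime("%Y-%m-%dT%H:%M:%S"),  # e.g. 2026-01-07T08:00:00
        "weight": weight_kg,
        "bmi": round(weight_kg / (height_m ** 2), 1),
    }

print(weight_args(datetime(2026, 1, 7, 8, 0), 80.0, 1.80))
# {'timestamp': '2026-01-07T08:00:00', 'weight': 80.0, 'bmi': 24.7}
```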
+ ts_str = timestamp.strftime('%Y-%m-%dT%H:%M:%S') + + logger.info(f"Uploading weight to Garmin: {weight_kg}kg, bmi={bmi} for {ts_str}") + print(f"DEBUG: Uploading weight to Garmin: {weight_kg}kg, bmi={bmi} for {ts_str}", flush=True) + + try: + # weight must be in kg + # The library likely handles the POST request + # We use self.client which is the Garmin object + self.client.add_body_composition( + timestamp=ts_str, + weight=weight_kg, + bmi=bmi + ) + logger.info("Upload successful.") + print("DEBUG: Upload successful.", flush=True) + return True + except Exception as e: + logger.error(f"Error uploading weight to Garmin: {e}") + print(f"DEBUG: Error uploading weight: {e}", flush=True) + return False + + def _extract_file_from_zip(self, zip_bytes: bytes) -> Optional[bytes]: + """Extract the first likely activity file (.fit, .tcx, .gpx) from a zip archive bytes.""" + import io + import zipfile + + try: + with zipfile.ZipFile(io.BytesIO(zip_bytes)) as z: + # Prioritize .fit, then .tcx, .gpx + files = z.namelist() + target_file = None + + # Look for .fit + for f in files: + if f.lower().endswith('.fit'): + target_file = f + break + + if not target_file: + for f in files: + if f.lower().endswith('.tcx'): + target_file = f + break + + if not target_file: + for f in files: + if f.lower().endswith('.gpx'): + target_file = f + break + + # Fallback: first file if only one? + if not target_file and len(files) == 1: + target_file = files[0] + + if target_file: + return z.read(target_file) + + except Exception as e: + logger.error(f"Error extracting zip: {e}") + return None + + def get_all_metrics_for_date(self, date_str: str) -> Dict[str, Any]: + """Fetch all available metrics for a single date.""" + logger.info(f"Fetching metrics for {date_str}") + metrics = { + "steps": None, + "intensity": None, + "stress": None, + "hrv": None, + "sleep": None, + "hydration": None, + "weight": [], + "body_battery": None + } + + try: + # Summary - Steps, Intensity, etc. 
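Stepping back for a moment to _extract_file_from_zip above: its member-selection order can be exercised in isolation. An editor's sketch with illustrative file names; the real helper additionally reads the chosen member's bytes.

```python
# Preference order used when unpacking a Garmin 'original' ZIP:
# .fit first, then .tcx, then .gpx, else a lone member as fallback.
def pick_activity_member(names: list[str]) -> str | None:
    for ext in ('.fit', '.tcx', '.gpx'):
        for name in names:
            if name.lower().endswith(ext):
                return name
    return names[0] if len(names) == 1 else None

assert pick_activity_member(['123_ACTIVITY.fit', 'readme.txt']) == '123_ACTIVITY.fit'
assert pick_activity_member(['track.gpx']) == 'track.gpx'
```

Returning to get_all_metrics_for_date: the single summary call below feeds both the steps and intensity blocks.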
+ summary = self.client.get_user_summary(date_str) + if summary: + # Steps + metrics["steps"] = { + "calendarDate": date_str, + "totalSteps": summary.get("totalSteps"), + "totalDistanceMeters": summary.get("totalDistanceMeters"), + "stepGoal": summary.get("dailyStepGoal") + } + + # Intensity + metrics["intensity"] = { + "calendarDate": date_str, + "moderateIntensityMinutes": summary.get("moderateIntensityMinutes"), + "vigorousIntensityMinutes": summary.get("vigorousIntensityMinutes"), + "intensityGoal": summary.get("activeTimeGoal") + } + + # Stress + try: + stress_data = self.client.get_stress_data(date_str) + if stress_data: + metrics["stress"] = stress_data + except Exception as s_e: + logger.debug(f"Stress fetch failed for {date_str}: {s_e}") + + + # HRV + try: + hrv_data = self.client.get_hrv_data(date_str) + if hrv_data: + metrics["hrv"] = hrv_data + except Exception as hrv_e: + logger.debug(f"HRV fetch failed for {date_str}: {hrv_e}") + + # Sleep + try: + sleep_data = self.client.get_sleep_data(date_str) + if sleep_data: + metrics["sleep"] = sleep_data + except Exception as sl_e: + logger.debug(f"Sleep fetch failed for {date_str}: {sl_e}") + + # Hydration + try: + hydration_data = self.client.get_hydration_data(date_str) + if hydration_data: + metrics["hydration"] = hydration_data + except Exception as hy_e: + logger.debug(f"Hydration fetch failed for {date_str}: {hy_e}") + + # Weight (Body Composition) + try: + weight_data = self.client.get_body_composition(date_str) + if weight_data: + if isinstance(weight_data, dict) and 'dateWeightList' in weight_data: + metrics["weight"].extend(weight_data['dateWeightList']) + else: + metrics["weight"].append(weight_data) + except Exception as w_e: + logger.debug(f"Weight fetch failed for {date_str}: {w_e}") + + # Body Battery + try: + bb_data = self.client.get_body_battery(date_str) + if bb_data: + metrics["body_battery"] = bb_data + except Exception as bb_e: + logger.debug(f"Body Battery fetch failed for {date_str}: {bb_e}") + + except Exception as e: + logger.error(f"Error fetching daily metrics for {date_str}: {e}") + + return metrics + def get_daily_metrics(self, start_date: str, end_date: str) -> Dict[str, List[Dict]]: """ Fetch various daily metrics for a given date range. + DEPRECATED: Prefer iterating with get_all_metrics_for_date for granular control. 
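+        Note: the per-date loop below makes one batch of API calls per day,
+        so a 30-day range issues roughly 30x the requests of the bulk garth
+        listings it replaces.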
""" start = datetime.strptime(start_date, '%Y-%m-%d').date() end = datetime.strptime(end_date, '%Y-%m-%d').date() - days = (end - start).days + 1 - - all_metrics = { - "steps": [], - "hrv": [], - "sleep": [], - "stress": [], - "intensity": [], - "hydration": [], - "weight": [], - "body_battery": [] - } - - # Steps - try: - logger.info(f"Fetching daily steps for {days} days ending on {end_date}") - all_metrics["steps"] = garth.stats.steps.DailySteps.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily steps: {e}") - - # HRV - try: - logger.info(f"Fetching daily HRV for {days} days ending on {end_date}") - all_metrics["hrv"] = garth.stats.hrv.DailyHRV.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily HRV: {e}") - - # Sleep - try: - logger.info(f"Fetching daily sleep for {days} days ending on {end_date}") - all_metrics["sleep"] = garth.data.sleep.SleepData.list(end, days=days) - except Exception as e: - logger.error(f"Error fetching daily sleep: {e}") - - # Stress - try: - logger.info(f"Fetching daily stress for {days} days ending on {end_date}") - all_metrics["stress"] = garth.stats.stress.DailyStress.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily stress: {e}") - - # Intensity Minutes - try: - logger.info(f"Fetching daily intensity minutes for {days} days ending on {end_date}") - all_metrics["intensity"] = garth.stats.intensity_minutes.DailyIntensityMinutes.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily intensity minutes: {e}") - - # Hydration - try: - logger.info(f"Fetching daily hydration for {days} days ending on {end_date}") - all_metrics["hydration"] = garth.stats.hydration.DailyHydration.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily hydration: {e}") - # Weight - weight_success = False - try: - print(f"Fetching daily weight for {days} days ending on {end_date}", flush=True) - all_metrics["weight"] = garth.data.weight.WeightData.list(end, days=days) - print(f"Fetched {len(all_metrics['weight'])} weight records from Garmin (via garth class).", flush=True) - if len(all_metrics["weight"]) > 0: - weight_success = True - except Exception as e: - print(f"Error fetching daily weight via Garth: {e}", flush=True) + all_metrics = {k: [] for k in ["steps", "hrv", "sleep", "stress", "intensity", "hydration", "weight", "body_battery"]} + + current_date = start + while current_date <= end: + date_str = current_date.strftime('%Y-%m-%d') + day_metrics = self.get_all_metrics_for_date(date_str) - # Fallback: If Garth failed or returned 0, try Raw API - if not weight_success or len(all_metrics["weight"]) == 0: - try: - start_str = start.strftime('%Y-%m-%d') - end_str = end.strftime('%Y-%m-%d') - print(f"Attempting fallback raw weight fetch: {start_str} to {end_str}", flush=True) - - raw_weight = garth.client.connectapi( - f"/weight-service/weight/dateRange", - params={"startDate": start_str, "endDate": end_str} - ) - - raw_list = raw_weight.get('dateWeightList', []) - count = len(raw_list) - print(f"Fallback raw fetch returned {count} records.", flush=True) - - if raw_list: - print(f"Fallback successful: Found {len(raw_list)} records via raw API.", flush=True) - converted = [] - for item in raw_list: - try: - obj = SimpleNamespace() - # Weight in grams - obj.weight = item.get('weight') - - # Date handling (usually timestamps in millis for this endpoint) - d_val = item.get('date') - if isinstance(d_val, (int, float)): - # 
Garmin timestamps are millis - obj.calendar_date = datetime.fromtimestamp(d_val/1000).date() - elif isinstance(d_val, str): - obj.calendar_date = datetime.strptime(d_val, '%Y-%m-%d').date() - else: - # Attempt to use 'date' directly if it's already a date object (unlikely from JSON) - obj.calendar_date = d_val - - converted.append(obj) - except Exception as conv_e: - print(f"Failed to convert raw weight item: {conv_e}", flush=True) - - all_metrics["weight"] = converted - else: - print("Raw API also returned 0 records.", flush=True) - - except Exception as raw_e: - print(f"Fallback raw API fetch failed: {raw_e}", flush=True) - - # Body Battery - try: - logger.info(f"Fetching daily body battery for {days} days ending on {end_date}") - # Body Battery uses DailyBodyBatteryStress but stored in 'body_battery' naming usually? - # We use the class found: garth.data.body_battery.DailyBodyBatteryStress - all_metrics["body_battery"] = garth.data.body_battery.DailyBodyBatteryStress.list(end, period=days) - except Exception as e: - logger.error(f"Error fetching daily body battery: {e}") + for key, val in day_metrics.items(): + if val: + if isinstance(val, list): + all_metrics[key].extend(val) + else: + all_metrics[key].append(val) + + current_date += timedelta(days=1) return all_metrics + + def get_metric_data(self, date_str: str, metric_type: str) -> Optional[Any]: + """ + Fetch specific metric data for a single date. + """ + try: + if metric_type == 'steps': + summary = self.client.get_user_summary(date_str) + if summary: + return { + "calendarDate": date_str, + "totalSteps": summary.get("totalSteps"), + "totalDistanceMeters": summary.get("totalDistanceMeters"), + "stepGoal": summary.get("dailyStepGoal") + } + + elif metric_type == 'intensity': + summary = self.client.get_user_summary(date_str) + if summary: + return { + "calendarDate": date_str, + "moderateIntensityMinutes": summary.get("moderateIntensityMinutes"), + "vigorousIntensityMinutes": summary.get("vigorousIntensityMinutes"), + "intensityGoal": summary.get("activeTimeGoal") + } + + elif metric_type == 'stress': + return self.client.get_stress_data(date_str) + + elif metric_type == 'hrv': + return self.client.get_hrv_data(date_str) + + elif metric_type == 'sleep': + return self.client.get_sleep_data(date_str) + + elif metric_type == 'hydration': + return self.client.get_hydration_data(date_str) + + elif metric_type == 'weight': + data = self.client.get_body_composition(date_str) + # Normalize weight return if needed + return data + + elif metric_type == 'body_battery': + return self.client.get_body_battery(date_str) + + elif metric_type == 'respiration': + return self.client.get_respiration_data(date_str) + + elif metric_type == 'spo2': + return self.client.get_spo2_data(date_str) + + elif metric_type == 'floors': + # Floors are part of the daily summary usually + summary = self.client.get_user_summary(date_str) + if summary: + return { + "calendarDate": date_str, + "floorsClimbed": summary.get("floorsClimbed"), + "floorsDescended": summary.get("floorsDescended"), + "floorsGoal": summary.get("floorsClimbedGoal") + } + + elif metric_type == 'sleep_score': + # Sleep score is inside sleep data + return self.client.get_sleep_data(date_str) + + elif metric_type == 'vo2_max': + # VO2 Max is in max metrics or training status + return self.client.get_max_metrics(date_str) + + else: + logger.warning(f"Unknown metric type: {metric_type}") + return None + + except Exception as e: + logger.error(f"Error fetching {metric_type} for {date_str}: {e}") + 
return None diff --git a/FitnessSync/backend/src/services/garth_helper.py b/FitnessSync/backend/src/services/garth_helper.py new file mode 100644 index 0000000..28184af --- /dev/null +++ b/FitnessSync/backend/src/services/garth_helper.py @@ -0,0 +1,35 @@ + +import logging +import json +import garth +from fastapi import HTTPException +from sqlalchemy.orm import Session +from ..models.api_token import APIToken +from garth.auth_tokens import OAuth1Token, OAuth2Token + +logger = logging.getLogger(__name__) + +def load_and_verify_garth_session(db: Session): + """Helper to load token from DB and verify session with Garmin.""" + logger.info("Loading and verifying Garmin session...") + token_record = db.query(APIToken).filter_by(token_type='garmin').first() + if not (token_record and token_record.garth_oauth1_token and token_record.garth_oauth2_token): + logger.warning("Garmin token not found in DB.") + raise Exception("Garmin token not found.") + + try: + oauth1_dict = json.loads(token_record.garth_oauth1_token) + oauth2_dict = json.loads(token_record.garth_oauth2_token) + + domain = oauth1_dict.get('domain') + if domain: + garth.configure(domain=domain) + + garth.client.oauth1_token = OAuth1Token(**oauth1_dict) + garth.client.oauth2_token = OAuth2Token(**oauth2_dict) + + garth.UserProfile.get() + logger.info("Garth session verified.") + except Exception as e: + logger.error(f"Garth session verification failed: {e}", exc_info=True) + raise Exception(f"Failed to authenticate with Garmin: {e}") diff --git a/FitnessSync/backend/src/services/job_manager.py b/FitnessSync/backend/src/services/job_manager.py index c812be5..e7914ef 100644 --- a/FitnessSync/backend/src/services/job_manager.py +++ b/FitnessSync/backend/src/services/job_manager.py @@ -1,7 +1,15 @@ import uuid import logging from typing import Dict, Optional, List -from datetime import datetime +from datetime import datetime, timedelta +import threading +import json +from sqlalchemy.orm import Session +from sqlalchemy import desc + +from ..services.postgresql_manager import PostgreSQLManager +from ..utils.config import config +from ..models.job import Job logger = logging.getLogger(__name__) @@ -11,52 +19,205 @@ class JobManager: def __new__(cls): if cls._instance is None: cls._instance = super(JobManager, cls).__new__(cls) - cls._instance.active_jobs = {} + cls._instance.db_manager = PostgreSQLManager(config.DATABASE_URL) + # We still keep active_jobs in memory for simple locking/status + # But the detailed state and history will be DB backed + cls._instance.job_lock = threading.Lock() return cls._instance + def _get_db(self): + return self.db_manager.get_db_session() + + def run_serialized(self, job_id: str, func, *args, **kwargs): + """Run a function with a global lock to ensure serial execution.""" + if self.should_cancel(job_id): + self.update_job(job_id, status="cancelled", message="Cancelled before start") + return + + self.update_job(job_id, message="Queued (Waiting for lock)...") + + with self.job_lock: + if self.should_cancel(job_id): + self.update_job(job_id, status="cancelled", message="Cancelled while queued") + return + + self.update_job(job_id, message="Starting...") + try: + func(job_id, *args, **kwargs) + except Exception as e: + logger.error(f"Error in serialized job {job_id}: {e}") + self.fail_job(job_id, str(e)) + + def request_pause(self, job_id: str) -> bool: + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job and job.status == 'running': + job.paused = True + job.status = 
'paused' + job.message = "Paused..." + db.commit() + return True + return False + + def resume_job(self, job_id: str) -> bool: + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job and job.paused: + job.paused = False + job.status = 'running' + job.message = "Resuming..." + db.commit() + return True + return False + + def should_pause(self, job_id: str) -> bool: + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + return job.paused if job else False + def create_job(self, operation: str) -> str: job_id = str(uuid.uuid4()) - self.active_jobs[job_id] = { - "id": job_id, - "operation": operation, - "status": "running", - "cancel_requested": False, - "start_time": datetime.now(), - "progress": 0, - "message": "Starting..." - } + new_job = Job( + id=job_id, + operation=operation, + status="running", + start_time=datetime.now(), + progress=0, + message="Starting..." + ) + + with self._get_db() as db: + db.add(new_job) + db.commit() + logger.info(f"Created job {job_id} for {operation}") return job_id + def _cleanup_jobs(self): + """Delete jobs older than 30 days.""" + try: + with self._get_db() as db: + cutoff = datetime.now() - timedelta(days=30) + db.query(Job).filter(Job.start_time < cutoff).delete() + db.commit() + except Exception as e: + logger.error(f"Error cleaning up jobs: {e}") + + def get_job_history(self, limit: int = 10, offset: int = 0) -> Dict: + # self._cleanup_jobs() # Optional: Run periodically, running on every fetch is okay-ish but maybe expensive? + # Let's run it async or less frequently? For now, run it here is safe. + self._cleanup_jobs() + + with self._get_db() as db: + # Sort desc by start_time + query = db.query(Job).order_by(desc(Job.start_time)) + total = query.count() + jobs = query.offset(offset).limit(limit).all() + + # Convert to dict + items = [] + for j in jobs: + items.append({ + "id": j.id, + "operation": j.operation, + "status": j.status, + "start_time": j.start_time.isoformat() if j.start_time else None, + "end_time": j.end_time.isoformat() if j.end_time else None, + "completed_at": j.end_time.isoformat() if j.end_time else None, # Compatibility + "duration_s": round((j.end_time - j.start_time).total_seconds(), 2) if j.end_time and j.start_time else None, + "progress": j.progress, + "message": j.message, + "result": j.result + }) + + return {"total": total, "items": items} + def get_job(self, job_id: str) -> Optional[Dict]: - return self.active_jobs.get(job_id) + with self._get_db() as db: + j = db.query(Job).filter(Job.id == job_id).first() + if j: + return { + "id": j.id, + "operation": j.operation, + "status": j.status, + "start_time": j.start_time, + "progress": j.progress, + "message": j.message, + "paused": j.paused, + "cancel_requested": j.cancel_requested + } + return None def get_active_jobs(self) -> List[Dict]: - return list(self.active_jobs.values()) + with self._get_db() as db: + active_jobs = db.query(Job).filter(Job.status.in_(['running', 'queued', 'paused'])).all() + return [{ + "id": j.id, + "operation": j.operation, + "status": j.status, + "start_time": j.start_time, + "progress": j.progress, + "message": j.message, + "paused": j.paused, + "cancel_requested": j.cancel_requested + } for j in active_jobs] def update_job(self, job_id: str, status: str = None, progress: int = None, message: str = None): - if job_id in self.active_jobs: - if status: - self.active_jobs[job_id]["status"] = status - if progress is not None: - self.active_jobs[job_id]["progress"] = progress - if 
message: - self.active_jobs[job_id]["message"] = message + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job: + if status: + job.status = status + if status in ["completed", "failed", "cancelled"] and not job.end_time: + job.end_time = datetime.now() + if progress is not None: + job.progress = progress + if message: + job.message = message + db.commit() def request_cancel(self, job_id: str) -> bool: - if job_id in self.active_jobs: - self.active_jobs[job_id]["cancel_requested"] = True - self.active_jobs[job_id]["message"] = "Cancelling..." - logger.info(f"Cancellation requested for job {job_id}") - return True + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job and job.status in ['running', 'queued', 'paused']: + # If paused, it's effectively stopped, so cancel immediately + if job.status == 'paused': + job.status = 'cancelled' + job.message = "Cancelled (while paused)" + job.end_time = datetime.now() + else: + job.cancel_requested = True + job.message = "Cancelling..." + + db.commit() + logger.info(f"Cancellation requested for job {job_id}") + return True return False def should_cancel(self, job_id: str) -> bool: - job = self.active_jobs.get(job_id) - return job and job.get("cancel_requested", False) + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + return job.cancel_requested if job else False - def complete_job(self, job_id: str): - if job_id in self.active_jobs: - del self.active_jobs[job_id] + def complete_job(self, job_id: str, result: Dict = None): + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job: + job.status = "completed" + job.progress = 100 + job.message = "Completed" + job.end_time = datetime.now() + if result: + job.result = result + db.commit() + + def fail_job(self, job_id: str, error: str): + with self._get_db() as db: + job = db.query(Job).filter(Job.id == job_id).first() + if job: + job.status = "failed" + job.message = error + job.end_time = datetime.now() + db.commit() job_manager = JobManager() diff --git a/FitnessSync/backend/src/services/postgresql_manager.py b/FitnessSync/backend/src/services/postgresql_manager.py index 6184252..6381cb2 100644 --- a/FitnessSync/backend/src/services/postgresql_manager.py +++ b/FitnessSync/backend/src/services/postgresql_manager.py @@ -6,7 +6,8 @@ import os from contextlib import contextmanager # Create a base class for declarative models -Base = declarative_base() +# Import Base from the models package to ensure we share the same metadata +from ..models.base import Base class PostgreSQLManager: def __init__(self, database_url: str = None): @@ -33,6 +34,8 @@ class PostgreSQLManager: from ..models.activity import Activity from ..models.health_metric import HealthMetric from ..models.sync_log import SyncLog + from ..models.activity_state import GarminActivityState + from ..models.health_state import HealthSyncState # Create all tables Base.metadata.create_all(bind=self.engine) diff --git a/FitnessSync/backend/src/services/scheduler.py b/FitnessSync/backend/src/services/scheduler.py new file mode 100644 index 0000000..fd7eec3 --- /dev/null +++ b/FitnessSync/backend/src/services/scheduler.py @@ -0,0 +1,161 @@ + +import threading +import time +import json +import logging +from datetime import datetime, timedelta +from sqlalchemy import or_ +from ..services.postgresql_manager import PostgreSQLManager +from ..models.scheduled_job import ScheduledJob +from ..services.job_manager 
import job_manager +from ..utils.config import config +from ..tasks.definitions import ( + run_activity_sync_task, + run_metrics_sync_task, + run_health_scan_job, + run_fitbit_sync_job, + run_garmin_upload_job, + run_health_sync_job, + run_activity_backfill_job +) + +logger = logging.getLogger(__name__) + +class SchedulerService: + def __init__(self): + self._stop_event = threading.Event() + self._thread = None + self.db_manager = PostgreSQLManager(config.DATABASE_URL) + + # Map job_type string to (function, default_params) + self.TASK_MAP = { + 'activity_sync': run_activity_sync_task, + 'metrics_sync': run_metrics_sync_task, + 'health_scan': run_health_scan_job, + 'fitbit_weight_sync': run_fitbit_sync_job, + 'garmin_weight_upload': run_garmin_upload_job, + 'health_sync_pending': run_health_sync_job, + 'activity_backfill_full': run_activity_backfill_job + } + + def start(self): + """Start the scheduler background thread.""" + if self._thread and self._thread.is_alive(): + return + logger.info("Starting Scheduler Service...") + self.ensure_defaults() + self._stop_event.clear() + self._thread = threading.Thread(target=self._run_loop, daemon=True) + self._thread.start() + + def stop(self): + """Stop the scheduler.""" + logger.info("Stopping Scheduler Service...") + self._stop_event.set() + if self._thread: + self._thread.join(timeout=5) + + def ensure_defaults(self): + """Ensure default schedules exist.""" + with self.db_manager.get_db_session() as session: + try: + # Default 1: Fitbit Weight Sync (30 days) every 6 hours + job_type = 'fitbit_weight_sync' + name = 'Fitbit Weight Sync (30d)' + + existing = session.query(ScheduledJob).filter_by(job_type=job_type).first() + if not existing: + logger.info(f"Creating default schedule: {name}") + new_job = ScheduledJob( + job_type=job_type, + name=name, + interval_minutes=360, # 6 hours + params=json.dumps({"days_back": 30}), + enabled=True + ) + session.add(new_job) + session.commit() + except Exception as e: + logger.error(f"Error checking default schedules: {e}") + + def _run_loop(self): + logger.info("Scheduler loop started.") + while not self._stop_event.is_set(): + try: + self._check_and_run_jobs() + except Exception as e: + logger.error(f"Error in scheduler loop: {e}", exc_info=True) + + # Sleep for 60 seconds, checking for stop event + if self._stop_event.wait(60): + break + logger.info("Scheduler loop exited.") + + def _check_and_run_jobs(self): + with self.db_manager.get_db_session() as session: + now = datetime.now() + + # Find due jobs + # due if enabled AND (next_run <= now OR next_run is NULL) + # also, if last_run is NULL, we might want to run immediately or schedule for later? + # Let's run immediately if next_run is NULL (freshly created). + + jobs = session.query(ScheduledJob).filter( + ScheduledJob.enabled == True, + or_( + ScheduledJob.next_run <= now, + ScheduledJob.next_run == None + ) + ).all() + + for job in jobs: + # Double check locking? For now, simple single-instance app is assumed. + # If we had multiple workers, we'd need 'FOR UPDATE SKIP LOCKED' or similar. + + self._execute_job(session, job) + + session.commit() + + def _execute_job(self, session, job_record): + logger.info(f"Executing scheduled job: {job_record.name} ({job_record.job_type})") + + task_func = self.TASK_MAP.get(job_record.job_type) + if not task_func: + logger.error(f"Unknown job type: {job_record.job_type}") + # Disable to prevent spam loop? 
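+            # (next_run is not advanced on this early return, so the job stays
+            # due and is retried on every 60 s scheduler tick.)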
+ # job_record.enabled = False + return + + # Parse params + params = {} + if job_record.params: + try: + params = json.loads(job_record.params) + except: + logger.error(f"Invalid params for job {job_record.id}") + + # Create Job via Manager + job_id = job_manager.create_job(f"{job_record.name} (Scheduled)") + + # Launch task in thread (don't block scheduler loop) + # We pass self.db_manager.get_db_session factory + # Note: We must duplicate the factory access b/c `run_*` definitions use `with factory() as db:` + # passing self.db_manager.get_db_session is correct. + + t = threading.Thread( + target=task_func, + kwargs={ + "job_id": job_id, + "db_session_factory": self.db_manager.get_db_session, + **params + } + ) + t.start() + + # Update next_run + job_record.last_run = datetime.now() + job_record.next_run = datetime.now() + timedelta(minutes=job_record.interval_minutes) + # session commit happens in caller loop + +# Global instance +scheduler = SchedulerService() diff --git a/FitnessSync/backend/src/services/sync/__init__.py b/FitnessSync/backend/src/services/sync/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-311.pyc b/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..6cd416f Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-313.pyc b/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000..56a78c9 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/__init__.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-311.pyc b/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-311.pyc new file mode 100644 index 0000000..f21d257 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-313.pyc b/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-313.pyc new file mode 100644 index 0000000..14114f2 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/activity.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-311.pyc b/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-311.pyc new file mode 100644 index 0000000..033b423 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-313.pyc b/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-313.pyc new file mode 100644 index 0000000..aad0af7 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/health.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-311.pyc b/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000..3410252 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-313.pyc b/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-313.pyc new file mode 100644 index 
0000000..14af451 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/utils.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-311.pyc b/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-311.pyc new file mode 100644 index 0000000..08bea3a Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-313.pyc b/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-313.pyc new file mode 100644 index 0000000..9d06e94 Binary files /dev/null and b/FitnessSync/backend/src/services/sync/__pycache__/weight.cpython-313.pyc differ diff --git a/FitnessSync/backend/src/services/sync/activity.py b/FitnessSync/backend/src/services/sync/activity.py new file mode 100644 index 0000000..bb737b7 --- /dev/null +++ b/FitnessSync/backend/src/services/sync/activity.py @@ -0,0 +1,305 @@ +import logging +from datetime import datetime, timedelta +from typing import Dict, Any, Optional + +from sqlalchemy import func +from sqlalchemy.orm import Session + +from ...models.activity import Activity +from ...models.activity_state import GarminActivityState +from ...models.sync_log import SyncLog +from ...services.garmin.client import GarminClient +from ...services.job_manager import job_manager + +logger = logging.getLogger(__name__) + +class GarminActivitySync: + def __init__(self, db_session: Session, garmin_client: GarminClient): + self.db_session = db_session + self.garmin_client = garmin_client + self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}") + + def _check_pause(self, job_id: str) -> bool: + """ + Checks if job is paused. Blocks if paused until resumed/cancelled. + Returns False if job is cancelled, True otherwise. + """ + if not job_id: return True + + # Initial check to avoid import overhead if not paused + if job_manager.should_pause(job_id): + self.logger.info(f"Job {job_id} paused. Waiting...") + import time + while job_manager.should_pause(job_id): + if job_manager.should_cancel(job_id): + self.logger.info(f"Job {job_id} cancelled while paused.") + return False + time.sleep(1) + self.logger.info(f"Job {job_id} resumed.") + + # Also check cancel here for convenience + return not job_manager.should_cancel(job_id) + + def scan_activities(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]: + """ + Fetches metadata from Garmin and updates GarminActivityState table. + Does NOT download activity files. 
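+        Resulting sync_status values: 'new' when no downloaded Activity row
+        exists, 'synced' when one does; stale states are flipped accordingly
+        on re-scan.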
+ """ + start_date = (datetime.now() - timedelta(days=days_back)).strftime('%Y-%m-%d') + end_date = datetime.now().strftime('%Y-%m-%d') + + self.logger.info(f"Scanning activities from {start_date} to {end_date}") + if job_id: + job_manager.update_job(job_id, message=f"Fetching activities list ({days_back} days)...") + + try: + garmin_activities = self.garmin_client.get_activities(start_date, end_date) + self.logger.info(f"Fetched {len(garmin_activities)} activities from Garmin for scanning.") + except Exception as e: + self.logger.error(f"Error fetching activities for scan: {e}") + raise + + stats = {'new': 0, 'updated': 0, 'synced': 0, 'total': len(garmin_activities)} + total_count = len(garmin_activities) + + if job_id: + job_manager.update_job(job_id, message=f"Processing {total_count} activities...", progress=0) + + for idx, activity_data in enumerate(garmin_activities): + if job_id: + # Check for pause/cancel + if not self._check_pause(job_id): + return stats + + # Update progress every 10 items or so to avoid spamming DB? + # Or just every item since it's fast? DB write is fast. + if idx % 5 == 0: + progress = int((idx / total_count) * 100) + job_manager.update_job(job_id, progress=progress, message=f"Scanning {idx}/{total_count}") + + activity_id = str(activity_data.get('activityId')) + if not activity_id: continue + + # Check actual download status + existing_main = self.db_session.query(Activity).filter_by(garmin_activity_id=activity_id).first() + is_downloaded = existing_main and existing_main.download_status == 'downloaded' and existing_main.file_content is not None + + # Determine correct status + current_status = 'synced' if is_downloaded else 'new' + + # Check State Table + state = self.db_session.query(GarminActivityState).filter_by(garmin_activity_id=activity_id).first() + + if not state: + # Insert new state + activity_type = activity_data.get('activityType', {}).get('typeKey', 'unknown') + start_time_str = activity_data.get('startTimeLocal') + start_time = datetime.fromisoformat(start_time_str) if start_time_str else None + + state = GarminActivityState( + garmin_activity_id=activity_id, + activity_name=activity_data.get('activityName'), + activity_type=activity_type, + start_time=start_time, + sync_status=current_status + ) + self.db_session.add(state) + if current_status == 'new': stats['new'] += 1 + else: stats['synced'] += 1 + else: + # Update existing state + if state.sync_status != 'synced' and is_downloaded: + state.sync_status = 'synced' + stats['synced'] += 1 # Transitioned to synced + elif state.sync_status == 'synced' and not is_downloaded: + state.sync_status = 'new' + stats['new'] += 1 # Regression? 
or manual deletion + + # Update last seen + state.last_seen = datetime.now() + + # Backfill missing duration/metrics on main Activity record if available + if existing_main: + modified = False + if existing_main.duration is None and activity_data.get('duration'): + existing_main.duration = activity_data.get('duration') + modified = True + + if existing_main.distance is None and activity_data.get('distanceMeters'): + existing_main.distance = activity_data.get('distanceMeters') + modified = True + + if existing_main.avg_hr is None and activity_data.get('averageHR'): + existing_main.avg_hr = activity_data.get('averageHR') + modified = True + + if existing_main.avg_speed is None and activity_data.get('averageSpeed'): + existing_main.avg_speed = activity_data.get('averageSpeed') + modified = True + + if existing_main.avg_cadence is None: + # Try various cadence keys + cadence = ( + activity_data.get('averageRunningCadenceInStepsPerMinute') or + activity_data.get('averageBikingCadenceInRevPerMinute') or + activity_data.get('averageSwimCadenceInStrokesPerMinute') + ) + if cadence: + existing_main.avg_cadence = cadence + modified = True + + if modified: + stats['updated'] += 1 + + self.db_session.commit() + self.logger.info(f"Scan complete: {stats}") + return stats + + def sync_pending_activities(self, limit: int = None, job_id: str = None) -> Dict[str, int]: + """ + Syncs activities marked as 'new' or 'updated' in GarminActivityState. + """ + query = self.db_session.query(GarminActivityState).filter( + GarminActivityState.sync_status.in_(['new', 'updated']) + ).order_by(GarminActivityState.start_time.desc()) + + if limit: + query = query.limit(limit) + + pending_activities = query.all() + total_count = len(pending_activities) + processed_count = 0 + failed_count = 0 + + self.logger.info(f"Found {total_count} pending activities to sync (limit={limit})") + + if job_id: + job_manager.update_job(job_id, message=f"Starting sync for {total_count} activities...", progress=0) + + for idx, state in enumerate(pending_activities): + # Check for cancellation/pause + if job_id and not self._check_pause(job_id): + self.logger.info("Sync pending activities cancelled by user.") + break + + if job_id: + progress = int((idx / total_count) * 100) + job_manager.update_job(job_id, message=f"Syncing {idx+1}/{total_count}: {state.activity_name or state.garmin_activity_id}", progress=progress) + + try: + # 1. Ensure Activity record exists + activity = self.db_session.query(Activity).filter_by(garmin_activity_id=state.garmin_activity_id).first() + if not activity: + activity = Activity( + garmin_activity_id=state.garmin_activity_id, + activity_name=state.activity_name, + activity_type=state.activity_type, + start_time=state.start_time, + download_status='pending' + ) + self.db_session.add(activity) + + # Fetch full metadata to populate metrics (since state lacks them) + try: + # Accessing internal client for direct call - TODO: Add wrapper to GarminClient + full_details = self.garmin_client.client.get_activity(state.garmin_activity_id) + if full_details: + self._update_activity_metrics(activity, full_details) + except Exception as meta_e: + self.logger.warning(f"Failed to fetch metadata for {state.garmin_activity_id}: {meta_e}") + + self.db_session.flush() + + # 2. 
+
+    def sync_pending_activities(self, limit: int = None, job_id: str = None) -> Dict[str, int]:
+        """
+        Syncs activities marked as 'new' or 'updated' in GarminActivityState.
+        """
+        query = self.db_session.query(GarminActivityState).filter(
+            GarminActivityState.sync_status.in_(['new', 'updated'])
+        ).order_by(GarminActivityState.start_time.desc())
+
+        if limit:
+            query = query.limit(limit)
+
+        pending_activities = query.all()
+        total_count = len(pending_activities)
+        processed_count = 0
+        failed_count = 0
+
+        self.logger.info(f"Found {total_count} pending activities to sync (limit={limit})")
+
+        if job_id:
+            job_manager.update_job(job_id, message=f"Starting sync for {total_count} activities...", progress=0)
+
+        for idx, state in enumerate(pending_activities):
+            # Check for cancellation/pause
+            if job_id and not self._check_pause(job_id):
+                self.logger.info("Sync pending activities cancelled by user.")
+                break
+
+            if job_id:
+                progress = int((idx / total_count) * 100)
+                job_manager.update_job(job_id, message=f"Syncing {idx+1}/{total_count}: {state.activity_name or state.garmin_activity_id}", progress=progress)
+
+            try:
+                # 1. Ensure an Activity record exists
+                activity = self.db_session.query(Activity).filter_by(garmin_activity_id=state.garmin_activity_id).first()
+                if not activity:
+                    activity = Activity(
+                        garmin_activity_id=state.garmin_activity_id,
+                        activity_name=state.activity_name,
+                        activity_type=state.activity_type,
+                        start_time=state.start_time,
+                        download_status='pending'
+                    )
+                    self.db_session.add(activity)
+
+                # Fetch full metadata to populate metrics (the state table lacks them)
+                try:
+                    # Accessing the internal client directly - TODO: add a wrapper to GarminClient
+                    full_details = self.garmin_client.client.get_activity(state.garmin_activity_id)
+                    if full_details:
+                        self._update_activity_metrics(activity, full_details)
+                except Exception as meta_e:
+                    self.logger.warning(f"Failed to fetch metadata for {state.garmin_activity_id}: {meta_e}")
+
+                self.db_session.flush()
+
+                # 2. Download the file content (reuse the redownload logic)
+                success = self.redownload_activity(state.garmin_activity_id)
+
+                if success:
+                    state.sync_status = 'synced'
+                    state.last_seen = datetime.now()
+                    processed_count += 1
+                else:
+                    failed_count += 1
+
+                self.db_session.commit()
+
+            except Exception as e:
+                self.logger.error(f"Error syncing pending activity {state.garmin_activity_id}: {e}", exc_info=True)
+                failed_count += 1
+                self.db_session.rollback()
+
+        if job_id:
+            job_manager.complete_job(job_id)
+
+        return {"processed": processed_count, "failed": failed_count}
+
+    def redownload_activity(self, activity_id: str) -> bool:
+        """
+        Force re-download of an activity file from Garmin.
+        """
+        self.logger.info(f"Redownloading activity {activity_id}...")
+        try:
+            # Find the activity
+            activity = self.db_session.query(Activity).filter_by(garmin_activity_id=activity_id).first()
+            if not activity:
+                self.logger.error(f"Activity {activity_id} not found locally.")
+                return False
+
+            # Attempt download with fallback order
+            downloaded = False
+            for fmt in ['fit', 'original', 'tcx', 'gpx']:
+                file_content = self.garmin_client.download_activity(activity_id, file_type=fmt)
+                if file_content:
+                    activity.file_content = file_content
+                    activity.file_type = fmt
+                    activity.download_status = 'downloaded'
+                    activity.downloaded_at = datetime.now()
+                    self.logger.info(f"✓ Successfully redownloaded {activity_id} as {fmt}")
+                    downloaded = True
+                    break
+
+            if not downloaded:
+                self.logger.warning(f"Failed to redownload {activity_id}")
+                return False
+
+            self.db_session.commit()
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error redownloading activity {activity_id}: {e}", exc_info=True)
+            self.db_session.rollback()
+            return False
+
+    def _update_activity_metrics(self, activity: Activity, data: Dict[str, Any]):
+        """Populate extended metrics from Garmin JSON."""
+        # Duration override if available (sometimes differs from the file header)
+        if data.get('duration'):
+            activity.duration = data.get('duration')
+
+        activity.distance = data.get('distanceMeters')
+        activity.calories = data.get('calories')
+        activity.avg_hr = data.get('averageHR')
+        activity.max_hr = data.get('maxHR')
+        activity.avg_speed = data.get('averageSpeed')
+        activity.max_speed = data.get('maxSpeed')
+        activity.elevation_gain = data.get('totalElevationGain')
+        activity.elevation_loss = data.get('totalElevationLoss')
+        activity.steps = data.get('steps')
+        activity.aerobic_te = data.get('trainingEffect')
+        activity.anaerobic_te = data.get('anaerobicTrainingEffect')
+        activity.vo2_max = data.get('vO2MaxValue')
+        activity.avg_power = data.get('avgPower')
+        activity.max_power = data.get('maxPower')
+        activity.norm_power = data.get('normalizedPower')
+        activity.tss = data.get('trainingStressScore')
+
+        # Cadence handling (try the sport-specific keys in turn)
+        activity.avg_cadence = (
+            data.get('averageRunningCadenceInStepsPerMinute') or
+            data.get('averageBikingCadenceInRevPerMinute') or
+            data.get('averageSwimCadenceInStrokesPerMinute')
+        )
+        activity.max_cadence = (
+            data.get('maxRunningCadenceInStepsPerMinute') or
+            data.get('maxBikingCadenceInRevPerMinute') or
+            data.get('maxSwimCadenceInStrokesPerMinute')
+        )
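Because redownload_activity clears nothing on failure, a sketch of how a single activity could be re-queued for the next pending pass is simply a state flip. The import path is assumed from the repo layout (src/models/activity_state.py); the session wiring is an assumption:

    from src.models.activity_state import GarminActivityState

    def requeue_activity(session, garmin_activity_id: str) -> bool:
        # Flipping the state row back to 'updated' makes the next
        # sync_pending_activities() pass pick this activity up again.
        state = session.query(GarminActivityState).filter_by(
            garmin_activity_id=garmin_activity_id
        ).first()
        if not state:
            return False
        state.sync_status = 'updated'  # 'new' and 'updated' are treated alike
        session.commit()
        return True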
diff --git a/FitnessSync/backend/src/services/sync/health.py b/FitnessSync/backend/src/services/sync/health.py
new file mode 100644
index 0000000..06a6521
--- /dev/null
+++ b/FitnessSync/backend/src/services/sync/health.py
@@ -0,0 +1,392 @@
+import json
+import logging
+from datetime import datetime, timedelta
+from typing import Dict, Any, List, Optional
+
+from sqlalchemy.orm import Session
+
+from ...models.health_metric import HealthMetric
+from ...models.health_state import HealthSyncState
+from ...models.sync_log import SyncLog
+from ...services.garmin.client import GarminClient
+from ...services.job_manager import job_manager
+from .utils import update_or_create_health_metric
+
+logger = logging.getLogger(__name__)
+
+class GarminHealthSync:
+    def __init__(self, db_session: Session, garmin_client: GarminClient):
+        self.db_session = db_session
+        self.garmin_client = garmin_client
+        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+    def _check_pause(self, job_id: str) -> bool:
+        """
+        Checks if the job is paused; blocks until it is resumed or cancelled.
+        Returns False if the job is cancelled, True otherwise.
+        """
+        if not job_id:
+            return True
+
+        if job_manager.should_pause(job_id):
+            self.logger.info(f"Job {job_id} paused. Waiting...")
+            import time
+            while job_manager.should_pause(job_id):
+                if job_manager.should_cancel(job_id):
+                    self.logger.info(f"Job {job_id} cancelled while paused.")
+                    return False
+                time.sleep(1)
+            self.logger.info(f"Job {job_id} resumed.")
+
+        return not job_manager.should_cancel(job_id)
+
+    def scan_health_metrics(self, days_back: int = 30) -> Dict[str, int]:
+        """
+        Scans for gaps in health metrics for the last 'days_back' days.
+        Populates HealthSyncState with 'new' for missing dates.
+        Does NOT contact the Garmin API (gap analysis only).
+        """
+        self.logger.info(f"Scanning health metrics gaps for last {days_back} days")
+
+        # 'weight' is excluded: it is handled by WeightSyncService
+        metrics = ['steps', 'hrv', 'sleep', 'stress', 'intensity', 'hydration', 'body_battery',
+                   'respiration', 'spo2', 'floors', 'sleep_score', 'vo2_max']
+        stats = {m: 0 for m in metrics}
+
+        today = datetime.now().date()
+        min_date = today - timedelta(days=days_back)
+
+        for metric in metrics:
+            # Find the last synced date for this metric
+            last_record = self.db_session.query(HealthMetric).filter_by(
+                metric_type=metric,
+                source='garmin'
+            ).order_by(HealthMetric.date.desc()).first()
+
+            # Start from min_date, or the day after the last record
+            start_scan = min_date
+            if last_record:
+                last_date = last_record.date
+                if isinstance(last_date, datetime):
+                    last_date = last_date.date()
+
+                if last_date >= min_date:
+                    start_scan = last_date + timedelta(days=1)
+
+            current = start_scan
+            while current < today:  # Only completed days; today is handled separately
+                # Check if a state row exists
+                state = self.db_session.query(HealthSyncState).filter_by(
+                    date=current, metric_type=metric, source='garmin'
+                ).first()
+
+                if not state:
+                    state = HealthSyncState(
+                        date=current,
+                        metric_type=metric,
+                        source='garmin',
+                        sync_status='new'
+                    )
+                    self.db_session.add(state)
+                    stats[metric] += 1
+                elif state.sync_status == 'synced':
+                    state.sync_status = 'new'
+                    stats[metric] += 1
+
+                current += timedelta(days=1)
+
+            # Always check "today" (it may gain data as the day progresses)
+            state_today = self.db_session.query(HealthSyncState).filter_by(
+                date=today, metric_type=metric, source='garmin'
+            ).first()
+            if not state_today:
+                state_today = HealthSyncState(date=today, metric_type=metric, source='garmin', sync_status='new')
+                self.db_session.add(state_today)
+                stats[metric] += 1
+            else:
+                if state_today.sync_status == 'synced':
+                    state_today.sync_status = 'updated'
+                    stats[metric] += 1
+
+        self.db_session.commit()
+        self.logger.info(f"Health scan complete: found gaps {stats}")
+        return stats
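The per-metric date walk above reduces to "every date from the day after the last stored metric up to today, floored at days_back". An illustration with hypothetical names, independent of the ORM:

    from datetime import date, timedelta

    def dates_needing_state(last_synced, today, days_back):
        # last_synced: date or None; mirrors the last HealthMetric row per metric
        floor = today - timedelta(days=days_back)
        start = floor
        if last_synced and last_synced >= floor:
            start = last_synced + timedelta(days=1)
        d = start
        while d <= today:
            yield d
            d += timedelta(days=1)

    # e.g. list(dates_needing_state(date(2026, 1, 5), date(2026, 1, 8), 30))
    # -> [2026-01-06, 2026-01-07, 2026-01-08]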
+
+    def sync_health_metrics(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]:
+        """Sync health metrics from Garmin to the local database."""
+        start = (datetime.now() - timedelta(days=days_back)).date()
+        end = datetime.now().date()
+
+        self.logger.info(f"=== Starting sync_health_metrics with days_back={days_back} ===")
+        sync_log = SyncLog(operation="health_metric_sync", status="started", start_time=datetime.now())
+        self.db_session.add(sync_log)
+        self.db_session.commit()
+
+        processed_count = 0
+        failed_count = 0
+
+        # 'weight' is excluded (handled by WeightSyncService)
+        metrics_breakdown = {
+            'steps': {'new': 0, 'updated': 0}, 'hrv': {'new': 0, 'updated': 0},
+            'sleep': {'new': 0, 'updated': 0}, 'stress': {'new': 0, 'updated': 0},
+            'intensity': {'new': 0, 'updated': 0}, 'hydration': {'new': 0, 'updated': 0},
+            'body_battery': {'new': 0, 'updated': 0}
+        }
+
+        stats_counters = {k: {"total": 0, "synced": 0} for k in metrics_breakdown.keys()}
+        stats_counters["floors"] = {"total": 0, "synced": 0}
+        stats_counters["spo2"] = {"total": 0, "synced": 0}
+        stats_counters["respiration"] = {"total": 0, "synced": 0}
+        stats_counters["sleep_score"] = {"total": 0, "synced": 0}
+        stats_counters["vo2_max"] = {"total": 0, "synced": 0}
+
+        try:
+            total_days = (end - start).days + 1
+            current_date = start
+            days_processed = 0
+
+            while current_date <= end:
+                # Check cancellation/pause
+                if job_id and not self._check_pause(job_id):
+                    self.logger.info("Sync cancelled by user.")
+                    sync_log.status = "cancelled"
+                    sync_log.message = "Cancelled by user"
+                    break
+
+                date_str = current_date.strftime('%Y-%m-%d')
+
+                if job_id:
+                    progress = int((days_processed / total_days) * 100)
+                    job_manager.update_job(job_id, message=f"Syncing metrics for {date_str}", progress=progress)
+
+                # Fetch ALL metrics for this single day
+                day_metrics = self.garmin_client.get_all_metrics_for_date(date_str)
+
+                # Check cancellation again after the network call
+                if job_id and not self._check_pause(job_id):
+                    sync_log.status = "cancelled"
+                    break
+
+                # Process the individual metrics from the dict
+                self._process_day_metrics_dict(current_date, day_metrics, metrics_breakdown, stats_counters)
+
+                current_date += timedelta(days=1)
+                days_processed += 1
+
+                # Optional: sleep here to avoid rate-limit issues
+                # import time; time.sleep(0.5)
+
+            if sync_log.status != "cancelled":
+                sync_log.status = "completed_with_errors" if failed_count > 0 else "completed"
+                processed_count = sum(v['synced'] for v in stats_counters.values())
+                sync_log.records_processed = processed_count
+                sync_log.records_failed = failed_count
+
+                # Save stats to the log message
+                stats_list = []
+                for k, v in stats_counters.items():
+                    if v["total"] > 0:  # Only report metrics that were actually found
+                        stats_list.append({"type": k.replace('_', ' ').title(), "source": "Garmin", "total": v["total"], "synced": v["synced"]})
+
+                sync_log.message = json.dumps({"summary": stats_list})
+
+        except Exception as e:
+            if str(e) == "Cancelled by user":
+                self.logger.info("Sync cancelled by user.")
+                sync_log.status = "cancelled"
+                sync_log.message = "Cancelled by user"
+            else:
+                self.logger.error(f"Major error during health metrics sync: {e}", exc_info=True)
+                sync_log.status = "failed"
+                sync_log.message = str(e)
+
+        sync_log.end_time = datetime.now()
+        self.db_session.commit()
+
+        if job_id:
+            job_manager.complete_job(job_id)
+
+        self.logger.info("=== Finished sync_health_metrics ===")
+        return {"processed": processed_count, "failed": failed_count}
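For reference, the dictionary shape that _process_day_metrics_dict below expects, with illustrative values (the keys follow the parsing code; the real payloads come from GarminClient.get_all_metrics_for_date):

    example_day_metrics = {
        "steps":        {"totalSteps": 10432},
        "intensity":    {"moderateIntensityMinutes": 25, "vigorousIntensityMinutes": 10},
        "stress":       {"overallStressLevel": 31},
        "hrv":          {"lastNightAvg": 52},
        "sleep":        {"dailySleepDTO": {"sleepTimeSeconds": 27360}},
        "hydration":    {"valueInML": 1800},
        "body_battery": {"bodyBatteryValuesArray": [[1767225600000, 74], [1767229200000, 81]]},
        "floors":       {"floorsClimbed": 12},
        "respiration":  {"avgRespirationValue": 14.2},
        "spo2":         {"averageSpO2": 96},
    }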
+
+    def _process_day_metrics_dict(self, date: datetime.date, metrics: Dict[str, Any], breakdown: Dict, stats: Dict):
+        """Helper to process the dictionary returned by get_all_metrics_for_date."""
+
+        def update_stat(key, status):
+            new = (status == 'new')
+            updated = (status == 'updated')
+
+            if key in breakdown:
+                if new: breakdown[key]['new'] += 1
+                if updated: breakdown[key]['updated'] += 1
+            if key in stats:
+                stats[key]["total"] += 1
+                if status != 'error':
+                    stats[key]["synced"] += 1
+
+        # Steps
+        if metrics.get("steps"):
+            s = metrics["steps"]
+            status = update_or_create_health_metric(self.db_session, 'steps', date, float(s.get('totalSteps', 0)), 'steps')
+            update_stat('steps', status)
+
+        # Intensity
+        if metrics.get("intensity"):
+            i = metrics["intensity"]
+            mod = i.get('moderateIntensityMinutes', 0) or 0
+            vig = i.get('vigorousIntensityMinutes', 0) or 0
+            status = update_or_create_health_metric(self.db_session, 'intensity_minutes', date, float(mod + vig), 'minutes')
+            update_stat('intensity', status)
+
+        # Stress
+        if metrics.get("stress") and metrics["stress"].get("overallStressLevel"):
+            status = update_or_create_health_metric(self.db_session, 'stress', date, float(metrics["stress"]["overallStressLevel"]), 'score')
+            update_stat('stress', status)
+
+        # HRV
+        if metrics.get("hrv") and metrics["hrv"].get("lastNightAvg"):
+            status = update_or_create_health_metric(self.db_session, 'hrv', date, float(metrics["hrv"]["lastNightAvg"]), 'ms')
+            update_stat('hrv', status)
+
+        # Sleep
+        if metrics.get("sleep") and metrics["sleep"].get("dailySleepDTO"):
+            dto = metrics["sleep"]["dailySleepDTO"]
+            if dto.get("sleepTimeSeconds"):
+                status = update_or_create_health_metric(self.db_session, 'sleep', date, float(dto["sleepTimeSeconds"]), 'seconds')
+                update_stat('sleep', status)
+
+        # Hydration
+        if metrics.get("hydration") and metrics["hydration"].get("valueInML"):
+            status = update_or_create_health_metric(self.db_session, 'hydration', date, float(metrics["hydration"]["valueInML"]), 'ml')
+            update_stat('hydration', status)
+
+        # Body Battery
+        if metrics.get("body_battery") and metrics["body_battery"].get("bodyBatteryValuesArray"):
+            vals = [v[1] for v in metrics["body_battery"]["bodyBatteryValuesArray"] if v and len(v) > 1 and isinstance(v[1], (int, float))]
+            if vals:
+                status = update_or_create_health_metric(self.db_session, 'body_battery_max', date, float(max(vals)), 'percent')
+                update_stat('body_battery', status)
+
+        # Floors
+        if metrics.get("floors"):
+            val = metrics["floors"].get('floorsClimbed')
+            if val is not None:
+                status = update_or_create_health_metric(self.db_session, 'floors', date, float(val), 'floors')
+                update_stat('floors', status)
+
+        # Respiration
+        if metrics.get("respiration"):
+            val = metrics["respiration"].get('avgRespirationValue')
+            if val:
+                status = update_or_create_health_metric(self.db_session, 'respiration', date, float(val), 'brpm')
+                update_stat('respiration', status)
+
+        # SpO2
+        if metrics.get("spo2"):
+            val = metrics["spo2"].get('averageSpO2')
+            if val:
+                status = update_or_create_health_metric(self.db_session, 'spo2', date, float(val), 'percent')
+                update_stat('spo2', status)
+
+        # Sleep Score
+        if metrics.get("sleep_score"):
+            # The client's get_all_metrics_for_date populates this key, but the
+            # payload shape varies: it may arrive pre-flattened or still carry
+            # the raw dailySleepDTO structure, so try both shapes.
+            val = None
+            data = metrics["sleep_score"]
+            if hasattr(data, 'daily_sleep_dto') and data.daily_sleep_dto.sleep_scores:
+                val = data.daily_sleep_dto.sleep_scores.get('overall')
+            elif isinstance(data, dict):
+                # Try to find the score
+                val = data.get('value')  # Pre-flattened shape
+                if not val and 'dailySleepDTO' in data:
+                    val = data['dailySleepDTO'].get('sleepScores', {}).get('overall')
+
+            if val:
+                status = update_or_create_health_metric(self.db_session, 'sleep_score', date, float(val.get('value', val) if isinstance(val, dict) else val), 'score')
+                update_stat('sleep_score', status)
+
+        # VO2 Max
+        if metrics.get("vo2_max"):
+            val = None
+            data = metrics["vo2_max"]
+            if isinstance(data, dict):
+                val = data.get('generic', {}).get('vo2MaxPreciseValue')
+
+            if val:
+                status = update_or_create_health_metric(self.db_session, 'vo2_max', date, float(val), 'ml/kg/min', detailed_data=data)
+                update_stat('vo2_max', status)
+
+    def sync_pending_health_metrics(self, limit: int = None, job_id: str = None) -> Dict[str, int]:
+        """
+        Syncs health metrics marked as 'new' or 'updated' in HealthSyncState.
+        """
+        query = self.db_session.query(HealthSyncState).filter(
+            HealthSyncState.sync_status.in_(['new', 'updated']),
+            HealthSyncState.source == 'garmin'
+        ).order_by(HealthSyncState.date.desc())
+
+        if limit:
+            query = query.limit(limit)
+
+        pending_items = query.all()
+        total_count = len(pending_items)
+        processed_count = 0
+        failed_count = 0
+
+        self.logger.info(f"Found {total_count} pending health metrics to sync")
+
+        if job_id:
+            job_manager.update_job(job_id, message=f"Starting health sync for {total_count} items...", progress=0)
+
+        for idx, state in enumerate(pending_items):
+            # Check for cancellation/pause
+            if job_id and not self._check_pause(job_id):
+                break
+
+            if job_id and idx % 5 == 0:
+                progress = int((idx / total_count) * 100)
+                job_manager.update_job(job_id, message=f"Syncing {state.metric_type} for {state.date}", progress=progress)
+
+            try:
+                date_str = state.date.strftime('%Y-%m-%d')
+                data = self.garmin_client.get_metric_data(date_str, state.metric_type)
+
+                if data:
+                    # Wrap the single payload in a one-key dict so that
+                    # _process_day_metrics_dict can be reused; it owns the tricky
+                    # parsing (spo2, vo2_max, sleep-score shapes), so calling the
+                    # utils directly would duplicate that logic.
+ pseudo_dict = {state.metric_type: data} + + # stats/breakdown are required args + dummy_stats = {state.metric_type: {"total":0, "synced":0}} + dummy_breakdown = {} + + # Call helper + self._process_day_metrics_dict(state.date, pseudo_dict, dummy_breakdown, dummy_stats) + + # Check if it was actually synced (not error) + if dummy_stats[state.metric_type]["synced"] > 0: + state.sync_status = 'synced' + state.last_seen = datetime.now() + processed_count += 1 + else: + # Not synced (error) + failed_count += 1 + else: + # No data found (maybe user didn't wear device) + state.sync_status = 'synced' + processed_count += 1 + + self.db_session.commit() + + except Exception as e: + self.logger.error(f"Error syncing {state.metric_type} for {state.date}: {e}") + failed_count += 1 + self.db_session.rollback() + + if job_id: + job_manager.complete_job(job_id) + + return {"processed": processed_count, "failed": failed_count} diff --git a/FitnessSync/backend/src/services/sync/utils.py b/FitnessSync/backend/src/services/sync/utils.py new file mode 100644 index 0000000..8c35e7e --- /dev/null +++ b/FitnessSync/backend/src/services/sync/utils.py @@ -0,0 +1,56 @@ +import json +import logging +from datetime import datetime +from typing import Dict, Optional, Union + +from sqlalchemy import func +from sqlalchemy.orm import Session + +from ...models.health_metric import HealthMetric + +logger = logging.getLogger(__name__) + +def update_or_create_health_metric( + session: Session, + metric_type: str, + date: datetime.date, + value: float, + unit: str, + source: str = 'garmin', + detailed_data: Optional[Dict] = None +) -> str: + """Helper to update or create a health metric record. Returns 'new', 'updated', or 'skipped'.""" + try: + # Check for existing metric by type, date AND source (to separate garmin and fitbit weight if both exist) + existing = session.query(HealthMetric).filter_by( + metric_type=metric_type, + date=date, + source=source + ).first() + + detailed_json = json.dumps(detailed_data) if detailed_data else None + + if existing: + # Update if value changed + if abs(existing.metric_value - value) > 0.001: + existing.metric_value = value + existing.unit = unit + existing.detailed_data = detailed_json + existing.updated_at = func.now() + return 'updated' + return 'skipped' + else: + new_metric = HealthMetric( + metric_type=metric_type, + metric_value=value, + unit=unit, + timestamp=datetime.combine(date, datetime.min.time()), # Default to midnight if only date + date=date, + source=source, + detailed_data=detailed_json + ) + session.add(new_metric) + return 'new' + except Exception as e: + logger.error(f"Error updating metric {metric_type}: {e}") + return 'error' diff --git a/FitnessSync/backend/src/services/sync/weight.py b/FitnessSync/backend/src/services/sync/weight.py new file mode 100644 index 0000000..2d31417 --- /dev/null +++ b/FitnessSync/backend/src/services/sync/weight.py @@ -0,0 +1,274 @@ +import logging +import re +from datetime import datetime, timedelta, date +from typing import Dict, Optional, Any + +from sqlalchemy import func +from sqlalchemy.orm import Session + +from ...models.health_metric import HealthMetric +from ...models.weight_record import WeightRecord +from ...services.garmin.client import GarminClient +from ...services.job_manager import job_manager +from .utils import update_or_create_health_metric + +logger = logging.getLogger(__name__) + +class WeightSyncService: + def __init__(self, db_session: Session, garmin_client: GarminClient, fitbit_client: Any = None): + 
+        self.db_session = db_session
+        self.garmin_client = garmin_client
+        self.fitbit_client = fitbit_client
+        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+    def _check_pause(self, job_id: str) -> bool:
+        """
+        Checks if the job is paused; blocks until it is resumed or cancelled.
+        Returns False if the job is cancelled, True otherwise.
+        """
+        if not job_id:
+            return True
+
+        if job_manager.should_pause(job_id):
+            self.logger.info(f"Job {job_id} paused. Waiting...")
+            import time
+            while job_manager.should_pause(job_id):
+                if job_manager.should_cancel(job_id):
+                    self.logger.info(f"Job {job_id} cancelled while paused.")
+                    return False
+                time.sleep(1)
+            self.logger.info(f"Job {job_id} resumed.")
+
+        return not job_manager.should_cancel(job_id)
+
+    def sync_fitbit_weight(self, days_back: int = 30, job_id: str = None) -> int:
+        """
+        Sync weight logs from Fitbit. Handles chunking for large date ranges.
+        """
+        if not self.fitbit_client:
+            self.logger.warning("Fitbit client not initialized")
+            return 0
+
+        final_end_date = datetime.now()
+        start_date = final_end_date - timedelta(days=days_back)
+
+        count = 0
+        current_start = start_date
+
+        self.logger.info(f"Syncing Fitbit weight from {start_date.date()} to {final_end_date.date()}")
+
+        retry_count = 0
+        MAX_RETRIES = 5
+
+        while current_start < final_end_date:
+            # Check pause
+            if job_id and not self._check_pause(job_id):
+                self.logger.info("Fitbit sync cancelled by user.")
+                break
+            # Fitbit usually limits range queries to 31 days; use 30 to be safe
+            chunk_end = min(current_start + timedelta(days=30), final_end_date)
+
+            start_str = current_start.strftime('%Y-%m-%d')
+            end_str = chunk_end.strftime('%Y-%m-%d')
+
+            self.logger.info(f"Fetching Fitbit chunk: {start_str} to {end_str}")
+
+            try:
+                logs = self.fitbit_client.get_weight_logs(start_str, end_str)
+
+                # Reset retry count on success
+                retry_count = 0
+
+                for log in logs:
+                    try:
+                        weight_val = float(log.get('weight', 0))
+                        if weight_val <= 0:
+                            continue
+
+                        date_str = log.get('date')
+                        time_str = log.get('time', '12:00:00')
+                        log_dt = datetime.fromisoformat(f"{date_str}T{time_str}")
+
+                        res = update_or_create_health_metric(
+                            self.db_session,
+                            metric_type='weight',
+                            date=log_dt.date(),
+                            value=weight_val,
+                            unit='kg',
+                            source='fitbit',
+                            detailed_data=log
+                        )
+                        if res in ['new', 'updated']:
+                            count += 1
+
+                    except Exception as e:
+                        self.logger.error(f"Error parsing fitbit weight log: {e}")
+                        continue
+
+                self.db_session.commit()
+
+                # Advance to the next chunk
+                current_start = chunk_end + timedelta(days=1)
+
+                # Rate-limit politeness
+                import time
+                time.sleep(1.0)
+
+            except Exception as e:
+                err_msg = str(e)
+                if "Rate Limit" in err_msg or "Retry-After" in err_msg:
+                    retry_count += 1
+                    if retry_count > MAX_RETRIES:
+                        self.logger.error(f"Fitbit sync aborted: Max rate-limit retries ({MAX_RETRIES}) reached.")
+                        break
+
+                    wait_time = 60
+                    # Try to parse the advertised wait in seconds
+                    match = re.search(r'Retry-After: (\d+)s?', err_msg)
+                    if match:
+                        wait_time = int(match.group(1))
+
+                    self.logger.warning(f"Fitbit rate limit hit (Attempt {retry_count}/{MAX_RETRIES}). 
Sleeping {wait_time}s before retrying chunk...") + import time + time.sleep(wait_time + 5) # Add 5s buffer + # Continue loop WITHOUT updating current_start to retry same chunk + continue + + self.logger.error(f"Fitbit sync chunk failed ({start_str} to {end_str}): {e}") + # For non-rate limits, skip chunk to proceed + current_start = chunk_end + timedelta(days=1) + + self.logger.info(f"Synced {count} Fitbit weight records total") + return count + + def _sync_weight_range(self, start_date: date, end_date: date): + """Helper to fetch and save weight history for a range.""" + try: + s_str = start_date.strftime('%Y-%m-%d') + e_str = end_date.strftime('%Y-%m-%d') + data = self.garmin_client.get_weight_history(s_str, e_str) + + if not data or 'dateWeightList' not in data: + return + + count = 0 + for w_item in data['dateWeightList']: + # Parse date + d_str = w_item.get('calendarDate') + if not d_str: continue + d = datetime.strptime(d_str, '%Y-%m-%d').date() + + # Get weight + w_val = w_item.get('weight') + if w_val: + update_or_create_health_metric( + self.db_session, + 'weight', + d, + float(w_val)/1000.0, + 'kg', + detailed_data=w_item + ) + count += 1 + + self.logger.info(f"Optimistically synced {count} weight records via range fetch.") + self.db_session.commit() + + except Exception as e: + self.logger.error(f"Error in _sync_weight_range: {e}") + + def reconcile_and_tag_weights(self): + """ + Compare Fitbit (WeightRecord) vs Garmin (HealthMetric) and tag sync_status. + """ + self.logger.info("Reconciling weight records...") + + # 1. Fetch all Fitbit records + fitbit_records = self.db_session.query(WeightRecord).all() + + # 2. Fetch all Garmin records + garmin_metrics = self.db_session.query(HealthMetric).filter( + HealthMetric.metric_type == 'weight', + HealthMetric.source == 'garmin' + ).all() + + garmin_map = {} + for gm in garmin_metrics: + d_str = gm.date.strftime('%Y-%m-%d') if hasattr(gm.date, 'strftime') else str(gm.date) + garmin_map[d_str] = gm.metric_value + + updated_count = 0 + + for record in fitbit_records: + d_str = record.date.strftime('%Y-%m-%d') + + status = 'unsynced' + + if d_str in garmin_map: + g_val = garmin_map[d_str] + # Compare kg + if abs(record.weight - g_val) < 0.05: # 50g tolerance + status = 'synced' + else: + status = 'unsynced' + + if record.sync_status != status: + record.sync_status = status + updated_count += 1 + + self.db_session.commit() + self.logger.info(f"Reconciliation complete. Updated status for {updated_count} records.") + + def sync_weights_to_garmin(self, limit: int = 50, job_id: str = None) -> Dict[str, int]: + """ + Uploads 'unsynced' weight records from Fitbit to Garmin. 
+ """ + # First, ensure our tags are correct + self.reconcile_and_tag_weights() + + # Query unsynced + # Sort by date ASC (oldest first) to fill history + unsynced = self.db_session.query(WeightRecord).filter( + WeightRecord.sync_status == 'unsynced' + ).order_by(WeightRecord.date.asc()).limit(limit).all() + + total = len(unsynced) + processed = 0 + failed = 0 + + self.logger.info(f"Found {total} unsynced weight records to upload.") + # print(f"DEBUG: Found {total} unsynced weight records.", flush=True) + + if job_id: + job_manager.update_job(job_id, message=f"Uploading {total} weight records...", progress=0) + + for idx, record in enumerate(unsynced): + # Check cancel/pause + if job_id and not self._check_pause(job_id): + break + + if job_id: + job_manager.update_job(job_id, progress=int((idx/total)*100)) + + # Upload + # record.timestamp is datetime + success = self.garmin_client.upload_metric_weight( + timestamp=record.timestamp, + weight_kg=record.weight, + bmi=record.bmi + ) + + if success: + record.sync_status = 'synced' + processed += 1 + else: + failed += 1 + + self.db_session.commit() + + # Rate limit sleep + import time + time.sleep(1.0) + + if job_id: + job_manager.complete_job(job_id) + + return {"processed": processed, "failed": failed} diff --git a/FitnessSync/backend/src/services/sync_app.py b/FitnessSync/backend/src/services/sync_app.py index d40c40c..9252a24 100644 --- a/FitnessSync/backend/src/services/sync_app.py +++ b/FitnessSync/backend/src/services/sync_app.py @@ -1,439 +1,85 @@ -from ..models.activity import Activity -from ..models.health_metric import HealthMetric -from ..models.sync_log import SyncLog -from ..services.garmin.client import GarminClient -from sqlalchemy.orm import Session -from datetime import datetime, timedelta -from typing import Dict import logging -import json +from typing import Dict, Any, Optional + +from sqlalchemy.orm import Session +from datetime import datetime + +from ..services.garmin.client import GarminClient +from .sync.activity import GarminActivitySync +from .sync.health import GarminHealthSync +from .sync.weight import WeightSyncService +from .sync.utils import update_or_create_health_metric logger = logging.getLogger(__name__) -from ..services.job_manager import job_manager -import math - class SyncApp: - def __init__(self, db_session: Session, garmin_client: GarminClient, fitbit_client=None): + def __init__(self, db_session: Session, garmin_client: GarminClient, fitbit_client: Any = None): self.db_session = db_session self.garmin_client = garmin_client self.fitbit_client = fitbit_client self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}") - self.logger.info("SyncApp initialized") - - def sync_activities(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]: - """Sync activity data from Garmin to local storage.""" - self.logger.info(f"=== Starting sync_activities with days_back={days_back} ===") - start_date = (datetime.now() - timedelta(days=days_back)).strftime('%Y-%m-%d') - end_date = datetime.now().strftime('%Y-%m-%d') - - self.logger.info(f"Date range: {start_date} to {end_date}") - - sync_log = SyncLog(operation="activity_sync", status="started", start_time=datetime.now()) - self.db_session.add(sync_log) - self.db_session.commit() - - processed_count = 0 - failed_count = 0 - - try: - if job_id: - job_manager.update_job(job_id, message="Fetching activities list...", progress=5) - - self.logger.info("Fetching activities from Garmin...") - garmin_activities = 
self.garmin_client.get_activities(start_date, end_date) - self.logger.info(f"Successfully fetched {len(garmin_activities)} activities from Garmin") - - total_activities = len(garmin_activities) - - for idx, activity_data in enumerate(garmin_activities): - # Check for cancellation - if job_id and job_manager.should_cancel(job_id): - self.logger.info("Sync cancelled by user.") - sync_log.status = "cancelled" - sync_log.message = "Cancelled by user" - break - - if job_id: - # Update progress (5% to 95%) - progress = 5 + int((idx / total_activities) * 90) - job_manager.update_job(job_id, message=f"Processing activity {idx + 1}/{total_activities}", progress=progress) - - activity_id = str(activity_data.get('activityId')) - if not activity_id: - self.logger.warning("Skipping activity with no ID.") - continue - - try: - existing_activity = self.db_session.query(Activity).filter_by(garmin_activity_id=activity_id).first() - - if not existing_activity: - activity_type_dict = activity_data.get('activityType', {}) - existing_activity = Activity( - garmin_activity_id=activity_id, - activity_name=activity_data.get('activityName'), - activity_type=activity_type_dict.get('typeKey', 'unknown'), - start_time=datetime.fromisoformat(activity_data.get('startTimeLocal')) if activity_data.get('startTimeLocal') else None, - duration=activity_data.get('duration', 0), - download_status='pending' - ) - self.db_session.add(existing_activity) - - if existing_activity.download_status != 'downloaded': - downloaded_successfully = False - # PRIORITIZE FIT FILE - for fmt in ['fit', 'original', 'tcx', 'gpx']: - file_content = self.garmin_client.download_activity(activity_id, file_type=fmt) - if file_content: - existing_activity.file_content = file_content - existing_activity.file_type = fmt - existing_activity.download_status = 'downloaded' - existing_activity.downloaded_at = datetime.now() - self.logger.info(f"✓ Successfully downloaded {activity_id} as {fmt}") - downloaded_successfully = True - break - - if not downloaded_successfully: - existing_activity.download_status = 'failed' - self.logger.warning(f"✗ Failed to download {activity_id}") - failed_count += 1 - else: - processed_count += 1 - else: - self.logger.info(f"Activity {activity_id} already downloaded. 
Skipping.") - processed_count += 1 - - self.db_session.commit() - - except Exception as e: - self.logger.error(f"✗ Error processing activity {activity_id}: {e}", exc_info=True) - failed_count += 1 - self.db_session.rollback() - - if sync_log.status != "cancelled": - sync_log.status = "completed_with_errors" if failed_count > 0 else "completed" - sync_log.records_processed = processed_count - sync_log.records_failed = failed_count - - except Exception as e: - self.logger.error(f"Major error during activity sync: {e}", exc_info=True) - sync_log.status = "failed" - sync_log.message = str(e) - - sync_log.end_time = datetime.now() - self.db_session.commit() - - # Create stats summary for message - stats_summary = { - "summary": [ - { - "type": "Activity", - "source": "Garmin", - "total": len(garmin_activities) if 'garmin_activities' in locals() else 0, - "synced": processed_count - } - ] - } - sync_log.message = json.dumps(stats_summary) - self.db_session.commit() - - if job_id: - job_manager.complete_job(job_id) - - self.logger.info(f"=== Finished sync_activities: processed={processed_count}, failed={failed_count} ===") - return {"processed": processed_count, "failed": failed_count} - - def sync_health_metrics(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]: - """Sync health metrics from Garmin to local database.""" - start_date = (datetime.now() - timedelta(days=days_back)).strftime('%Y-%m-%d') - end_date = datetime.now().strftime('%Y-%m-%d') - - self.logger.info(f"=== Starting sync_health_metrics with days_back={days_back} ===") - sync_log = SyncLog(operation="health_metric_sync", status="started", start_time=datetime.now()) - self.db_session.add(sync_log) - self.db_session.commit() - - processed_count = 0 - failed_count = 0 - metrics_breakdown = { - 'steps': {'new': 0, 'updated': 0}, 'hrv': {'new': 0, 'updated': 0}, - 'sleep': {'new': 0, 'updated': 0}, 'stress': {'new': 0, 'updated': 0}, - 'intensity': {'new': 0, 'updated': 0}, 'hydration': {'new': 0, 'updated': 0}, - 'weight': {'new': 0, 'updated': 0}, 'body_battery': {'new': 0, 'updated': 0} - } - - stats_list = [] - - try: - if job_id: - job_manager.update_job(job_id, message="Fetching health metrics...", progress=10) - - daily_metrics = self.garmin_client.get_daily_metrics(start_date, end_date) - - # Helper to check cancellation - def check_cancel(): - if job_id and job_manager.should_cancel(job_id): - raise Exception("Cancelled by user") - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Steps...", progress=20) - - # Steps - steps_data_list = daily_metrics.get("steps", []) - stats_list.append({"type": "Steps", "source": "Garmin", "total": len(steps_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for steps_data in steps_data_list: - try: - status = self._update_or_create_metric('steps', steps_data.calendar_date, steps_data.total_steps, 'steps') - metrics_breakdown['steps'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing steps data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing HRV...", progress=30) - - # HRV - hrv_data_list = daily_metrics.get("hrv", []) - stats_list.append({"type": "HRV", "source": "Garmin", "total": len(hrv_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for hrv_data in hrv_data_list: - try: - status = self._update_or_create_metric('hrv', hrv_data.calendar_date, 
hrv_data.last_night_avg, 'ms') - metrics_breakdown['hrv'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing HRV data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Sleep...", progress=40) - - # Sleep - sleep_data_list = daily_metrics.get("sleep", []) - stats_list.append({"type": "Sleep", "source": "Garmin", "total": len(sleep_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for sleep_data in sleep_data_list: - try: - status = self._update_or_create_metric('sleep', sleep_data.daily_sleep_dto.calendar_date, sleep_data.daily_sleep_dto.sleep_time_seconds, 'seconds') - metrics_breakdown['sleep'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing sleep data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Stress...", progress=50) - - # Updated Sync Logic for new metrics - # Stress - stress_data_list = daily_metrics.get("stress", []) - stats_list.append({"type": "Stress", "source": "Garmin", "total": len(stress_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for stress_data in stress_data_list: - try: - if stress_data.overall_stress_level is not None: - status = self._update_or_create_metric('stress', stress_data.calendar_date, float(stress_data.overall_stress_level), 'score') - metrics_breakdown['stress'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing stress data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Intensity...", progress=60) - - # Intensity Minutes - intensity_data_list = daily_metrics.get("intensity", []) - stats_list.append({"type": "Intensity", "source": "Garmin", "total": len(intensity_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for intensity_data in intensity_data_list: - try: - mod = intensity_data.moderate_value or 0 - vig = intensity_data.vigorous_value or 0 - total_intensity = mod + vig - status = self._update_or_create_metric('intensity_minutes', intensity_data.calendar_date, float(total_intensity), 'minutes') - metrics_breakdown['intensity'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing intensity data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Hydration...", progress=70) - - # Hydration - hydration_data_list = daily_metrics.get("hydration", []) - stats_list.append({"type": "Hydration", "source": "Garmin", "total": len(hydration_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for hydration_data in hydration_data_list: - try: - if hydration_data.value_in_ml is not None: - status = self._update_or_create_metric('hydration', hydration_data.calendar_date, float(hydration_data.value_in_ml), 'ml') - metrics_breakdown['hydration'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing hydration data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Weight...", progress=80) - - # Weight - 
weight_records_from_garmin = daily_metrics.get("weight", []) - self.logger.info(f"Processing {len(weight_records_from_garmin)} weight records from Garmin") - stats_list.append({"type": "Weight", "source": "Garmin", "total": len(weight_records_from_garmin), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for weight_data in weight_records_from_garmin: - try: - if weight_data.weight is not None: - # Weight is usually in grams in Garmin API, converting to kg - weight_kg = weight_data.weight / 1000.0 - status = self._update_or_create_metric('weight', weight_data.calendar_date, weight_kg, 'kg') - metrics_breakdown['weight'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - except Exception as e: - self.logger.error(f"Error processing weight data: {e}", exc_info=True) - failed_count += 1 - - check_cancel() - if job_id: job_manager.update_job(job_id, message="Processing Body Battery...", progress=90) - - # Body Battery - bb_data_list = daily_metrics.get("body_battery", []) - stats_list.append({"type": "Body Battery", "source": "Garmin", "total": len(bb_data_list), "synced": 0}) - metric_idx = len(stats_list) - 1 - - for bb_data in bb_data_list: - try: - # Calculate max body battery from the values array if available - # body_battery_values_array is list[list[timestamp, value]] - max_bb = 0 - if bb_data.body_battery_values_array: - try: - # Filter out None values and find max - values = [v[1] for v in bb_data.body_battery_values_array if v and len(v) > 1 and isinstance(v[1], (int, float))] - if values: - max_bb = max(values) - except Exception: - pass # Keep 0 if extraction fails - - if max_bb > 0: - status = self._update_or_create_metric('body_battery_max', bb_data.calendar_date, float(max_bb), 'percent') - metrics_breakdown['body_battery'][status] += 1 - processed_count += 1 - stats_list[metric_idx]["synced"] += 1 - - except Exception as e: - self.logger.error(f"Error processing body battery data: {e}", exc_info=True) - failed_count += 1 - - sync_log.status = "completed_with_errors" if failed_count > 0 else "completed" - sync_log.records_processed = processed_count - sync_log.records_failed = failed_count - - # Save stats to message - sync_log.message = json.dumps({"summary": stats_list}) - - except Exception as e: - if str(e) == "Cancelled by user": - self.logger.info("Sync cancelled by user.") - sync_log.status = "cancelled" - sync_log.message = "Cancelled by user" + # Load tokens into the Garmin client using the provided session + if self.garmin_client and hasattr(self.garmin_client, 'load_tokens'): + print("DEBUG: SyncApp calling load_tokens...", flush=True) + is_loaded = self.garmin_client.load_tokens(self.db_session) + if is_loaded: + self.logger.info("Garmin tokens loaded successfully during SyncApp init.") + print("DEBUG: Tokens loaded successfully.", flush=True) else: - self.logger.error(f"Major error during health metrics sync: {e}", exc_info=True) - sync_log.status = "failed" - sync_log.message = str(e) - - sync_log.end_time = datetime.now() - self.db_session.commit() + self.logger.warning("Failed to load Garmin tokens during SyncApp init. 
Sync may fail if not logged in.") + print("DEBUG: Failed to load tokens.", flush=True) - if job_id: - job_manager.complete_job(job_id) + # Initialize sub-services + self.activity_sync = GarminActivitySync(db_session, garmin_client) + self.health_sync = GarminHealthSync(db_session, garmin_client) + self.weight_sync = WeightSyncService(db_session, garmin_client, fitbit_client) - breakdown_str = ", ".join([f"{k}: {v['new']} new/{v['updated']} updated" for k, v in metrics_breakdown.items()]) - self.logger.info(f"=== Finished sync_health_metrics: processed={processed_count}, failed={failed_count} ({breakdown_str}) ===") - return {"processed": processed_count, "failed": failed_count} + self.logger.info("SyncApp initialized (delegating to sub-services)") + + # --- Activity Sync --- + def scan_activities(self, days_back: int = 30): + self.logger.info("Delegating scan_activities to GarminActivitySync") + return self.activity_sync.scan_activities(days_back) + + def sync_pending_activities(self, limit: int = None, job_id: str = None) -> Dict[str, int]: + self.logger.info("Delegating sync_pending_activities to GarminActivitySync") + return self.activity_sync.sync_pending_activities(limit, job_id) + + def sync_activities(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]: + """Coordinator for activity sync.""" + self.logger.info("Delegating sync_activities to GarminActivitySync") + # 1. Scan (fetch metadata) + self.scan_activities(days_back) + # 2. Sync Pending (download files) + return self.sync_pending_activities(job_id=job_id) + def redownload_activity(self, activity_id: str) -> bool: - """ - Force re-download of an activity file from Garmin. - """ - self.logger.info(f"Redownloading activity {activity_id}...") - try: - # Find the activity - activity = self.db_session.query(Activity).filter_by(garmin_activity_id=activity_id).first() - if not activity: - self.logger.error(f"Activity {activity_id} not found locally.") - return False + return self.activity_sync.redownload_activity(activity_id) - # Attempt download with fallback order - downloaded = False - for fmt in ['fit', 'original', 'tcx', 'gpx']: - file_content = self.garmin_client.download_activity(activity_id, file_type=fmt) - if file_content: - activity.file_content = file_content - activity.file_type = fmt - activity.download_status = 'downloaded' - activity.downloaded_at = datetime.now() - self.logger.info(f"✓ Successfully redownloaded {activity_id} as {fmt}") - downloaded = True - break - - if not downloaded: - self.logger.warning(f"Failed to redownload {activity_id}") - return False + # --- Health Sync --- + def sync_health_metrics(self, days_back: int = 30, job_id: str = None) -> Dict[str, int]: + self.logger.info("Delegating sync_health_metrics to GarminHealthSync") + return self.health_sync.sync_health_metrics(days_back, job_id) - self.db_session.commit() - return True + def scan_health_metrics(self, days_back: int = 30) -> Dict[str, int]: + return self.health_sync.scan_health_metrics(days_back) + + def sync_pending_health_metrics(self, limit: int = None, job_id: str = None) -> Dict[str, int]: + return self.health_sync.sync_pending_health_metrics(limit, job_id) - except Exception as e: - self.logger.error(f"Error redownloading activity {activity_id}: {e}", exc_info=True) - self.db_session.rollback() - return False + # --- Weight Sync --- + def sync_fitbit_weight(self, days_back: int = 30, job_id: str = None) -> int: + self.logger.info("Delegating sync_fitbit_weight to WeightSyncService") + return 
self.weight_sync.sync_fitbit_weight(days_back, job_id) - def _update_or_create_metric(self, metric_type: str, date: datetime.date, value: float, unit: str) -> str: - - """Helper to update or create a health metric record. Returns 'new' or 'updated'.""" - try: - existing = self.db_session.query(HealthMetric).filter_by(metric_type=metric_type, date=date).first() - if existing: - # Optional: Check if value is different before updating to truly 'skip' - # For now, we consider found as 'updated' (or skipped if we want to call it that in logs) - existing.metric_value = value - existing.updated_at = datetime.now() - self.db_session.commit() - return 'updated' - else: - metric = HealthMetric( - metric_type=metric_type, - metric_value=value, - unit=unit, - timestamp=datetime.combine(date, datetime.min.time()), - date=date, - source='garmin' - ) - self.db_session.add(metric) - self.db_session.commit() - return 'new' - except Exception as e: - self.logger.error(f"Error saving metric {metric_type} for {date}: {e}", exc_info=True) - self.db_session.rollback() - raise + def sync_weights_to_garmin(self, limit: int = 50, job_id: str = None) -> Dict[str, int]: + self.logger.info("Delegating sync_weights_to_garmin to WeightSyncService") + return self.weight_sync.sync_weights_to_garmin(limit, job_id) + + # --- Helpers for compatibility/tests --- + def _update_or_create_metric(self, metric_type: str, date: datetime.date, value: float, unit: str, source: str = 'garmin', detailed_data: Dict = None) -> str: + # Wrapper for tests that might mock/call this + return update_or_create_health_metric(self.db_session, metric_type, date, value, unit, source, detailed_data) diff --git a/FitnessSync/backend/src/tasks/__pycache__/definitions.cpython-311.pyc b/FitnessSync/backend/src/tasks/__pycache__/definitions.cpython-311.pyc new file mode 100644 index 0000000..cde991e Binary files /dev/null and b/FitnessSync/backend/src/tasks/__pycache__/definitions.cpython-311.pyc differ diff --git a/FitnessSync/backend/src/tasks/definitions.py b/FitnessSync/backend/src/tasks/definitions.py new file mode 100644 index 0000000..70587fa --- /dev/null +++ b/FitnessSync/backend/src/tasks/definitions.py @@ -0,0 +1,173 @@ + +import logging +from ..services.sync_app import SyncApp +from ..services.garmin.client import GarminClient +from ..services.job_manager import job_manager +from ..services.garth_helper import load_and_verify_garth_session +from ..utils.config import config +from ..models.api_token import APIToken +from ..models.config import Configuration +from ..services.fitbit_client import FitbitClient + +logger = logging.getLogger(__name__) + +def run_activity_sync_task(job_id: str, days_back: int, db_session_factory): + logger.info(f"Starting background activity sync task {job_id}") + with db_session_factory() as session: + try: + load_and_verify_garth_session(session) + garmin_client = GarminClient() + sync_app = SyncApp(db_session=session, garmin_client=garmin_client) + sync_app.sync_activities(days_back=days_back, job_id=job_id) + except Exception as e: + logger.error(f"Background task failed: {e}") + job_manager.fail_job(job_id, str(e)) + +def run_metrics_sync_task(job_id: str, days_back: int, db_session_factory): + logger.info(f"Starting background metrics sync task {job_id}") + with db_session_factory() as session: + try: + load_and_verify_garth_session(session) + garmin_client = GarminClient() + sync_app = SyncApp(db_session=session, garmin_client=garmin_client) + sync_app.sync_health_metrics(days_back=days_back, job_id=job_id) 
+        except Exception as e:
+            logger.error(f"Background task failed: {e}")
+            job_manager.fail_job(job_id, str(e))
+
+def run_health_scan_job(job_id: str, days_back: int, db_session_factory):
+    """Background task wrapper for health scan"""
+    with db_session_factory() as db:
+        try:
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client)
+
+            job_manager.update_job(job_id, status="running", progress=0)
+            sync_app.scan_health_metrics(days_back=days_back)
+            job_manager.complete_job(job_id)
+        except Exception as e:
+            logger.error(f"Background task failed: {e}")
+            job_manager.fail_job(job_id, str(e))
+
+def run_health_sync_job(job_id: str, limit: int, db_session_factory):
+    """Background task wrapper for health sync pending"""
+    with db_session_factory() as db:
+        try:
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client)
+
+            sync_app.sync_pending_health_metrics(limit=limit, job_id=job_id)
+
+        except Exception as e:
+            logger.error(f"Health sync job failed: {e}")
+            job_manager.fail_job(job_id, str(e))
+
+def run_garmin_upload_job(job_id: str, limit: int, db_session_factory):
+    """Background task wrapper for garmin weight upload"""
+    with db_session_factory() as db:
+        try:
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client, None)
+
+            job_manager.update_job(job_id, status="running", progress=0)
+            result = sync_app.sync_weights_to_garmin(limit=limit, job_id=job_id)
+            job_manager.complete_job(job_id, result=result)
+
+        except Exception as e:
+            logger.error(f"Garmin upload job failed: {e}")
+            job_manager.fail_job(job_id, str(e))
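These wrappers all share the same contract: (job_id, parameter, db_session_factory). A sketch of how one might be launched from an API route; the sessionmaker URL and the uuid-based job id are assumptions — only the wrapper signature is taken from the code above:

    import threading
    import uuid
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("postgresql://user:pass@localhost/fitness")  # placeholder URL
    SessionLocal = sessionmaker(bind=engine)

    def launch_health_scan(days_back: int = 30) -> str:
        job_id = str(uuid.uuid4())
        t = threading.Thread(
            target=run_health_scan_job,
            args=(job_id, days_back, SessionLocal),
            daemon=True,  # don't block process shutdown on a running scan
        )
        t.start()
        return job_id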
+def run_activity_backfill_job(job_id: str, days_back: int, db_session_factory):
+    """Background task wrapper for full activity backfill/scan"""
+    with db_session_factory() as db:
+        try:
+            # Check/Load tokens
+            load_and_verify_garth_session(db)
+
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client)
+
+            # Use a large default range if days_back is not provided or too small
+            target_days = days_back if days_back > 365 else 3650
+
+            job_manager.update_job(job_id, status="running", progress=0, message=f"Starting backfill ({target_days} days)...")
+
+            # Call scan_activities with job_id so progress is reported.
+            # NOTE: the SyncApp.scan_activities wrapper does not forward job_id,
+            # so go through self.activity_sync directly until the wrapper is updated.
+            stats = sync_app.activity_sync.scan_activities(days_back=target_days, job_id=job_id)
+
+            job_manager.complete_job(job_id, result=stats)
+
+        except Exception as e:
+            logger.error(f"Backfill job failed: {e}")
+            job_manager.fail_job(job_id, str(e))
+
+def run_fitbit_sync_job(job_id: str, days_back: int, db_session_factory):
+    """Background task wrapper for fitbit sync"""
+    logger.info(f"Starting run_fitbit_sync_job for {job_id}")
+
+    with db_session_factory() as db:
+        try:
+            # Get tokens
+            token_record = db.query(APIToken).filter_by(token_type="fitbit").first()
+            access_token = token_record.access_token if token_record else None
+            refresh_token = token_record.refresh_token if token_record else None
+
+            # Get API credentials (env var takes precedence over DB)
+            db_config = db.query(Configuration).first()
+
+            client_id = config.FITBIT_CLIENT_ID
+            client_secret = config.FITBIT_CLIENT_SECRET
+            redirect_uri = config.FITBIT_REDIRECT_URI
+
+            if not client_id and db_config:
+                client_id = db_config.fitbit_client_id
+
+            if not client_secret and db_config:
+                client_secret = db_config.fitbit_client_secret
+
+            if not redirect_uri and db_config and db_config.fitbit_redirect_uri:
+                redirect_uri = db_config.fitbit_redirect_uri
+
+            def refresh_cb(token_dict):
+                """Callback to update tokens in the DB upon refresh."""
+                try:
+                    logger.info("Refreshing Fitbit token in DB via callback")
+                    # Re-query inside the callback
+                    tr = db.query(APIToken).filter_by(token_type="fitbit").first()
+                    if tr:
+                        tr.access_token = token_dict.get('access_token')
+                        tr.refresh_token = token_dict.get('refresh_token')
+                        if 'expires_at' in token_dict:
+                            from datetime import datetime
+                            tr.expires_at = datetime.fromtimestamp(token_dict['expires_at'])
+                        db.commit()
+                        logger.info("Fitbit token refreshed and saved.")
+                except Exception as e:
+                    logger.error(f"Error in refresh_cb: {e}")
+
+            fitbit_client = FitbitClient(
+                client_id=client_id,
+                client_secret=client_secret,
+                access_token=access_token,
+                refresh_token=refresh_token,
+                redirect_uri=redirect_uri,
+                refresh_cb=refresh_cb
+            )
+
+            garmin_client = GarminClient()
+            sync_app = SyncApp(db, garmin_client, fitbit_client)
+
+            job_manager.update_job(job_id, status="running", progress=0)
+            count = sync_app.sync_fitbit_weight(days_back=days_back, job_id=job_id)
+            job_manager.complete_job(job_id, result={"count": count})
+
+        except Exception as e:
+            logger.error(f"Fitbit sync job failed: {e}")
+            job_manager.fail_job(job_id, str(e))
diff --git a/FitnessSync/backend/startup_dummy.db b/FitnessSync/backend/startup_dummy.db
new file mode 100644
index 0000000..946e326
Binary files /dev/null and b/FitnessSync/backend/startup_dummy.db differ
diff --git a/FitnessSync/backend/templates/activities.html b/FitnessSync/backend/templates/activities.html
index 3bcb828..2be5582 100644
--- a/FitnessSync/backend/templates/activities.html
+++ b/FitnessSync/backend/templates/activities.html
@@ -1,379 +1,659 @@
+{% extends "base.html" %}
[Template markup trimmed in extraction: the hunks for templates/activities.html and the other templates/*.html files survive only as text fragments. Recoverable structure:]
[- old activities table: Date, Name, Type, Duration, File Type, Status, Actions, with a "Loading..." placeholder row]
[+ bike setups table: Name, Frame, Chainring, Rear Cog, Gear Ratio, Actions]
[+ weight page ("Track your weight and body composition from Fitbit."): table with Date, Weight (kg), BMI, Source]
[+ health metrics page ("Track your daily health metrics from Garmin."): table with Date, Metric, Value, Unit, Source]
[- old sync-summary table: Type, Source, Found, Synced, with a "No sync data available." row]
[-/+ logs table reworked: Operation, Status, Start Time, End Time, Processed, Failed, Message columns replaced by Type, Source, Found, Synced; the "Loading logs..." row replaced by "No sync data available."]
[+ schedules table: Name, Job Type, Status, Interval, Last Run, Next Run, Actions, with a "Loading schedules..." row]
[+ active jobs table: Operation, Job ID, Status, Progress, Message, Started, Actions, with a "No active jobs." row]
[-/+ status/setup pages: "Loading status...", "Current auth state: Not Tested", "Loading Garmin authentication status...", and "Loading Fitbit authentication status..." paragraphs removed and re-added in the new layout]
[The setup.html MFA script follows; only the innerHTML assignment opening it is reconstructed here, as its surrounding lines were lost in extraction:]
+                statusText.innerHTML = `Current auth state: Verifying MFA...
`; + + try { + const response = await fetch('/api/setup/garmin/mfa', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + verification_code: mfaCode, + session_id: window.garmin_mfa_session_id + }) + }); + + const data = await response.json(); + + if (response.ok) { + statusText.innerHTML = `Current auth state: MFA Verification Successful
`; + saveBtn.disabled = false; + alert(data.message || 'MFA verification successful'); + document.getElementById('garmin-mfa-section').style.display = 'none'; + document.getElementById('mfa-code').value = ''; + loadStatusInfo(); + } else { + statusText.innerHTML = `Current auth state: MFA Verification Failed
`; + alert('MFA verification failed: ' + data.message); + } + } catch (error) { + statusText.innerHTML = `Current auth state: Error
`; + console.error('Error submitting MFA code:', error); + alert('Error submitting MFA code: ' + error.message); + } + } + \ No newline at end of file diff --git a/FitnessSync/backend/test.db b/FitnessSync/backend/test.db new file mode 100644 index 0000000..daa49eb Binary files /dev/null and b/FitnessSync/backend/test.db differ diff --git a/FitnessSync/backend/test_bike_setups.db b/FitnessSync/backend/test_bike_setups.db new file mode 100644 index 0000000..867de47 Binary files /dev/null and b/FitnessSync/backend/test_bike_setups.db differ diff --git a/FitnessSync/backend/test_runtime.db b/FitnessSync/backend/test_runtime.db new file mode 100644 index 0000000..946e326 Binary files /dev/null and b/FitnessSync/backend/test_runtime.db differ diff --git a/FitnessSync/backend/tests/functional/__init__.py b/FitnessSync/backend/tests/functional/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/FitnessSync/backend/tests/functional/__pycache__/__init__.cpython-313.pyc b/FitnessSync/backend/tests/functional/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000..7caccf6 Binary files /dev/null and b/FitnessSync/backend/tests/functional/__pycache__/__init__.cpython-313.pyc differ diff --git a/FitnessSync/backend/tests/functional/__pycache__/test_bike_setups.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/functional/__pycache__/test_bike_setups.cpython-313-pytest-9.0.2.pyc new file mode 100644 index 0000000..8ee4556 Binary files /dev/null and b/FitnessSync/backend/tests/functional/__pycache__/test_bike_setups.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/functional/__pycache__/test_setup_api.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/functional/__pycache__/test_setup_api.cpython-313-pytest-9.0.2.pyc new file mode 100644 index 0000000..8a495bc Binary files /dev/null and b/FitnessSync/backend/tests/functional/__pycache__/test_setup_api.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/functional/test_bike_setups.py b/FitnessSync/backend/tests/functional/test_bike_setups.py new file mode 100644 index 0000000..9f5fc7e --- /dev/null +++ b/FitnessSync/backend/tests/functional/test_bike_setups.py @@ -0,0 +1,88 @@ +import pytest +from unittest.mock import MagicMock +import sys + +# Mock scheduler before importing main to prevent it from starting +mock_scheduler = MagicMock() +mock_scheduler_module = MagicMock() +mock_scheduler_module.scheduler = mock_scheduler +sys.modules["src.services.scheduler"] = mock_scheduler_module + +from fastapi.testclient import TestClient +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from src.models import Base, BikeSetup +from main import app +from src.utils.config import config +from src.api.bike_setups import get_db + +# Use a separate test database or the existing test.db +SQLALCHEMY_DATABASE_URL = "sqlite:///./test_bike_setups.db" + +engine = create_engine( + SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False} +) +TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + +def override_get_db(): + try: + db = TestingSessionLocal() + yield db + finally: + db.close() + +app.dependency_overrides[get_db] = override_get_db + +@pytest.fixture(scope="module") +def test_db(): + Base.metadata.create_all(bind=engine) + yield + Base.metadata.drop_all(bind=engine) + +@pytest.fixture(scope="module") +def client(test_db): + with TestClient(app) as c: + yield c + +def test_create_bike_setup(client): + 
response = client.post( + "/api/bike-setups/", + json={"frame": "Trek Emonda", "chainring": 50, "rear_cog": 11, "name": "Road Setup"} + ) + assert response.status_code == 201 + data = response.json() + assert data["frame"] == "Trek Emonda" + assert data["chainring"] == 50 + assert "id" in data + +def test_read_bike_setups(client): + response = client.get("/api/bike-setups/") + assert response.status_code == 200 + data = response.json() + assert len(data) >= 1 + assert data[0]["frame"] == "Trek Emonda" + +def test_update_bike_setup(client): + # First get id + response = client.get("/api/bike-setups/") + setup_id = response.json()[0]["id"] + + response = client.put( + f"/api/bike-setups/{setup_id}", + json={"chainring": 52} + ) + assert response.status_code == 200 + data = response.json() + assert data["chainring"] == 52 + assert data["frame"] == "Trek Emonda" + +def test_delete_bike_setup(client): + # First get id + response = client.get("/api/bike-setups/") + setup_id = response.json()[0]["id"] + + response = client.delete(f"/api/bike-setups/{setup_id}") + assert response.status_code == 204 + + response = client.get(f"/api/bike-setups/{setup_id}") + assert response.status_code == 404 diff --git a/FitnessSync/backend/tests/functional/test_setup_api.py b/FitnessSync/backend/tests/functional/test_setup_api.py new file mode 100644 index 0000000..43a0ab8 --- /dev/null +++ b/FitnessSync/backend/tests/functional/test_setup_api.py @@ -0,0 +1,61 @@ +import pytest +from unittest.mock import MagicMock, patch +from fastapi.testclient import TestClient +from main import app +from src.services.postgresql_manager import PostgreSQLManager +from src.models.api_token import APIToken +from src.models.config import Configuration +from datetime import datetime, timedelta + +# client = TestClient(app) # REMOVED + +# Helper to verify standard API response structure +def assert_success_response(response): + assert response.status_code == 200 + data = response.json() + # auth-status returns model directly, not wrapped in {status: success} + return data + +def test_get_auth_status(client): + """Test GET /api/setup/auth-status endpoint.""" + # No per-test DB mocking needed here: the shared `client` fixture routes + # `get_db` through the conftest override, so any queries hit the test database.
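These functional tests imply a conventional CRUD router shape. For reference, here is a minimal sketch of the partial-update path they pin down, assuming Pydantic v2 and SQLAlchemy 1.4+; only the field names, route, and status codes are taken from the tests, the schema and session wiring are illustrative, not the shipped src/api/bike_setups.py:

```python
# Hypothetical reconstruction of the PUT handler the tests exercise.
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from sqlalchemy.orm import Session

from src.models import BikeSetup

router = APIRouter(prefix="/api/bike-setups")

class BikeSetupUpdate(BaseModel):
    # All fields optional so a partial body like {"chainring": 52} validates.
    frame: str | None = None
    chainring: int | None = None
    rear_cog: int | None = None
    name: str | None = None

def get_db():
    # The real dependency yields a SQLAlchemy session; the tests swap it
    # out via app.dependency_overrides[get_db].
    raise NotImplementedError

@router.put("/{setup_id}")
def update_bike_setup(setup_id: int, payload: BikeSetupUpdate,
                      db: Session = Depends(get_db)):
    setup = db.get(BikeSetup, setup_id)
    if setup is None:
        raise HTTPException(status_code=404, detail="Bike setup not found")
    # exclude_unset keeps untouched fields (e.g. frame) intact on partial update
    for field, value in payload.model_dump(exclude_unset=True).items():
        setattr(setup, field, value)
    db.commit()
    db.refresh(setup)
    return {"id": setup.id, "frame": setup.frame, "chainring": setup.chainring,
            "rear_cog": setup.rear_cog, "name": setup.name}
```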
+ + response = client.get("/api/setup/auth-status") + assert response.status_code == 200 + data = response.json() + assert "garmin" in data + assert "fitbit" in data + assert "token_stored" in data["garmin"] + +@patch('requests.get') +def test_load_consul_config(mock_get, client): + """Test POST /api/setup/load-consul-config.""" + # Mock Consul response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = [ + # Consul returns list of {Key: ..., Value: b64} + { + "Key": "fitbit-garmin-sync/garmin_username", + "Value": "dGVzdF91c2Vy" # 'test_user' in b64 + }, + { + "Key": "fitbit-garmin-sync/garmin_password", + "Value": "dGVzdF9wYXNz" # 'test_pass' in b64 + } + ] + mock_get.return_value = mock_response + + response = client.post("/api/setup/load-consul-config") + + assert response.status_code == 200 + data = response.json() + assert data["status"] == "success" + assert data["garmin"]["username"] == "test_user" diff --git a/FitnessSync/backend/tests/unit/__pycache__/test_bike_matching.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/unit/__pycache__/test_bike_matching.cpython-313-pytest-9.0.2.pyc new file mode 100644 index 0000000..0bc6f22 Binary files /dev/null and b/FitnessSync/backend/tests/unit/__pycache__/test_bike_matching.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/unit/__pycache__/test_garmin_auth.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/unit/__pycache__/test_garmin_auth.cpython-313-pytest-9.0.2.pyc index 7ac20ac..bb09c74 100644 Binary files a/FitnessSync/backend/tests/unit/__pycache__/test_garmin_auth.cpython-313-pytest-9.0.2.pyc and b/FitnessSync/backend/tests/unit/__pycache__/test_garmin_auth.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/unit/__pycache__/test_garminconnect_migration.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/unit/__pycache__/test_garminconnect_migration.cpython-313-pytest-9.0.2.pyc new file mode 100644 index 0000000..aad0c3e Binary files /dev/null and b/FitnessSync/backend/tests/unit/__pycache__/test_garminconnect_migration.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/unit/__pycache__/test_sync_app.cpython-313-pytest-9.0.2.pyc b/FitnessSync/backend/tests/unit/__pycache__/test_sync_app.cpython-313-pytest-9.0.2.pyc index 74188f5..a8289c5 100644 Binary files a/FitnessSync/backend/tests/unit/__pycache__/test_sync_app.cpython-313-pytest-9.0.2.pyc and b/FitnessSync/backend/tests/unit/__pycache__/test_sync_app.cpython-313-pytest-9.0.2.pyc differ diff --git a/FitnessSync/backend/tests/unit/test_bike_matching.py b/FitnessSync/backend/tests/unit/test_bike_matching.py new file mode 100644 index 0000000..e0c5eee --- /dev/null +++ b/FitnessSync/backend/tests/unit/test_bike_matching.py @@ -0,0 +1,66 @@ +import pytest +from unittest.mock import MagicMock +from sqlalchemy.orm import Session +from src.services.bike_matching import match_activity_to_bike, calculate_observed_ratio +from src.models import Activity, BikeSetup + +@pytest.fixture +def mock_db(): + db = MagicMock(spec=Session) + + # Mock bike setups + # 1. 50/15 => 3.33 + # 2. 48/16 => 3.00 + # 3. 
52/11 => 4.72 + + setup1 = BikeSetup(id=1, frame="Fixie", chainring=50, rear_cog=15, name="Fixie A") + setup2 = BikeSetup(id=2, frame="Commuter", chainring=48, rear_cog=16, name="City") + setup3 = BikeSetup(id=3, frame="Road", chainring=52, rear_cog=11, name="Race") + + db.query.return_value.all.return_value = [setup1, setup2, setup3] + return db + +def test_calculate_observed_ratio(): + # Ratio = (Speed * 60) / (Cadence * 2.1) + # Speed 10m/s (36kmh), Cadence 90 + # Ratio = 600 / (90 * 2.1) = 600 / 189 = 3.17 + ratio = calculate_observed_ratio(10, 90) + assert abs(ratio - 3.17) < 0.01 + +def test_match_activity_success(mock_db): + # Setup 2 is 48/16 = 3.0 + # Target: Speed for Ratio 3.0 at 90 RPM + # Speed = (3.0 * 90 * 2.1) / 60 = 567 / 60 = 9.45 m/s + + activity = Activity( + id=101, + activity_type='cycling', + avg_speed=9.45, + avg_cadence=90 + ) + + match = match_activity_to_bike(mock_db, activity) + assert match is not None + assert match.id == 2 # Commuter (Ratio 3.0) + +def test_match_activity_indoor_ignored(mock_db): + activity = Activity( + id=102, + activity_type='Indoor Cycling', + avg_speed=9.45, + avg_cadence=90 + ) + match = match_activity_to_bike(mock_db, activity) + assert match is None + +def test_match_activity_no_match(mock_db): + # Ratio 1.0 (Very low gear) + # Speed = 3.15 m/s at 90 RPM + activity = Activity( + id=103, + activity_type='cycling', + avg_speed=3.15, + avg_cadence=90 + ) + match = match_activity_to_bike(mock_db, activity) + assert match is None diff --git a/FitnessSync/backend/tests/unit/test_garmin_data.py b/FitnessSync/backend/tests/unit/test_garmin_data.py deleted file mode 100644 index cd5407d..0000000 --- a/FitnessSync/backend/tests/unit/test_garmin_data.py +++ /dev/null @@ -1,115 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from datetime import datetime, timedelta -import garth -from garth.exc import GarthException -from src.services.garmin.client import GarminClient -from src.models.api_token import APIToken # Needed for AuthMixin - -@pytest.fixture -def garmin_client_instance(): - """Fixture for a GarminClient instance with test credentials.""" - client = GarminClient(username="testuser", password="testpassword") - client.is_connected = True # Assume connected for data tests - return client - -@patch("garth.client.connectapi") -def test_get_activities_success(mock_connectapi, garmin_client_instance): - """Test successful fetching of activities.""" - mock_connectapi.return_value = [{"activityId": 1, "activityName": "Run"}, {"activityId": 2, "activityName": "Bike"}] - - start_date = "2023-01-01" - end_date = "2023-01-07" - limit = 2 - - activities = garmin_client_instance.get_activities(start_date, end_date, limit) - - mock_connectapi.assert_called_once_with( - "/activitylist-service/activities/search/activities", - params={"startDate": start_date, "endDate": end_date, "limit": limit} - ) - assert len(activities) == 2 - assert activities[0]["activityName"] == "Run" - -@patch("garth.client.connectapi") -def test_get_activities_failure(mock_connectapi, garmin_client_instance): - """Test failure during fetching of activities.""" - mock_connectapi.side_effect = GarthException("API error") - - start_date = "2023-01-01" - end_date = "2023-01-07" - limit = 2 - - with pytest.raises(GarthException, match="API error"): - garmin_client_instance.get_activities(start_date, end_date, limit) - - mock_connectapi.assert_called_once() - -@patch("garth.client.download") -def test_download_activity_success(mock_download, garmin_client_instance): 
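For orientation, here is a plausible reconstruction of the `src/services/bike_matching.py` interface these unit tests pin down. The 60 and 2.1 constants come straight from the test comments (2.1 is presumably an approximate 700c wheel roll-out in metres, so the ratio is metres per crank revolution divided by metres per wheel revolution). The 0.1 acceptance tolerance is purely an assumption: the tests only show that an exact ratio matches and a ratio off by about 2.0 does not.

```python
# Sketch only: the tolerance and the structure are assumptions, not shipped code.
from typing import Optional

from sqlalchemy.orm import Session

from src.models import Activity, BikeSetup

WHEEL_CIRCUMFERENCE_M = 2.1   # metres per wheel revolution (~700c road wheel)
RATIO_TOLERANCE = 0.1         # assumed acceptance window around a setup's ratio

def calculate_observed_ratio(avg_speed: float, avg_cadence: float) -> float:
    # metres per crank revolution / metres per wheel revolution = gear ratio
    return (avg_speed * 60) / (avg_cadence * WHEEL_CIRCUMFERENCE_M)

def match_activity_to_bike(db: Session, activity: Activity) -> Optional[BikeSetup]:
    if "indoor" in (activity.activity_type or "").lower():
        return None  # trainer rides carry no meaningful wheel speed
    if not activity.avg_speed or not activity.avg_cadence:
        return None
    observed = calculate_observed_ratio(activity.avg_speed, activity.avg_cadence)
    best, best_delta = None, RATIO_TOLERANCE
    for setup in db.query(BikeSetup).all():
        delta = abs(observed - setup.chainring / setup.rear_cog)
        if delta <= best_delta:
            best, best_delta = setup, delta
    return best
```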
- """Test successful downloading of an activity file.""" - mock_download.return_value = b"file_content_mock" - - activity_id = "12345" - file_type = "tcx" - - file_content = garmin_client_instance.download_activity(activity_id, file_type) - - mock_download.assert_called_once_with(f"/download-service/export/{file_type}/activity/{activity_id}") - assert file_content == b"file_content_mock" - -@patch("garth.client.download") -def test_download_activity_failure(mock_download, garmin_client_instance): - """Test failure during downloading of an activity file.""" - mock_download.side_effect = GarthException("Download error") - - activity_id = "12345" - file_type = "gpx" - - file_content = garmin_client_instance.download_activity(activity_id, file_type) - - mock_download.assert_called_once() - assert file_content is None # Should return None on exception - -@patch("src.services.garmin.data.DailySteps") -@patch("src.services.garmin.data.DailyHRV") -@patch("src.services.garmin.data.SleepData") -def test_get_daily_metrics_success(mock_sleep_data, mock_daily_hrv, mock_daily_steps, garmin_client_instance): - """Test successful fetching of daily metrics.""" - mock_daily_steps.list.return_value = [MagicMock(calendar_date="2023-01-01", total_steps=1000)] - mock_daily_hrv.list.return_value = [MagicMock(calendar_date="2023-01-01", last_night_avg=50)] - mock_sleep_data.list.return_value = [MagicMock(daily_sleep_dto=MagicMock(calendar_date="2023-01-01", sleep_time_seconds=28800))] - - start_date = "2023-01-01" - end_date = "2023-01-01" - - metrics = garmin_client_instance.get_daily_metrics(start_date, end_date) - - mock_daily_steps.list.assert_called_once_with(datetime(2023, 1, 1).date(), period=1) - mock_daily_hrv.list.assert_called_once_with(datetime(2023, 1, 1).date(), period=1) - mock_sleep_data.list.assert_called_once_with(datetime(2023, 1, 1).date(), days=1) - - assert len(metrics["steps"]) == 1 - assert metrics["steps"][0].total_steps == 1000 - assert len(metrics["hrv"]) == 1 - assert metrics["hrv"][0].last_night_avg == 50 - assert len(metrics["sleep"]) == 1 - assert metrics["sleep"][0].daily_sleep_dto.sleep_time_seconds == 28800 - -@patch("src.services.garmin.data.DailySteps") -@patch("src.services.garmin.data.DailyHRV") -@patch("src.services.garmin.data.SleepData") -def test_get_daily_metrics_partial_failure(mock_sleep_data, mock_daily_hrv, mock_daily_steps, garmin_client_instance): - """Test fetching daily metrics with some failures.""" - mock_daily_steps.list.side_effect = GarthException("Steps error") - mock_daily_hrv.list.return_value = [MagicMock(calendar_date="2023-01-01", last_night_avg=50)] - mock_sleep_data.list.return_value = [] - - start_date = "2023-01-01" - end_date = "2023-01-01" - - metrics = garmin_client_instance.get_daily_metrics(start_date, end_date) - - assert metrics["steps"] == [] # Should return empty list on error - assert len(metrics["hrv"]) == 1 - assert metrics["sleep"] == [] \ No newline at end of file diff --git a/FitnessSync/backend/tests/unit/test_sync_app.py b/FitnessSync/backend/tests/unit/test_sync_app.py index 0051a71..bc8f1aa 100644 --- a/FitnessSync/backend/tests/unit/test_sync_app.py +++ b/FitnessSync/backend/tests/unit/test_sync_app.py @@ -2,8 +2,6 @@ import pytest from unittest.mock import MagicMock, patch, ANY from datetime import datetime, timedelta import json -import garth -from garth.exc import GarthException from sqlalchemy.orm import Session from src.services.sync_app import SyncApp @@ -11,13 +9,17 @@ from src.services.garmin.client import GarminClient 
from src.models.activity import Activity from src.models.health_metric import HealthMetric from src.models.sync_log import SyncLog -from src.models.api_token import APIToken # Needed for AuthMixin +from src.models.activity_state import GarminActivityState +from src.models.health_state import HealthSyncState @pytest.fixture def mock_db_session(): """Fixture for a mock SQLAlchemy session.""" session = MagicMock(spec=Session) + # Default behavior: return None session.query.return_value.filter_by.return_value.first.return_value = None + session.query.return_value.filter.return_value.order_by.return_value.limit.return_value.all.return_value = [] + session.query.return_value.filter.return_value.order_by.return_value.all.return_value = [] return session @pytest.fixture @@ -25,6 +27,8 @@ def mock_garmin_client(): """Fixture for a mock GarminClient.""" client = MagicMock(spec=GarminClient) client.is_connected = True + # Important: mock client attribute for internal access (e.g. client.client.get_activity) + client.client = MagicMock() return client @pytest.fixture @@ -34,20 +38,17 @@ def sync_app_instance(mock_db_session, mock_garmin_client): # --- Tests for sync_activities --- + def test_sync_activities_no_activities(sync_app_instance, mock_garmin_client, mock_db_session): """Test sync_activities when no activities are fetched from Garmin.""" mock_garmin_client.get_activities.return_value = [] - result = sync_app_instance.sync_activities(days_back=1) - mock_garmin_client.get_activities.assert_called_once() assert result == {"processed": 0, "failed": 0} - mock_db_session.add.assert_called_once() # For sync_log - assert mock_db_session.commit.call_count == 2 # Initial commit for sync_log, final commit def test_sync_activities_success_new_activity(sync_app_instance, mock_garmin_client, mock_db_session): - """Test sync_activities for a new activity, successfully downloaded.""" + """Test sync_activities for a new activity.""" garmin_activity_data = { "activityId": "1", "activityName": "Run", @@ -56,144 +57,166 @@ def test_sync_activities_success_new_activity(sync_app_instance, mock_garmin_cli "duration": 3600, } mock_garmin_client.get_activities.return_value = [garmin_activity_data] - mock_db_session.query.return_value.filter_by.return_value.first.return_value = None # No existing activity - mock_garmin_client.download_activity.return_value = b"tcx_content" + mock_garmin_client.download_activity.return_value = b"fit_content" + + # Mock full details fetch + mock_garmin_client.client.get_activity.return_value = {"activityId": "1", "summaryDTO": {}} + + mock_state = GarminActivityState( + garmin_activity_id="1", sync_status="new", + activity_name="Run", activity_type="running", start_time=datetime(2023,1,1,10,0,0) + ) + + # Mock Activity record that will be found during redownload + mock_activity = Activity(garmin_activity_id="1", download_status="pending") + + def query_side_effect(model): + m = MagicMock() + if model == GarminActivityState: + q_pending = MagicMock() + q_pending.order_by.return_value.limit.return_value.all.return_value = [mock_state] + q_pending.order_by.return_value.all.return_value = [mock_state] + + q_scan = MagicMock() + q_scan.first.return_value = None # Scan finds nothing + + m.filter.return_value = q_pending + m.filter_by.return_value = q_scan + return m + + if model == Activity: + # Both phases look the Activity up by garmin_activity_id: the scan + # sees a pending (not yet downloaded) record, and the redownload + # phase then updates that same record.
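The side-effect plumbing is easier to read against the control flow it emulates. Roughly, and with invented helper names (`_window`, `_upsert_state`, `_store_file`), the tests model a two-phase sync like this; only the query shapes and the download fallback order are grounded in the assertions:

```python
# Pseudocode for the flow the mocks emulate, not the real SyncApp method body.
def sync_activities(self, days_back: int) -> dict:
    stats = {"processed": 0, "failed": 0}
    start, end = self._window(days_back)
    # Phase 1 (scan): upsert one GarminActivityState row per Garmin activity.
    for meta in self.garmin_client.get_activities(start, end):
        self._upsert_state(meta)
    # Phase 2 (redownload): oldest-first walk over non-downloaded states.
    pending = (self.db.query(GarminActivityState)
               .filter(GarminActivityState.sync_status != "downloaded")
               .order_by(GarminActivityState.start_time)
               .all())
    for state in pending:
        content = None
        for fmt in ("fit", "original", "tcx", "gpx"):  # order assumed; 'fit' first
            content = self.garmin_client.download_activity(
                state.garmin_activity_id, file_type=fmt)
            if content:
                break
        if content is None:
            stats["failed"] += 1   # all four formats failed -> call_count == 4
        else:
            self._store_file(state, content)
            stats["processed"] += 1
    return stats
```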
+ m.filter_by.return_value.first.return_value = mock_activity + return m + + return m + + mock_db_session.query.side_effect = query_side_effect result = sync_app_instance.sync_activities(days_back=1) - mock_garmin_client.get_activities.assert_called_once() - mock_garmin_client.download_activity.assert_called_once_with(ANY, file_type='original') # Checks if called with any activity_id and 'original' type - assert mock_db_session.add.call_count == 2 # sync_log and new activity - assert mock_db_session.commit.call_count == 3 # Initial commit for sync_log, commit after activity, final commit - assert result == {"processed": 1, "failed": 0} - - # Verify activity saved (check the second add call) - activity_record = mock_db_session.add.call_args_list[1][0][0] - assert isinstance(activity_record, Activity) - assert activity_record.garmin_activity_id == "1" - assert activity_record.activity_type == "running" - assert activity_record.file_content == b"tcx_content" - assert activity_record.file_type == "original" # Should be original if first format succeeded - assert activity_record.download_status == "downloaded" + mock_garmin_client.download_activity.assert_called_once_with(ANY, file_type='fit') + assert result['processed'] == 1 + assert result['failed'] == 0 def test_sync_activities_already_downloaded(sync_app_instance, mock_garmin_client, mock_db_session): - """Test sync_activities when activity is already downloaded.""" - garmin_activity_data = { - "activityId": "2", - "activityName": "Walk", - "activityType": {"typeKey": "walking"}, - "startTimeLocal": "2023-01-02T11:00:00", - "duration": 1800, - } + """Test sync_activities already downloaded.""" + garmin_activity_data = {"activityId": "2", "activityType": {"typeKey": "walking"}} mock_garmin_client.get_activities.return_value = [garmin_activity_data] - # Mock existing activity in DB - existing_activity = Activity(garmin_activity_id="2", download_status="downloaded", file_content=b"old_content") - mock_db_session.query.return_value.filter_by.return_value.first.return_value = existing_activity + # Mock DB + existing_activity = Activity(garmin_activity_id="2", download_status="downloaded", file_content=b"content") + + def query_side_effect(model): + m = MagicMock() + if model == Activity: + m.filter_by.return_value.first.return_value = existing_activity + return m + if model == GarminActivityState: + m.filter_by.return_value.first.return_value = None # Scan sees no state + + # Pending query -> Empty + q_pending = MagicMock() + q_pending.order_by.return_value.all.return_value = [] + q_pending.order_by.return_value.limit.return_value.all.return_value = [] + m.filter.return_value = q_pending + return m + return m + + mock_db_session.query.side_effect = query_side_effect result = sync_app_instance.sync_activities(days_back=1) - - mock_garmin_client.get_activities.assert_called_once() - mock_garmin_client.download_activity.assert_not_called() # Should not try to download again - mock_db_session.add.assert_called_once_with(ANY) # For sync_log only - assert mock_db_session.commit.call_count == 3 # Initial commit for sync_log, loop commit (0 activities), final commit - assert result == {"processed": 0, "failed": 0} # No new processed/failed due to skipping + + mock_garmin_client.download_activity.assert_not_called() + assert result == {"processed": 0, "failed": 0} def test_sync_activities_download_failure(sync_app_instance, mock_garmin_client, mock_db_session): - """Test sync_activities when download fails for all formats.""" - garmin_activity_data = { 
- "activityId": "3", - "activityName": "Swim", - "activityType": {"typeKey": "swimming"}, - "startTimeLocal": "2023-01-03T12:00:00", - "duration": 2700, - } + """Test sync_activities download failure.""" + garmin_activity_data = {"activityId": "3", "activityName": "Swim"} mock_garmin_client.get_activities.return_value = [garmin_activity_data] - mock_db_session.query.return_value.filter_by.return_value.first.return_value = None - mock_garmin_client.download_activity.return_value = None # Download fails for all formats + mock_garmin_client.download_activity.return_value = None # Fail + + mock_garmin_client.client.get_activity.return_value = {} + + mock_state = GarminActivityState(garmin_activity_id="3", sync_status="new") + mock_activity = Activity(garmin_activity_id="3", download_status="pending") + + def query_side_effect(model): + m = MagicMock() + if model == GarminActivityState: + q_pending = MagicMock() + q_pending.order_by.return_value.limit.return_value.all.return_value = [mock_state] + q_pending.order_by.return_value.all.return_value = [mock_state] + m.filter.return_value = q_pending + + q_scan = MagicMock() + q_scan.first.return_value = None + m.filter_by.return_value = q_scan + return m + if model == Activity: + m.filter_by.return_value.first.return_value = mock_activity + return m + return m + + mock_db_session.query.side_effect = query_side_effect result = sync_app_instance.sync_activities(days_back=1) - - mock_garmin_client.get_activities.assert_called_once() - assert mock_garmin_client.download_activity.call_count == 4 # Tries 'original', 'tcx', 'gpx', 'fit' - assert mock_db_session.add.call_count == 2 # For sync_log and new activity - assert mock_db_session.commit.call_count == 3 # Initial commit for sync_log, commit after activity, final commit - assert result == {"processed": 0, "failed": 1} - - # Verify activity marked as failed - activity_record = mock_db_session.add.call_args_list[1][0][0] - assert activity_record.garmin_activity_id == "3" - assert activity_record.download_status == "failed" - assert activity_record.file_content is None + + assert mock_garmin_client.download_activity.call_count == 4 + assert result['failed'] == 1 # --- Tests for sync_health_metrics --- -@patch.object(SyncApp, '_update_or_create_metric') + +@patch('src.services.sync.health.update_or_create_health_metric') def test_sync_health_metrics_success(mock_update_or_create_metric, sync_app_instance, mock_garmin_client, mock_db_session): """Test successful fetching and processing of health metrics.""" - mock_garmin_client.get_daily_metrics.return_value = { - "steps": [MagicMock(calendar_date=datetime(2023, 1, 1).date(), total_steps=10000)], - "hrv": [MagicMock(calendar_date=datetime(2023, 1, 1).date(), last_night_avg=50)], - "sleep": [MagicMock(daily_sleep_dto=MagicMock(calendar_date=datetime(2023, 1, 1).date(), sleep_time_seconds=28800))] + mock_garmin_client.get_all_metrics_for_date.return_value = { + "steps": {"totalSteps": 10000}, + "hrv": {"lastNightAvg": 50}, + "sleep": {"dailySleepDTO": {"sleepTimeSeconds": 28800}} } - + + mock_update_or_create_metric.return_value = 'new' + result = sync_app_instance.sync_health_metrics(days_back=1) - mock_garmin_client.get_daily_metrics.assert_called_once() - assert mock_update_or_create_metric.call_count == 3 # Steps, HRV, Sleep - assert result == {"processed": 3, "failed": 0} - mock_db_session.add.assert_called_once() # For sync_log - assert mock_db_session.commit.call_count == 2 # Initial commit for sync_log, final commit + assert 
mock_garmin_client.get_all_metrics_for_date.call_count == 2 # days_back=1 covers a two-day window, one fetch per day + assert mock_update_or_create_metric.call_count == 6 # 3 metrics x 2 days + assert result == {"processed": 6, "failed": 0} -@patch.object(SyncApp, '_update_or_create_metric') +@patch('src.services.sync.health.update_or_create_health_metric') def test_sync_health_metrics_partial_failure(mock_update_or_create_metric, sync_app_instance, mock_garmin_client, mock_db_session): """Test partial failure during health metrics processing.""" - mock_garmin_client.get_daily_metrics.return_value = { - "steps": [MagicMock(calendar_date=datetime(2023, 1, 1).date(), total_steps=10000)], - "hrv": [MagicMock(calendar_date=datetime(2023, 1, 1).date(), last_night_avg=50)], - "sleep": [MagicMock(daily_sleep_dto=MagicMock(calendar_date=datetime(2023, 1, 1).date(), sleep_time_seconds=28800))] + mock_garmin_client.get_all_metrics_for_date.return_value = { + "steps": {"totalSteps": 10000}, + "hrv": {"lastNightAvg": 50}, + "sleep": {"dailySleepDTO": {"sleepTimeSeconds": 28800}} } - mock_update_or_create_metric.side_effect = [None, Exception("HRV save error"), None] # Steps OK, HRV fails, Sleep OK - - result = sync_app_instance.sync_health_metrics(days_back=1) - - assert mock_update_or_create_metric.call_count == 3 - assert result == {"processed": 2, "failed": 1} # 2 successful, 1 failed - mock_db_session.add.assert_called_once() - assert mock_db_session.commit.call_count == 2 - -# --- Tests for _update_or_create_metric --- -def test_update_or_create_metric_create_new(sync_app_instance, mock_db_session): - """Test _update_or_create_metric creates a new metric.""" - mock_db_session.query.return_value.filter_by.return_value.first.return_value = None # No existing metric - - sync_app_instance._update_or_create_metric("steps", datetime(2023, 1, 1).date(), 10000, "steps") - - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_called_once() - metric_record = mock_db_session.add.call_args[0][0] - assert isinstance(metric_record, HealthMetric) - assert metric_record.metric_type == "steps" - assert metric_record.metric_value == 10000 + mock_update_or_create_metric.side_effect = [ + 'new', 'error', 'new', # Day 1: Steps OK, HRV Error, Sleep OK + 'new', 'new', 'new' # Day 2: All OK + ] + + result = sync_app_instance.sync_health_metrics(days_back=1) + + assert mock_update_or_create_metric.call_count == 6 + assert result["processed"] == 5 # six calls, one returned 'error' + # 'failed' stays 0: an 'error' status from the metric helper is simply not + # counted as processed; the failed counter only tracks exceptions raised + # outside _process_day_metrics_dict. + assert result["failed"] == 0
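From the patched return values, the utils helper appears to report 'new', 'updated', or 'error' per metric, with only the first two counting toward `processed`. A minimal sketch of that contract; the real helper in src/services/sync/ may well differ, and the per-day commit is assumed to happen in the caller:

```python
# Assumed contract of update_or_create_health_metric as the tests patch it.
from datetime import datetime, timezone

from src.models.health_metric import HealthMetric

def update_or_create_health_metric(db, metric_type, date, value, unit, source="garmin"):
    try:
        existing = db.query(HealthMetric).filter_by(
            metric_type=metric_type, date=date, source=source).first()
        if existing:
            existing.metric_value = value
            existing.updated_at = datetime.now(timezone.utc)
            return "updated"
        db.add(HealthMetric(metric_type=metric_type, date=date,
                            metric_value=value, unit=unit, source=source))
        return "new"
    except Exception:
        db.rollback()
        return "error"  # swallowed by the caller; not counted as processed
```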
- -def test_update_or_create_metric_update_existing(sync_app_instance, mock_db_session): - """Test _update_or_create_metric updates an existing metric.""" - existing_metric = HealthMetric( - metric_type="steps", - date=datetime(2023, 1, 1).date(), - metric_value=5000, - unit="steps", - source="garmin" - ) - mock_db_session.query.return_value.filter_by.return_value.first.return_value = existing_metric - - sync_app_instance._update_or_create_metric("steps", datetime(2023, 1, 1).date(), 12000, "steps") - - mock_db_session.add.assert_not_called() # Should not add new record - mock_db_session.commit.assert_called_once() - assert existing_metric.metric_value == 12000 - assert existing_metric.updated_at is not None +@patch('src.services.sync_app.update_or_create_health_metric') +def test_wrapper_update_metric(mock_utils_fn, sync_app_instance): + sync_app_instance._update_or_create_metric("steps", datetime(2023,1,1).date(), 100, "steps") + mock_utils_fn.assert_called_once() diff --git a/FitnessSync/requirements.txt b/FitnessSync/requirements.txt index 5ac9976..a63b1fe 100644 --- a/FitnessSync/requirements.txt +++ b/FitnessSync/requirements.txt @@ -14,4 +14,5 @@ httpx==0.25.2 aiofiles==23.2.1 pytest==7.4.3 pytest-asyncio==0.21.1 -alembic==1.13.1 \ No newline at end of file +alembic==1.13.1 +fitdecode==0.10.0 \ No newline at end of file diff --git a/FitnessSync/response.json b/FitnessSync/response.json deleted file mode 100644 index 3e10ef2..0000000 --- a/FitnessSync/response.json +++ /dev/null @@ -1 +0,0 @@ -{"status":"error","message":"Login failed with internal error: HTTPException(status_code=401, detail='Login failed. Check username/password.')\n\nTraceback:\nTraceback (most recent call last):\n File \"/app/backend/src/api/setup.py\", line 119, in save_garmin_credentials\n raise HTTPException(status_code=401, detail=\"Login failed. 
Check username/password.\")\nfastapi.exceptions.HTTPException\n"} \ No newline at end of file diff --git a/FitnessSync/scratch/check_config_db.py b/FitnessSync/scratch/check_config_db.py new file mode 100644 index 0000000..224e096 --- /dev/null +++ b/FitnessSync/scratch/check_config_db.py @@ -0,0 +1,19 @@ +from src.models.config import Configuration +from src.services.postgresql_manager import PostgreSQLManager +import os + +print(f"CWD: {os.getcwd()}") +try: + db = PostgreSQLManager().get_db_session() + with db as session: + # Get the first configuration, assuming singleton pattern or similar + config = session.query(Configuration).first() + if config: + print(f"Config ID: {config.id}") + print(f"Fitbit Client ID: {'[PRESENT]' if config.fitbit_client_id else '[MISSING]'}") + print(f"Fitbit Client Secret: {'[PRESENT]' if config.fitbit_client_secret else '[MISSING]'}") + print(f"Fitbit Redirect URI: {config.fitbit_redirect_uri}") + else: + print("No Configuration record found.") +except Exception as e: + print(f"Error: {e}") diff --git a/FitnessSync/check_garth_mfa_arg.py b/FitnessSync/scratch/check_garth_mfa_arg.py similarity index 100% rename from FitnessSync/check_garth_mfa_arg.py rename to FitnessSync/scratch/check_garth_mfa_arg.py diff --git a/FitnessSync/scratch/check_tokens.py b/FitnessSync/scratch/check_tokens.py new file mode 100644 index 0000000..70b9882 --- /dev/null +++ b/FitnessSync/scratch/check_tokens.py @@ -0,0 +1,17 @@ +from src.models.api_token import APIToken +from src.services.postgresql_manager import PostgreSQLManager +import os + +print(f"CWD: {os.getcwd()}") +try: + db = PostgreSQLManager().get_db_session() + with db as session: + token = session.query(APIToken).filter_by(token_type="fitbit").first() + if token: + print(f"Token Found: ID={token.id}") + print(f"Access Token: {'[PRESENT]' if token.access_token else '[MISSING]'}") + print(f"Refresh Token: {'[PRESENT]' if token.refresh_token else '[MISSING]'}") + else: + print("No Fitbit token found in DB") +except Exception as e: + print(f"Error: {e}") diff --git a/FitnessSync/debug_garth_connection.py b/FitnessSync/scratch/debug_garth_connection.py similarity index 100% rename from FitnessSync/debug_garth_connection.py rename to FitnessSync/scratch/debug_garth_connection.py diff --git a/FitnessSync/inspect_activity.py b/FitnessSync/scratch/inspect_activity.py similarity index 100% rename from FitnessSync/inspect_activity.py rename to FitnessSync/scratch/inspect_activity.py diff --git a/FitnessSync/scratch/inspect_activity_keys.py b/FitnessSync/scratch/inspect_activity_keys.py new file mode 100644 index 0000000..82a7f75 --- /dev/null +++ b/FitnessSync/scratch/inspect_activity_keys.py @@ -0,0 +1,45 @@ + +import logging +import sys +import os + +# Adjust path to find backend modules +sys.path.append(os.path.abspath("/home/sstent/Projects/FitTrack2/FitnessSync/backend")) + +from src.services.garmin.client import GarminClient +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker +from src.models import Base +from src.services.postgresql_manager import PostgreSQLManager + +# Mock/Setup DB connection to load tokens +DATABASE_URL = "postgresql://user:password@localhost:5432/fitness_sync" +engine = create_engine(DATABASE_URL) +SessionLocal = sessionmaker(bind=engine) +db = SessionLocal() + +try: + client = GarminClient() + if client.load_tokens(db): + print("Tokens loaded.") + # Fetch 1 activity + activities = client.get_activities('2025-12-01', '2026-01-01') # Adjust dates to find recent ones + if 
activities: + act = activities[0] + print("\n--- Available Keys in Activity Metadata ---") + for k in sorted(act.keys()): + val = act[k] + # Truncate long values + val_str = str(val) + if len(val_str) > 50: val_str = val_str[:50] + "..." + print(f"{k}: {val_str}") + + print(f"\nTotal keys: {len(act.keys())}") + else: + print("No activities found in range.") + else: + print("Failed to load tokens.") +except Exception as e: + print(f"Error: {e}") +finally: + db.close() diff --git a/FitnessSync/inspect_db_tokens.py b/FitnessSync/scratch/inspect_db_tokens.py similarity index 100% rename from FitnessSync/inspect_db_tokens.py rename to FitnessSync/scratch/inspect_db_tokens.py diff --git a/FitnessSync/inspect_db_tokens_standalone.py b/FitnessSync/scratch/inspect_db_tokens_standalone.py similarity index 100% rename from FitnessSync/inspect_db_tokens_standalone.py rename to FitnessSync/scratch/inspect_db_tokens_standalone.py diff --git a/FitnessSync/scratch/inspect_fitbit_api_bmi.py b/FitnessSync/scratch/inspect_fitbit_api_bmi.py new file mode 100644 index 0000000..42963a9 --- /dev/null +++ b/FitnessSync/scratch/inspect_fitbit_api_bmi.py @@ -0,0 +1,129 @@ +import os +import datetime +from dotenv import load_dotenv + +# Load env vars first +load_dotenv() + +from src.services.postgresql_manager import PostgreSQLManager +from src.services.fitbit_client import FitbitClient +from src.models.api_token import APIToken +from src.models.weight_record import WeightRecord + +# Set DB URL +if not os.environ.get('DATABASE_URL'): + os.environ['DATABASE_URL'] = 'postgresql://postgres:password@localhost:5433/fitbit_garmin_sync' + +def main(): + print("Connecting to DB...") + db = PostgreSQLManager() + with db.get_db_session() as session: + # Get a date with NULL BMI + null_record = session.query(WeightRecord).filter(WeightRecord.bmi == None).first() + if not null_record: + print("No NULL BMI records found.") + return + + date_str = null_record.date.strftime('%Y-%m-%d') + print(f"Checking Fitbit API for date: {date_str} (Existing BMI: {null_record.bmi})") + + # Init Client + token_record = session.query(APIToken).filter_by(token_type='fitbit').first() + if not token_record: + print("No Fitbit token found.") + return + + token_dict = { + 'access_token': token_record.access_token, + 'refresh_token': token_record.refresh_token, + 'expires_at': token_record.expires_at.timestamp() if token_record.expires_at else None, + 'scope': token_record.scopes + } + + from src.utils.config import config + from src.models.config import Configuration + + client_id = config.FITBIT_CLIENT_ID + client_secret = config.FITBIT_CLIENT_SECRET + + # Fallback to DB + if not client_id or not client_secret: + print("Config missing credentials, checking DB...") + db_config = session.query(Configuration).first() + if db_config: + client_id = db_config.fitbit_client_id + client_secret = db_config.fitbit_client_secret + print("Loaded credentials from DB Configuration.") + + if not client_id or not client_secret: + print("ERROR: Could not find Fitbit credentials in Config or DB.") + return + + def refresh_cb(token): + print("DEBUG: Token refreshed!", flush=True) + # In real app we would save to DB here + # Update the client with new token just in case + pass + + client = FitbitClient( + client_id=client_id, + client_secret=client_secret, + redirect_uri=os.environ.get('FITBIT_REDIRECT_URI', 'http://localhost:8000/api/setup/fitbit/callback'), + access_token=token_record.access_token, + refresh_token=token_record.refresh_token, + refresh_cb=refresh_cb + 
) + + # The fitbit library tracks token expiry itself and refreshes on request, + # so simply making the call below should exercise the refresh path. + + try: + logs = client.get_weight_logs(date_str, date_str) + print(f"Fetched {len(logs)} logs for {date_str}") + if logs: + print("First log payload:", logs[0]) + if 'bmi' in logs[0]: + print(f"BMI in response: {logs[0]['bmi']}") + else: + print("BMI NOT found in response.") + except Exception as e: + print(f"Error fetching logs: {e}") + # Fall back to a manual refresh if the library didn't auto-refresh. + # python-fitbit exposes the underlying OAuth2 client as client.fitbit.client. + try: + print("Attempting manual refresh...") + new_token = client.fitbit.client.refresh_token( + 'https://api.fitbit.com/oauth2/token', + refresh_token=token_record.refresh_token, + auth=(client_id, client_secret) + ) + print("Manual refresh success:", new_token.keys()) + # Retry the request with the fresh token + client.fitbit.client.token = new_token + logs = client.get_weight_logs(date_str) + if logs: + print("Retry Payload:", logs[0]) + except Exception as re: + print(f"Manual refresh failed: {re}") + +if __name__ == "__main__": + main() diff --git a/FitnessSync/scratch/inspect_fitbit_bmi.py b/FitnessSync/scratch/inspect_fitbit_bmi.py new file mode 100644 index 0000000..1402deb --- /dev/null +++ b/FitnessSync/scratch/inspect_fitbit_bmi.py @@ -0,0 +1,39 @@ +import os +from src.services.postgresql_manager import PostgreSQLManager +from src.models.weight_record import WeightRecord +from sqlalchemy import func + +# Set DB URL +if not os.environ.get('DATABASE_URL'): + os.environ['DATABASE_URL'] = 'postgresql://postgres:password@localhost:5433/fitbit_garmin_sync' + +def main(): + print("Connecting to DB...") + db = PostgreSQLManager() + with db.get_db_session() as session: + total = session.query(WeightRecord).count() + + # null bmi + null_bmi = session.query(WeightRecord).filter(WeightRecord.bmi == None).count() + + # zero bmi + zero_bmi = session.query(WeightRecord).filter(WeightRecord.bmi == 0).count() + + # valid bmi + valid_bmi = session.query(WeightRecord).filter(WeightRecord.bmi > 0).count() + + print(f"Total Records: {total}") + print(f"Null BMI: {null_bmi}") + print(f"Zero BMI: {zero_bmi}") + print(f"Valid BMI: {valid_bmi}") + + if valid_bmi > 0: + sample = session.query(WeightRecord).filter(WeightRecord.bmi > 0).first() + print(f"Sample Valid: Date={sample.date}, Weight={sample.weight}, BMI={sample.bmi}") + + if null_bmi > 0: + sample = session.query(WeightRecord).filter(WeightRecord.bmi == None).first() + print(f"Sample Null: Date={sample.date}, Weight={sample.weight}, BMI={sample.bmi}") + +if __name__ == "__main__": + main() diff --git a/FitnessSync/scratch/inspect_garmin_lib.py b/FitnessSync/scratch/inspect_garmin_lib.py new file mode 100644 index 0000000..87df07e --- /dev/null +++ b/FitnessSync/scratch/inspect_garmin_lib.py @@ -0,0 +1,21 @@ + +try: + import garminconnect + print(f"garminconnect version: {getattr(garminconnect,
'__version__', 'unknown')}") + from garminconnect import Garmin + + print("\n--- Garmin Class Attributes ---") + for attr in dir(Garmin): + if not attr.startswith("__"): + print(attr) + + if hasattr(Garmin, 'ActivityDownloadFormat'): + print("\n--- Garmin.ActivityDownloadFormat Attributes ---") + for attr in dir(Garmin.ActivityDownloadFormat): + if not attr.startswith("__"): + print(f"{attr}: {getattr(Garmin.ActivityDownloadFormat, attr)}") + else: + print("\nGarmin.ActivityDownloadFormat not found.") + +except Exception as e: + print(f"Error: {e}") diff --git a/FitnessSync/inspect_garth_client.py b/FitnessSync/scratch/inspect_garth_client.py similarity index 100% rename from FitnessSync/inspect_garth_client.py rename to FitnessSync/scratch/inspect_garth_client.py diff --git a/FitnessSync/scratch/inspect_weight_count.py b/FitnessSync/scratch/inspect_weight_count.py new file mode 100644 index 0000000..124ccfe --- /dev/null +++ b/FitnessSync/scratch/inspect_weight_count.py @@ -0,0 +1,49 @@ +from backend.src.services.postgresql_manager import PostgreSQLManager +from backend.src.utils.config import config +from backend.src.models.weight_record import WeightRecord +from backend.src.models.health_metric import HealthMetric +from sqlalchemy import func +import sys + +def check_count(): + print("DEBUG: Connecting to DB...", flush=True) + try: + db_manager = PostgreSQLManager(config.DATABASE_URL) + print("DEBUG: Session factory created.", flush=True) + with db_manager.get_db_session() as session: + print("DEBUG: Session active.", flush=True) + + # Check WeightRecord + try: + wr_count = session.query(func.count(WeightRecord.id)).scalar() + print(f"Total WeightRecord (Legacy?) records: {wr_count}") + except Exception as e: + print(f"Error querying WeightRecord: {e}") + + # Check HealthMetric (Fitbit Weight) + try: + hm_count = session.query(func.count(HealthMetric.id)).filter( + HealthMetric.metric_type == 'weight', + HealthMetric.source == 'fitbit' + ).scalar() + print(f"Total HealthMetric (Fitbit Weight) records: {hm_count}") + except Exception as e: + print(f"Error querying HealthMetric: {e}") + + # Also check top 5 from HealthMetric + print("\nLatest 5 HealthMetric (Fitbit Weight) records:") + latest = session.query(HealthMetric).filter( + HealthMetric.metric_type == 'weight', + HealthMetric.source == 'fitbit' + ).order_by(HealthMetric.date.desc()).limit(5).all() + + for r in latest: + print(f" - {r.date}: {r.metric_value} {r.unit}") + + except Exception as e: + print(f"CRITICAL ERROR: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + check_count() diff --git a/FitnessSync/scratch/recreate_scheduler_table.py b/FitnessSync/scratch/recreate_scheduler_table.py new file mode 100644 index 0000000..2561c74 --- /dev/null +++ b/FitnessSync/scratch/recreate_scheduler_table.py @@ -0,0 +1,19 @@ + +from src.services.postgresql_manager import PostgreSQLManager +from src.models.base import Base +from src.models.scheduled_job import ScheduledJob +from src.utils.config import config +import logging +from sqlalchemy import text + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + pm = PostgreSQLManager(config.DATABASE_URL) + with pm.get_db_session() as session: + print("Dropping scheduled_jobs table...") + session.execute(text("DROP TABLE IF EXISTS scheduled_jobs")) + session.commit() + + print("Re-creating tables...") + pm.init_db() + print("Done.") diff --git a/FitnessSync/scratch/test_api.py b/FitnessSync/scratch/test_api.py new file mode 100644 index 
0000000..527190f --- /dev/null +++ b/FitnessSync/scratch/test_api.py @@ -0,0 +1,30 @@ +import requests +import json + +def test_api(): + url = "http://localhost:8000/api/metrics/query" + params = { + "metric_type": "weight", + "source": "fitbit", + "start_date": "2025-01-01", + "end_date": "2026-01-02", + "limit": 1000 # Try requesting 1000 + } + + try: + response = requests.get(url, params=params) + response.raise_for_status() + data = response.json() + print(f"Status: {response.status_code}") + print(f"Records returned: {len(data)}") + if len(data) < 10: + print("Data preview:", json.dumps(data, indent=2)) + else: + print("First record:", json.dumps(data[0], indent=2)) + print("Last record:", json.dumps(data[-1], indent=2)) + + except Exception as e: + print(f"Error: {e}") + +if __name__ == "__main__": + test_api() diff --git a/FitnessSync/scratch/verify_controls.py b/FitnessSync/scratch/verify_controls.py new file mode 100644 index 0000000..123b651 --- /dev/null +++ b/FitnessSync/scratch/verify_controls.py @@ -0,0 +1,89 @@ + +import requests +import time + +BASE_URL = "http://localhost:8000/api" + +def get_job(job_id): + try: + res = requests.get(f"{BASE_URL}/jobs/active", timeout=5) + active = res.json() + return next((j for j in active if j['id'] == job_id), None) + except: + return None + +def main(): + print("Triggering test job...") + try: + res = requests.post(f"{BASE_URL}/status/test-job", timeout=5) + job_id = res.json()['job_id'] + print(f"Job ID: {job_id}") + except Exception as e: + print(f"FAILURE: Could not trigger job: {e}") + return + + time.sleep(2) + job = get_job(job_id) + if not job: + print("FAILURE: Job not active") + return + print(f"Initial Progress: {job['progress']}%") + + # PAUSE + print("Pausing job...") + requests.post(f"{BASE_URL}/jobs/{job_id}/pause") + time.sleep(1) + + # Check if paused + job = get_job(job_id) + print(f"Status after pause: {job['status']}") + if job['status'] != 'paused': + print("FAILURE: Status is not 'paused'") + + prog_at_pause = job['progress'] + time.sleep(3) + job = get_job(job_id) + print(f"Progress after 3s pause: {job['progress']}%") + + if job['progress'] != prog_at_pause: + print("FAILURE: Job continued running while paused!") + else: + print("SUCCESS: Job paused correctly.") + + # RESUME + print("Resuming job...") + requests.post(f"{BASE_URL}/jobs/{job_id}/resume") + time.sleep(3) + + job = get_job(job_id) + print(f"Status after resume: {job['status']}") + print(f"Progress after resume: {job['progress']}%") + + if job['progress'] > prog_at_pause: + print("SUCCESS: Job resumed and progress advanced.") + else: + print("FAILURE: Job didn't advance after resume.") + + # CANCEL + print("Cancelling job...") + requests.post(f"{BASE_URL}/jobs/{job_id}/cancel") + time.sleep(2) + + # Should be completed/cancelled + # Wait for retention cleanup (10s) + buffer + print("Waiting for retention cleanup (12s)...") + time.sleep(12) + + # Check History + print("Checking History...") + res = requests.get(f"{BASE_URL}/jobs/history") + history = res.json() + + my_job = next((j for j in history if j['id'] == job_id), None) + if my_job: + print(f"SUCCESS: Job found in history. 
Status: {my_job['status']}") + else: + print("FAILURE: Job not found in history.") + +if __name__ == "__main__": + main() diff --git a/FitnessSync/specs/002-fitbit-garmin-sync/plan.md b/FitnessSync/specs/002-fitbit-garmin-sync/plan.md index 52d891e..632282a 100644 --- a/FitnessSync/specs/002-fitbit-garmin-sync/plan.md +++ b/FitnessSync/specs/002-fitbit-garmin-sync/plan.md @@ -51,22 +51,41 @@ backend/ ├── src/ │ ├── api/ │ │ ├── activities.py +│ │ ├── auth.py # Refactored from setup.py +│ │ ├── config_routes.py # Refactored from setup.py +│ │ ├── logs.py │ │ ├── metrics.py +│ │ ├── scheduling.py +│ │ ├── status.py │ │ └── sync.py │ ├── models/ │ │ ├── activity.py -│ │ └── health_metric.py +│ │ ├── health_metric.py +│ │ ├── job.py +│ │ ├── scheduled_job.py +│ │ ├── sync_log.py +│ │ └── weight_record.py │ └── services/ -│ ├── sync_app.py -│ └── garmin/ -│ └── client.py +│ ├── garmin/ +│ │ ├── auth.py +│ │ ├── client.py +│ │ └── data.py +│ ├── sync/ +│ │ ├── activity.py +│ │ ├── health.py +│ │ └── weight.py +│ ├── fitbit_client.py +│ ├── job_manager.py +│ ├── postgresql_manager.py +│ ├── scheduler.py +│ └── sync_app.py └── tests/ ├── integration/ │ ├── test_sync_flow.py └── unit/ ├── test_api/ │ ├── test_activities.py - │ └── test_metrics.py + │ ├── test_metrics.py └── test_services/ └── test_sync_app.py ```
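On the runtime side, scratch/verify_controls.py above assumes workers poll their job row cooperatively: pause freezes progress, resume lets it advance, and cancel ends the job so it later shows up in history after retention cleanup. A minimal sketch of such a worker loop; the `job_manager` API here (`get`/`update`/`finish`) is invented for illustration and is not the real JobManager interface:

```python
# Cooperative pause/cancel loop matching what verify_controls.py observes.
import time

def run_test_job(job_manager, job_id: str) -> None:
    for pct in range(0, 101, 5):
        job = job_manager.get(job_id)       # re-read flags set via the API
        while job.paused and not job.cancel_requested:
            time.sleep(0.5)                 # progress stays frozen while paused
            job = job_manager.get(job_id)
        if job.cancel_requested:
            job_manager.finish(job_id, status="cancelled")
            return
        job_manager.update(job_id, progress=pct)
        time.sleep(1)
    job_manager.finish(job_id, status="completed")
```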