Files
WerkoutAPI/generator/management/commands/check_rules_drift.py
Trey t 1c61b80731 workout generator audit: rules engine, structure rules, split patterns, injury UX, metadata cleanup
- Add rules_engine.py with quantitative rules for all 8 workout types
- Add quality gate retry loop in generate_single_workout()
- Expand calibrate_structure_rules to all 120 combinations (8 types × 5 goals × 3 sections)
- Wire WeeklySplitPattern DB records into _pick_weekly_split()
- Enforce movement patterns from WorkoutStructureRule in exercise selection
- Add straight-set strength support (single main lift, 4-6 rounds)
- Add modality consistency check for duration-dominant workout types
- Add InjuryStep component to onboarding and preferences
- Add sibling exercise exclusion in regenerate and preview_day endpoints
- Display generator warnings on dashboard
- Expand fix_rep_durations, fix_exercise_flags, fix_movement_pattern_typo
- Add audit_exercise_data and check_rules_drift management commands
- Add Next.js frontend with dashboard, onboarding, preferences, history pages
- Add generator app with ML-powered workout generation pipeline
- 96 new tests across 7 test modules

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-22 20:07:40 -06:00

106 lines
3.4 KiB
Python

"""
CI management command: check for drift between workout_research.md
calibration values and WorkoutType DB records.
Usage:
python manage.py check_rules_drift
python manage.py check_rules_drift --verbosity 2
"""
import sys
from django.core.management.base import BaseCommand
from generator.models import WorkoutType
from generator.rules_engine import DB_CALIBRATION
class Command(BaseCommand):
    """CI guard: detect drift between research calibration and DB records.

    Compares each entry in ``DB_CALIBRATION`` (sourced from
    workout_research.md via generator/rules_engine.py) against the
    matching ``WorkoutType`` row, field by field, and fails the build
    on any disagreement.

    Exit status:
        0 -- every calibrated type exists in the DB and all checked
             fields match.
        1 -- any drift: a field mismatch, OR a calibrated workout type
             with no corresponding WorkoutType row (a missing row is
             drift too, not just a warning -- otherwise CI would pass
             green on an empty/partially-seeded database).
    """

    help = (
        'Check for drift between research doc calibration values '
        'and WorkoutType DB records. Exits 1 if drift is found.'
    )

    # Fields to compare between DB_CALIBRATION and WorkoutType model
    FIELDS_TO_CHECK = [
        'duration_bias',
        'typical_rest_between_sets',
        'typical_intensity',
        'rep_range_min',
        'rep_range_max',
        'round_range_min',
        'round_range_max',
        'superset_size_min',
        'superset_size_max',
    ]

    def handle(self, *args, **options):
        """Run the drift check and exit(1) on any detected drift.

        Args:
            **options: standard management-command options; only
                ``verbosity`` is used (>= 2 prints each matching field).
        """
        verbosity = options.get('verbosity', 1)
        mismatches = []
        missing_in_db = []
        checked = 0

        for type_name, expected_values in DB_CALIBRATION.items():
            try:
                wt = WorkoutType.objects.get(name=type_name)
            except WorkoutType.DoesNotExist:
                missing_in_db.append(type_name)
                continue
            for field_name in self.FIELDS_TO_CHECK:
                # A calibration entry may specify only a subset of
                # fields; compare only what the research doc defines.
                if field_name not in expected_values:
                    continue
                expected = expected_values[field_name]
                # Default of None makes a field absent from the model
                # surface as a reported mismatch instead of crashing.
                actual = getattr(wt, field_name, None)
                checked += 1
                if actual != expected:
                    mismatches.append({
                        'type': type_name,
                        'field': field_name,
                        'expected': expected,
                        'actual': actual,
                    })
                elif verbosity >= 2:
                    self.stdout.write(
                        f" OK {type_name}.{field_name} = {actual}"
                    )

        # ---- Report results ----
        self.stdout.write('')
        self.stdout.write(
            f'Checked {checked} field(s) across '
            f'{len(DB_CALIBRATION)} workout types.'
        )
        self.stdout.write('')

        if missing_in_db:
            # ERROR (not WARNING): a missing row now fails the check.
            self.stdout.write(self.style.ERROR(
                f'Missing from DB ({len(missing_in_db)}):'
            ))
            for name in missing_in_db:
                self.stdout.write(f' - {name}')
            self.stdout.write('')

        if mismatches:
            self.stdout.write(self.style.ERROR(
                f'DRIFT DETECTED: {len(mismatches)} mismatch(es)'
            ))
            self.stdout.write('')
            header = (
                f'{"Workout Type":<35} {"Field":<30} '
                f'{"Expected":<15} {"Actual":<15}'
            )
            self.stdout.write(header)
            self.stdout.write('-' * len(header))
            for m in mismatches:
                self.stdout.write(
                    f'{m["type"]:<35} {m["field"]:<30} '
                    f'{str(m["expected"]):<15} {str(m["actual"]):<15}'
                )
            self.stdout.write('')
            self.stdout.write(self.style.ERROR(
                'To fix: update WorkoutType records in the DB or '
                'update DB_CALIBRATION in generator/rules_engine.py.'
            ))

        # Fail CI on EITHER kind of drift: field mismatches or workout
        # types that never made it into the database.
        if mismatches or missing_in_db:
            sys.exit(1)

        self.stdout.write(self.style.SUCCESS(
            'No drift detected. DB values match research calibration.'
        ))