Hardening follow-up: N+1 elimination, type validation, diversify fix

Additional fixes from parallel hardening streams:

- exercise/serializers: remove unused WorkoutEquipment import, add prefetch docs
- generator/serializers: N+1 fix in GeneratedWorkoutDetailSerializer (inline workout dict, prefetch-aware supersets)
- generator/services/plan_builder: eliminate redundant .save() after .create() via single create_kwargs dict
- generator/services/workout_generator: proper type-match validation for HIIT/cardio/core/flexibility; fix diversify type and signature counts to account for the removed entry
- generator/views: request-level caching for get_registered_user helper; prefetch chain for accept_workout
- superset/serializers: guard against dangling FK in SupersetExerciseSerializer; document recommended prefetch chain on SupersetSerializer
- workout/helpers: use prefetched data instead of re-querying per superset; guard against empty superset exercise lists when queuing next-up audio

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Trey t
2026-02-27 22:33:40 -06:00
parent c80c66c2e5
commit 8e14fd5774
7 changed files with 117 additions and 46 deletions

View File

@@ -1,7 +1,6 @@
from rest_framework import serializers from rest_framework import serializers
from .models import * from .models import *
from muscle.models import ExerciseMuscle from muscle.models import ExerciseMuscle
from equipment.models import WorkoutEquipment
from equipment.serializers import WorkoutEquipmentSerializer from equipment.serializers import WorkoutEquipmentSerializer
class ExerciseMuscleSerializer(serializers.ModelSerializer): class ExerciseMuscleSerializer(serializers.ModelSerializer):
@@ -26,13 +25,13 @@ class ExerciseSerializer(serializers.ModelSerializer):
fields = '__all__' fields = '__all__'
def get_muscles(self, obj): def get_muscles(self, obj):
# Use prefetched data if available, avoiding N+1 queries # Use prefetched related manager if available (avoids N+1 queries)
if hasattr(obj, '_prefetched_objects_cache') and 'exercise_muscle_exercise' in obj._prefetched_objects_cache: # Callers should use .prefetch_related('exercise_muscle_exercise__muscle')
return [{'muscle': em.muscle_id, 'name': em.muscle.name} for em in obj.exercise_muscle_exercise.all()] objs = obj.exercise_muscle_exercise.all()
return list(obj.exercise_muscle_exercise.values('muscle', name=models.F('muscle__name'))) return ExerciseMuscleSerializer(objs, many=True).data
def get_equipment(self, obj): def get_equipment(self, obj):
# Use prefetched data if available, avoiding N+1 queries # Use prefetched related manager if available (avoids N+1 queries)
if hasattr(obj, '_prefetched_objects_cache') and 'workout_exercise_workout' in obj._prefetched_objects_cache: # Callers should use .prefetch_related('workout_exercise_workout__equipment')
return [{'equipment': we.equipment_id, 'name': we.equipment.name} for we in obj.workout_exercise_workout.all()] objs = obj.workout_exercise_workout.all()
return list(obj.workout_exercise_workout.values('equipment', name=models.F('equipment__name'))) return WorkoutEquipmentSerializer(objs, many=True).data

View File

@@ -330,18 +330,29 @@ class GeneratedWorkoutDetailSerializer(serializers.ModelSerializer):
def get_workout_detail(self, obj): def get_workout_detail(self, obj):
if obj.workout: if obj.workout:
return WorkoutDetailSerializer(obj.workout).data return {
'id': obj.workout.id,
'name': obj.workout.name,
'description': obj.workout.description,
'estimated_time': obj.workout.estimated_time,
}
return None return None
def get_supersets(self, obj): def get_supersets(self, obj):
if obj.workout: if not obj.workout:
return []
# Use prefetched data if available (via workout__superset_workout prefetch),
# otherwise fall back to a query with its own prefetch
workout = obj.workout
if hasattr(workout, '_prefetched_objects_cache') and 'superset_workout' in workout._prefetched_objects_cache:
superset_qs = sorted(workout.superset_workout.all(), key=lambda s: s.order)
else:
superset_qs = Superset.objects.filter( superset_qs = Superset.objects.filter(
workout=obj.workout workout=workout
).prefetch_related( ).prefetch_related(
'supersetexercise_set__exercise', 'superset_exercises__exercise',
).order_by('order') ).order_by('order')
return SupersetSerializer(superset_qs, many=True).data return SupersetSerializer(superset_qs, many=True).data
return []
# ============================================================ # ============================================================

View File

@@ -96,26 +96,27 @@ class PlanBuilder:
order = ex_spec.get('order', 1) order = ex_spec.get('order', 1)
superset_exercise = SupersetExercise.objects.create( # Build kwargs for create, including optional fields,
superset=superset, # so we don't need a separate .save() after .create().
exercise=exercise_obj, create_kwargs = {
order=order, 'superset': superset,
) 'exercise': exercise_obj,
'order': order,
}
# Assign optional fields exactly like add_workout does
if ex_spec.get('weight') is not None: if ex_spec.get('weight') is not None:
superset_exercise.weight = ex_spec['weight'] create_kwargs['weight'] = ex_spec['weight']
if ex_spec.get('reps') is not None: if ex_spec.get('reps') is not None:
superset_exercise.reps = ex_spec['reps'] create_kwargs['reps'] = ex_spec['reps']
rep_duration = exercise_obj.estimated_rep_duration or 3.0 rep_duration = exercise_obj.estimated_rep_duration or 3.0
superset_total_time += ex_spec['reps'] * rep_duration superset_total_time += ex_spec['reps'] * rep_duration
if ex_spec.get('duration') is not None: if ex_spec.get('duration') is not None:
superset_exercise.duration = ex_spec['duration'] create_kwargs['duration'] = ex_spec['duration']
superset_total_time += ex_spec['duration'] superset_total_time += ex_spec['duration']
superset_exercise.save() SupersetExercise.objects.create(**create_kwargs)
# ---- 4. Update superset estimated_time ---- # ---- 4. Update superset estimated_time ----
# Store total time including all rounds and rest between rounds # Store total time including all rounds and rest between rounds

View File

@@ -1398,18 +1398,31 @@ class WorkoutGenerator:
break break
replaced = False replaced = False
removed_type = (result[idx].get('split_type') or 'full_body').strip().lower()
removed_sig = self._split_signature(result[idx])
for candidate in candidates: for candidate in candidates:
candidate_type = (candidate.get('split_type') or 'full_body').strip().lower() candidate_type = (candidate.get('split_type') or 'full_body').strip().lower()
candidate_sig = self._split_signature(candidate) candidate_sig = self._split_signature(candidate)
current_sig = self._split_signature(result[idx]) if candidate_sig == removed_sig:
if candidate_sig == current_sig:
continue continue
new_type_count = type_counts[candidate_type] + (0 if candidate_type == (result[idx].get('split_type') or 'full_body').strip().lower() else 1) # Account for the removal of the old entry when counting
# the new type: subtract 1 for the removed type if it
# matches the candidate type, add 1 for the candidate.
if candidate_type == removed_type:
new_type_count = type_counts[candidate_type] # net zero: -1 removed +1 added
else:
new_type_count = type_counts[candidate_type] + 1
if new_type_count > max_same_type: if new_type_count > max_same_type:
continue continue
if sig_counts[candidate_sig] >= max_same_signature: # Same accounting for signatures: the removed signature
# frees a slot, so only block if the candidate sig count
# (after removing the old entry) is still at max.
effective_sig_count = sig_counts[candidate_sig]
if candidate_sig == removed_sig:
effective_sig_count -= 1
if effective_sig_count >= max_same_signature:
continue continue
result[idx] = dict(candidate) result[idx] = dict(candidate)
@@ -2987,7 +3000,12 @@ class WorkoutGenerator:
return [] return []
wt_name_lower = workout_type.name.strip().lower() wt_name_lower = workout_type.name.strip().lower()
wt_key = _normalize_type_key(wt_name_lower)
is_strength = wt_name_lower in STRENGTH_WORKOUT_TYPES is_strength = wt_name_lower in STRENGTH_WORKOUT_TYPES
is_hiit = wt_key == 'high_intensity_interval_training'
is_cardio = wt_key == 'cardio'
is_core = wt_key == 'core_training'
is_flexibility = wt_key == 'flexibility'
threshold = GENERATION_RULES['workout_type_match_pct']['value'] threshold = GENERATION_RULES['workout_type_match_pct']['value']
total_exercises = 0 total_exercises = 0
@@ -3001,7 +3019,33 @@ class WorkoutGenerator:
if is_strength: if is_strength:
if getattr(ex, 'is_weight', False) or getattr(ex, 'is_compound', False): if getattr(ex, 'is_weight', False) or getattr(ex, 'is_compound', False):
matching_exercises += 1 matching_exercises += 1
elif is_hiit:
# HIIT: favor high HR, compound, or duration-capable exercises
hr = getattr(ex, 'hr_elevation_rating', None) or 0
if hr >= 5 or getattr(ex, 'is_compound', False) or getattr(ex, 'is_duration', False):
matching_exercises += 1
elif is_cardio:
# Cardio: favor duration-capable or high-HR exercises
hr = getattr(ex, 'hr_elevation_rating', None) or 0
if getattr(ex, 'is_duration', False) or hr >= 5:
matching_exercises += 1
elif is_core:
# Core: check if exercise targets core muscles
muscles = (getattr(ex, 'muscle_groups', '') or '').lower()
patterns = (getattr(ex, 'movement_patterns', '') or '').lower()
if any(tok in muscles for tok in ('core', 'abs', 'oblique')):
matching_exercises += 1
elif 'core' in patterns or 'anti' in patterns:
matching_exercises += 1
elif is_flexibility:
# Flexibility: favor duration-based, stretch/mobility exercises
patterns = (getattr(ex, 'movement_patterns', '') or '').lower()
if getattr(ex, 'is_duration', False) or any(
tok in patterns for tok in ('stretch', 'mobility', 'yoga', 'flexibility')
):
matching_exercises += 1
else: else:
# Unknown type -- count all as matching (no false negatives)
matching_exercises += 1 matching_exercises += 1
violations = [] violations = []

View File

@@ -275,7 +275,9 @@ def accept_workout(request, workout_id):
""" """
registered_user = get_registered_user(request) registered_user = get_registered_user(request)
generated_workout = get_object_or_404( generated_workout = get_object_or_404(
GeneratedWorkout.objects.select_related('workout', 'workout_type'), GeneratedWorkout.objects.select_related('workout', 'workout_type').prefetch_related(
'workout__superset_workout__superset_exercises__exercise',
),
pk=workout_id, pk=workout_id,
plan__registered_user=registered_user, plan__registered_user=registered_user,
) )

View File

@@ -12,13 +12,26 @@ class SupersetExerciseSerializer(serializers.ModelSerializer):
fields = '__all__' fields = '__all__'
def get_exercise(self, obj): def get_exercise(self, obj):
data = ExerciseSerializer(obj.exercise, many=False).data try:
return data exercise = obj.exercise
except (Exercise.DoesNotExist, AttributeError):
return None
if exercise is None:
return None
return ExerciseSerializer(exercise, many=False).data
def get_unique_id(self, obj): def get_unique_id(self, obj):
return f"{obj.pk}-{obj.superset_id}" if hasattr(obj, 'superset_id') else str(obj.pk) return f"{obj.pk}-{obj.superset_id}" if hasattr(obj, 'superset_id') else str(obj.pk)
class SupersetSerializer(serializers.ModelSerializer): class SupersetSerializer(serializers.ModelSerializer):
"""Serializer for Superset with nested exercises.
For optimal performance, callers should prefetch related data:
Superset.objects.prefetch_related(
'superset_exercises__exercise__exercise_muscle_exercise__muscle',
'superset_exercises__exercise__workout_exercise_workout__equipment',
)
"""
exercises = serializers.SerializerMethodField() exercises = serializers.SerializerMethodField()
class Meta: class Meta:
@@ -30,5 +43,4 @@ class SupersetSerializer(serializers.ModelSerializer):
return [] return []
# Use prefetched data if available via superset_exercises related manager # Use prefetched data if available via superset_exercises related manager
objs = obj.superset_exercises.all().order_by('order') objs = obj.superset_exercises.all().order_by('order')
data = SupersetExerciseSerializer(objs, many=True).data return SupersetExerciseSerializer(objs, many=True).data
return data

View File

@@ -62,13 +62,14 @@ def create_all_exercise_list_for_workout(workout):
audio_queues.append(next_up_data) audio_queues.append(next_up_data)
elif x < superset.rounds - 1: elif x < superset.rounds - 1:
first_exercise = supersetExercises.first() first_exercise = supersetExercises[0] if supersetExercises else None
next_up_data = { if first_exercise is not None:
"audio_url": first_exercise.exercise.audio_url().lower(), next_up_data = {
"play_at": 7 "audio_url": first_exercise.exercise.audio_url().lower(),
} "play_at": 7
}
audio_queues.append(next_up_data) audio_queues.append(next_up_data)
elif len(supersets) > superset_count+1: elif len(supersets) > superset_count+1:
next_superset = supersets[superset_count+1] next_superset = supersets[superset_count+1]
@@ -76,12 +77,13 @@ def create_all_exercise_list_for_workout(workout):
next_superset_exercises = sorted(next_superset.supersetexercise_set.all(), key=lambda se: se.order) next_superset_exercises = sorted(next_superset.supersetexercise_set.all(), key=lambda se: se.order)
next_supersetExercises = next_superset_exercises[0] if next_superset_exercises else None next_supersetExercises = next_superset_exercises[0] if next_superset_exercises else None
next_up_data = { if next_supersetExercises is not None:
"audio_url": next_supersetExercises.exercise.audio_url().lower(), next_up_data = {
"play_at": 7 "audio_url": next_supersetExercises.exercise.audio_url().lower(),
} "play_at": 7
}
audio_queues.append(next_up_data) audio_queues.append(next_up_data)
ser_data["audio_queues"] = audio_queues ser_data["audio_queues"] = audio_queues
all_superset_exercise.append(ser_data) all_superset_exercise.append(ser_data)