rlogh committed
Commit c45cbe3 · verified · 1 Parent(s): f761cd6

Upload 34 files

Files changed (34)
  1. fitness_coach/README.md +102 -0
  2. fitness_coach/__init__.py +7 -0
  3. fitness_coach/__pycache__/__init__.cpython-311.pyc +0 -0
  4. fitness_coach/__pycache__/__init__.cpython-313.pyc +0 -0
  5. fitness_coach/__pycache__/body_parts.cpython-311.pyc +0 -0
  6. fitness_coach/__pycache__/body_parts.cpython-313.pyc +0 -0
  7. fitness_coach/__pycache__/comparison.cpython-311.pyc +0 -0
  8. fitness_coach/__pycache__/comparison.cpython-313.pyc +0 -0
  9. fitness_coach/__pycache__/noise_scoring.cpython-311.pyc +0 -0
  10. fitness_coach/__pycache__/noise_scoring.cpython-313.pyc +0 -0
  11. fitness_coach/__pycache__/pose_invariant.cpython-311.pyc +0 -0
  12. fitness_coach/__pycache__/reference_processor.cpython-311.pyc +0 -0
  13. fitness_coach/__pycache__/reference_processor.cpython-313.pyc +0 -0
  14. fitness_coach/__pycache__/temporal_align.cpython-311.pyc +0 -0
  15. fitness_coach/__pycache__/temporal_align.cpython-313.pyc +0 -0
  16. fitness_coach/__pycache__/user_processor.cpython-311.pyc +0 -0
  17. fitness_coach/__pycache__/user_processor.cpython-313.pyc +0 -0
  18. fitness_coach/__pycache__/utils.cpython-311.pyc +0 -0
  19. fitness_coach/__pycache__/utils.cpython-313.pyc +0 -0
  20. fitness_coach/__pycache__/video_comparison.cpython-311.pyc +0 -0
  21. fitness_coach/__pycache__/video_from_images.cpython-311.pyc +0 -0
  22. fitness_coach/__pycache__/video_overlay.cpython-311.pyc +0 -0
  23. fitness_coach/body_parts.py +203 -0
  24. fitness_coach/comparison.py +487 -0
  25. fitness_coach/noise_scoring.py +296 -0
  26. fitness_coach/persona_model.py +169 -0
  27. fitness_coach/pose_invariant.py +328 -0
  28. fitness_coach/reference_processor.py +296 -0
  29. fitness_coach/temporal_align.py +174 -0
  30. fitness_coach/test_modules.py +175 -0
  31. fitness_coach/user_processor.py +194 -0
  32. fitness_coach/utils.py +266 -0
  33. fitness_coach/video_comparison.py +196 -0
  34. fitness_coach/video_from_images.py +181 -0
fitness_coach/README.md ADDED
@@ -0,0 +1,102 @@
1
+ # Fitness Coach Scoring System
2
+
3
+ A comprehensive scoring system for comparing user exercise performance to reference videos using 3D pose estimation.
4
+
5
+ ## Overview
6
+
7
+ This module provides tools to:
8
+ - Process reference videos and generate noisy samples for scoring
9
+ - Process user videos and extract 3D poses
10
+ - Compare user performance to reference using noise-based scoring
11
+ - Generate per-body-part scores and overall performance metrics
12
+
13
+ ## Module Structure
14
+
15
+ ### Core Modules (Phase 1 - COMPLETE ✓)
16
+
17
+ 1. **`body_parts.py`** - Body part groupings and joint metadata (usage snippet after this list)
18
+ - Defines 17-joint skeleton structure
19
+ - Maps joints to body part groups (arms, legs, core, etc.)
20
+ - Provides noise levels per joint type
21
+ - Exercise-specific body part focus
22
+
23
+ 2. **`utils.py`** - Utility functions
24
+ - Pose normalization and centering
25
+ - Joint distance calculations
26
+ - Sequence interpolation
27
+ - Spatial alignment
28
+
29
+ 3. **`temporal_align.py`** - Temporal alignment
30
+ - Dynamic Time Warping (DTW) for sequence alignment
31
+ - Handles sequences of different lengths
32
+ - Phase alignment for motion comparison
33
+
34
+ 4. **`noise_scoring.py`** - Noise-based scoring
35
+ - Generates noisy reference samples
36
+ - Statistical bounds calculation
37
+ - Per-body-part scoring
38
+ - Overall performance scoring
39
+
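+ As a quick illustration of the `body_parts.py` helpers above (a minimal sketch; joint indices follow the 17-joint Human3.6M layout used throughout this module):
+
+ ```python
+ from fitness_coach.body_parts import (
+     get_body_part_joints,
+     get_joint_noise_level,
+     get_joints_for_exercise,
+ )
+
+ # Joint indices that make up the right arm group (shoulder, elbow, wrist)
+ print(get_body_part_joints('right_arm'))    # [14, 15, 16]
+
+ # Per-joint tolerance as a fraction of body scale (looser for wrists than for core joints)
+ print(get_joint_noise_level(16))            # 0.08 (RightWrist)
+
+ # Body-part groups emphasised when scoring a push-up
+ print(get_joints_for_exercise('pushup'))    # ['core', 'right_arm', 'left_arm', 'torso']
+ ```
+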
40
+ ## Usage Example
41
+
42
+ ```python
43
+ from fitness_coach.noise_scoring import score_with_statistical_bounds
44
+ import numpy as np
45
+
46
+ # Load poses (from processed videos)
47
+ user_poses = np.load('user_keypoints_3D.npz')['reconstruction']
48
+ ref_poses = np.load('reference_keypoints_3D.npz')['reconstruction']
49
+
50
+ # Score the user's performance
51
+ scores = score_with_statistical_bounds(user_poses, ref_poses)
52
+
53
+ print(f"Overall Score: {scores['overall_score']:.2f}")
54
+ print(f"Body Part Scores: {scores['body_part_scores']}")
55
+ ```
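+
+ The example above uses the statistical-bounds scorer. When extra accuracy is worth the runtime, the explicit noisy-sample scorer from `noise_scoring.py` can be used instead (a sketch reusing the arrays loaded above):
+
+ ```python
+ from fitness_coach.noise_scoring import create_noisy_samples, score_with_noisy_reference
+
+ # Pre-generate perturbed copies of the reference (per-joint noise levels by default)
+ noisy = create_noisy_samples(ref_poses, n_samples=100)
+
+ # A joint scores well when most noisy samples land further from the reference than the user does
+ scores = score_with_noisy_reference(user_poses, ref_poses, noisy_samples=noisy)
+ print(f"Overall Score: {scores['overall_score']:.2f}")
+ ```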
56
+
57
+ ## Testing
58
+
59
+ Run the test suite:
60
+ ```bash
61
+ python fitness_coach/test_modules.py
62
+ ```
63
+
64
+ All core modules have been tested and verified to work.
65
+
66
+ ## Next Steps (Phase 2)
67
+
68
+ 1. **Reference Processor** - Process reference videos once
69
+ 2. **User Processor** - Process user videos
70
+ 3. **Comparison Module** - Full comparison pipeline
71
+ 4. **API Integration** - REST API endpoints
72
+
73
+ ## Dependencies
74
+
75
+ - numpy
76
+ - scipy
77
+ - fastdtw (optional, for better temporal alignment)
78
+
79
+ Install with:
80
+ ```bash
81
+ pip install numpy scipy fastdtw
82
+ ```
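+
+ The pipeline's temporal alignment lives in `temporal_align.py`; if `fastdtw` is installed, sequences of different lengths can be time-warped onto each other before scoring. A minimal standalone sketch of the idea (illustrative only, not the module's internal implementation):
+
+ ```python
+ from fastdtw import fastdtw
+ from scipy.spatial.distance import euclidean
+ import numpy as np
+
+ # Flatten each frame's 17x3 pose into a 51-dim vector so DTW compares whole frames
+ user_flat = user_poses.reshape(len(user_poses), -1)
+ ref_flat = ref_poses.reshape(len(ref_poses), -1)
+
+ distance, path = fastdtw(user_flat, ref_flat, dist=euclidean)
+
+ # `path` pairs user frame indices with reference frame indices
+ user_aligned = np.array([user_poses[i] for i, _ in path])
+ ref_aligned = np.array([ref_poses[j] for _, j in path])
+ ```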
83
+
84
+ ## Status
85
+
86
+ ✅ Phase 1: Core Infrastructure - **COMPLETE**
87
+ - [x] Body parts module
88
+ - [x] Utils module
89
+ - [x] Temporal alignment module
90
+ - [x] Noise scoring module
91
+ - [x] All tests passing
92
+
93
+ 🔄 Phase 2: Processing Pipelines - **IN PROGRESS**
94
+ - [ ] Reference processor
95
+ - [ ] User processor
96
+ - [ ] Integration with existing vis.py
97
+
98
+ ⏳ Phase 3: Comparison & API - **PENDING**
99
+ - [ ] Full comparison module
100
+ - [ ] API endpoints
101
+ - [ ] Documentation
102
+
fitness_coach/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """
2
+ Fitness Coach Module
3
+ Scoring system for comparing user exercise performance to reference videos
4
+ """
5
+
6
+ __version__ = "0.1.0"
7
+
fitness_coach/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (314 Bytes).
 
fitness_coach/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (303 Bytes).
 
fitness_coach/__pycache__/body_parts.cpython-311.pyc ADDED
Binary file (6.25 kB).
 
fitness_coach/__pycache__/body_parts.cpython-313.pyc ADDED
Binary file (5.41 kB).
 
fitness_coach/__pycache__/comparison.cpython-311.pyc ADDED
Binary file (25.6 kB).
 
fitness_coach/__pycache__/comparison.cpython-313.pyc ADDED
Binary file (20.6 kB).
 
fitness_coach/__pycache__/noise_scoring.cpython-311.pyc ADDED
Binary file (12.2 kB).
 
fitness_coach/__pycache__/noise_scoring.cpython-313.pyc ADDED
Binary file (10.2 kB).
 
fitness_coach/__pycache__/pose_invariant.cpython-311.pyc ADDED
Binary file (10.1 kB).
 
fitness_coach/__pycache__/reference_processor.cpython-311.pyc ADDED
Binary file (12.7 kB).
 
fitness_coach/__pycache__/reference_processor.cpython-313.pyc ADDED
Binary file (11 kB).
 
fitness_coach/__pycache__/temporal_align.cpython-311.pyc ADDED
Binary file (7.26 kB).
 
fitness_coach/__pycache__/temporal_align.cpython-313.pyc ADDED
Binary file (6.12 kB).
 
fitness_coach/__pycache__/user_processor.cpython-311.pyc ADDED
Binary file (8.62 kB).
 
fitness_coach/__pycache__/user_processor.cpython-313.pyc ADDED
Binary file (7.61 kB).
 
fitness_coach/__pycache__/utils.cpython-311.pyc ADDED
Binary file (10 kB).
 
fitness_coach/__pycache__/utils.cpython-313.pyc ADDED
Binary file (9.21 kB).
 
fitness_coach/__pycache__/video_comparison.cpython-311.pyc ADDED
Binary file (10.2 kB).
 
fitness_coach/__pycache__/video_from_images.cpython-311.pyc ADDED
Binary file (9.77 kB).
 
fitness_coach/__pycache__/video_overlay.cpython-311.pyc ADDED
Binary file (13.3 kB).
 
fitness_coach/body_parts.py ADDED
@@ -0,0 +1,203 @@
1
+ """
2
+ Body Part Groupings and Joint Metadata
3
+ Defines how 17-joint skeleton maps to body part groups for scoring
4
+ """
5
+
6
+ import numpy as np
7
+
8
+ # Joint indices for 17-joint Human3.6M format
9
+ # 0: Hip, 1-3: Right leg, 4-6: Left leg, 7-10: Spine/Head, 11-13: Left arm, 14-16: Right arm
10
+ JOINT_NAMES = [
11
+ 'Hip', # 0
12
+ 'RightHip', # 1
13
+ 'RightKnee', # 2
14
+ 'RightAnkle', # 3
15
+ 'LeftHip', # 4
16
+ 'LeftKnee', # 5
17
+ 'LeftAnkle', # 6
18
+ 'Spine', # 7
19
+ 'Thorax', # 8
20
+ 'Neck', # 9
21
+ 'Head', # 10
22
+ 'LeftShoulder', # 11
23
+ 'LeftElbow', # 12
24
+ 'LeftWrist', # 13
25
+ 'RightShoulder', # 14
26
+ 'RightElbow', # 15
27
+ 'RightWrist', # 16
28
+ ]
29
+
30
+ # Body part groupings for scoring
31
+ JOINT_GROUPS = {
32
+ 'right_arm': [14, 15, 16], # Right shoulder, elbow, wrist
33
+ 'left_arm': [11, 12, 13], # Left shoulder, elbow, wrist
34
+ 'right_leg': [1, 2, 3], # Right hip, knee, ankle
35
+ 'left_leg': [4, 5, 6], # Left hip, knee, ankle
36
+ 'torso': [0, 7, 8, 9, 10], # Hip, spine, thorax, neck, head
37
+ 'core': [0, 7, 8], # Hip, spine, thorax (for core exercises like push-ups)
38
+ 'upper_body': [7, 8, 9, 10, 11, 12, 13, 14, 15, 16], # Everything above hip
39
+ 'lower_body': [0, 1, 2, 3, 4, 5, 6], # Everything below and including hip
40
+ }
41
+
42
+ # Noise levels per joint type (as fraction of body scale)
43
+ # Different joints have different acceptable variation
44
+ JOINT_NOISE_LEVELS = {
45
+ 'core': 0.02, # Hip, spine - very tight tolerance
46
+ 'shoulders': 0.04, # Shoulder joints
47
+ 'elbows': 0.06, # Elbow, knee
48
+ 'wrists': 0.08, # Wrist, ankle
49
+ 'hands': 0.10, # Hands, feet - most variation
50
+ }
51
+
52
+ # Map each joint to its noise level category
53
+ JOINT_TO_NOISE_CATEGORY = {
54
+ 0: 'core', # Hip
55
+ 1: 'shoulders', # Right hip (treated as shoulder-like for movement)
56
+ 2: 'elbows', # Right knee
57
+ 3: 'wrists', # Right ankle
58
+ 4: 'shoulders', # Left hip
59
+ 5: 'elbows', # Left knee
60
+ 6: 'wrists', # Left ankle
61
+ 7: 'core', # Spine
62
+ 8: 'core', # Thorax
63
+ 9: 'shoulders', # Neck
64
+ 10: 'shoulders', # Head
65
+ 11: 'shoulders', # Left shoulder
66
+ 12: 'elbows', # Left elbow
67
+ 13: 'wrists', # Left wrist
68
+ 14: 'shoulders', # Right shoulder
69
+ 15: 'elbows', # Right elbow
70
+ 16: 'wrists', # Right wrist
71
+ }
72
+
73
+ # Joint pairs for calculating angles (parent-child relationships)
74
+ JOINT_PAIRS = [
75
+ (0, 1), # Hip -> Right Hip
76
+ (1, 2), # Right Hip -> Right Knee
77
+ (2, 3), # Right Knee -> Right Ankle
78
+ (0, 4), # Hip -> Left Hip
79
+ (4, 5), # Left Hip -> Left Knee
80
+ (5, 6), # Left Knee -> Left Ankle
81
+ (0, 7), # Hip -> Spine
82
+ (7, 8), # Spine -> Thorax
83
+ (8, 9), # Thorax -> Neck
84
+ (9, 10), # Neck -> Head
85
+ (8, 11), # Thorax -> Left Shoulder
86
+ (11, 12), # Left Shoulder -> Left Elbow
87
+ (12, 13), # Left Elbow -> Left Wrist
88
+ (8, 14), # Thorax -> Right Shoulder
89
+ (14, 15), # Right Shoulder -> Right Elbow
90
+ (15, 16), # Right Elbow -> Right Wrist
91
+ ]
92
+
93
+
94
+ def get_body_part_joints(part_name):
95
+ """
96
+ Get joint indices for a body part group
97
+
98
+ Args:
99
+ part_name: Name of body part (e.g., 'right_arm', 'core')
100
+
101
+ Returns:
102
+ List of joint indices
103
+ """
104
+ if part_name not in JOINT_GROUPS:
105
+ raise ValueError(f"Unknown body part: {part_name}. Available: {list(JOINT_GROUPS.keys())}")
106
+ return JOINT_GROUPS[part_name]
107
+
108
+
109
+ def get_joint_noise_level(joint_idx):
110
+ """
111
+ Get noise level for a specific joint
112
+
113
+ Args:
114
+ joint_idx: Joint index (0-16)
115
+
116
+ Returns:
117
+ Noise level (float) as fraction of body scale
118
+ """
119
+ if joint_idx not in JOINT_TO_NOISE_CATEGORY:
120
+ return 0.05 # Default
121
+ category = JOINT_TO_NOISE_CATEGORY[joint_idx]
122
+ return JOINT_NOISE_LEVELS[category]
123
+
124
+
125
+ def get_all_body_parts():
126
+ """
127
+ Get all available body part names
128
+
129
+ Returns:
130
+ List of body part names
131
+ """
132
+ return list(JOINT_GROUPS.keys())
133
+
134
+
135
+ def get_joint_name(joint_idx):
136
+ """
137
+ Get human-readable name for a joint
138
+
139
+ Args:
140
+ joint_idx: Joint index (0-16)
141
+
142
+ Returns:
143
+ Joint name string
144
+ """
145
+ if 0 <= joint_idx < len(JOINT_NAMES):
146
+ return JOINT_NAMES[joint_idx]
147
+ return f"Joint_{joint_idx}"
148
+
149
+
150
+ def get_joints_for_exercise(exercise_type):
151
+ """
152
+ Get relevant body parts for a specific exercise type
153
+
154
+ Args:
155
+ exercise_type: Type of exercise (e.g., 'pushup', 'squat', 'plank')
156
+
157
+ Returns:
158
+ List of body part names relevant to the exercise
159
+ """
160
+ exercise_focus = {
161
+ 'pushup': ['core', 'right_arm', 'left_arm', 'torso'],
162
+ 'squat': ['core', 'right_leg', 'left_leg', 'torso'],
163
+ 'plank': ['core', 'torso', 'right_arm', 'left_arm'],
164
+ 'lunge': ['core', 'right_leg', 'left_leg', 'torso'],
165
+ 'all': list(JOINT_GROUPS.keys()),
166
+ }
167
+
168
+ return exercise_focus.get(exercise_type.lower(), exercise_focus['all'])
169
+
170
+
171
+ def calculate_body_scale(poses):
172
+ """
173
+ Calculate body scale (hip-to-shoulder distance) for normalization
174
+
175
+ Args:
176
+ poses: Array of shape [frames, 17, 3] or [17, 3]
177
+
178
+ Returns:
179
+ Average body scale (float)
180
+ """
181
+ poses = np.array(poses)
182
+ if len(poses.shape) == 2:
183
+ poses = poses[np.newaxis, :, :]
184
+
185
+ # Hip (0) to Thorax (8) distance
186
+ hip_to_thorax = np.linalg.norm(poses[:, 0, :] - poses[:, 8, :], axis=1)
187
+ return np.mean(hip_to_thorax)
188
+
189
+
190
+ if __name__ == "__main__":
191
+ # Test the module
192
+ print("Body Part Groups:")
193
+ for part, joints in JOINT_GROUPS.items():
194
+ joint_names = [JOINT_NAMES[j] for j in joints]
195
+ print(f" {part}: {joints} - {joint_names}")
196
+
197
+ print("\nJoint Noise Levels:")
198
+ for i in range(17):
199
+ print(f" {JOINT_NAMES[i]}: {get_joint_noise_level(i)}")
200
+
201
+ print("\nExercise Focus (Push-up):")
202
+ print(f" {get_joints_for_exercise('pushup')}")
203
+
fitness_coach/comparison.py ADDED
@@ -0,0 +1,487 @@
1
+ """
2
+ Motion Comparison Module
3
+ Main module that compares user poses to reference and generates scores
4
+ """
5
+
6
+ import numpy as np
7
+ from .temporal_align import align_poses_sequences, find_phase_alignment
8
+ from .noise_scoring import score_with_statistical_bounds, score_with_noisy_reference
9
+ from .utils import normalize_body_scale, center_poses, calculate_joint_distances
10
+ from .body_parts import get_joints_for_exercise, get_body_part_joints, JOINT_GROUPS
11
+ from .pose_invariant import pose_invariant_score_by_body_part, align_to_canonical_pose
12
+
13
+
14
+ def compare_motions(user_poses, ref_poses, noisy_samples=None, exercise_type='pushup',
15
+ use_dtw=True, scoring_method='statistical', use_pose_invariant=True):
16
+ """
17
+ Compare user motion to reference and generate comprehensive scores
18
+
19
+ Args:
20
+ user_poses: User pose sequence [frames, 17, 3]
21
+ ref_poses: Reference pose sequence [frames, 17, 3]
22
+ noisy_samples: Pre-generated noisy samples [n_samples, frames, 17, 3] (optional)
23
+ exercise_type: Type of exercise for body part focus
24
+ use_dtw: If True, use DTW for temporal alignment (slower but more accurate)
25
+ scoring_method: 'statistical' (faster) or 'noisy_samples' (more accurate)
+ use_pose_invariant: If True, use orientation-independent (pose-invariant) scoring
26
+
27
+ Returns:
28
+ Dictionary with comprehensive scoring results
29
+ """
30
+ user_poses = np.array(user_poses)
31
+ ref_poses = np.array(ref_poses)
32
+
33
+ # Convert lists to arrays if needed
34
+ if isinstance(user_poses, list):
35
+ user_poses = np.array(user_poses)
36
+ if isinstance(ref_poses, list):
37
+ ref_poses = np.array(ref_poses)
38
+
39
+ print(f"Comparing motions:")
40
+ print(f" User: {len(user_poses)} frames")
41
+ print(f" Reference: {len(ref_poses)} frames")
42
+
43
+ # Step 1: Temporal alignment
44
+ alignment_score = None
45
+ if use_dtw:
46
+ print("\n[1/4] Aligning sequences with DTW...")
47
+ try:
48
+ user_aligned, ref_aligned, alignment_score = find_phase_alignment(user_poses, ref_poses)
49
+ print(f" Alignment score: {alignment_score:.4f}")
50
+ except Exception as e:
51
+ print(f" DTW failed, using interpolation: {e}")
52
+ from .utils import interpolate_sequence
53
+ target_length = max(len(user_poses), len(ref_poses))
54
+ user_aligned = interpolate_sequence(user_poses, target_length)
55
+ ref_aligned = interpolate_sequence(ref_poses, target_length)
56
+ else:
57
+ print("\n[1/4] Aligning sequences with interpolation...")
58
+ from .utils import interpolate_sequence
59
+ target_length = max(len(user_poses), len(ref_poses))
60
+ user_aligned = interpolate_sequence(user_poses, target_length)
61
+ ref_aligned = interpolate_sequence(ref_poses, target_length)
62
+
63
+ # Step 2: Spatial normalization
64
+ print("\n[2/4] Normalizing poses...")
65
+ user_norm, user_scale = normalize_body_scale(user_aligned)
66
+ ref_norm, ref_scale = normalize_body_scale(ref_aligned, reference_scale=user_scale)
67
+
68
+ # Center both poses at hip
69
+ user_centered = center_poses(user_norm)
70
+ ref_centered = center_poses(ref_norm)
71
+
72
+ # Step 3: Calculate scores
73
+ print(f"\n[3/4] Calculating scores ({'pose-invariant' if use_pose_invariant else scoring_method} method)...")
74
+
75
+ if use_pose_invariant:
76
+ # Use pose-invariant comparison (orientation-independent)
77
+ print(" Using pose-invariant scoring (orientation-independent)")
78
+ body_part_scores = pose_invariant_score_by_body_part(
79
+ user_aligned,
80
+ ref_aligned,
81
+ JOINT_GROUPS
82
+ )
83
+
84
+ # Calculate overall score
85
+ overall_score = np.mean(list(body_part_scores.values()))
86
+
87
+ scores = {
88
+ 'overall_score': float(overall_score),
89
+ 'body_part_scores': body_part_scores,
90
+ 'frame_scores': [],
91
+ 'per_joint_scores': [],
92
+ 'body_part_details': {}
93
+ }
94
+ elif scoring_method == 'noisy_samples' and noisy_samples is not None:
95
+ # Use noisy samples method
96
+ # Align noisy samples too
97
+ from .utils import interpolate_sequence
98
+ target_length = len(user_centered)
99
+ noisy_aligned = np.array([
100
+ interpolate_sequence(sample, target_length)
101
+ for sample in noisy_samples
102
+ ])
103
+ noisy_norm = np.array([
104
+ normalize_body_scale(sample, reference_scale=ref_scale)[0]
105
+ for sample in noisy_aligned
106
+ ])
107
+ noisy_centered = np.array([center_poses(sample) for sample in noisy_norm])
108
+
109
+ scores = score_with_noisy_reference(
110
+ user_centered,
111
+ ref_centered,
112
+ noisy_samples=noisy_centered
113
+ )
114
+ else:
115
+ # Use statistical bounds method (faster)
116
+ scores = score_with_statistical_bounds(user_centered, ref_centered)
117
+
118
+ # Step 4: Exercise-specific analysis
119
+ print("\n[4/4] Generating exercise-specific feedback...")
120
+ relevant_parts = get_joints_for_exercise(exercise_type)
121
+
122
+ # Filter scores to relevant body parts
123
+ relevant_scores = {
124
+ part: scores['body_part_scores'][part]
125
+ for part in relevant_parts
126
+ if part in scores['body_part_scores']
127
+ }
128
+
129
+ # Calculate average for relevant parts
130
+ relevant_avg = np.mean(list(relevant_scores.values())) if relevant_scores else scores['overall_score']
131
+
132
+ # Generate feedback
133
+ feedback = generate_feedback(scores, relevant_scores, exercise_type)
134
+
135
+ # Compile results
136
+ results = {
137
+ 'overall_score': float(scores['overall_score']),
138
+ 'relevant_score': float(relevant_avg), # Score for exercise-specific body parts
139
+ 'body_part_scores': scores['body_part_scores'],
140
+ 'relevant_body_part_scores': relevant_scores,
141
+ 'frame_scores': scores.get('frame_scores', []),
142
+ 'per_joint_scores': scores.get('per_joint_scores', []),
143
+ 'feedback': feedback,
144
+ 'exercise_type': exercise_type,
145
+ 'num_frames_user': len(user_poses),
146
+ 'num_frames_ref': len(ref_poses),
147
+ 'num_frames_aligned': len(user_centered),
148
+ 'details': {
149
+ 'reference_poses': ref_centered,
150
+ 'user_poses': user_poses,
151
+ 'aligned_user_poses': user_centered,
152
+ 'body_part_details': scores.get('body_part_details', {}),
153
+ 'alignment_score': alignment_score if use_dtw else None,
154
+ }
155
+ }
156
+
157
+ print(f"\n✓ Comparison complete!")
158
+ print(f" Overall score: {results['overall_score']:.2f}")
159
+ print(f" Relevant score: {results['relevant_score']:.2f}")
160
+
161
+ return results
162
+
163
+
164
+ def generate_feedback(scores, relevant_scores, exercise_type):
165
+ """
166
+ Generate human-readable feedback based on scores
167
+
168
+ Args:
169
+ scores: Full scoring dictionary
170
+ relevant_scores: Scores for exercise-specific body parts
171
+ exercise_type: Type of exercise
172
+
173
+ Returns:
174
+ List of feedback strings
175
+ """
176
+ feedback = []
177
+
178
+ # Overall feedback
179
+ overall = scores['overall_score']
180
+ if overall >= 90:
181
+ feedback.append("Excellent form! Keep up the great work.")
182
+ elif overall >= 75:
183
+ feedback.append("Good form overall. Minor adjustments can improve your technique.")
184
+ elif overall >= 60:
185
+ feedback.append("Decent form, but there's room for improvement.")
186
+ else:
187
+ feedback.append("Focus on improving your form. Consider reviewing the reference video.")
188
+
189
+ # Body part specific feedback
190
+ if exercise_type.lower() == 'pushup':
191
+ # Check core
192
+ if 'core' in relevant_scores:
193
+ core_score = relevant_scores['core']
194
+ if core_score < 70:
195
+ feedback.append("Keep your core engaged and back straight throughout the movement.")
196
+
197
+ # Check arms
198
+ arm_scores = [relevant_scores.get('right_arm', 0), relevant_scores.get('left_arm', 0)]
199
+ avg_arm = np.mean(arm_scores)
200
+ if avg_arm < 70:
201
+ feedback.append("Focus on maintaining consistent arm positioning. Both arms should move symmetrically.")
202
+ elif abs(arm_scores[0] - arm_scores[1]) > 15:
203
+ feedback.append("Your arms are moving asymmetrically. Try to keep both sides balanced.")
204
+
205
+ elif exercise_type.lower() == 'squat':
206
+ # Check legs
207
+ leg_scores = [relevant_scores.get('right_leg', 0), relevant_scores.get('left_leg', 0)]
208
+ avg_leg = np.mean(leg_scores)
209
+ if avg_leg < 70:
210
+ feedback.append("Focus on proper leg positioning and depth in your squats.")
211
+ elif abs(leg_scores[0] - leg_scores[1]) > 15:
212
+ feedback.append("Your legs are moving asymmetrically. Focus on balanced movement.")
213
+
214
+ # Find worst performing body part
215
+ if relevant_scores:
216
+ worst_part = min(relevant_scores.items(), key=lambda x: x[1])
217
+ if worst_part[1] < 65:
218
+ feedback.append(f"Pay special attention to your {worst_part[0].replace('_', ' ')} - it needs the most improvement.")
219
+
220
+ return feedback
221
+
222
+
223
+ def score_exercise(user_video_path, reference_id='pushup', references_dir='references',
224
+ use_dtw=True, scoring_method='statistical', force_reprocess=False, use_pose_invariant=True):
225
+ """
226
+ Complete pipeline: process user video and score against reference
227
+
228
+ Args:
229
+ user_video_path: Path to user video
230
+ reference_id: Exercise type / reference ID
231
+ references_dir: Directory containing references
232
+ use_dtw: Use DTW for alignment
233
+ scoring_method: Scoring method to use
234
+ force_reprocess: Force reprocessing even if cached data exists
+ use_pose_invariant: If True, use orientation-independent (pose-invariant) scoring
235
+
236
+ Returns:
237
+ Scoring results dictionary
238
+ """
239
+ from .user_processor import process_user_video
240
+ from .reference_processor import load_reference
241
+ import shutil
242
+ from pathlib import Path
243
+
244
+ print("="*60)
245
+ print("EXERCISE SCORING PIPELINE")
246
+ print("="*60)
247
+
248
+ # Load reference
249
+ print(f"\nLoading reference: {reference_id}")
250
+ ref_data = load_reference(reference_id, references_dir=references_dir)
251
+ ref_poses = ref_data['poses_3d']
252
+ noisy_samples = ref_data.get('noisy_samples')
253
+ metadata = ref_data['metadata']
254
+
255
+ print(f" Reference frames: {len(ref_poses)}")
256
+ print(f" Exercise type: {metadata['exercise_type']}")
257
+
258
+ # Clear cache if force reprocess
259
+ if force_reprocess:
260
+ cache_dir = Path('user_videos_cache') / Path(user_video_path).stem
261
+ if cache_dir.exists():
262
+ print(f"\n⚠ Clearing cache for {Path(user_video_path).name}")
263
+ shutil.rmtree(cache_dir)
264
+
265
+ # Process user video (uses cache if available)
266
+ print(f"\nProcessing user video: {user_video_path}")
267
+ user_data = process_user_video(user_video_path, cleanup=False)
268
+ user_poses = user_data['poses_3d']
269
+
270
+ print(f" User frames: {len(user_poses)}")
271
+
272
+ # Compare
273
+ print(f"\nComparing motions...")
274
+ results = compare_motions(
275
+ user_poses,
276
+ ref_poses,
277
+ noisy_samples=noisy_samples,
278
+ exercise_type=metadata['exercise_type'],
279
+ use_dtw=use_dtw,
280
+ scoring_method=scoring_method,
281
+ use_pose_invariant=use_pose_invariant
282
+ )
283
+
284
+ return results
285
+
286
+
287
+ if __name__ == "__main__":
288
+ import argparse
289
+
290
+ parser = argparse.ArgumentParser(description='Compare user video to reference')
291
+ parser.add_argument('--user-video', type=str, required=True, help='Path to user video')
292
+ parser.add_argument('--reference', type=str, default='pushup', help='Reference ID')
293
+ parser.add_argument('--references-dir', type=str, default='references', help='References directory')
294
+ parser.add_argument('--no-dtw', action='store_true', help='Disable DTW alignment')
295
+ parser.add_argument('--method', type=str, default='statistical', choices=['statistical', 'noisy_samples'],
296
+ help='Scoring method')
297
+ parser.add_argument('--force-reprocess', action='store_true', help='Force reprocessing (ignore cache)')
298
+ parser.add_argument('--json', action='store_true', help='Output results as JSON for API consumption')
299
+ parser.add_argument('--output', type=str, help='Save JSON output to file')
300
+ parser.add_argument('--generate-video', action='store_true', help='Generate side-by-side comparison video')
301
+ parser.add_argument('--video-output', type=str, help='Path for comparison video (default: comparison_<user_video>.mp4)')
302
+ parser.add_argument('--video-fps', type=int, default=30, help='FPS for comparison video')
303
+
304
+ args = parser.parse_args()
305
+
306
+ try:
307
+ results = score_exercise(
308
+ args.user_video,
309
+ reference_id=args.reference,
310
+ references_dir=args.references_dir,
311
+ use_dtw=not args.no_dtw,
312
+ scoring_method=args.method,
313
+ force_reprocess=args.force_reprocess
314
+ )
315
+
316
+ # Format output for API/LLM consumption
317
+ if args.json:
318
+ import json
319
+ from pathlib import Path
320
+
321
+ # Create clean API response
322
+ api_response = {
323
+ "status": "success",
324
+ "exercise": {
325
+ "type": results['exercise_type'],
326
+ "reference": args.reference,
327
+ "user_video": str(Path(args.user_video).name)
328
+ },
329
+ "scores": {
330
+ "overall": float(round(results['overall_score'], 2)),
331
+ "relevant": float(round(results['relevant_score'], 2)),
332
+ "body_parts": {
333
+ part: float(round(score, 2))
334
+ for part, score in results['relevant_body_part_scores'].items()
335
+ }
336
+ },
337
+ "metrics": {
338
+ "frames": {
339
+ "user": int(results['num_frames_user']),
340
+ "reference": int(results['num_frames_ref']),
341
+ "aligned": int(results['num_frames_aligned'])
342
+ },
343
+ "alignment_quality": float(round(results['details'].get('alignment_score', 0), 4)) if results['details'].get('alignment_score') else None,
344
+ "body_part_details": {
345
+ part: {
346
+ "position_error_avg": float(round(metrics.get('position_error', 0), 4)),
347
+ "position_error_max": float(round(metrics.get('max_position_error', 0), 4)),
348
+ "tolerance_threshold": float(round(metrics.get('tolerance_threshold', 0), 4)),
349
+ "in_tolerance_percentage": float(round(metrics.get('in_tolerance_percentage', 0), 1))
350
+ }
351
+ for part, metrics in results['details'].get('body_part_details', {}).items()
352
+ if part in results['relevant_body_part_scores']
353
+ }
354
+ },
355
+ "feedback": results['feedback'],
356
+ "llm_context": {
357
+ "description": f"User performed {results['exercise_type']} exercise",
358
+ "scoring_method": args.method,
359
+ "interpretation": {
360
+ "score_range": "0-100, where 100 is perfect form matching the reference",
361
+ "position_error": "Lower is better. Measures average distance from reference pose in normalized units",
362
+ "in_tolerance": "Percentage of time user's form was within acceptable bounds"
363
+ }
364
+ }
365
+ }
366
+
367
+ # Output to file or stdout
368
+ json_output = json.dumps(api_response, indent=2)
369
+ if args.output:
370
+ with open(args.output, 'w') as f:
371
+ f.write(json_output)
372
+ print(f"✓ Results saved to {args.output}")
373
+ else:
374
+ print(json_output)
375
+ else:
376
+ # Human-readable output
377
+ print("\n" + "="*60)
378
+ print("SCORING RESULTS")
379
+ print("="*60)
380
+ print(f"\nOverall Score: {results['overall_score']:.2f}/100")
381
+ print(f"Relevant Score: {results['relevant_score']:.2f}/100")
382
+ print(f"\nBody Part Scores:")
383
+ for part, score in results['relevant_body_part_scores'].items():
384
+ print(f" {part.replace('_', ' ').title()}: {score:.2f}/100")
385
+ print(f"\nFeedback:")
386
+ for i, fb in enumerate(results['feedback'], 1):
387
+ print(f" {i}. {fb}")
388
+
389
+ # Debug information
390
+ print("\n" + "="*60)
391
+ print("DEBUG INFORMATION")
392
+ print("="*60)
393
+ details = results.get('details', {})
394
+ print(f"\nFrame Counts:")
395
+ print(f" Reference frames: {len(details.get('reference_poses', []))}")
396
+ print(f" User frames (original): {len(details.get('user_poses', []))}")
397
+ print(f" User frames (aligned): {len(details.get('aligned_user_poses', []))}")
398
+
399
+ if details.get('alignment_score') is not None:
400
+ print(f"\nAlignment:")
401
+ print(f" DTW alignment score: {details['alignment_score']:.4f}")
402
+
403
+ print(f"\nDetailed Body Part Metrics:")
404
+ for part, metrics in details.get('body_part_details', {}).items():
405
+ if part in results['relevant_body_part_scores']:
406
+ print(f"\n{part.replace('_', ' ').title()}:")
407
+ print(f" Position Error (avg): {metrics.get('position_error', 0):.4f}")
408
+ print(f" Position Error (max): {metrics.get('max_position_error', 0):.4f}")
409
+ print(f" Tolerance Threshold: {metrics.get('tolerance_threshold', 0):.4f}")
410
+ print(f" In-tolerance %: {metrics.get('in_tolerance_percentage', 0):.1f}%")
411
+
412
+ # Generate comparison video if requested
413
+ if args.generate_video:
414
+ from pathlib import Path
415
+
416
+ print("\n" + "="*60)
417
+ print("GENERATING COMPARISON VIDEO")
418
+ print("="*60)
419
+
420
+ try:
421
+ from .video_from_images import create_comparison_video_from_images
422
+ from .user_processor import process_user_video
423
+ from .reference_processor import load_reference
424
+
425
+ # Determine output path
426
+ if args.video_output:
427
+ video_output = args.video_output
428
+ else:
429
+ user_video_stem = Path(args.user_video).stem
430
+ video_output = f"comparison_{user_video_stem}.mp4"
431
+
432
+ # Find the pose3D image directories
433
+ # User images: user_videos_cache/{video_name}/pose3D
434
+ user_video_name = Path(args.user_video).stem
435
+ user_image_dir = Path('user_videos_cache') / user_video_name / 'pose3D'
436
+
437
+ # Reference images: references/{exercise}/temp_processing/pose3D
438
+ ref_data = load_reference(args.reference, references_dir=args.references_dir)
439
+ ref_dir = Path(ref_data['ref_dir'])
440
+ reference_image_dir = ref_dir / 'temp_processing' / 'pose3D'
441
+
442
+ # Check if directories exist
443
+ if not user_image_dir.exists():
444
+ print(f"⚠ Warning: User pose3D images not found at {user_image_dir}")
445
+ print(" Attempting to process user video to generate images...")
446
+ process_user_video(args.user_video, cleanup=False)
447
+ user_image_dir = Path('user_videos_cache') / user_video_name / 'pose3D'
448
+
449
+ if not reference_image_dir.exists():
450
+ # Try alternative location
451
+ reference_image_dir = ref_dir / 'pose3D'
452
+ if not reference_image_dir.exists():
453
+ raise FileNotFoundError(
454
+ f"Reference pose3D images not found. Tried:\n"
455
+ f" {ref_dir / 'temp_processing' / 'pose3D'}\n"
456
+ f" {ref_dir / 'pose3D'}"
457
+ )
458
+
459
+ print(f" User images: {user_image_dir}")
460
+ print(f" Reference images: {reference_image_dir}")
461
+
462
+ # Create the video from existing images
463
+ create_comparison_video_from_images(
464
+ user_image_dir=str(user_image_dir),
465
+ reference_image_dir=str(reference_image_dir),
466
+ output_path=video_output,
467
+ user_video_name="Your Form",
468
+ reference_name="Correct Form",
469
+ fps=args.video_fps
470
+ )
471
+
472
+ except ImportError as e:
473
+ print(f"✗ Error: Missing dependency for video generation")
474
+ print(f" {e}")
475
+ print("\nPlease ensure matplotlib and ffmpeg are installed:")
476
+ print(" pip install matplotlib")
477
+ print(" And install FFmpeg from: https://ffmpeg.org/download.html")
478
+ except Exception as e:
479
+ print(f"✗ Error generating comparison video: {e}")
480
+ import traceback
481
+ traceback.print_exc()
482
+
483
+ except Exception as e:
484
+ print(f"\nERROR: {e}")
485
+ import traceback
486
+ traceback.print_exc()
487
+
fitness_coach/noise_scoring.py ADDED
@@ -0,0 +1,296 @@
1
+ """
2
+ Noise-Based Scoring System
3
+ Generates noisy reference samples and scores user poses against them
4
+ """
5
+
6
+ import numpy as np
7
+ from .body_parts import (
8
+ get_joint_noise_level,
9
+ calculate_body_scale,
10
+ JOINT_GROUPS
11
+ )
12
+
13
+
14
+ def create_noisy_samples(ref_poses, n_samples=100, noise_std=None, per_joint_noise=True):
15
+ """
16
+ Create noisy reference samples for scoring
17
+
18
+ Args:
19
+ ref_poses: Reference poses [frames, 17, 3]
20
+ n_samples: Number of noisy samples to generate
21
+ noise_std: Overall noise standard deviation (as fraction of body scale)
22
+ If None, uses per-joint noise levels
23
+ per_joint_noise: If True, use different noise levels per joint
24
+
25
+ Returns:
26
+ noisy_samples: Array of shape [n_samples, frames, 17, 3]
27
+ """
28
+ ref_poses = np.array(ref_poses)
29
+ body_scale = calculate_body_scale(ref_poses)
30
+
31
+ noisy_samples = []
32
+
33
+ for _ in range(n_samples):
34
+ noisy_pose = ref_poses.copy()
35
+
36
+ for frame_idx in range(len(ref_poses)):
37
+ for joint_idx in range(17):
38
+ if per_joint_noise:
39
+ # Use joint-specific noise level
40
+ joint_noise_std = get_joint_noise_level(joint_idx) * body_scale
41
+ else:
42
+ # Use uniform noise
43
+ if noise_std is None:
44
+ noise_std = 0.05 # Default 5% of body scale
45
+ joint_noise_std = noise_std * body_scale
46
+
47
+ # Add Gaussian noise to each coordinate
48
+ noise = np.random.normal(
49
+ loc=0.0,
50
+ scale=joint_noise_std,
51
+ size=3
52
+ )
53
+ noisy_pose[frame_idx, joint_idx, :] += noise
54
+
55
+ noisy_samples.append(noisy_pose)
56
+
57
+ return np.array(noisy_samples)
58
+
59
+
60
+ def calculate_statistical_bounds(ref_poses, noise_std=0.04, confidence=0.95):
61
+ """
62
+ Calculate statistical bounds (mean ± std) for reference poses
63
+
64
+ Args:
65
+ ref_poses: Reference poses [frames, 17, 3]
66
+ noise_std: Noise standard deviation (as fraction of body scale, default 4%)
67
+ confidence: Confidence level (0.95 = 95%)
68
+
69
+ Returns:
70
+ mean_poses: Mean poses [frames, 17, 3]
71
+ lower_bound: Lower bound [frames, 17, 3]
72
+ upper_bound: Upper bound [frames, 17, 3]
73
+ tolerance: Tolerance per joint coordinate [frames, 17, 3]
74
+ """
75
+ ref_poses = np.array(ref_poses)
76
+ body_scale = calculate_body_scale(ref_poses)
77
+
78
+ # Generate many samples and calculate statistics
79
+ n_samples = 1000
80
+ noisy_samples = create_noisy_samples(
81
+ ref_poses,
82
+ n_samples=n_samples,
83
+ noise_std=noise_std,
84
+ per_joint_noise=False
85
+ )
86
+
87
+ # Calculate mean and std
88
+ mean_poses = np.mean(noisy_samples, axis=0)
89
+ std_poses = np.std(noisy_samples, axis=0)
90
+
91
+ # Calculate bounds based on confidence level
92
+ # For 95% confidence, use ~2 standard deviations
93
+ z_score = 1.96 if confidence == 0.95 else 2.576 # 99% confidence
94
+
95
+ lower_bound = mean_poses - z_score * std_poses
96
+ upper_bound = mean_poses + z_score * std_poses
97
+
98
+ # Tolerance is the distance from mean to bound
99
+ tolerance = z_score * std_poses
100
+
101
+ return mean_poses, lower_bound, upper_bound, tolerance
102
+
103
+
104
+ def score_with_noisy_reference(user_poses, ref_poses, noisy_samples=None, n_samples=100):
105
+ """
106
+ Score user poses against noisy reference samples
107
+
108
+ Args:
109
+ user_poses: User pose sequence [frames, 17, 3]
110
+ ref_poses: Reference pose sequence [frames, 17, 3]
111
+ noisy_samples: Pre-generated noisy samples [n_samples, frames, 17, 3]
112
+ If None, generates them
113
+ n_samples: Number of samples to generate if noisy_samples is None
114
+
115
+ Returns:
116
+ scores: Dictionary with overall and per-body-part scores
117
+ """
118
+ user_poses = np.array(user_poses)
119
+ ref_poses = np.array(ref_poses)
120
+
121
+ # Generate noisy samples if not provided
122
+ if noisy_samples is None:
123
+ noisy_samples = create_noisy_samples(ref_poses, n_samples=n_samples)
124
+
125
+ # Align temporally (simple resampling for now, DTW in comparison.py)
126
+ from .utils import interpolate_sequence
127
+ target_length = max(len(user_poses), len(ref_poses))
128
+ user_aligned = interpolate_sequence(user_poses, target_length)
129
+ ref_aligned = interpolate_sequence(ref_poses, target_length)
130
+ noisy_aligned = np.array([
131
+ interpolate_sequence(sample, target_length)
132
+ for sample in noisy_samples
133
+ ])
134
+
135
+ # Normalize by body scale
136
+ from .utils import normalize_body_scale
137
+ user_norm, _ = normalize_body_scale(user_aligned)
138
+ ref_norm, ref_scale = normalize_body_scale(ref_aligned)
139
+ noisy_norm = np.array([
140
+ normalize_body_scale(sample, reference_scale=ref_scale)[0]
141
+ for sample in noisy_aligned
142
+ ])
143
+
144
+ # Calculate scores per body part
145
+ body_part_scores = {}
146
+ frame_scores = []
147
+
148
+ for frame_idx in range(len(user_norm)):
149
+ user_frame = user_norm[frame_idx] # [17, 3]
150
+ ref_frame = ref_norm[frame_idx]
151
+ noisy_frames = noisy_norm[:, frame_idx, :, :] # [n_samples, 17, 3]
152
+
153
+ # Calculate distance from user to reference
154
+ user_to_ref_dist = np.linalg.norm(user_frame - ref_frame, axis=1) # [17]
155
+
156
+ # Calculate distances from each noisy sample to reference
157
+ noisy_to_ref_dists = np.array([
158
+ np.linalg.norm(noisy_frame - ref_frame, axis=1)
159
+ for noisy_frame in noisy_frames
160
+ ]) # [n_samples, 17]
161
+
162
+ # Score: percentage of noisy samples that are "worse" than user
163
+ # (i.e., user is within acceptable range)
164
+ frame_scores_per_joint = []
165
+ for joint_idx in range(17):
166
+ user_dist = user_to_ref_dist[joint_idx]
167
+ noisy_dists = noisy_to_ref_dists[:, joint_idx]
168
+
169
+ # How many noisy samples are further from reference than user?
170
+ better_than = np.sum(noisy_dists > user_dist)
171
+ score = (better_than / len(noisy_dists)) * 100
172
+ frame_scores_per_joint.append(score)
173
+
174
+ frame_scores.append(frame_scores_per_joint)
175
+
176
+ frame_scores = np.array(frame_scores) # [frames, 17]
177
+
178
+ # Aggregate by body part
179
+ for part_name, joint_indices in JOINT_GROUPS.items():
180
+ part_scores = frame_scores[:, joint_indices]
181
+ body_part_scores[part_name] = float(np.mean(part_scores))
182
+
183
+ # Overall score
184
+ overall_score = float(np.mean(frame_scores))
185
+
186
+ return {
187
+ 'overall_score': overall_score,
188
+ 'body_part_scores': body_part_scores,
189
+ 'frame_scores': frame_scores.tolist(),
190
+ 'per_joint_scores': np.mean(frame_scores, axis=0).tolist()
191
+ }
192
+
193
+
194
+ def score_with_statistical_bounds(user_poses, ref_poses, noise_std=0.04):
195
+ """
196
+ Score using statistical bounds (faster than noisy samples)
197
+
198
+ Args:
199
+ user_poses: User pose sequence [frames, 17, 3]
200
+ ref_poses: Reference pose sequence [frames, 17, 3]
201
+ noise_std: Noise standard deviation (as fraction of body scale)
202
+ Default 0.04 = 4% tolerance, accounts for timing differences
203
+
204
+ Returns:
205
+ scores: Dictionary with overall and per-body-part scores
206
+ """
207
+ user_poses = np.array(user_poses)
208
+ ref_poses = np.array(ref_poses)
209
+
210
+ # Calculate bounds (already accounts for body scale)
211
+ mean_poses, lower_bound, upper_bound, tolerance = calculate_statistical_bounds(
212
+ ref_poses, noise_std=noise_std
213
+ )
214
+
215
+ # Align temporally
216
+ from .utils import interpolate_sequence
217
+ target_length = max(len(user_poses), len(ref_poses))
218
+ user_aligned = interpolate_sequence(user_poses, target_length)
219
+ mean_aligned = interpolate_sequence(mean_poses, target_length)
220
+
221
+ # Normalize poses (but not tolerance - it's already in the right scale)
222
+ from .utils import normalize_body_scale
223
+ user_norm, user_scale = normalize_body_scale(user_aligned)
224
+ mean_norm, _ = normalize_body_scale(mean_aligned)
225
+
226
+ # Scale the tolerance by the same factor used for normalization
227
+ # This keeps it proportional to the noise_std parameter
228
+ body_scale = calculate_body_scale(user_aligned)
229
+ tolerance_scaled = tolerance * (1.0 / body_scale)
230
+ tolerance_aligned = interpolate_sequence(tolerance_scaled, target_length)
231
+
232
+ # Check if user poses are within tolerance
233
+ distances = np.linalg.norm(user_norm - mean_norm, axis=2) # [frames, 17]
234
+ tolerance_per_joint = np.linalg.norm(tolerance_aligned, axis=2) # [frames, 17]
235
+
236
+ # Score: percentage of time within tolerance
237
+ within_tolerance = distances < tolerance_per_joint
238
+ joint_scores = np.mean(within_tolerance, axis=0) * 100 # [17]
239
+ frame_scores = np.mean(within_tolerance, axis=1) * 100 # [frames]
240
+
241
+ # Aggregate by body part with detailed metrics
242
+ body_part_scores = {}
243
+ body_part_details = {}
244
+
245
+ for part_name, joint_indices in JOINT_GROUPS.items():
246
+ # Score
247
+ body_part_scores[part_name] = float(np.mean(joint_scores[joint_indices]))
248
+
249
+ # Detailed metrics for this body part
250
+ part_distances = distances[:, joint_indices] # [frames, num_joints_in_part]
251
+ part_tolerance = tolerance_per_joint[:, joint_indices]
252
+ part_within = within_tolerance[:, joint_indices]
253
+
254
+ body_part_details[part_name] = {
255
+ 'position_error': float(np.mean(part_distances)),
256
+ 'max_position_error': float(np.max(part_distances)),
257
+ 'in_tolerance_percentage': float(np.mean(part_within) * 100),
258
+ 'tolerance_threshold': float(np.mean(part_tolerance)),
259
+ }
260
+
261
+ overall_score = float(np.mean(frame_scores))
262
+
263
+ return {
264
+ 'overall_score': overall_score,
265
+ 'body_part_scores': body_part_scores,
266
+ 'body_part_details': body_part_details,
267
+ 'frame_scores': frame_scores.tolist(),
268
+ 'per_joint_scores': joint_scores.tolist()
269
+ }
270
+
271
+
272
+ if __name__ == "__main__":
273
+ # Test noise scoring
274
+ print("Testing noise-based scoring...")
275
+
276
+ # Create test data
277
+ ref_poses = np.random.randn(50, 17, 3)
278
+ user_poses = ref_poses + np.random.normal(0, 0.1, ref_poses.shape) # Slightly different
279
+
280
+ # Test noisy sample generation
281
+ noisy_samples = create_noisy_samples(ref_poses, n_samples=50)
282
+ print(f"Generated {len(noisy_samples)} noisy samples")
283
+ print(f"Noisy samples shape: {noisy_samples.shape}")
284
+
285
+ # Test statistical bounds
286
+ mean, lower, upper, tolerance = calculate_statistical_bounds(ref_poses)
287
+ print(f"Statistical bounds calculated: mean shape {mean.shape}")
288
+
289
+ # Test scoring
290
+ scores = score_with_statistical_bounds(user_poses, ref_poses)
291
+ print(f"\nScoring results:")
292
+ print(f" Overall score: {scores['overall_score']:.2f}")
293
+ print(f" Body part scores: {scores['body_part_scores']}")
294
+
295
+ print("\nNoise scoring tests passed!")
296
+
fitness_coach/persona_model.py ADDED
@@ -0,0 +1,169 @@
1
+ """
2
+ Fine-tuned persona model for generating coaching feedback
3
+
4
+ This module provides a local alternative to the Gemini API by using
5
+ fine-tuned transformer models for each persona.
6
+ """
7
+
8
+ import os
9
+ import torch
10
+ from pathlib import Path
11
+ from transformers import AutoTokenizer, AutoModelForCausalLM
12
+
13
+
14
+ class PersonaModel:
15
+ """Fine-tuned model for generating persona-specific coaching feedback"""
16
+
17
+ def __init__(self, persona_name, model_dir='./models'):
18
+ """
19
+ Initialize persona model
20
+
21
+ Args:
22
+ persona_name: Name of persona (e.g., "Hype Beast")
23
+ model_dir: Directory containing fine-tuned models
24
+ """
25
+ self.persona_name = persona_name
26
+ persona_safe = persona_name.lower().replace(' ', '_')
27
+ model_path = Path(model_dir) / f'persona_{persona_safe}'
28
+
29
+ if not model_path.exists():
30
+ raise FileNotFoundError(
31
+ f"Model not found at {model_path}. "
32
+ f"Please fine-tune the model first using scripts/fine_tune_persona.py"
33
+ )
34
+
35
+ print(f"Loading persona model: {persona_name} from {model_path}")
36
+
37
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path)
38
+ self.model = AutoModelForCausalLM.from_pretrained(model_path)
39
+ self.model.eval() # Set to evaluation mode
40
+
41
+ def generate_feedback(self, objective_report, max_length=512, temperature=0.7):
42
+ """
43
+ Generate coaching feedback for given objective report
44
+
45
+ Args:
46
+ objective_report: Objective performance report string
47
+ max_length: Maximum length of generated text
48
+ temperature: Sampling temperature (higher = more creative)
49
+
50
+ Returns:
51
+ Generated feedback string
52
+ """
53
+ prompt = f"<|persona|>{self.persona_name}<|input|>{objective_report}<|output|>"
54
+
55
+ inputs = self.tokenizer(
56
+ prompt,
57
+ return_tensors="pt",
58
+ truncation=True,
59
+ max_length=512
60
+ )
61
+
62
+ with torch.no_grad():
63
+ outputs = self.model.generate(
64
+ **inputs,
65
+ max_length=max_length,
66
+ temperature=temperature,
67
+ do_sample=True,
68
+ pad_token_id=self.tokenizer.eos_token_id,
69
+ eos_token_id=self.tokenizer.convert_tokens_to_ids('<|endoftext|>') if '<|endoftext|>' in self.tokenizer.get_vocab() else self.tokenizer.eos_token_id,
70
+ )
71
+
72
+ generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=False)
73
+
74
+ # Extract output part
75
+ if '<|output|>' in generated_text:
76
+ output = generated_text.split('<|output|>')[-1]
77
+ if '<|endoftext|>' in output:
78
+ output = output.split('<|endoftext|>')[0]
79
+ return output.strip()
80
+
81
+ return generated_text
82
+
83
+
84
+ def get_persona_feedback(objective_report, persona_name, use_fine_tuned=True, fallback_to_gemini=True):
85
+ """
86
+ Get persona feedback, using fine-tuned model if available, otherwise Gemini API
87
+
88
+ Args:
89
+ objective_report: Objective performance report
90
+ persona_name: Name of persona
91
+ use_fine_tuned: Try to use fine-tuned model first
92
+ fallback_to_gemini: Fall back to Gemini API if fine-tuned model fails
93
+
94
+ Returns:
95
+ Feedback string
96
+ """
97
+ # Try fine-tuned model first
98
+ if use_fine_tuned:
99
+ try:
100
+ model = PersonaModel(persona_name)
101
+ feedback = model.generate_feedback(objective_report)
102
+ print(f"✅ Generated feedback using fine-tuned model for {persona_name}")
103
+ return feedback
104
+ except FileNotFoundError:
105
+ print(f"⚠️ Fine-tuned model not found for {persona_name}, falling back to Gemini")
106
+ except Exception as e:
107
+ print(f"⚠️ Error using fine-tuned model: {e}, falling back to Gemini")
108
+
109
+ # Fall back to Gemini API
110
+ if fallback_to_gemini:
111
+ try:
112
+ import google.generativeai as genai
113
+
114
+ api_key = os.environ.get('GEMINI_API_KEY')
115
+ if not api_key:
116
+ config_path = Path(__file__).parent.parent / 'config.txt'
117
+ if config_path.exists():
118
+ api_key = config_path.read_text().strip()
119
+
120
+ if not api_key:
121
+ raise ValueError("GEMINI_API_KEY not found")
122
+
123
+ genai.configure(api_key=api_key)
124
+
125
+ # Persona system instructions
126
+ PERSONAS = {
127
+ "Hype Beast": "You are The Hype Beast. Your tone is ultra-motivational, energetic, and uses enthusiastic modern slang. You focus on confidence, confidence, confidence, bringing the energy, and framing corrections as leveling up. Use many emojis and exclamation points. Make the user feel like a star. DO NOT use LaTeX formatting like $...$ for scores or percentages.",
128
+ "Data Scientist": "You are The Data Scientist. You speak in precise, objective terms. Your feedback uses specific metrics and quantified improvement. Translate complex biomechanical terms into clear, actionable, technical advice that a novice can understand. Use a formal, structured report format. DO NOT use LaTeX formatting like $...$ for scores or percentages.",
129
+ "No-Nonsense Pro": "You are The No-Nonsense Pro. You are direct, challenging, and slightly impatient with wasted effort. Your language is concise and demanding. Emphasize immediate correction and demand a higher standard of execution. Focus on 'why' the bad form wastes energy and must be fixed NOW. DO NOT use LaTeX formatting like $...$ for scores or percentages.",
130
+ "Mindful Aligner": "You are The Mindful Aligner. Your tone is calm, centered, and encouraging. You focus on connecting movement to breath, finding internal stability, and making gentle, internal adjustments to achieve proper alignment. Use soft, encouraging language. DO NOT use LaTeX formatting like $...$ for scores or percentages.",
131
+ }
132
+
133
+ system_instruction = PERSONAS.get(persona_name, PERSONAS["Hype Beast"])
134
+
135
+ prompt = f"""Based on the following objective performance report for a workout, you must adopt the selected persona and provide detailed, actionable coaching feedback. Start your response with a clear, concise title using a markdown H1 heading (# TITLE). DO NOT use LaTeX commands like $...$ or \\frac{{}}{{}} for scores, percentages, or numbers. Use plain text and standard Unicode symbols (like the percent sign %).
136
+
137
+ **OBJECTIVE PERFORMANCE DATA:**
138
+ ---
139
+ {objective_report}
140
+ ---"""
141
+
142
+ # Try available models
143
+ model_names = [
144
+ "gemini-1.5-flash-latest",
145
+ "gemini-1.5-flash-002",
146
+ "gemini-1.5-pro-latest",
147
+ "gemini-pro",
148
+ ]
149
+
150
+ for model_name in model_names:
151
+ try:
152
+ model = genai.GenerativeModel(
153
+ model_name=model_name,
154
+ system_instruction=system_instruction
155
+ )
156
+ response = model.generate_content(prompt)
157
+ print(f"✅ Generated feedback using Gemini API ({model_name})")
158
+ return response.text
159
+ except Exception as e:
160
+ continue
161
+
162
+ raise Exception("All Gemini models failed")
163
+
164
+ except Exception as e:
165
+ raise Exception(f"Failed to generate feedback: {e}")
166
+
167
+ raise Exception("No feedback generation method available")
168
+
169
+
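+ # Usage sketch (illustrative only; the objective report string below is a placeholder):
+ #
+ #   from fitness_coach.persona_model import get_persona_feedback
+ #   feedback = get_persona_feedback("Overall Score: 82.5/100, core below tolerance ...", "Hype Beast")
+ #   print(feedback)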
fitness_coach/pose_invariant.py ADDED
@@ -0,0 +1,328 @@
1
+ """
2
+ Pose-invariant comparison focusing on joint angles and relative positions
3
+ Rather than absolute 3D coordinates in space
4
+ """
5
+
6
+ import numpy as np
7
+ from scipy.spatial.transform import Rotation
8
+
9
+
10
+ def align_to_canonical_pose(poses):
11
+ """
12
+ Align poses to a canonical orientation using the torso as reference
13
+ This removes rotation/translation variations
14
+
15
+ Args:
16
+ poses: Pose sequence [frames, 17, 3]
17
+
18
+ Returns:
19
+ aligned_poses: Poses in canonical orientation [frames, 17, 3]
20
+ """
21
+ poses = np.array(poses)
22
+ aligned_poses = np.zeros_like(poses)
23
+
24
+ # Joint indices
25
+ # 0: pelvis, 1: R_hip, 2: R_knee, 3: R_ankle
26
+ # 4: L_hip, 5: L_knee, 6: L_ankle
27
+ # 7: spine, 8: thorax, 9: neck, 10: head
28
+ # 11: L_shoulder, 12: L_elbow, 13: L_wrist
29
+ # 14: R_shoulder, 15: R_elbow, 16: R_wrist
30
+
31
+ for frame_idx in range(len(poses)):
32
+ pose = poses[frame_idx].copy()
33
+
34
+ # Center at pelvis (joint 0)
35
+ pelvis = pose[0]
36
+ pose_centered = pose - pelvis
37
+
38
+ # Define torso orientation using pelvis (0) and thorax (8)
39
+ pelvis_pt = pose_centered[0] # Should be [0,0,0] now
40
+ thorax_pt = pose_centered[8]
41
+
42
+ # Create coordinate system from torso
43
+ # Y-axis: pelvis to thorax (up direction)
44
+ y_axis = thorax_pt - pelvis_pt
45
+ y_axis = y_axis / (np.linalg.norm(y_axis) + 1e-8)
46
+
47
+ # X-axis: perpendicular to torso in frontal plane
48
+ # Use shoulders to determine front direction
49
+ left_shoulder = pose_centered[11]
50
+ right_shoulder = pose_centered[14]
51
+ shoulder_vec = left_shoulder - right_shoulder
52
+
53
+ # Z-axis: perpendicular to both (forward direction)
54
+ z_axis = np.cross(shoulder_vec, y_axis)
55
+ z_axis = z_axis / (np.linalg.norm(z_axis) + 1e-8)
56
+
57
+ # Recompute X-axis to ensure orthogonality
58
+ x_axis = np.cross(y_axis, z_axis)
59
+ x_axis = x_axis / (np.linalg.norm(x_axis) + 1e-8)
60
+
61
+ # Rotation matrix from pose frame to canonical frame
62
+ rotation_matrix = np.column_stack([x_axis, y_axis, z_axis])
63
+
64
+ # Apply rotation to all joints
65
+ pose_aligned = pose_centered @ rotation_matrix
66
+
67
+ aligned_poses[frame_idx] = pose_aligned
68
+
69
+ return aligned_poses
70
+
71
+
72
+ def compute_joint_angles(poses):
73
+ """
74
+ Compute joint angles from 3D poses
75
+
76
+ Args:
77
+ poses: Pose sequence [frames, 17, 3]
78
+
79
+ Returns:
80
+ angles: Joint angles [frames, n_angles]
81
+ """
82
+ poses = np.array(poses)
83
+ n_frames = len(poses)
84
+
85
+ # Define bone connections for angle calculation
86
+ # Format: (parent_joint, child_joint_1, child_joint_2)
87
+ angle_triplets = [
88
+ # Right leg angles
89
+ (0, 1, 2), # Hip-knee angle (right)
90
+ (1, 2, 3), # Knee-ankle angle (right)
91
+
92
+ # Left leg angles
93
+ (0, 4, 5), # Hip-knee angle (left)
94
+ (4, 5, 6), # Knee-ankle angle (left)
95
+
96
+ # Spine angles
97
+ (0, 7, 8), # Pelvis-spine-thorax
98
+ (7, 8, 9), # Spine-thorax-neck
99
+ (8, 9, 10), # Thorax-neck-head
100
+
101
+ # Right arm angles
102
+ (8, 14, 15), # Thorax-shoulder-elbow (right)
103
+ (14, 15, 16),# Shoulder-elbow-wrist (right)
104
+
105
+ # Left arm angles
106
+ (8, 11, 12), # Thorax-shoulder-elbow (left)
107
+ (11, 12, 13),# Shoulder-elbow-wrist (left)
108
+ ]
109
+
110
+ angles = np.zeros((n_frames, len(angle_triplets)))
111
+
112
+ for frame_idx in range(n_frames):
113
+ pose = poses[frame_idx]
114
+
115
+ for angle_idx, (j0, j1, j2) in enumerate(angle_triplets):
116
+ # Vectors
117
+ v1 = pose[j1] - pose[j0]
118
+ v2 = pose[j2] - pose[j1]
119
+
120
+ # Normalize
121
+ v1_norm = v1 / (np.linalg.norm(v1) + 1e-8)
122
+ v2_norm = v2 / (np.linalg.norm(v2) + 1e-8)
123
+
124
+ # Angle between vectors
125
+ cos_angle = np.clip(np.dot(v1_norm, v2_norm), -1.0, 1.0)
126
+ angle = np.arccos(cos_angle)
127
+
128
+ angles[frame_idx, angle_idx] = angle
129
+
130
+ return angles
131
+
132
+
133
+ def compute_relative_distances(poses):
134
+ """
135
+ Compute relative distances between key joint pairs
136
+ (bone lengths, normalized by body scale)
137
+
138
+ Args:
139
+ poses: Pose sequence [frames, 17, 3]
140
+
141
+ Returns:
142
+ distances: Relative distances [frames, n_distances]
143
+ """
144
+ poses = np.array(poses)
145
+ n_frames = len(poses)
146
+
147
+ # Define important bone connections
148
+ bone_pairs = [
149
+ # Limbs
150
+ (1, 2), # Right thigh
151
+ (2, 3), # Right shin
152
+ (4, 5), # Left thigh
153
+ (5, 6), # Left shin
154
+ (14, 15), # Right upper arm
155
+ (15, 16), # Right forearm
156
+ (11, 12), # Left upper arm
157
+ (12, 13), # Left forearm
158
+
159
+ # Torso
160
+ (0, 8), # Pelvis to thorax
161
+ (8, 9), # Thorax to neck
162
+ (9, 10), # Neck to head
163
+
164
+ # Shoulder width
165
+ (11, 14), # Left to right shoulder
166
+
167
+ # Hip width
168
+ (1, 4), # Left to right hip
169
+ ]
170
+
171
+ distances = np.zeros((n_frames, len(bone_pairs)))
172
+
173
+ for frame_idx in range(n_frames):
174
+ pose = poses[frame_idx]
175
+
176
+ for dist_idx, (j1, j2) in enumerate(bone_pairs):
177
+ dist = np.linalg.norm(pose[j2] - pose[j1])
178
+ distances[frame_idx, dist_idx] = dist
179
+
180
+ return distances
181
+
182
+
183
+ def pose_invariant_comparison(user_poses, ref_poses):
184
+ """
185
+ Compare poses in an orientation-invariant way
186
+ Focuses on joint angles and relative bone lengths
187
+
188
+ Args:
189
+ user_poses: User pose sequence [frames, 17, 3]
190
+ ref_poses: Reference pose sequence [frames, 17, 3]
191
+
192
+ Returns:
193
+ angle_similarity: Similarity score based on joint angles (0-100)
194
+ distance_similarity: Similarity score based on bone lengths (0-100)
195
+ combined_score: Combined similarity score (0-100)
196
+ """
197
+ # Align both sequences to canonical orientation
198
+ user_aligned = align_to_canonical_pose(user_poses)
199
+ ref_aligned = align_to_canonical_pose(ref_poses)
200
+
201
+ # Compute joint angles
202
+ user_angles = compute_joint_angles(user_aligned)
203
+ ref_angles = compute_joint_angles(ref_aligned)
204
+
205
+ # Compute relative distances
206
+ user_distances = compute_relative_distances(user_aligned)
207
+ ref_distances = compute_relative_distances(ref_aligned)
208
+
209
+ # Normalize distances by body scale
210
+ user_scale = np.mean(user_distances)
211
+ ref_scale = np.mean(ref_distances)
212
+ user_distances_norm = user_distances / (user_scale + 1e-8)
213
+ ref_distances_norm = ref_distances / (ref_scale + 1e-8)
214
+
215
+ # Compare angles (in radians)
216
+ angle_diff = np.abs(user_angles - ref_angles)
217
+ # Convert to degrees for interpretability
218
+ angle_diff_deg = np.rad2deg(angle_diff)
219
+
220
+ # Score: percentage of angles within tolerance (15 degrees)
221
+ angle_tolerance_deg = 15.0
222
+ within_angle_tolerance = angle_diff_deg < angle_tolerance_deg
223
+ angle_similarity = float(np.mean(within_angle_tolerance) * 100)
224
+
225
+ # Compare relative distances
226
+ distance_diff = np.abs(user_distances_norm - ref_distances_norm)
227
+
228
+ # Score: percentage of distances within tolerance (10% of normalized value)
229
+ distance_tolerance = 0.10
230
+ within_distance_tolerance = distance_diff < distance_tolerance
231
+ distance_similarity = float(np.mean(within_distance_tolerance) * 100)
232
+
233
+ # Combined score (weighted average)
234
+ # Angles are more important for form
235
+ combined_score = 0.7 * angle_similarity + 0.3 * distance_similarity
236
+
237
+ return angle_similarity, distance_similarity, combined_score
238
+
239
+
240
+ def pose_invariant_score_by_body_part(user_poses, ref_poses, body_part_groups):
241
+ """
242
+ Compute pose-invariant scores for each body part
243
+
244
+ Args:
245
+ user_poses: User pose sequence [frames, 17, 3]
246
+ ref_poses: Reference pose sequence [frames, 17, 3]
247
+ body_part_groups: Dict mapping body part names to joint indices
248
+
249
+ Returns:
250
+ body_part_scores: Dict of scores per body part
251
+ """
252
+ # Align poses
253
+ user_aligned = align_to_canonical_pose(user_poses)
254
+ ref_aligned = align_to_canonical_pose(ref_poses)
255
+
256
+ # Compute angles
257
+ user_angles = compute_joint_angles(user_aligned)
258
+ ref_angles = compute_joint_angles(ref_aligned)
259
+
260
+ # Angle triplet to joint mapping
261
+ # (which joints are involved in each angle)
262
+ angle_to_joints = [
263
+ [0, 1, 2], # Right hip-knee
264
+ [1, 2, 3], # Right knee-ankle
265
+ [0, 4, 5], # Left hip-knee
266
+ [4, 5, 6], # Left knee-ankle
267
+ [0, 7, 8], # Pelvis-spine-thorax
268
+ [7, 8, 9], # Spine-thorax-neck
269
+ [8, 9, 10], # Thorax-neck-head
270
+ [8, 14, 15], # Right thorax-shoulder-elbow
271
+ [14, 15, 16],# Right shoulder-elbow-wrist
272
+ [8, 11, 12], # Left thorax-shoulder-elbow
273
+ [11, 12, 13],# Left shoulder-elbow-wrist
274
+ ]
275
+
276
+ body_part_scores = {}
277
+
278
+ for part_name, joint_indices in body_part_groups.items():
279
+ # Find angles that involve joints from this body part
280
+ relevant_angle_indices = []
281
+ for angle_idx, angle_joints in enumerate(angle_to_joints):
282
+ # If any joint in the angle belongs to this body part
283
+ if any(j in joint_indices for j in angle_joints):
284
+ relevant_angle_indices.append(angle_idx)
285
+
286
+ if relevant_angle_indices:
287
+ # Score based on relevant angles
288
+ angle_diff = np.abs(user_angles[:, relevant_angle_indices] - ref_angles[:, relevant_angle_indices])
289
+ angle_diff_deg = np.rad2deg(angle_diff)
290
+
291
+ within_tolerance = angle_diff_deg < 15.0 # 15 degree tolerance
292
+ score = float(np.mean(within_tolerance) * 100)
293
+ body_part_scores[part_name] = score
294
+ else:
295
+ body_part_scores[part_name] = 100.0 # No relevant angles
296
+
297
+ return body_part_scores
298
+
299
+
300
+ if __name__ == "__main__":
301
+ # Test pose-invariant comparison
302
+ print("Testing pose-invariant comparison...")
303
+
304
+ # Create test poses
305
+ test_poses1 = np.random.randn(100, 17, 3)
306
+ test_poses2 = test_poses1 + np.random.randn(100, 17, 3) * 0.1
307
+
308
+ # Test alignment
309
+ aligned = align_to_canonical_pose(test_poses1)
310
+ print(f"Aligned poses shape: {aligned.shape}")
311
+
312
+ # Test angles
313
+ angles = compute_joint_angles(test_poses1)
314
+ print(f"Joint angles shape: {angles.shape}")
315
+
316
+ # Test comparison
317
+ angle_sim, dist_sim, combined = pose_invariant_comparison(test_poses1, test_poses2)
318
+ print(f"Angle similarity: {angle_sim:.2f}")
319
+ print(f"Distance similarity: {dist_sim:.2f}")
320
+ print(f"Combined score: {combined:.2f}")
321
+
322
+ print("Tests passed!")
323
+
324
+
325
+
326
+
327
+
328
+
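A minimal usage sketch for this module (synthetic arrays stand in for real pose data, and both sequences are assumed to have the same frame count, e.g. after DTW alignment from `temporal_align.py`):

```python
# Hedged sketch: random arrays stand in for extracted 3D poses.
import numpy as np

from fitness_coach.pose_invariant import (
    align_to_canonical_pose,
    compute_joint_angles,
    pose_invariant_comparison,
)

# Two same-length [frames, 17, 3] sequences (frame-wise comparison assumes equal length).
ref_poses = np.random.randn(60, 17, 3)
user_poses = ref_poses + np.random.normal(0, 0.05, ref_poses.shape)

angle_sim, dist_sim, combined = pose_invariant_comparison(user_poses, ref_poses)
print(f"angles {angle_sim:.1f} | bones {dist_sim:.1f} | combined {combined:.1f}")

# The building blocks can also be used on their own:
canonical = align_to_canonical_pose(user_poses)   # torso-aligned, pelvis-centred poses
angles = compute_joint_angles(canonical)          # [frames, 11] joint angles in radians
```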
fitness_coach/reference_processor.py ADDED
@@ -0,0 +1,296 @@
1
+ """
2
+ Reference Video Processor
3
+ Processes reference videos once and saves noisy samples for scoring
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import numpy as np
9
+ import json
10
+ from pathlib import Path
11
+
12
+ # Add parent directory and demo directory to path
13
+ project_root = Path(__file__).parent.parent
14
+ sys.path.insert(0, str(project_root))
15
+ sys.path.insert(0, str(project_root / 'demo'))
16
+
17
+ from fitness_coach.noise_scoring import create_noisy_samples, calculate_statistical_bounds
18
+ from fitness_coach.body_parts import calculate_body_scale, get_joints_for_exercise
19
+
20
+
21
+ def process_reference_video(video_path, exercise_type='pushup', output_dir=None, n_samples=100):
22
+ """
23
+ Process a reference video and generate noisy samples for scoring
24
+
25
+ Args:
26
+ video_path: Path to reference video file
27
+ exercise_type: Type of exercise (e.g., 'pushup', 'squat')
28
+ output_dir: Directory to save processed data (default: references/{exercise_type}/)
29
+ n_samples: Number of noisy samples to generate
30
+
31
+ Returns:
32
+ Dictionary with paths to saved files and metadata
33
+ """
34
+ # Change to project root for imports to work correctly
35
+ original_cwd = os.getcwd()
36
+ os.chdir(project_root)
37
+
38
+ try:
39
+ # Import after changing directory
40
+ from demo.vis import get_pose2D, get_pose3D
41
+ finally:
42
+ os.chdir(original_cwd)
43
+
44
+ video_path = Path(video_path)
45
+ if not video_path.exists():
46
+ raise FileNotFoundError(f"Video not found: {video_path}")
47
+
48
+ # Set up output directory
49
+ if output_dir is None:
50
+ output_dir = Path('references') / exercise_type
51
+ else:
52
+ output_dir = Path(output_dir)
53
+
54
+ output_dir.mkdir(parents=True, exist_ok=True)
55
+
56
+ print(f"Processing reference video: {video_path.name}")
57
+ print(f"Exercise type: {exercise_type}")
58
+ print(f"Output directory: {output_dir}")
59
+
60
+ # Create temporary output directory for processing
61
+ temp_output = output_dir / 'temp_processing'
62
+ temp_output.mkdir(exist_ok=True)
63
+
64
+ # Format output directory string (get_pose3D expects trailing slash)
65
+ # Use absolute path to avoid issues when changing directories
66
+ temp_output_abs = temp_output.resolve()
67
+ output_dir_str = str(temp_output_abs).replace('\\', '/')
68
+ if not output_dir_str.endswith('/'):
69
+ output_dir_str += '/'
70
+
71
+ video_path_abs = video_path.resolve()
72
+
73
+ # Change to project root for processing
74
+ os.chdir(project_root)
75
+
76
+ # Save original argv and temporarily clear it to avoid argparse conflicts
77
+ original_argv = sys.argv.copy()
78
+ sys.argv = [sys.argv[0]] # Keep only script name
79
+
80
+ try:
81
+ # Step 1: Extract 2D poses
82
+ print("\n[1/4] Extracting 2D poses...")
83
+ try:
84
+ # get_pose2D expects output_dir with trailing slash
85
+ # It adds 'input_2D/' to it (line 95 in vis.py)
86
+ get_pose2D(str(video_path_abs), output_dir_str)
87
+ except Exception as e:
88
+ print(f"Error in 2D pose extraction: {e}")
89
+ raise
90
+
91
+ # Step 2: Extract 3D poses
92
+ print("\n[2/4] Extracting 3D poses...")
93
+ try:
94
+ # get_pose3D also expects output_dir with trailing slash
95
+ # It looks for output_dir + 'input_2D/keypoints.npz' (line 190 in vis.py)
96
+ get_pose3D(str(video_path_abs), output_dir_str)
97
+ except Exception as e:
98
+ print(f"Error in 3D pose extraction: {e}")
99
+ raise
100
+ finally:
101
+ sys.argv = original_argv # Restore original argv
102
+ os.chdir(original_cwd)
103
+
104
+ # Step 3: Load 3D poses
105
+ # get_pose3D saves to output_dir + 'keypoints_3D.npz' (line 279 in vis.py)
106
+ keypoints_3d_path = temp_output_abs / 'keypoints_3D.npz'
107
+
108
+ if not keypoints_3d_path.exists():
109
+ # Try alternative locations in case path handling differs
110
+ alt_paths = [
111
+ temp_output_abs / 'keypoints_3D.npz',
112
+ temp_output_abs.parent / 'keypoints_3D.npz',
113
+ ]
114
+ for alt_path in alt_paths:
115
+ if alt_path.exists():
116
+ keypoints_3d_path = alt_path
117
+ break
118
+ else:
119
+ # List what files actually exist to help debug
120
+ print(f"\nDebug: Looking for keypoints_3D.npz")
121
+ print(f"Expected location: {keypoints_3d_path}")
122
+ print(f"Files in temp_processing:")
123
+ if temp_output_abs.exists():
124
+ for item in temp_output_abs.rglob('*'):
125
+ if item.is_file():
126
+ print(f" {item}")
127
+ raise FileNotFoundError(f"3D keypoints not found: {keypoints_3d_path}")
128
+
129
+ keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
130
+ print(f"Loaded {len(keypoints_3d)} frames of 3D poses")
131
+
132
+ # Convert to numpy array if needed
133
+ if isinstance(keypoints_3d, list):
134
+ keypoints_3d = np.array(keypoints_3d)
135
+
136
+ # Step 4: Generate noisy samples
137
+ print(f"\n[3/4] Generating {n_samples} noisy samples...")
138
+ noisy_samples = create_noisy_samples(keypoints_3d, n_samples=n_samples, per_joint_noise=True)
139
+ print(f"Generated noisy samples shape: {noisy_samples.shape}")
140
+
141
+ # Step 5: Calculate metadata
142
+ print("\n[4/4] Calculating metadata...")
143
+ body_scale = calculate_body_scale(keypoints_3d)
144
+ relevant_body_parts = get_joints_for_exercise(exercise_type)
145
+
146
+ # Calculate statistical bounds
147
+ mean_poses, lower_bound, upper_bound, tolerance = calculate_statistical_bounds(
148
+ keypoints_3d, noise_std=0.05
149
+ )
150
+
151
+ metadata = {
152
+ 'exercise_type': exercise_type,
153
+ 'video_path': str(video_path),
154
+ 'video_name': video_path.stem,
155
+ 'num_frames': len(keypoints_3d),
156
+ 'body_scale': float(body_scale),
157
+ 'relevant_body_parts': relevant_body_parts,
158
+ 'n_samples': n_samples,
159
+ 'timestamp': str(Path(video_path).stat().st_mtime) if video_path.exists() else None
160
+ }
161
+
162
+ # Step 6: Save everything
163
+ print("\nSaving processed data...")
164
+
165
+ # Save 3D poses
166
+ poses_3d_path = output_dir / 'keypoints_3D.npz'
167
+ np.savez_compressed(str(poses_3d_path), reconstruction=keypoints_3d)
168
+ print(f" Saved 3D poses: {poses_3d_path}")
169
+
170
+ # Save noisy samples
171
+ noisy_samples_path = output_dir / 'noisy_samples.npz'
172
+ np.savez_compressed(str(noisy_samples_path), samples=noisy_samples)
173
+ print(f" Saved noisy samples: {noisy_samples_path}")
174
+
175
+ # Save statistical bounds
176
+ bounds_path = output_dir / 'statistical_bounds.npz'
177
+ np.savez_compressed(
178
+ str(bounds_path),
179
+ mean=mean_poses,
180
+ lower_bound=lower_bound,
181
+ upper_bound=upper_bound,
182
+ tolerance=tolerance
183
+ )
184
+ print(f" Saved statistical bounds: {bounds_path}")
185
+
186
+ # Save metadata
187
+ metadata_path = output_dir / 'metadata.json'
188
+ with open(metadata_path, 'w') as f:
189
+ json.dump(metadata, f, indent=2)
190
+ print(f" Saved metadata: {metadata_path}")
191
+
192
+ # Clean up temporary files (optional - keep 2D poses for debugging)
193
+ # import shutil
194
+ # shutil.rmtree(temp_output, ignore_errors=True)
195
+
196
+ print(f"\n✓ Reference video processed successfully!")
197
+ print(f" Output directory: {output_dir}")
198
+
199
+ return {
200
+ 'output_dir': str(output_dir),
201
+ 'poses_3d_path': str(poses_3d_path),
202
+ 'noisy_samples_path': str(noisy_samples_path),
203
+ 'bounds_path': str(bounds_path),
204
+ 'metadata_path': str(metadata_path),
205
+ 'metadata': metadata
206
+ }
207
+
208
+
209
+ def load_reference(exercise_type, references_dir='references'):
210
+ """
211
+ Load a processed reference
212
+
213
+ Args:
214
+ exercise_type: Type of exercise (e.g., 'pushup')
215
+ references_dir: Directory containing references
216
+
217
+ Returns:
218
+ Dictionary with loaded data
219
+ """
220
+ ref_dir = Path(references_dir) / exercise_type
221
+
222
+ if not ref_dir.exists():
223
+ raise FileNotFoundError(f"Reference not found: {ref_dir}")
224
+
225
+ # Load metadata
226
+ metadata_path = ref_dir / 'metadata.json'
227
+ if not metadata_path.exists():
228
+ raise FileNotFoundError(f"Metadata not found: {metadata_path}")
229
+
230
+ with open(metadata_path, 'r') as f:
231
+ metadata = json.load(f)
232
+
233
+ # Load 3D poses
234
+ poses_3d_path = ref_dir / 'keypoints_3D.npz'
235
+ if not poses_3d_path.exists():
236
+ raise FileNotFoundError(f"3D poses not found: {poses_3d_path}")
237
+
238
+ poses_3d = np.load(str(poses_3d_path), allow_pickle=True)['reconstruction']
239
+ if isinstance(poses_3d, list):
240
+ poses_3d = np.array(poses_3d)
241
+
242
+ # Load noisy samples
243
+ noisy_samples_path = ref_dir / 'noisy_samples.npz'
244
+ noisy_samples = None
245
+ if noisy_samples_path.exists():
246
+ noisy_samples = np.load(str(noisy_samples_path), allow_pickle=True)['samples']
247
+
248
+ # Load statistical bounds
249
+ bounds_path = ref_dir / 'statistical_bounds.npz'
250
+ bounds = None
251
+ if bounds_path.exists():
252
+ bounds_data = np.load(str(bounds_path), allow_pickle=True)
253
+ bounds = {
254
+ 'mean': bounds_data['mean'],
255
+ 'lower_bound': bounds_data['lower_bound'],
256
+ 'upper_bound': bounds_data['upper_bound'],
257
+ 'tolerance': bounds_data['tolerance']
258
+ }
259
+
260
+ return {
261
+ 'poses_3d': poses_3d,
262
+ 'noisy_samples': noisy_samples,
263
+ 'bounds': bounds,
264
+ 'metadata': metadata,
265
+ 'ref_dir': str(ref_dir)
266
+ }
267
+
268
+
269
+ if __name__ == "__main__":
270
+ import argparse
271
+
272
+ parser = argparse.ArgumentParser(description='Process reference video for scoring')
273
+ parser.add_argument('--video', type=str, required=True, help='Path to reference video')
274
+ parser.add_argument('--exercise', type=str, default='pushup', help='Exercise type')
275
+ parser.add_argument('--output', type=str, default=None, help='Output directory')
276
+ parser.add_argument('--samples', type=int, default=100, help='Number of noisy samples')
277
+
278
+ args = parser.parse_args()
279
+
280
+ try:
281
+ result = process_reference_video(
282
+ args.video,
283
+ exercise_type=args.exercise,
284
+ output_dir=args.output,
285
+ n_samples=args.samples
286
+ )
287
+ print("\n" + "="*50)
288
+ print("SUCCESS!")
289
+ print("="*50)
290
+ print(f"Reference saved to: {result['output_dir']}")
291
+ except Exception as e:
292
+ print(f"\nERROR: {e}")
293
+ import traceback
294
+ traceback.print_exc()
295
+ sys.exit(1)
296
+
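A hedged end-to-end sketch of preparing and reloading a reference (the video path is a placeholder, and the heavy lifting depends on `demo/vis.py` and its model weights being available):

```python
# Sketch only: assumes the demo/vis.py pipeline and its checkpoints are installed.
from fitness_coach.reference_processor import process_reference_video, load_reference

# One-time preprocessing of a reference clip (hypothetical path).
result = process_reference_video(
    'videos/pushup_reference.mp4',
    exercise_type='pushup',
    n_samples=100,
)
print("Saved to:", result['output_dir'])

# Later runs only need the cached reference.
ref = load_reference('pushup')
print(ref['poses_3d'].shape, ref['metadata']['num_frames'])
```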
fitness_coach/temporal_align.py ADDED
@@ -0,0 +1,174 @@
1
+ """
2
+ Temporal Alignment using Dynamic Time Warping (DTW)
3
+ Aligns sequences of different lengths for comparison
4
+ """
5
+
6
+ import numpy as np
7
+ try:
8
+ from fastdtw import fastdtw
9
+ from scipy.spatial.distance import euclidean
10
+ HAS_FASTDTW = True
11
+ except ImportError:
12
+ HAS_FASTDTW = False
13
+ print("Warning: fastdtw not installed. Using simple interpolation instead.")
14
+ print("Install with: pip install fastdtw")
15
+
16
+
17
+ def align_sequences_dtw(seq1, seq2, distance_func=None):
18
+ """
19
+ Align two sequences using Dynamic Time Warping
20
+
21
+ Args:
22
+ seq1: First sequence [frames, ...]
23
+ seq2: Second sequence [frames, ...]
24
+ distance_func: Distance function (default: euclidean)
25
+
26
+ Returns:
27
+ aligned_seq1, aligned_seq2: Aligned sequences of same length
28
+ path: DTW alignment path
29
+ """
30
+ if not HAS_FASTDTW:
31
+ # Fallback: simple interpolation to same length
32
+ target_length = max(len(seq1), len(seq2))
33
+ from fitness_coach.utils import interpolate_sequence
34
+ if len(seq1.shape) == 3: # [frames, joints, coords]
35
+ aligned_seq1 = interpolate_sequence(seq1, target_length)
36
+ aligned_seq2 = interpolate_sequence(seq2, target_length)
37
+ else:
38
+ # Flatten for interpolation
39
+ original_shape1 = seq1.shape
40
+ original_shape2 = seq2.shape
41
+ seq1_flat = seq1.reshape(len(seq1), -1)
42
+ seq2_flat = seq2.reshape(len(seq2), -1)
43
+ aligned_seq1_flat = interpolate_sequence(seq1_flat, target_length)
44
+ aligned_seq2_flat = interpolate_sequence(seq2_flat, target_length)
45
+ aligned_seq1 = aligned_seq1_flat.reshape((target_length,) + original_shape1[1:])
46
+ aligned_seq2 = aligned_seq2_flat.reshape((target_length,) + original_shape2[1:])
47
+ return aligned_seq1, aligned_seq2, None
48
+
49
+ # Flatten sequences for DTW
50
+ seq1_flat = seq1.reshape(len(seq1), -1)
51
+ seq2_flat = seq2.reshape(len(seq2), -1)
52
+
53
+ # Use provided distance function or default
54
+ if distance_func is None:
55
+ distance_func = euclidean
56
+
57
+ # Compute DTW
58
+ distance, path = fastdtw(seq1_flat, seq2_flat, dist=distance_func)
59
+
60
+ # Create aligned sequences using the path
61
+ aligned_seq1_indices = [p[0] for p in path]
62
+ aligned_seq2_indices = [p[1] for p in path]
63
+
64
+ aligned_seq1 = seq1[aligned_seq1_indices]
65
+ aligned_seq2 = seq2[aligned_seq2_indices]
66
+
67
+ return aligned_seq1, aligned_seq2, path
68
+
69
+
70
+ def align_poses_sequences(poses1, poses2):
71
+ """
72
+ Align two pose sequences temporally
73
+
74
+ Args:
75
+ poses1: First pose sequence [frames, 17, 3]
76
+ poses2: Second pose sequence [frames, 17, 3]
77
+
78
+ Returns:
79
+ aligned_poses1, aligned_poses2: Aligned pose sequences
80
+ """
81
+ poses1 = np.array(poses1)
82
+ poses2 = np.array(poses2)
83
+
84
+ # Use DTW to align
85
+ aligned_poses1, aligned_poses2, _ = align_sequences_dtw(poses1, poses2)
86
+
87
+ return aligned_poses1, aligned_poses2
88
+
89
+
90
+ def find_phase_alignment(user_poses, ref_poses):
91
+ """
92
+ Find optimal phase alignment between user and reference sequences
93
+ Uses DTW to handle different speeds and timing
94
+
95
+ Args:
96
+ user_poses: User pose sequence [frames, 17, 3]
97
+ ref_poses: Reference pose sequence [frames, 17, 3]
98
+
99
+ Returns:
100
+ aligned_user, aligned_ref: Phase-aligned sequences
101
+ alignment_score: Quality of alignment (lower is better)
102
+ """
103
+ user_poses = np.array(user_poses)
104
+ ref_poses = np.array(ref_poses)
105
+
106
+ # Align sequences
107
+ aligned_user, aligned_ref, path = align_sequences_dtw(user_poses, ref_poses)
108
+
109
+ # Calculate alignment quality (mean distance after alignment)
110
+ if path is not None and HAS_FASTDTW:
111
+ # Calculate average distance along path
112
+ distances = []
113
+ for i, j in path:
114
+ dist = np.linalg.norm(user_poses[i] - ref_poses[j])
115
+ distances.append(dist)
116
+ alignment_score = np.mean(distances)
117
+ else:
118
+ # Fallback: mean distance between aligned sequences
119
+ alignment_score = np.mean(np.linalg.norm(aligned_user - aligned_ref, axis=2))
120
+
121
+ return aligned_user, aligned_ref, alignment_score
122
+
123
+
124
+ def resample_to_common_length(poses1, poses2, target_length=None):
125
+ """
126
+ Resample both sequences to common length
127
+
128
+ Args:
129
+ poses1: First pose sequence [frames, 17, 3]
130
+ poses2: Second pose sequence [frames, 17, 3]
131
+ target_length: Target length (default: average of both)
132
+
133
+ Returns:
134
+ resampled_poses1, resampled_poses2: Resampled sequences
135
+ """
136
+ from fitness_coach.utils import interpolate_sequence
137
+
138
+ poses1 = np.array(poses1)
139
+ poses2 = np.array(poses2)
140
+
141
+ if target_length is None:
142
+ target_length = (len(poses1) + len(poses2)) // 2
143
+
144
+ resampled_poses1 = interpolate_sequence(poses1, target_length)
145
+ resampled_poses2 = interpolate_sequence(poses2, target_length)
146
+
147
+ return resampled_poses1, resampled_poses2
148
+
149
+
150
+ if __name__ == "__main__":
151
+ # Test temporal alignment
152
+ print("Testing temporal alignment...")
153
+
154
+ # Create test sequences of different lengths
155
+ seq1 = np.random.randn(50, 17, 3)
156
+ seq2 = np.random.randn(75, 17, 3)
157
+
158
+ print(f"Original lengths: {len(seq1)} vs {len(seq2)}")
159
+
160
+ # Test alignment
161
+ aligned_seq1, aligned_seq2, path = align_sequences_dtw(seq1, seq2)
162
+ print(f"Aligned lengths: {len(aligned_seq1)} vs {len(aligned_seq2)}")
163
+
164
+ if path is not None:
165
+ print(f"DTW path length: {len(path)}")
166
+ else:
167
+ print("Using interpolation fallback")
168
+
169
+ # Test phase alignment
170
+ aligned_user, aligned_ref, score = find_phase_alignment(seq1, seq2)
171
+ print(f"Alignment score: {score:.4f}")
172
+
173
+ print("Temporal alignment tests passed!")
174
+
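A small sketch of aligning two clips of different lengths before frame-wise scoring (synthetic data; `fastdtw` is optional and the module falls back to interpolation when it is missing):

```python
# Illustrative only: random arrays stand in for real pose sequences.
import numpy as np

from fitness_coach.temporal_align import align_poses_sequences, resample_to_common_length

slow_rep = np.random.randn(90, 17, 3)   # e.g. a slower repetition
fast_rep = np.random.randn(60, 17, 3)   # e.g. a faster repetition

# DTW (or the interpolation fallback) returns two sequences of equal length.
aligned_a, aligned_b = align_poses_sequences(slow_rep, fast_rep)
assert len(aligned_a) == len(aligned_b)

# Simple resampling to a common frame count is also available.
resampled_a, resampled_b = resample_to_common_length(slow_rep, fast_rep)
print(len(aligned_a), len(resampled_a))
```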
fitness_coach/test_modules.py ADDED
@@ -0,0 +1,175 @@
1
+ """
2
+ Test script for fitness_coach modules
3
+ """
4
+
5
+ import numpy as np
6
+ import sys
7
+ import os
8
+
9
+ # Add parent directory to path
10
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
11
+
12
+ from fitness_coach.body_parts import (
13
+ get_body_part_joints,
14
+ get_joint_noise_level,
15
+ get_joints_for_exercise,
16
+ calculate_body_scale
17
+ )
18
+ from fitness_coach.utils import (
19
+ normalize_body_scale,
20
+ center_poses,
21
+ calculate_joint_distances,
22
+ interpolate_sequence
23
+ )
24
+ from fitness_coach.temporal_align import (
25
+ align_sequences_dtw,
26
+ align_poses_sequences
27
+ )
28
+ from fitness_coach.noise_scoring import (
29
+ create_noisy_samples,
30
+ score_with_statistical_bounds
31
+ )
32
+
33
+
34
+ def test_body_parts():
35
+ """Test body parts module"""
36
+ print("=" * 50)
37
+ print("Testing body_parts module...")
38
+ print("=" * 50)
39
+
40
+ # Test getting body part joints
41
+ right_arm = get_body_part_joints('right_arm')
42
+ print(f"Right arm joints: {right_arm}")
43
+ assert right_arm == [14, 15, 16], "Right arm joints incorrect"
44
+
45
+ # Test noise levels
46
+ hip_noise = get_joint_noise_level(0)
47
+ print(f"Hip noise level: {hip_noise}")
48
+ assert hip_noise == 0.02, "Hip noise level incorrect"
49
+
50
+ # Test exercise focus
51
+ pushup_parts = get_joints_for_exercise('pushup')
52
+ print(f"Push-up body parts: {pushup_parts}")
53
+ assert 'core' in pushup_parts, "Push-up should include core"
54
+
55
+ # Test body scale calculation
56
+ test_poses = np.random.randn(10, 17, 3)
57
+ scale = calculate_body_scale(test_poses)
58
+ print(f"Body scale: {scale:.4f}")
59
+ assert scale > 0, "Body scale should be positive"
60
+
61
+ print("[OK] body_parts module tests passed!\n")
62
+
63
+
64
+ def test_utils():
65
+ """Test utils module"""
66
+ print("=" * 50)
67
+ print("Testing utils module...")
68
+ print("=" * 50)
69
+
70
+ # Test normalization
71
+ test_poses = np.random.randn(10, 17, 3) * 10
72
+ normalized, scale = normalize_body_scale(test_poses)
73
+ print(f"Normalization: scale = {scale:.4f}")
74
+ assert normalized.shape == test_poses.shape, "Normalized shape should match"
75
+
76
+ # Test centering
77
+ centered = center_poses(test_poses)
78
+ hip_pos = centered[0, 0]
79
+ print(f"Centering: hip position = {hip_pos}")
80
+ assert np.allclose(hip_pos, [0, 0, 0]), "Hip should be at origin"
81
+
82
+ # Test distances
83
+ pose1 = test_poses[0]
84
+ pose2 = test_poses[1]
85
+ dists = calculate_joint_distances(pose1, pose2)
86
+ print(f"Joint distances: mean = {np.mean(dists):.4f}")
87
+ assert len(dists) == 17, "Should have 17 joint distances"
88
+
89
+ # Test interpolation
90
+ short_seq = np.random.randn(5, 17, 3)
91
+ long_seq = interpolate_sequence(short_seq, 10)
92
+ print(f"Interpolation: {len(short_seq)} -> {len(long_seq)} frames")
93
+ assert len(long_seq) == 10, "Interpolated length should be 10"
94
+
95
+ print("[OK] utils module tests passed!\n")
96
+
97
+
98
+ def test_temporal_align():
99
+ """Test temporal alignment module"""
100
+ print("=" * 50)
101
+ print("Testing temporal_align module...")
102
+ print("=" * 50)
103
+
104
+ # Create sequences of different lengths
105
+ seq1 = np.random.randn(30, 17, 3)
106
+ seq2 = np.random.randn(50, 17, 3)
107
+
108
+ print(f"Original lengths: {len(seq1)} vs {len(seq2)}")
109
+
110
+ # Test alignment
111
+ aligned_seq1, aligned_seq2, path = align_sequences_dtw(seq1, seq2)
112
+ print(f"Aligned lengths: {len(aligned_seq1)} vs {len(aligned_seq2)}")
113
+ assert len(aligned_seq1) == len(aligned_seq2), "Aligned sequences should have same length"
114
+
115
+ # Test pose sequence alignment
116
+ aligned_poses1, aligned_poses2 = align_poses_sequences(seq1, seq2)
117
+ print(f"Pose alignment: {len(aligned_poses1)} vs {len(aligned_poses2)}")
118
+ assert len(aligned_poses1) == len(aligned_poses2), "Aligned poses should have same length"
119
+
120
+ print("[OK] temporal_align module tests passed!\n")
121
+
122
+
123
+ def test_noise_scoring():
124
+ """Test noise scoring module"""
125
+ print("=" * 50)
126
+ print("Testing noise_scoring module...")
127
+ print("=" * 50)
128
+
129
+ # Create test data
130
+ ref_poses = np.random.randn(20, 17, 3)
131
+ user_poses = ref_poses + np.random.normal(0, 0.05, ref_poses.shape)
132
+
133
+ # Test noisy sample generation
134
+ noisy_samples = create_noisy_samples(ref_poses, n_samples=20)
135
+ print(f"Generated {len(noisy_samples)} noisy samples")
136
+ assert noisy_samples.shape == (20, 20, 17, 3), "Noisy samples shape incorrect"
137
+
138
+ # Test scoring
139
+ scores = score_with_statistical_bounds(user_poses, ref_poses)
140
+ print(f"Overall score: {scores['overall_score']:.2f}")
141
+ print(f"Body part scores: {list(scores['body_part_scores'].keys())}")
142
+
143
+ assert 'overall_score' in scores, "Should have overall_score"
144
+ assert 'body_part_scores' in scores, "Should have body_part_scores"
145
+ assert 0 <= scores['overall_score'] <= 100, "Score should be 0-100"
146
+
147
+ print("[OK] noise_scoring module tests passed!\n")
148
+
149
+
150
+ def main():
151
+ """Run all tests"""
152
+ print("\n" + "=" * 50)
153
+ print("FITNESS COACH MODULE TESTS")
154
+ print("=" * 50 + "\n")
155
+
156
+ try:
157
+ test_body_parts()
158
+ test_utils()
159
+ test_temporal_align()
160
+ test_noise_scoring()
161
+
162
+ print("=" * 50)
163
+ print("ALL TESTS PASSED! [OK]")
164
+ print("=" * 50)
165
+
166
+ except Exception as e:
167
+ print(f"\n[FAILED] TEST FAILED: {e}")
168
+ import traceback
169
+ traceback.print_exc()
170
+ sys.exit(1)
171
+
172
+
173
+ if __name__ == "__main__":
174
+ main()
175
+
fitness_coach/user_processor.py ADDED
@@ -0,0 +1,194 @@
1
+ """
2
+ User Video Processor
3
+ Processes user videos and extracts 3D poses for scoring
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import numpy as np
9
+ from pathlib import Path
10
+
11
+ # Add parent directory and demo directory to path
12
+ project_root = Path(__file__).parent.parent
13
+ sys.path.insert(0, str(project_root))
14
+ sys.path.insert(0, str(project_root / 'demo'))
15
+
16
+
17
+ def process_user_video(video_path, output_dir=None, cleanup=True):
18
+ """
19
+ Process a user video and extract 3D poses
20
+
21
+ Args:
22
+ video_path: Path to user video file
23
+ output_dir: Directory to save processed data (default: user_videos_cache/{video name}/)
24
+ cleanup: If True, remove intermediate files after processing
25
+
26
+ Returns:
27
+ Dictionary with paths and 3D poses
28
+ """
29
+ # Change to project root for imports to work correctly
30
+ original_cwd = os.getcwd()
31
+ os.chdir(project_root)
32
+
33
+ try:
34
+ # Import after changing directory
35
+ from demo.vis import get_pose2D, get_pose3D
36
+ finally:
37
+ os.chdir(original_cwd)
38
+
39
+ video_path = Path(video_path)
40
+ if not video_path.exists():
41
+ raise FileNotFoundError(f"Video not found: {video_path}")
42
+
43
+ # Set up output directory with caching
44
+ if output_dir is None:
45
+ output_dir = Path('user_videos_cache') / video_path.stem
46
+ else:
47
+ output_dir = Path(output_dir)
48
+
49
+ output_dir.mkdir(parents=True, exist_ok=True)
50
+
51
+ # Check if already processed (cache hit)
52
+ keypoints_3d_path = output_dir / 'keypoints_3D.npz'
53
+ if keypoints_3d_path.exists():
54
+ print(f"✓ Using cached processing for: {video_path.name}")
55
+ print(f" Cache location: {output_dir}")
56
+ keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
57
+ print(f" Loaded {len(keypoints_3d)} frames from cache\n")
58
+
59
+ return {
60
+ 'keypoints_3d': keypoints_3d,
61
+ 'poses_3d': keypoints_3d, # Alias for compatibility
62
+ 'keypoints_3d_path': str(keypoints_3d_path),
63
+ 'output_dir': str(output_dir),
64
+ 'num_frames': len(keypoints_3d)
65
+ }
66
+
67
+ print(f"Processing user video: {video_path.name}")
68
+ print(f"Output directory: {output_dir}")
69
+
70
+ # Format output directory string (both functions expect trailing slash)
71
+ # Use absolute path to avoid issues when changing directories
72
+ output_dir_abs = output_dir.resolve()
73
+ output_dir_str = str(output_dir_abs).replace('\\', '/')
74
+ if not output_dir_str.endswith('/'):
75
+ output_dir_str += '/'
76
+
77
+ video_path_abs = video_path.resolve()
78
+
79
+ # Change to project root for processing
80
+ os.chdir(project_root)
81
+
82
+ # Save original argv and temporarily clear it to avoid argparse conflicts
83
+ original_argv = sys.argv.copy()
84
+ sys.argv = [sys.argv[0]] # Keep only script name
85
+
86
+ try:
87
+ # Step 1: Extract 2D poses
88
+ print("\n[1/2] Extracting 2D poses...")
89
+ try:
90
+ # get_pose2D adds 'input_2D/' to output_dir (line 95 in vis.py)
91
+ get_pose2D(str(video_path_abs), output_dir_str)
92
+ except Exception as e:
93
+ print(f"Error in 2D pose extraction: {e}")
94
+ raise
95
+
96
+ # Step 2: Extract 3D poses
97
+ print("\n[2/2] Extracting 3D poses...")
98
+ try:
99
+ # get_pose3D looks for output_dir + 'input_2D/keypoints.npz' (line 190 in vis.py)
100
+ get_pose3D(str(video_path_abs), output_dir_str)
101
+ except Exception as e:
102
+ print(f"Error in 3D pose extraction: {e}")
103
+ raise
104
+ finally:
105
+ sys.argv = original_argv # Restore original argv
106
+ os.chdir(original_cwd)
107
+
108
+ # Step 3: Load 3D poses
109
+ # get_pose3D saves to output_dir + 'keypoints_3D.npz' (line 279 in vis.py)
110
+ keypoints_3d_path = output_dir_abs / 'keypoints_3D.npz'
111
+ if not keypoints_3d_path.exists():
112
+ raise FileNotFoundError(f"3D keypoints not found: {keypoints_3d_path}")
113
+
114
+ keypoints_3d = np.load(str(keypoints_3d_path), allow_pickle=True)['reconstruction']
115
+ print(f"Loaded {len(keypoints_3d)} frames of 3D poses")
116
+
117
+ # Convert to numpy array if needed
118
+ if isinstance(keypoints_3d, list):
119
+ keypoints_3d = np.array(keypoints_3d)
120
+
121
+ result = {
122
+ 'poses_3d': keypoints_3d,
123
+ 'output_dir': str(output_dir),
124
+ 'keypoints_3d_path': str(keypoints_3d_path),
125
+ 'num_frames': len(keypoints_3d)
126
+ }
127
+
128
+ # Cleanup intermediate files if requested
129
+ if cleanup:
130
+ # Keep only the 3D keypoints
131
+ import shutil
132
+ for item in output_dir.iterdir():
133
+ if item.is_dir() and item.name != 'input_2D': # Keep input_2D for debugging
134
+ shutil.rmtree(item, ignore_errors=True)
135
+ elif item.is_file() and item.name != 'keypoints_3D.npz':
136
+ item.unlink(missing_ok=True)
137
+
138
+ print(f"\n✓ User video processed successfully!")
139
+ print(f" Frames: {len(keypoints_3d)}")
140
+ print(f" Output: {output_dir}")
141
+
142
+ return result
143
+
144
+
145
+ def load_user_poses(keypoints_path):
146
+ """
147
+ Load user poses from a saved file
148
+
149
+ Args:
150
+ keypoints_path: Path to keypoints_3D.npz file
151
+
152
+ Returns:
153
+ poses_3d: Array of shape [frames, 17, 3]
154
+ """
155
+ keypoints_path = Path(keypoints_path)
156
+ if not keypoints_path.exists():
157
+ raise FileNotFoundError(f"Keypoints file not found: {keypoints_path}")
158
+
159
+ data = np.load(str(keypoints_path), allow_pickle=True)
160
+ poses_3d = data['reconstruction']
161
+
162
+ if isinstance(poses_3d, list):
163
+ poses_3d = np.array(poses_3d)
164
+
165
+ return poses_3d
166
+
167
+
168
+ if __name__ == "__main__":
169
+ import argparse
170
+
171
+ parser = argparse.ArgumentParser(description='Process user video for scoring')
172
+ parser.add_argument('--video', type=str, required=True, help='Path to user video')
173
+ parser.add_argument('--output', type=str, default=None, help='Output directory')
174
+ parser.add_argument('--keep-files', action='store_true', help='Keep intermediate files')
175
+
176
+ args = parser.parse_args()
177
+
178
+ try:
179
+ result = process_user_video(
180
+ args.video,
181
+ output_dir=args.output,
182
+ cleanup=not args.keep_files
183
+ )
184
+ print("\n" + "="*50)
185
+ print("SUCCESS!")
186
+ print("="*50)
187
+ print(f"3D poses extracted: {result['num_frames']} frames")
188
+ print(f"Saved to: {result['keypoints_3d_path']}")
189
+ except Exception as e:
190
+ print(f"\nERROR: {e}")
191
+ import traceback
192
+ traceback.print_exc()
193
+ sys.exit(1)
194
+
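A sketch of how a user clip could feed into scoring (paths are placeholders; `process_user_video` reuses the `user_videos_cache/` entry on repeat runs):

```python
# Sketch: assumes the same demo/vis.py dependencies as the reference processor.
from fitness_coach.user_processor import process_user_video
from fitness_coach.reference_processor import load_reference
from fitness_coach.temporal_align import align_poses_sequences
from fitness_coach.noise_scoring import score_with_statistical_bounds

user = process_user_video('videos/my_pushup.mp4')   # hypothetical input video
ref = load_reference('pushup')

# Bring both sequences to the same length, then score the user against the reference.
user_poses, ref_poses = align_poses_sequences(user['poses_3d'], ref['poses_3d'])
scores = score_with_statistical_bounds(user_poses, ref_poses)
print(scores['overall_score'], list(scores['body_part_scores'].keys()))
```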
fitness_coach/utils.py ADDED
@@ -0,0 +1,266 @@
1
+ """
2
+ Utility Functions for Pose Processing
3
+ Helper functions for normalization, distance calculations, and interpolation
4
+ """
5
+
6
+ import numpy as np
7
+
8
+
9
+ def normalize_body_scale(poses, reference_scale=None):
10
+ """
11
+ Normalize poses by body scale (hip-to-thorax distance)
12
+
13
+ Args:
14
+ poses: Array of shape [frames, 17, 3] or [17, 3]
15
+ reference_scale: Optional reference scale to normalize to
16
+
17
+ Returns:
18
+ Normalized poses, and the measured body scale (hip-to-thorax distance)
19
+ """
20
+ poses = np.array(poses)
21
+ original_shape = poses.shape
22
+
23
+ if len(poses.shape) == 2:
24
+ poses = poses[np.newaxis, :, :]
25
+
26
+ # Calculate body scale (hip to thorax distance)
27
+ hip_to_thorax = np.linalg.norm(poses[:, 0, :] - poses[:, 8, :], axis=1)
28
+ body_scale = np.mean(hip_to_thorax)
29
+
30
+ if body_scale == 0:
31
+ return poses.reshape(original_shape), 1.0
32
+
33
+ # Normalize
34
+ if reference_scale is not None:
35
+ scale_factor = reference_scale / body_scale
36
+ else:
37
+ scale_factor = 1.0 / body_scale
38
+
39
+ normalized_poses = poses * scale_factor
40
+
41
+ return normalized_poses.reshape(original_shape), body_scale
42
+
43
+
44
+ def center_poses(poses, joint_idx=0):
45
+ """
46
+ Center poses at a specific joint (default: hip)
47
+
48
+ Args:
49
+ poses: Array of shape [frames, 17, 3] or [17, 3]
50
+ joint_idx: Joint to center on (default: 0 = hip)
51
+
52
+ Returns:
53
+ Centered poses
54
+ """
55
+ poses = np.array(poses)
56
+ original_shape = poses.shape
57
+
58
+ if len(poses.shape) == 2:
59
+ poses = poses[np.newaxis, :, :]
60
+
61
+ # Subtract the reference joint position
62
+ centered = poses - poses[:, joint_idx:joint_idx+1, :]
63
+
64
+ return centered.reshape(original_shape)
65
+
66
+
67
+ def calculate_joint_distances(pose1, pose2):
68
+ """
69
+ Calculate Euclidean distances between corresponding joints
70
+
71
+ Args:
72
+ pose1: Array of shape [17, 3] or [frames, 17, 3]
73
+ pose2: Array of shape [17, 3] or [frames, 17, 3]
74
+
75
+ Returns:
76
+ Distances per joint: [17] or [frames, 17]
77
+ """
78
+ pose1 = np.array(pose1)
79
+ pose2 = np.array(pose2)
80
+
81
+ if len(pose1.shape) == 2 and len(pose2.shape) == 2:
82
+ # Single frame
83
+ distances = np.linalg.norm(pose1 - pose2, axis=1)
84
+ else:
85
+ # Multiple frames
86
+ if len(pose1.shape) == 2:
87
+ pose1 = pose1[np.newaxis, :, :]
88
+ if len(pose2.shape) == 2:
89
+ pose2 = pose2[np.newaxis, :, :]
90
+
91
+ distances = np.linalg.norm(pose1 - pose2, axis=2)
92
+
93
+ return distances
94
+
95
+
96
+ def calculate_joint_angles(poses, joint_pairs):
97
+ """
98
+ Calculate angles between joint pairs
99
+
100
+ Args:
101
+ poses: Array of shape [frames, 17, 3] or [17, 3]
102
+ joint_pairs: List of (parent, child) joint index tuples
103
+
104
+ Returns:
105
+ Angles in radians: [frames, n_pairs] or [n_pairs]
106
+ """
107
+ poses = np.array(poses)
108
+ original_shape = poses.shape
109
+
110
+ if len(poses.shape) == 2:
111
+ poses = poses[np.newaxis, :, :]
112
+
113
+ angles = []
114
+ for parent_idx, child_idx in joint_pairs:
115
+ # Vector from parent to child
116
+ vectors = poses[:, child_idx, :] - poses[:, parent_idx, :]
117
+
118
+ # Calculate angle (simplified - angle with vertical)
119
+ # For more accurate angles, would need to consider parent-child relationships
120
+ vertical = np.array([0, 1, 0])
121
+ vertical = np.tile(vertical, (vectors.shape[0], 1))
122
+
123
+ # Dot product and angle
124
+ dot_products = np.sum(vectors * vertical, axis=1)
125
+ vector_norms = np.linalg.norm(vectors, axis=1)
126
+ vertical_norm = np.linalg.norm(vertical, axis=1)
127
+
128
+ # Avoid division by zero
129
+ cosines = np.clip(dot_products / (vector_norms * vertical_norm + 1e-8), -1, 1)
130
+ angle = np.arccos(cosines)
131
+
132
+ angles.append(angle)
133
+
134
+ angles = np.array(angles).T # [frames, n_pairs]
135
+
136
+ if len(original_shape) == 2:
137
+ return angles[0]
138
+ return angles
139
+
140
+
141
+ def interpolate_sequence(poses, target_length):
142
+ """
143
+ Interpolate pose sequence to target length
144
+
145
+ Args:
146
+ poses: Array of shape [frames, 17, 3]
147
+ target_length: Target number of frames
148
+
149
+ Returns:
150
+ Interpolated poses: [target_length, 17, 3]
151
+ """
152
+ poses = np.array(poses)
153
+ original_length = poses.shape[0]
154
+
155
+ if original_length == target_length:
156
+ return poses
157
+
158
+ # Create interpolation indices
159
+ original_indices = np.linspace(0, original_length - 1, original_length)
160
+ target_indices = np.linspace(0, original_length - 1, target_length)
161
+
162
+ # Interpolate each joint and coordinate
163
+ interpolated = np.zeros((target_length, poses.shape[1], poses.shape[2]))
164
+
165
+ for joint_idx in range(poses.shape[1]):
166
+ for coord_idx in range(poses.shape[2]):
167
+ interpolated[:, joint_idx, coord_idx] = np.interp(
168
+ target_indices,
169
+ original_indices,
170
+ poses[:, joint_idx, coord_idx]
171
+ )
172
+
173
+ return interpolated
174
+
175
+
176
+ def smooth_poses(poses, window_size=5):
177
+ """
178
+ Apply moving average smoothing to pose sequence
179
+
180
+ Args:
181
+ poses: Array of shape [frames, 17, 3]
182
+ window_size: Size of smoothing window
183
+
184
+ Returns:
185
+ Smoothed poses
186
+ """
187
+ poses = np.array(poses)
188
+ if len(poses) < window_size:
189
+ return poses
190
+
191
+ # Pad with edge values
192
+ pad_width = window_size // 2
193
+ padded = np.pad(poses, ((pad_width, pad_width), (0, 0), (0, 0)), mode='edge')
194
+
195
+ # Apply moving average
196
+ smoothed = np.zeros_like(poses)
197
+ for i in range(len(poses)):
198
+ smoothed[i] = np.mean(padded[i:i+window_size], axis=0)
199
+
200
+ return smoothed
201
+
202
+
203
+ def align_poses_spatially(poses1, poses2):
204
+ """
205
+ Align two pose sequences spatially (rotation and translation)
206
+ Uses Procrustes alignment
207
+
208
+ Args:
209
+ poses1: Reference poses [frames, 17, 3]
210
+ poses2: Poses to align [frames, 17, 3]
211
+
212
+ Returns:
213
+ Aligned poses2
214
+ """
215
+ from scipy.spatial.transform import Rotation
216
+
217
+ poses1 = np.array(poses1)
218
+ poses2 = np.array(poses2)
219
+
220
+ # Center both
221
+ poses1_centered = center_poses(poses1)
222
+ poses2_centered = center_poses(poses2)
223
+
224
+ # For each frame, find optimal rotation
225
+ aligned = np.zeros_like(poses2_centered)
226
+
227
+ for frame_idx in range(len(poses1_centered)):
228
+ p1 = poses1_centered[frame_idx]
229
+ p2 = poses2_centered[frame_idx]
230
+
231
+ # Find rotation using SVD (Procrustes)
232
+ H = p2.T @ p1
233
+ U, S, Vt = np.linalg.svd(H)
234
+ R = Vt.T @ U.T
235
+
236
+ # Apply rotation
237
+ aligned[frame_idx] = p2 @ R.T
238
+
239
+ return aligned
240
+
241
+
242
+ if __name__ == "__main__":
243
+ # Test functions
244
+ print("Testing utility functions...")
245
+
246
+ # Create dummy pose data
247
+ test_poses = np.random.randn(10, 17, 3)
248
+
249
+ # Test normalization
250
+ normalized, scale = normalize_body_scale(test_poses)
251
+ print(f"Normalization: original scale ~{scale:.2f}")
252
+
253
+ # Test centering
254
+ centered = center_poses(test_poses)
255
+ print(f"Centering: hip position = {centered[0, 0]}")
256
+
257
+ # Test distances
258
+ dists = calculate_joint_distances(test_poses[0], test_poses[1])
259
+ print(f"Joint distances: mean = {np.mean(dists):.2f}")
260
+
261
+ # Test interpolation
262
+ interpolated = interpolate_sequence(test_poses, 20)
263
+ print(f"Interpolation: {test_poses.shape[0]} -> {interpolated.shape[0]} frames")
264
+
265
+ print("All tests passed!")
266
+
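A short sketch chaining these helpers in the order they are typically applied before comparison (random data for illustration):

```python
# Illustration of a typical preprocessing chain on synthetic poses.
import numpy as np

from fitness_coach.utils import (
    center_poses,
    normalize_body_scale,
    smooth_poses,
    interpolate_sequence,
)

poses = np.random.randn(40, 17, 3)

poses = center_poses(poses)                      # pelvis at the origin in every frame
poses, body_scale = normalize_body_scale(poses)  # scale by hip-to-thorax distance
poses = smooth_poses(poses, window_size=5)       # moving-average smoothing
poses = interpolate_sequence(poses, 60)          # resample to 60 frames
print(poses.shape, body_scale)
```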
fitness_coach/video_comparison.py ADDED
@@ -0,0 +1,196 @@
1
+ """
2
+ Generate side-by-side comparison videos of user vs reference 3D poses.
3
+ Uses the same visualization as the original pose3D images.
4
+ """
5
+ import numpy as np
6
+ import matplotlib.pyplot as plt
7
+ from matplotlib.animation import FuncAnimation, FFMpegWriter
8
+ import matplotlib
9
+ matplotlib.use('Agg') # Use non-interactive backend
10
+ from pathlib import Path
11
+ import argparse
12
+ import sys
13
+ import os
14
+
15
+ # Import the original show3Dpose function from demo/vis.py
16
+ # Add demo directory to path
17
+ project_root = Path(__file__).parent.parent
18
+ demo_path = str(project_root / 'demo')
19
+ if demo_path not in sys.path:
20
+ sys.path.insert(0, demo_path)
21
+
22
+ from vis import show3Dpose
23
+
24
+
25
+ def load_3d_poses(pose_file):
26
+ """Load 3D poses from npz file."""
27
+ data = np.load(pose_file, allow_pickle=True)
28
+ if 'reconstruction' in data:
29
+ poses = data['reconstruction']
30
+ elif 'poses_3d' in data:
31
+ poses = data['poses_3d']
32
+ else:
33
+ # Try to get the first array
34
+ poses = data[list(data.keys())[0]]
35
+
36
+ return poses
37
+
38
+
39
+ def plot_pose_3d(ax, pose, title):
40
+ """Plot a single 3D pose using the original show3Dpose function."""
41
+ ax.clear()
42
+
43
+ # Use the original show3Dpose function (same as pose3D images)
44
+ show3Dpose(pose, ax)
45
+
46
+ # Add title
47
+ ax.set_title(title, fontsize=12, fontweight='bold', pad=10)
48
+
49
+
50
+ def create_comparison_video(user_poses, reference_poses, output_path,
51
+ user_video_name="User", reference_name="Reference",
52
+ fps=30, elev=15, azim=70):
53
+ """
54
+ Create a side-by-side comparison video.
55
+
56
+ Args:
57
+ user_poses: User 3D poses (N_frames, 17, 3)
58
+ reference_poses: Reference 3D poses (N_frames, 17, 3)
59
+ output_path: Path to save output video
60
+ user_video_name: Display name for user
61
+ reference_name: Display name for reference
62
+ fps: Frames per second for output video
63
+ elev: Elevation angle for 3D view
64
+ azim: Azimuth angle for 3D view
65
+ """
66
+ print(f"\nCreating comparison video...")
67
+ print(f" User frames: {len(user_poses)}")
68
+ print(f" Reference frames: {len(reference_poses)}")
69
+
70
+ # Ensure same number of frames (use minimum)
71
+ n_frames = min(len(user_poses), len(reference_poses))
72
+ user_poses = user_poses[:n_frames]
73
+ reference_poses = reference_poses[:n_frames]
74
+
75
+ # Create figure with two subplots
76
+ fig = plt.figure(figsize=(16, 8))
77
+ ax1 = fig.add_subplot(121, projection='3d')
78
+ ax2 = fig.add_subplot(122, projection='3d')
79
+
80
+ # Add main title
81
+ fig.suptitle('Exercise Form Comparison', fontsize=16, fontweight='bold')
82
+
83
+ def update(frame):
84
+ """Update function for animation."""
85
+ plot_pose_3d(ax1, reference_poses[frame],
86
+ f'{reference_name}\nFrame {frame+1}/{n_frames}')
87
+ plot_pose_3d(ax2, user_poses[frame],
88
+ f'{user_video_name}\nFrame {frame+1}/{n_frames}')
89
+
90
+ if frame % 30 == 0:
91
+ print(f" Progress: {frame}/{n_frames} frames ({100*frame//n_frames}%)")
92
+
93
+ return ax1, ax2
94
+
95
+ # Create animation
96
+ anim = FuncAnimation(fig, update, frames=n_frames,
97
+ interval=1000/fps, blit=False)
98
+
99
+ # Save video - try MP4 first, fall back to GIF if FFmpeg not available
100
+ print(f" Saving video to: {output_path}")
101
+
102
+ # Try MP4 first (requires FFmpeg)
103
+ try:
104
+ writer = FFMpegWriter(fps=fps, bitrate=5000, codec='libx264')
105
+ anim.save(str(output_path), writer=writer, dpi=100)
106
+ print(f"✓ Video saved successfully!")
107
+ print(f" Output: {output_path}")
108
+ print(f" Duration: {n_frames/fps:.2f} seconds")
109
+ print(f" Format: MP4")
110
+ except (FileNotFoundError, OSError) as e:
111
+ # FFmpeg not found, try GIF instead
112
+ print(f" ⚠ FFmpeg not found, saving as GIF instead...")
113
+ gif_path = str(output_path).replace('.mp4', '.gif')
114
+
115
+ try:
116
+ # Use Pillow writer for GIF (built into matplotlib)
117
+ from matplotlib.animation import PillowWriter
118
+ writer = PillowWriter(fps=fps)
119
+ anim.save(gif_path, writer=writer, dpi=100)
120
+ print(f"✓ GIF saved successfully!")
121
+ print(f" Output: {gif_path}")
122
+ print(f" Duration: {n_frames/fps:.2f} seconds")
123
+ print(f" Format: GIF")
124
+ print(f"\n Note: For MP4 format, install FFmpeg:")
125
+ print(f" conda install -c conda-forge ffmpeg")
126
+ print(f" or: winget install ffmpeg")
127
+ except Exception as gif_error:
128
+ print(f"✗ Error saving GIF: {gif_error}")
129
+ print(f"\nOriginal MP4 error: {e}")
130
+ print("\nTo enable MP4 output, install FFmpeg:")
131
+ print(" conda install -c conda-forge ffmpeg")
132
+ print(" or: winget install ffmpeg")
133
+ raise
134
+ except Exception as e:
135
+ print(f"✗ Error saving video: {e}")
136
+ raise
137
+ finally:
138
+ plt.close(fig)
139
+
140
+
141
+ def main():
142
+ parser = argparse.ArgumentParser(
143
+ description='Generate side-by-side comparison video of 3D poses'
144
+ )
145
+ parser.add_argument('--user-poses', required=True,
146
+ help='Path to user 3D poses npz file')
147
+ parser.add_argument('--reference-poses', required=True,
148
+ help='Path to reference 3D poses npz file')
149
+ parser.add_argument('--output', default='comparison_output.mp4',
150
+ help='Output video path')
151
+ parser.add_argument('--user-name', default='Your Form',
152
+ help='Display name for user')
153
+ parser.add_argument('--reference-name', default='Correct Form',
154
+ help='Display name for reference')
155
+ parser.add_argument('--fps', type=int, default=30,
156
+ help='Frames per second')
157
+ parser.add_argument('--elev', type=float, default=15,
158
+ help='Elevation angle for 3D view (degrees)')
159
+ parser.add_argument('--azim', type=float, default=70,
160
+ help='Azimuth angle for 3D view (degrees)')
161
+
162
+ args = parser.parse_args()
163
+
164
+ print("="*60)
165
+ print("3D POSE COMPARISON VIDEO GENERATOR")
166
+ print("="*60)
167
+
168
+ # Load poses
169
+ print(f"\nLoading user poses from: {args.user_poses}")
170
+ user_poses = load_3d_poses(args.user_poses)
171
+ print(f" Loaded {len(user_poses)} frames")
172
+
173
+ print(f"\nLoading reference poses from: {args.reference_poses}")
174
+ reference_poses = load_3d_poses(args.reference_poses)
175
+ print(f" Loaded {len(reference_poses)} frames")
176
+
177
+ # Create video
178
+ create_comparison_video(
179
+ user_poses=user_poses,
180
+ reference_poses=reference_poses,
181
+ output_path=args.output,
182
+ user_video_name=args.user_name,
183
+ reference_name=args.reference_name,
184
+ fps=args.fps,
185
+ elev=args.elev,
186
+ azim=args.azim
187
+ )
188
+
189
+ print("\n" + "="*60)
190
+ print("Done!")
191
+ print("="*60)
192
+
193
+
194
+ if __name__ == '__main__':
195
+ main()
196
+
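Besides the CLI, the renderer can be driven from Python; a sketch with placeholder paths (MP4 output needs FFmpeg, otherwise the GIF fallback kicks in):

```python
# Programmatic sketch; file paths are hypothetical.
from fitness_coach.video_comparison import load_3d_poses, create_comparison_video

user_poses = load_3d_poses('user_videos_cache/my_pushup/keypoints_3D.npz')
ref_poses = load_3d_poses('references/pushup/keypoints_3D.npz')

create_comparison_video(
    user_poses=user_poses,
    reference_poses=ref_poses,
    output_path='comparison_output.mp4',
    user_video_name='Your Form',
    reference_name='Correct Form',
    fps=30,
)
```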
fitness_coach/video_from_images.py ADDED
@@ -0,0 +1,181 @@
1
+ """
2
+ Generate side-by-side comparison videos from existing pose3D images.
3
+ Much simpler - just combines the existing PNG images!
4
+ """
5
+ import numpy as np
6
+ from PIL import Image
7
+ import glob
8
+ from pathlib import Path
9
+ import argparse
10
+ from matplotlib.animation import FuncAnimation, FFMpegWriter, PillowWriter
11
+ import matplotlib.pyplot as plt
12
+ import matplotlib
13
+ matplotlib.use('Agg')
14
+
15
+
16
+ def load_image_sequence(image_dir):
17
+ """Load all PNG images from a directory, sorted by filename."""
18
+ image_dir = Path(image_dir)
19
+ if not image_dir.exists():
20
+ raise FileNotFoundError(f"Image directory not found: {image_dir}")
21
+
22
+ # Find all PNG files matching the pattern (e.g., 0000_3D.png, 0001_3D.png)
23
+ image_files = sorted(glob.glob(str(image_dir / '*_3D.png')))
24
+
25
+ if not image_files:
26
+ raise FileNotFoundError(f"No pose3D images found in {image_dir}")
27
+
28
+ print(f" Found {len(image_files)} images in {image_dir}")
29
+ return image_files
30
+
31
+
32
+ def create_comparison_video_from_images(user_image_dir, reference_image_dir, output_path,
33
+ user_video_name="Your Form", reference_name="Correct Form",
34
+ fps=30):
35
+ """
36
+ Create side-by-side video from existing pose3D images.
37
+
38
+ Args:
39
+ user_image_dir: Directory containing user pose3D images
40
+ reference_image_dir: Directory containing reference pose3D images
41
+ output_path: Path to save output video
42
+ user_video_name: Display name for user
43
+ reference_name: Display name for reference
44
+ fps: Frames per second
45
+ """
46
+ print(f"\nCreating comparison video from existing images...")
47
+
48
+ # Load image sequences
49
+ print(f"\nLoading user images from: {user_image_dir}")
50
+ user_images = load_image_sequence(user_image_dir)
51
+
52
+ print(f"\nLoading reference images from: {reference_image_dir}")
53
+ reference_images = load_image_sequence(reference_image_dir)
54
+
55
+ # Use minimum length to ensure both sequences are the same
56
+ n_frames = min(len(user_images), len(reference_images))
57
+ user_images = user_images[:n_frames]
58
+ reference_images = reference_images[:n_frames]
59
+
60
+ print(f"\n Using {n_frames} frames for comparison")
61
+
62
+ # Load first images to get dimensions
63
+ user_img = Image.open(user_images[0])
64
+ ref_img = Image.open(reference_images[0])
65
+
66
+ # Get dimensions (assuming they're similar)
67
+ img_height = max(user_img.height, ref_img.height)
68
+ img_width = max(user_img.width, ref_img.width)
69
+
70
+ # Create figure for side-by-side display
71
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
72
+ ax1.axis('off')
73
+ ax2.axis('off')
74
+
75
+ # Add titles
76
+ fig.suptitle('Exercise Form Comparison', fontsize=16, fontweight='bold')
77
+ ax1.set_title(f'{reference_name}', fontsize=14, fontweight='bold', pad=10)
78
+ ax2.set_title(f'{user_video_name}', fontsize=14, fontweight='bold', pad=10)
79
+
80
+ def update(frame):
81
+ """Update function for animation."""
82
+ # Load images
83
+ ref_img = Image.open(reference_images[frame])
84
+ user_img = Image.open(user_images[frame])
85
+
86
+ # Display images
87
+ ax1.clear()
88
+ ax1.imshow(ref_img)
89
+ ax1.axis('off')
90
+ ax1.set_title(f'{reference_name}\nFrame {frame+1}/{n_frames}',
91
+ fontsize=12, fontweight='bold', pad=10)
92
+
93
+ ax2.clear()
94
+ ax2.imshow(user_img)
95
+ ax2.axis('off')
96
+ ax2.set_title(f'{user_video_name}\nFrame {frame+1}/{n_frames}',
97
+ fontsize=12, fontweight='bold', pad=10)
98
+
99
+ if frame % 30 == 0:
100
+ print(f" Progress: {frame}/{n_frames} frames ({100*frame//n_frames}%)")
101
+
102
+ return ax1, ax2
103
+
104
+ # Create animation
105
+ anim = FuncAnimation(fig, update, frames=n_frames,
106
+ interval=1000/fps, blit=False)
107
+
108
+ # Save video - try MP4 first, fall back to GIF if FFmpeg not available
109
+ print(f"\n Saving video to: {output_path}")
110
+
111
+ try:
112
+ writer = FFMpegWriter(fps=fps, bitrate=5000, codec='libx264')
113
+ anim.save(str(output_path), writer=writer, dpi=100)
114
+ print(f"✓ Video saved successfully!")
115
+ print(f" Output: {output_path}")
116
+ print(f" Duration: {n_frames/fps:.2f} seconds")
117
+ print(f" Format: MP4")
118
+ except (FileNotFoundError, OSError) as e:
119
+ # FFmpeg not found, try GIF instead
120
+ print(f" ⚠ FFmpeg not found, saving as GIF instead...")
121
+ gif_path = str(output_path).replace('.mp4', '.gif')
122
+
123
+ try:
124
+ writer = PillowWriter(fps=fps)
125
+ anim.save(gif_path, writer=writer, dpi=100)
126
+ print(f"✓ GIF saved successfully!")
127
+ print(f" Output: {gif_path}")
128
+ print(f" Duration: {n_frames/fps:.2f} seconds")
129
+ print(f" Format: GIF")
130
+ print(f"\n Note: For MP4 format, install FFmpeg:")
131
+ print(f" conda install -c conda-forge ffmpeg")
132
+ except Exception as gif_error:
133
+ print(f"✗ Error saving GIF: {gif_error}")
134
+ raise
135
+ except Exception as e:
136
+ print(f"✗ Error saving video: {e}")
137
+ raise
138
+ finally:
139
+ plt.close(fig)
140
+
141
+
142
+ def main():
143
+ parser = argparse.ArgumentParser(
144
+ description='Generate side-by-side comparison video from existing pose3D images'
145
+ )
146
+ parser.add_argument('--user-images', required=True,
147
+ help='Directory containing user pose3D images (e.g., user_videos_cache/user/pose3D)')
148
+ parser.add_argument('--reference-images', required=True,
149
+ help='Directory containing reference pose3D images')
150
+ parser.add_argument('--output', default='comparison_from_images.mp4',
151
+ help='Output video path')
152
+ parser.add_argument('--user-name', default='Your Form',
153
+ help='Display name for user')
154
+ parser.add_argument('--reference-name', default='Correct Form',
155
+ help='Display name for reference')
156
+ parser.add_argument('--fps', type=int, default=30,
157
+ help='Frames per second')
158
+
159
+ args = parser.parse_args()
160
+
161
+ print("="*60)
162
+ print("3D POSE COMPARISON VIDEO FROM IMAGES")
163
+ print("="*60)
164
+
165
+ create_comparison_video_from_images(
166
+ user_image_dir=args.user_images,
167
+ reference_image_dir=args.reference_images,
168
+ output_path=args.output,
169
+ user_video_name=args.user_name,
170
+ reference_name=args.reference_name,
171
+ fps=args.fps
172
+ )
173
+
174
+ print("\n" + "="*60)
175
+ print("Done!")
176
+ print("="*60)
177
+
178
+
179
+ if __name__ == '__main__':
180
+ main()
181
+
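The image-based variant can be called the same way; a brief sketch with placeholder directories (they correspond to the `pose3D/` folders the demo pipeline writes, assuming those images were kept):

```python
# Sketch: reuses pose3D PNGs already written by the demo pipeline (hypothetical paths).
from fitness_coach.video_from_images import create_comparison_video_from_images

create_comparison_video_from_images(
    user_image_dir='user_videos_cache/my_pushup/pose3D',
    reference_image_dir='references/pushup/temp_processing/pose3D',
    output_path='comparison_from_images.mp4',
    fps=30,
)
```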