Removed the tracking option from pose estimation, as tracking is performed at the triangulation stage, with linear interpolation by default

davidpagnon 2024-09-17 23:05:53 +02:00
parent 394a162d76
commit 6fd237ecc9
5 changed files with 8 additions and 13 deletions

View File

@@ -44,7 +44,6 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
 display_detection = false
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
 save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
@@ -141,7 +140,7 @@ reorder_trc = false # only checked if multi_person analysis
 reproj_error_threshold_triangulation = 15 # px
 likelihood_threshold_triangulation= 0.3
 min_cameras_for_triangulation = 2
-interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
+interpolation = 'linear' #linear, slinear, quadratic, cubic, or none
 # 'none' if you don't want to interpolate missing points
 interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
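For reference, the new default configured above (interpolation = 'linear' with interp_if_gap_smaller_than = 10) means only short runs of missing keypoint coordinates get filled. A minimal sketch of that idea with pandas; the function name interpolate_small_gaps and the sample data are illustrative, not Pose2Sim's internal code:

import numpy as np
import pandas as pd

def interpolate_small_gaps(series, max_gap=10, method='linear'):
    """Fill NaN runs shorter than max_gap frames; leave longer gaps untouched."""
    is_nan = series.isna()
    run_id = (is_nan != is_nan.shift()).cumsum()        # label consecutive runs of NaN / non-NaN
    run_len = is_nan.groupby(run_id).transform('sum')   # length of the NaN run each frame belongs to
    small_gap = is_nan & (run_len < max_gap)
    filled = series.interpolate(method=method, limit_area='inside')
    out = series.copy()
    out[small_gap] = filled[small_gap]                  # keep interpolated values only inside small gaps
    return out

coords = pd.Series([1.0, np.nan, np.nan, 1.3] + [np.nan] * 12 + [2.0, 2.1])
print(interpolate_small_gaps(coords).to_list())         # the 2-frame gap is filled, the 12-frame gap stays NaN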

View File

@@ -44,8 +44,7 @@
 # mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 # det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-# tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
-# overwrite_pose = true # set to false if you don't want to recalculate pose estimation when it has already been done
+# # overwrite_pose = true # set to false if you don't want to recalculate pose estimation when it has already been done
 # display_detection = true
 # save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 # output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
@@ -141,7 +140,7 @@
 # reproj_error_threshold_triangulation = 15 # px
 # likelihood_threshold_triangulation= 0.3
 # min_cameras_for_triangulation = 2
-# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
+# interpolation = 'linear' #linear, slinear, quadratic, cubic, or none
 # # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
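The show_interp_indices flag in the hunk above reports, for each keypoint, which frames will need to be interpolated. As a rough illustration only (not the toolbox's own code), those are simply the frame indices where a coordinate is missing:

import numpy as np

y = np.array([1.0, np.nan, np.nan, 1.3, 1.4, np.nan, 1.6])  # one keypoint coordinate per frame
frames_to_interpolate = np.where(np.isnan(y))[0]
print(frames_to_interpolate.tolist())                        # [1, 2, 5]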

View File

@@ -44,8 +44,7 @@ participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and sca
 # mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 # det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-# tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
-# overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
+# # overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
 # display_detection = true
 # save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 # output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
@@ -141,7 +140,7 @@ keypoints_to_consider = 'all' # 'all' if all points should be considered, for ex
 # reproj_error_threshold_triangulation = 15 # px
 # likelihood_threshold_triangulation= 0.3
 # min_cameras_for_triangulation = 2
-# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
+# interpolation = 'linear' #linear, slinear, quadratic, cubic, or none
 # # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
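The fill_large_gaps_with option that closes this file's diff decides what happens to gaps too long to interpolate. A hedged sketch of the three choices using pandas; the helper name fill_large_gaps is hypothetical, not Pose2Sim's API:

import pandas as pd

def fill_large_gaps(series, how='last_value'):
    """Fill whatever NaNs remain after interpolation, per the config option."""
    if how == 'last_value':
        return series.ffill()        # repeat the last observed value
    if how == 'zeros':
        return series.fillna(0.0)    # replace remaining gaps with zeros
    if how == 'nan':
        return series                # leave the gaps as missing values
    raise ValueError(f"unknown fill_large_gaps_with option: {how!r}")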

View File

@@ -44,7 +44,6 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
 display_detection = true
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
 save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
@@ -52,7 +51,7 @@ output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a lis
 [synchronization]
-display_sync_plots = true # true or false (lowercase)
+display_sync_plots = false # true or false (lowercase)
 keypoints_to_consider = 'all' # 'all' if all points should be considered, for example if the participant did not perform any particicular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
 # ['RWrist', 'RElbow'] list of keypoint names if you want to specify keypoints with a sharp vertical motion.
 approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)
@@ -141,7 +140,7 @@ reorder_trc = false # only checked if multi_person analysis
 reproj_error_threshold_triangulation = 15 # px
 likelihood_threshold_triangulation= 0.3
 min_cameras_for_triangulation = 2
-interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
+interpolation = 'linear' #linear, slinear, quadratic, cubic, or none
 # 'none' if you don't want to interpolate missing points
 interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
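This file is the only one whose diff also touches the [synchronization] block (display_sync_plots switched to false). The surrounding parameters, keypoints_to_consider and approx_time_maxspeed, revolve around finding a sharp vertical motion; as a rough illustration of that idea only (not Pose2Sim's actual synchronization algorithm, and assuming a 60 fps capture), the frame of peak vertical speed of one keypoint can be located with a finite difference:

import numpy as np

def frame_of_max_vertical_speed(y, fps=60.0):
    """Return the frame index where the keypoint's vertical speed peaks (illustrative only)."""
    vy = np.gradient(np.asarray(y, dtype=float)) * fps  # finite-difference vertical velocity, px/s
    return int(np.nanargmax(np.abs(vy)))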

View File

@@ -44,7 +44,6 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
 display_detection = true
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
 save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
@@ -141,7 +140,7 @@ reorder_trc = false # only checked if multi_person analysis
 reproj_error_threshold_triangulation = 15 # px
 likelihood_threshold_triangulation= 0.3
 min_cameras_for_triangulation = 2
-interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
+interpolation = 'linear' #linear, slinear, quadratic, cubic, or none
 # 'none' if you don't want to interpolate missing points
 interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
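Across all five files, the det_frequency comment describes the same scheduling: run the expensive person detector only every N frames, propagate the previously detected bounding boxes in between, and still run keypoint estimation on every frame. A schematic sketch of that loop, where detect_persons, update_boxes and estimate_keypoints are hypothetical stand-ins for the real RTMLib calls:

def run_pose_estimation(frames, det_frequency, detect_persons, update_boxes, estimate_keypoints):
    """Scheduling sketch only: detection every det_frequency frames, keypoints on every frame."""
    boxes, results = [], []
    for i, frame in enumerate(frames):
        if i % det_frequency == 0:
            boxes = detect_persons(frame)        # full person detection (slow)
        else:
            boxes = update_boxes(boxes, frame)   # reuse / track previous bounding boxes (fast)
        results.append(estimate_keypoints(frame, boxes))
    return results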