From 19efec2723014b0725fcdd2d927b5a69a9ef442b Mon Sep 17 00:00:00 2001 From: David PAGNON Date: Sun, 31 Mar 2024 01:40:38 +0100 Subject: [PATCH] Faster and more robust multi-person analysis (#85) * tests synchro * draft * further draft * affinity ok * proposals okay, need to incorporate in Pose2Sim+tests * will transfer sorting across frames in triangulation in next commit * Lasts tests need to be done but seems to work pretty well * should all work smoothly * update readme * last checks * fixed linting issues * getting tired of being forgetful --- Pose2Sim/Pose2Sim.py | 3 - Pose2Sim/S00_Demo_Session/Config.toml | 58 +- .../S00_P00_SingleParticipant/Config.toml | 249 +++--- .../S00_P00_T00_StaticTrial/Config.toml | 249 +++--- .../S00_P00_T01_BalancingTrial/Config.toml | 251 +++--- .../S00_P01_MultiParticipants/Config.toml | 253 +++--- .../Config.toml | 249 +++--- .../Config.toml | 249 +++--- .../S00_P01_T02_Participants1-2/Config.toml | 249 +++--- .../Utilities/json_display_without_img.py | 4 +- Pose2Sim/Utilities/synchronize_cams_draft.py | 40 +- Pose2Sim/common.py | 60 +- Pose2Sim/personAssociation.py | 718 ++++++++++++------ Pose2Sim/synchronize_cams.py | 365 +++++---- Pose2Sim/triangulation.py | 147 +++- README.md | 15 +- 16 files changed, 1821 insertions(+), 1338 deletions(-) diff --git a/Pose2Sim/Pose2Sim.py b/Pose2Sim/Pose2Sim.py index 2c80afc..c06c353 100644 --- a/Pose2Sim/Pose2Sim.py +++ b/Pose2Sim/Pose2Sim.py @@ -287,9 +287,6 @@ def synchronization(config=None): start = time.time() currentDateAndTime = datetime.now() project_dir = os.path.realpath(config_dict.get('project').get('project_dir')) - seq_name = os.path.basename(project_dir) - frame_range = config_dict.get('project').get('frame_range') - frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0] logging.info("\n\n---------------------------------------------------------------------") logging.info("Camera synchronization") diff --git a/Pose2Sim/S00_Demo_Session/Config.toml b/Pose2Sim/S00_Demo_Session/Config.toml index be8688f..246bb1b 100644 --- a/Pose2Sim/S00_Demo_Session/Config.toml +++ b/Pose2Sim/S00_Demo_Session/Config.toml @@ -18,9 +18,8 @@ [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -nb_persons_to_detect = 2 # checked only if multi_person is selected -frame_rate = 120 # fps +multi_person = false # If false, only the main person in scene is analyzed. +frame_rate = 60 # fps frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, @@ -31,6 +30,26 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['< # Take heart, calibration is not that complicated once you get the hang of it! +[pose] +pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut' +pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII + #With mediapipe: BLAZEPOSE. + #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133. + #With deeplabcut: CUSTOM. See example at the end of the file. 
+# What follows has not been implemented yet
+overwrite_pose = false
+openpose_path = '' # only checked if OpenPose is used
+
+
+[synchronization]
+display_corr = true # true or false (lowercase)
+reset_sync = true # Recalculate synchronization even if already done
+# id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+                      # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
 [calibration]
 calibration_type = 'convert' # 'convert' or 'calculate'
 
@@ -90,30 +109,17 @@ calibration_type = 'convert' # 'convert' or 'calculate'
 # Coming soon!
 
 
-[pose]
-pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
-                        #With mediapipe: BLAZEPOSE.
-                        #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
-                        #With deeplabcut: CUSTOM. See example at the end of the file.
-# What follows has not been implemented yet
-overwrite_pose = false
-openpose_path = '' # only checked if OpenPose is used
-
-
-[synchronization]
-# COMING SOON!
-reset_sync = true # Recalculate synchronization even if already done
-speed_kind = 'y' # 'y' showed best performance.
-id_kpt = [10] # number from keypoint name in skeleton.py. RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
-
-
 [personAssociation]
-tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-# and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-reproj_error_threshold_association = 20 # px
-likelihood_threshold_association = 0.3
+   likelihood_threshold_association = 0.3
+
+   [personAssociation.single_person]
+   reproj_error_threshold_association = 20 # px
+   tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+                             # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+   [personAssociation.multi_person]
+   reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+   min_affinity = 0.2 # affinity below which a correspondence is ignored
 
 
 [triangulation]
diff --git a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/Config.toml
index a05ae03..01ef336 100644
--- a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/Config.toml
+++ b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/Config.toml
@@ -9,91 +9,29 @@
 # If a parameter is not found here, Pose2Sim will look for its value in the
 # Config.toml file of the level above. This way, you can set global
 # instructions for the Session and alter them for specific Participants or Trials.
-# 
-# If you wish to overwrite a parameter for a specific trial or participant,
+#
+# If you wish to overwrite a parameter for a specific trial or participant, 
 # edit its Config.toml file by uncommenting its key (e.g., [project])
 # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
 # [filtering.butterworth] and set cut_off_frequency = 10, etc.
- # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. +# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. 
-    ## in m -> unlike for intrinsics, NOT in mm!
-    # object_coords_3d = [[-2.0, 0.3, 0.0],
-    #                    [-2.0 , 0.0, 0.0],
-    #                    [-2.0, 0.0, 0.05],
-    #                    [-2.0, -0.3 , 0.0],
-    #                    [0.0, 0.3, 0.0],
-    #                    [0.0, 0.0, 0.0],
-    #                    [0.0, 0.0, 0.05],
-    #                    [0.0, -0.3, 0.0]]
-
-    # [calibration.calculate.extrinsics.keypoints]
-    ## Coming soon!
 
 
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 #                         #With mediapipe: BLAZEPOSE.
 #                         #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 #                         #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+#                    [-2.0 , 0.0, 0.0],
+#                    [-2.0, 0.0, 0.05],
+#                    [-2.0, -0.3 , 0.0],
+#                    [0.0, 0.3, 0.0],
+#                    [0.0, 0.0, 0.0],
+#                    [0.0, 0.0, 0.05],
+#                    [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!
 
 
 # [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored
 
 
 # [triangulation]
 # reorder_trc = false # only checked if multi_person analysis
 # reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation= 0.05
+# likelihood_threshold_triangulation= 0.3
 # min_cameras_for_triangulation = 2
 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
 # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
@@ -141,38 +139,39 @@
 # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
 # display_figures = false # true or false (lowercase)
 
-    # [filtering.butterworth]
-    # order = 4
-    # cut_off_frequency = 6 # Hz
-    # [filtering.kalman]
-    ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 # [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models +## Only works on BODY_25 and BODY_25B models # participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # participant_mass = 70.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T00_StaticTrial/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T00_StaticTrial/Config.toml index a05ae03..01ef336 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T00_StaticTrial/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T00_StaticTrial/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. 
+# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! 
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 #                         #With mediapipe: BLAZEPOSE.
 #                         #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 #                         #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet + +# [calibration.calculate.extrinsics.board] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] +# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle + +# [calibration.calculate.extrinsics.scene] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. +# # in m -> unlike for intrinsics, NOT in mm! +# object_coords_3d = [[-2.0, 0.3, 0.0], +# [-2.0 , 0.0, 0.0], +# [-2.0, 0.0, 0.05], +# [-2.0, -0.3 , 0.0], +# [0.0, 0.3, 0.0], +# [0.0, 0.0, 0.0], +# [0.0, 0.0, 0.05], +# [0.0, -0.3, 0.0]] + +# [calibration.calculate.extrinsics.keypoints] +# # Coming soon! # [personAssociation] -# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py -## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) -# reproj_error_threshold_association = 20 # px -# likelihood_threshold_association = 0.05 +# likelihood_threshold_association = 0.3 + +# [personAssociation.single_person] +# reproj_error_threshold_association = 20 # px +# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py +# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) + +# [personAssociation.multi_person] +# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm +# min_affinity = 0.2 # affinity below which a correspondence is ignored # [triangulation] # reorder_trc = false # only checked if multi_person analysis # reproj_error_threshold_triangulation = 15 # px -# likelihood_threshold_triangulation= 0.05 +# likelihood_threshold_triangulation= 0.3 # min_cameras_for_triangulation = 2 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none -## 'none' if you don't want to interpolate missing points +# # 'none' if you don't want to interpolate missing points # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower @@ -141,38 +139,39 @@ # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed # display_figures = false # true or false (lowercase) - # [filtering.butterworth] - # order = 4 - # cut_off_frequency = 6 # Hz - # [filtering.kalman] - ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? 
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 # [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models +## Only works on BODY_25 and BODY_25B models # participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # participant_mass = 70.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T01_BalancingTrial/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T01_BalancingTrial/Config.toml index d4e120b..0a539fe 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T01_BalancingTrial/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P00_SingleParticipant/S00_P00_T01_BalancingTrial/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. 
+# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! 
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 #                         #With mediapipe: BLAZEPOSE.
 #                         #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 #                         #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet + +# [calibration.calculate.extrinsics.board] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] +# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle + +# [calibration.calculate.extrinsics.scene] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. +# # in m -> unlike for intrinsics, NOT in mm! +# object_coords_3d = [[-2.0, 0.3, 0.0], +# [-2.0 , 0.0, 0.0], +# [-2.0, 0.0, 0.05], +# [-2.0, -0.3 , 0.0], +# [0.0, 0.3, 0.0], +# [0.0, 0.0, 0.0], +# [0.0, 0.0, 0.05], +# [0.0, -0.3, 0.0]] + +# [calibration.calculate.extrinsics.keypoints] +# # Coming soon! # [personAssociation] -# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py -## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) -# reproj_error_threshold_association = 20 # px -# likelihood_threshold_association = 0.05 +# likelihood_threshold_association = 0.3 + +# [personAssociation.single_person] +# reproj_error_threshold_association = 20 # px +# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py +# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) + +# [personAssociation.multi_person] +# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm +# min_affinity = 0.2 # affinity below which a correspondence is ignored # [triangulation] # reorder_trc = false # only checked if multi_person analysis # reproj_error_threshold_triangulation = 15 # px -# likelihood_threshold_triangulation= 0.05 +# likelihood_threshold_triangulation= 0.3 # min_cameras_for_triangulation = 2 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none -## 'none' if you don't want to interpolate missing points +# # 'none' if you don't want to interpolate missing points # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower @@ -139,40 +137,41 @@ [filtering] # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed -display_figures = true # true or false (lowercase) +display_figures = false # true or false (lowercase) - # [filtering.butterworth] - # order = 4 - # cut_off_frequency = 6 # Hz - # [filtering.kalman] - ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? 
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 # [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models +## Only works on BODY_25 and BODY_25B models # participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # participant_mass = 70.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ display_figures = true # true or false (lowercase) # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/Config.toml index 028d857..01ef336 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. +# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. 
['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! - - # [pose] # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut' -# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII, +# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII # #With mediapipe: BLAZEPOSE. # #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133. # #With deeplabcut: CUSTOM. See example at the end of the file. 
@@ -103,33 +41,93 @@


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
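(The 'scene' extrinsics method commented above reduces to a perspective-n-point problem: clicked 2D pixels of points with known 3D coordinates yield the camera pose. A minimal OpenCV sketch with made-up pixel values and intrinsics; Pose2Sim's actual implementation may differ:)

import numpy as np
import cv2

# known 3D scene points (m), as in the object_coords_3d example of these files
object_coords_3d = np.array([[-2.0, 0.3, 0.0], [-2.0, 0.0, 0.0], [-2.0, 0.0, 0.05], [-2.0, -0.3, 0.0],
                             [0.0, 0.3, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.05], [0.0, -0.3, 0.0]],
                            dtype=np.float32)
# corresponding manually clicked pixels (illustrative values only)
clicked_2d = np.array([[210., 650.], [220., 700.], [218., 672.], [232., 748.],
                       [905., 630.], [912., 688.], [910., 660.], [921., 740.]], dtype=np.float32)
K = np.array([[1500., 0., 960.], [0., 1500., 540.], [0., 0., 1.]])  # intrinsics from the intrinsics step
dist = np.zeros(4)

ok, r_vec, t_vec = cv2.solvePnP(object_coords_3d, clicked_2d, K, dist)
# r_vec and t_vec correspond to the 'rotation' and 'translation' entries of the calibration .toml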
+# moving_cameras = false # Not implemented yet + +# [calibration.calculate.extrinsics.board] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] +# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle + +# [calibration.calculate.extrinsics.scene] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. +# # in m -> unlike for intrinsics, NOT in mm! +# object_coords_3d = [[-2.0, 0.3, 0.0], +# [-2.0 , 0.0, 0.0], +# [-2.0, 0.0, 0.05], +# [-2.0, -0.3 , 0.0], +# [0.0, 0.3, 0.0], +# [0.0, 0.0, 0.0], +# [0.0, 0.0, 0.05], +# [0.0, -0.3, 0.0]] + +# [calibration.calculate.extrinsics.keypoints] +# # Coming soon! # [personAssociation] -# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py -## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) -# reproj_error_threshold_association = 20 # px -# likelihood_threshold_association = 0.05 +# likelihood_threshold_association = 0.3 + +# [personAssociation.single_person] +# reproj_error_threshold_association = 20 # px +# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py +# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) + +# [personAssociation.multi_person] +# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm +# min_affinity = 0.2 # affinity below which a correspondence is ignored # [triangulation] # reorder_trc = false # only checked if multi_person analysis # reproj_error_threshold_triangulation = 15 # px -# likelihood_threshold_triangulation= 0.05 +# likelihood_threshold_triangulation= 0.3 # min_cameras_for_triangulation = 2 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none -## 'none' if you don't want to interpolate missing points +# # 'none' if you don't want to interpolate missing points # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower @@ -141,38 +139,39 @@ # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed # display_figures = false # true or false (lowercase) - # [filtering.butterworth] - # order = 4 - # cut_off_frequency = 6 # Hz - # [filtering.kalman] - ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? 
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 # [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models -# participant_height = [1.21, 1.72] # m # float if single person, list of float if multi-person (same order as the Static trials) -# participant_mass = [25.0, 70.0] # kg +## Only works on BODY_25 and BODY_25B models +# participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) +# participant_mass = 70.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T00_StaticTrialParticipant1/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T00_StaticTrialParticipant1/Config.toml index 78fd8fe..8f7c602 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T00_StaticTrialParticipant1/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T00_StaticTrialParticipant1/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. 
+# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! 
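(The wholesale commenting-out above works because of the hierarchical lookup described at the top of these files: a key absent at the Trial level is searched at the Participant, then Session level. A rough sketch of such a fallback, illustrative only and not Pose2Sim's actual code:)

import toml

def lookup(key_path, config_files):
    # config_files ordered most specific first: Trial, Participant, Session
    for config_file in config_files:
        value = toml.load(config_file)
        try:
            for key in key_path:
                value = value[key]
            return value
        except KeyError:
            continue  # not set at this level, try the level above
    raise KeyError(f"{'.'.join(key_path)} not set at any level")

# frame_rate commented out at the Trial level -> the Session value is used
# lookup(('project', 'frame_rate'), ['Trial/Config.toml', 'Participant/Config.toml', 'Session/Config.toml'])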
# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet + +# [calibration.calculate.extrinsics.board] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] +# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle + +# [calibration.calculate.extrinsics.scene] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. +# # in m -> unlike for intrinsics, NOT in mm! +# object_coords_3d = [[-2.0, 0.3, 0.0], +# [-2.0 , 0.0, 0.0], +# [-2.0, 0.0, 0.05], +# [-2.0, -0.3 , 0.0], +# [0.0, 0.3, 0.0], +# [0.0, 0.0, 0.0], +# [0.0, 0.0, 0.05], +# [0.0, -0.3, 0.0]] + +# [calibration.calculate.extrinsics.keypoints] +# # Coming soon! # [personAssociation] -# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py -## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) -# reproj_error_threshold_association = 20 # px -# likelihood_threshold_association = 0.05 +# likelihood_threshold_association = 0.3 + +# [personAssociation.single_person] +# reproj_error_threshold_association = 20 # px +# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py +# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) + +# [personAssociation.multi_person] +# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm +# min_affinity = 0.2 # affinity below which a correspondence is ignored # [triangulation] # reorder_trc = false # only checked if multi_person analysis # reproj_error_threshold_triangulation = 15 # px -# likelihood_threshold_triangulation= 0.05 +# likelihood_threshold_triangulation= 0.3 # min_cameras_for_triangulation = 2 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none -## 'none' if you don't want to interpolate missing points +# # 'none' if you don't want to interpolate missing points # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower @@ -141,38 +139,39 @@ # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed # display_figures = false # true or false (lowercase) - # [filtering.butterworth] - # order = 4 - # cut_off_frequency = 6 # Hz - # [filtering.kalman] - ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? 
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models +## Only works on BODY_25 and BODY_25B models participant_height = 1.21 # m # float if single person, list of float if multi-person (same order as the Static trials) participant_mass = 25.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ participant_mass = 25.0 # kg # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T01_StaticTrialParticipant2/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T01_StaticTrialParticipant2/Config.toml index c1e8290..93eaea5 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T01_StaticTrialParticipant2/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T01_StaticTrialParticipant2/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - # [project] -# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -# nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +# multi_person = true # If false, only the main person in scene is analyzed. 
+# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! 
# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
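(For context on the [personAssociation.multi_person] keys appearing throughout these files: min_affinity discards weak correspondences, and the affinity itself is derived from distances between camera-to-keypoint rays. A minimal sketch of one pairwise term, with assumed names; the patch's actual implementation follows the EasyMocap approach and differs in detail:)

import numpy as np

def ray_ray_distance(c1, d1, c2, d2):
    # shortest distance between two rays (camera center c, unit direction d)
    n = np.cross(d1, d2)
    if np.linalg.norm(n) < 1e-9:  # near-parallel rays
        return np.linalg.norm(np.cross(c2 - c1, d1))
    return abs(np.dot(c2 - c1, n)) / np.linalg.norm(n)

def affinity_from_distance(dist, max_dist=0.1):
    # linear falloff; pairs whose affinity falls below min_affinity would be ignored
    return max(0.0, 1.0 - dist / max_dist)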
+# moving_cameras = false # Not implemented yet + +# [calibration.calculate.extrinsics.board] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] +# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle + +# [calibration.calculate.extrinsics.scene] +# show_reprojection_error = true # true or false (lowercase) +# extrinsics_extension = 'png' # any video or image extension +# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. +# # in m -> unlike for intrinsics, NOT in mm! +# object_coords_3d = [[-2.0, 0.3, 0.0], +# [-2.0 , 0.0, 0.0], +# [-2.0, 0.0, 0.05], +# [-2.0, -0.3 , 0.0], +# [0.0, 0.3, 0.0], +# [0.0, 0.0, 0.0], +# [0.0, 0.0, 0.05], +# [0.0, -0.3, 0.0]] + +# [calibration.calculate.extrinsics.keypoints] +# # Coming soon! # [personAssociation] -# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py -## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) -# reproj_error_threshold_association = 20 # px -# likelihood_threshold_association = 0.05 +# likelihood_threshold_association = 0.3 + +# [personAssociation.single_person] +# reproj_error_threshold_association = 20 # px +# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py +# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE) + +# [personAssociation.multi_person] +# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm +# min_affinity = 0.2 # affinity below which a correspondence is ignored # [triangulation] # reorder_trc = false # only checked if multi_person analysis # reproj_error_threshold_triangulation = 15 # px -# likelihood_threshold_triangulation= 0.05 +# likelihood_threshold_triangulation= 0.3 # min_cameras_for_triangulation = 2 # interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none -## 'none' if you don't want to interpolate missing points +# # 'none' if you don't want to interpolate missing points # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated # handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower @@ -141,38 +139,39 @@ # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed # display_figures = false # true or false (lowercase) - # [filtering.butterworth] - # order = 4 - # cut_off_frequency = 6 # Hz - # [filtering.kalman] - ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? 
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 [markerAugmentation] -# ## Only works on BODY_25 and BODY_25B models +## Only works on BODY_25 and BODY_25B models participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) participant_mass = 70.0 # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
## @@ -188,65 +187,65 @@ participant_mass = 70.0 # kg # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T02_Participants1-2/Config.toml b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T02_Participants1-2/Config.toml index f97a0c0..a118c09 100644 --- a/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T02_Participants1-2/Config.toml +++ b/Pose2Sim/S00_Demo_Session/S00_P01_MultiParticipants/S00_P01_T02_Participants1-2/Config.toml @@ -9,91 +9,29 @@ # If a parameter is not found here, Pose2Sim will look for its value in the # Config.toml file of the level above. This way, you can set global # instructions for the Session and alter them for specific Participants or Trials. -# -# If you wish to overwrite a parameter for a specific trial or participant, +# +# If you wish to overwrite a parameter for a specific trial or participant, # edit its Config.toml file by uncommenting its key (e.g., [project]) # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment # [filtering.butterworth] and set cut_off_frequency = 10, etc. - [project] -multi_person = true # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster). -nb_persons_to_detect = 2 # checked only if multi_person is selected -# frame_rate = 60 # FPS +multi_person = true # If false, only the main person in scene is analyzed. 
+# frame_rate = 60 # fps # frame_range = [] # For example [10,300], or [] for all frames ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, ## frame_range = [0.1, 2.0]*frame_rate = [6, 120] # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['', 'etc']. -# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] - - +## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial'] ## Take heart, calibration is not that complicated once you get the hang of it! -# [calibration] -# calibration_type = 'convert' # 'convert' or 'calculate' - - # [calibration.convert] - # convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv' - # [calibration.convert.qualisys] - # binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2 - # [calibration.convert.optitrack] # See readme for instructions - # [calibration.convert.vicon] # No parameter needed - # [calibration.convert.opencap] # No parameter needed - # [calibration.convert.easymocap] # No parameter needed - # [calibration.convert.biocv] # No parameter needed - # [calibration.convert.anipose] # No parameter needed - # [calibration.convert.freemocap] # No parameter needed - - - # [calibration.calculate] - ## Camera properties, theoretically need to be calculated only once in a camera lifetime - # [calibration.calculate.intrinsics] - # overwrite_intrinsics = false # overwrite (or not) if they have already been calculated? - # show_detection_intrinsics = true # true or false (lowercase) - # intrinsics_extension = 'jpg' # any video or image extension - # extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 ) - # intrinsics_corners_nb = [4,7] - # intrinsics_square_size = 60 # mm - - ## Camera placements, need to be done before every session - # [calibration.calculate.extrinsics] - # extrinsics_method = 'scene' # 'board', 'scene', 'keypoints' - ## 'board' should be large enough to be detected when laid on the floor. Not recommended. - ## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out. - ## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras. - - # moving_cameras = false # Not implemented yet - # calculate_extrinsics = true # true or false (lowercase) - - # [calibration.calculate.extrinsics.board] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - # extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h] - # extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle - - # [calibration.calculate.extrinsics.scene] - # show_reprojection_error = true # true or false (lowercase) - # extrinsics_extension = 'png' # any video or image extension - ## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane. - ## in m -> unlike for intrinsics, NOT in mm! - # object_coords_3d = [[-2.0, 0.3, 0.0], - # [-2.0 , 0.0, 0.0], - # [-2.0, 0.0, 0.05], - # [-2.0, -0.3 , 0.0], - # [0.0, 0.3, 0.0], - # [0.0, 0.0, 0.0], - # [0.0, 0.0, 0.05], - # [0.0, -0.3, 0.0]] - - # [calibration.calculate.extrinsics.keypoints] - ## Coming soon! 
# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@ nb_persons_to_detect = 2 # checked only if multi_person is selected


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # Pour plus tard aller chercher numéro depuis keypoint name dans skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Pris en compte uniquement si on a plusieurs keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+# [-2.0 , 0.0, 0.0],
+# [-2.0, 0.0, 0.05],
+# [-2.0, -0.3 , 0.0],
+# [0.0, 0.3, 0.0],
+# [0.0, 0.0, 0.0],
+# [0.0, 0.0, 0.05],
+# [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!


# [personAssociation]
-# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored


[triangulation]
-reorder_trc = true # only checked if multi_person analysis
+reorder_trc = false # only checked if multi_person analysis
# reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation= 0.05
+# likelihood_threshold_triangulation= 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
@@ -141,22 +139,22 @@ reorder_trc = true # only checked if multi_person analysis
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = false # true or false (lowercase)
- # [filtering.butterworth]
- # order = 4
- # cut_off_frequency = 6 # Hz
- # [filtering.kalman]
- ## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
- # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise - # smooth = true # should be true, unless you need real-time filtering - # [filtering.butterworth_on_speed] - # order = 4 - # cut_off_frequency = 10 # Hz - # [filtering.gaussian] - # sigma_kernel = 2 #px - # [filtering.LOESS] - # nb_values_used = 30 # = fraction of data used * nb frames - # [filtering.median] - # kernel_size = 9 +# [filtering.butterworth] +# order = 4 +# cut_off_frequency = 6 # Hz +# [filtering.kalman] +# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)? +# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise +# smooth = true # should be true, unless you need real-time filtering +# [filtering.butterworth_on_speed] +# order = 4 +# cut_off_frequency = 10 # Hz +# [filtering.gaussian] +# sigma_kernel = 2 #px +# [filtering.LOESS] +# nb_values_used = 30 # = fraction of data used * nb frames +# [filtering.median] +# kernel_size = 9 [markerAugmentation] @@ -167,12 +165,13 @@ participant_mass = [25.0, 70.0] # kg # [opensim] # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial'] -# # If this Config.toml file is at the Trial level, set to true or false (lowercase); -# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; -# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] +# # If this Config.toml file is at the Trial level, set to true or false (lowercase); +# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial']; +# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial'] # opensim_bin_path = 'C:\OpenSim 4.4\bin' + ## CUSTOM skeleton, if you trained your own DeepLabCut model for example. ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero. 
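(The nested [[pose.CUSTOM.children]] tables listed below encode the skeleton as a tree; after toml.load they become plain dicts and lists. A rough sketch of walking that structure — a hypothetical helper, not Pose2Sim's own parser:)

import toml

def flatten_skeleton(node, depth=0):
    # yield (name, id, depth) for a node and all its descendants
    yield node.get('name'), node.get('id'), depth
    for child in node.get('children', []):
        yield from flatten_skeleton(child, depth + 1)

# config = toml.load('Config.toml')
# for name, node_id, depth in flatten_skeleton(config['pose']['CUSTOM']):
#     print('  ' * depth, name, node_id)  # CHip None / RHip 12 / RKnee 14 / ...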
## @@ -188,65 +187,65 @@ participant_mass = [25.0, 70.0] # kg # name = "CHip" # id = "None" # [[pose.CUSTOM.children]] -# id = 12 # name = "RHip" +# id = 12 # [[pose.CUSTOM.children.children]] -# id = 14 # name = "RKnee" +# id = 14 # [[pose.CUSTOM.children.children.children]] -# id = 16 # name = "RAnkle" +# id = 16 # [[pose.CUSTOM.children.children.children.children]] -# id = 22 # name = "RBigToe" +# id = 22 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 23 # name = "RSmallToe" +# id = 23 # [[pose.CUSTOM.children.children.children.children]] -# id = 24 # name = "RHeel" +# id = 24 # [[pose.CUSTOM.children]] -# id = 11 # name = "LHip" +# id = 11 # [[pose.CUSTOM.children.children]] -# id = 13 # name = "LKnee" +# id = 13 # [[pose.CUSTOM.children.children.children]] -# id = 15 # name = "LAnkle" +# id = 15 # [[pose.CUSTOM.children.children.children.children]] -# id = 19 # name = "LBigToe" +# id = 19 # [[pose.CUSTOM.children.children.children.children.children]] -# id = 20 # name = "LSmallToe" +# id = 20 # [[pose.CUSTOM.children.children.children.children]] -# id = 21 # name = "LHeel" +# id = 21 # [[pose.CUSTOM.children]] -# id = 17 # name = "Neck" +# id = 17 # [[pose.CUSTOM.children.children]] -# id = 18 # name = "Head" +# id = 18 # [[pose.CUSTOM.children.children.children]] -# id = 0 # name = "Nose" +# id = 0 # [[pose.CUSTOM.children.children]] -# id = 6 # name = "RShoulder" +# id = 6 # [[pose.CUSTOM.children.children.children]] -# id = 8 # name = "RElbow" +# id = 8 # [[pose.CUSTOM.children.children.children.children]] -# id = 10 # name = "RWrist" +# id = 10 # [[pose.CUSTOM.children.children]] -# id = 5 # name = "LShoulder" +# id = 5 # [[pose.CUSTOM.children.children.children]] -# id = 7 # name = "LElbow" +# id = 7 # [[pose.CUSTOM.children.children.children.children]] -# id = 9 # name = "LWrist" +# id = 9 diff --git a/Pose2Sim/Utilities/json_display_without_img.py b/Pose2Sim/Utilities/json_display_without_img.py index 3555c1c..2bc5e9f 100644 --- a/Pose2Sim/Utilities/json_display_without_img.py +++ b/Pose2Sim/Utilities/json_display_without_img.py @@ -15,7 +15,7 @@ Usage: python -m json_display_without_img -j json_folder -W 1920 -H 1080 - python -m json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 - 30 + python -m json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 -f 30 import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'', image_width=1920, image_height = 1080) ''' @@ -60,7 +60,7 @@ def json_display_without_img_func(**args): Usage: json_display_without_img -j json_folder -W 1920 -H 1080 - json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 + json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 -f 30 import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'', image_width=1920, image_height = 1080) ''' diff --git a/Pose2Sim/Utilities/synchronize_cams_draft.py b/Pose2Sim/Utilities/synchronize_cams_draft.py index cb56308..1cbe383 100644 --- a/Pose2Sim/Utilities/synchronize_cams_draft.py +++ b/Pose2Sim/Utilities/synchronize_cams_draft.py @@ -114,10 +114,10 @@ def interpolate_nans(col, kind): def plot_time_lagged_cross_corr(camx, camy, ax): pearson_r = [camx.corr(camy.shift(lag)) for lag in range(-2*fps, 2*fps)] # lag -2 sec à +2 sec - offset = int(np.floor(len(pearson_r)*2)-np.argmax(pearson_r)) + offset = 
@@ -156,7 +156,6 @@ with open(os.path.join(pose_dir, 'coords'), 'wb') as fp:
 #############################
 
-
 # Vertical speed
 df_speed = []
 for i in range(len(json_dirs)):
@@ -199,6 +198,41 @@ else:
     raise ValueError('wrong values for id_kpt or weights_kpt')
 
 
+
+# camx = df_speed[1][16]
+# camy = df_speed[2][16]
+# camx = df_speed[1][10]
+# camy = df_speed[2][10]
+# camx = df_speed[1].sum(axis=1)
+# camy = df_speed[2].sum(axis=1)
+# camx.plot()
+# camy.plot()
+# plt.show()
+
+for i in range(25):
+    df_coords[1].iloc[:,i*2+1].plot(label='1')
+    df_coords[2].iloc[:,i*2+1].plot(label='2')
+    plt.title(i)
+    plt.legend()
+    plt.show()
+
+for i in range(25):
+    df_speed[1].iloc[:,i].plot(label='1')
+    df_speed[2].iloc[:,i].plot(label='2')
+    plt.title(i)
+    plt.legend()
+    plt.show()
+
+for i in range(4):
+    abs(df_speed[i]).sum(axis=1).plot(label=i)
+plt.legend()
+plt.show()
+
+df_speed[0].plot() # --> remove janky points
+plt.show()
+
+
+
 f, ax = plt.subplots(2,1)
 # speed
 camx.plot(ax=ax[0], label = f'cam {cam1_nb}')
diff --git a/Pose2Sim/common.py b/Pose2Sim/common.py
index aa65ae0..b0ca0a5 100644
--- a/Pose2Sim/common.py
+++ b/Pose2Sim/common.py
@@ -12,6 +12,7 @@ Functions shared between modules, and other utilities
 ## INIT
 import toml
+import json
 import numpy as np
 import re
 import cv2
@@ -37,6 +38,58 @@ __status__ = "Development"
 ## FUNCTIONS
+def common_items_in_list(list1, list2):
+    '''
+    Do two lists have any items in common at the same index?
+    Returns True or False
+    '''
+
+    for i, j in enumerate(list1):
+        if j == list2[i]:
+            return True
+    return False
+
+
+def bounding_boxes(js_file, margin_percent=0.1, around='extremities'):
+    '''
+    Compute the bounding boxes of the people in the json file.
+    Either around the extremities (with a margin)
+    or around the center of the person (with a margin).
+
+    INPUTS:
+    - js_file: json file
+    - margin_percent: margin around the person
+    - around: 'extremities' or 'center'
+
+    OUTPUT:
+    - bounding_boxes: list of bounding boxes [x_min, y_min, x_max, y_max]
+    '''
+
+    bounding_boxes = []
+    with open(js_file, 'r') as json_f:
+        js = json.load(json_f)
+        for people in range(len(js['people'])):
+            if len(js['people'][people]['pose_keypoints_2d']) < 3: continue
+            else:
+                x = js['people'][people]['pose_keypoints_2d'][0::3]
+                y = js['people'][people]['pose_keypoints_2d'][1::3]
+                x_min, x_max = min(x), max(x)
+                y_min, y_max = min(y), max(y)
+
+                if around == 'extremities':
+                    dx = (x_max - x_min) * margin_percent
+                    dy = (y_max - y_min) * margin_percent
+                    bounding_boxes.append([x_min-dx, y_min-dy, x_max+dx, y_max+dy])
+
+                elif around == 'center':
+                    x_mean, y_mean = np.mean(x), np.mean(y)
+                    x_size = (x_max - x_min) * (1 + margin_percent)
+                    y_size = (y_max - y_min) * (1 + margin_percent)
+                    bounding_boxes.append([x_mean - x_size/2, y_mean - y_size/2, x_mean + x_size/2, y_mean + y_size/2])
+
+    return bounding_boxes
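A quick usage sketch of the new bounding_boxes() helper (hypothetical file name; OpenPose-format json assumed):

from Pose2Sim.common import bounding_boxes

boxes = bounding_boxes('cam01_json/frame_0000.json', margin_percent=0.1, around='extremities')
for x_min, y_min, x_max, y_max in boxes:
    print(f'person box: ({x_min:.0f}, {y_min:.0f}) -> ({x_max:.0f}, {y_max:.0f})')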
+
+
 def retrieve_calib_params(calib_file):
     '''
     Compute projection matrices from toml calibration file.
@@ -48,6 +101,7 @@ def retrieve_calib_params(calib_file):
     - S: (h,w) vectors as list of 2x1 arrays
     - K: intrinsic matrices as list of 3x3 arrays
     - dist: distortion vectors as list of 4x1 arrays
+    - inv_K: inverse intrinsic matrices as list of 3x3 arrays
     - optim_K: intrinsic matrices for undistorting points as list of 3x3 arrays
     - R: rotation Rodrigues vectors as list of 3x1 arrays
+    - R_mat: rotation matrices as list of 3x3 arrays
     - T: translation vectors as list of 3x1 arrays
     '''
 
     calib = toml.load(calib_file)
-    S, K, dist, optim_K, R, T = [], [], [], [], [], []
+    S, K, dist, optim_K, inv_K, R, R_mat, T = [], [], [], [], [], [], [], []
     for c, cam in enumerate(calib.keys()):
         if cam != 'metadata':
             S.append(np.array(calib[cam]['size']))
             K.append(np.array(calib[cam]['matrix']))
             dist.append(np.array(calib[cam]['distortions']))
             optim_K.append(cv2.getOptimalNewCameraMatrix(K[c], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0])
+            inv_K.append(np.linalg.inv(K[c]))
             R.append(np.array(calib[cam]['rotation']))
+            R_mat.append(cv2.Rodrigues(R[c])[0])
             T.append(np.array(calib[cam]['translation']))
-    calib_params = {'S': S, 'K': K, 'dist': dist, 'optim_K': optim_K, 'R': R, 'T': T}
+    calib_params = {'S': S, 'K': K, 'dist': dist, 'inv_K': inv_K, 'optim_K': optim_K, 'R': R, 'R_mat': R_mat, 'T': T}
 
     return calib_params
diff --git a/Pose2Sim/personAssociation.py b/Pose2Sim/personAssociation.py
index d903280..298afc6 100644
--- a/Pose2Sim/personAssociation.py
+++ b/Pose2Sim/personAssociation.py
@@ -8,13 +8,17 @@
 ###########################################################################
 
 Openpose detects all people in the field of view.
-Which is the one of interest?
+- multi_person = false: Which is the one of interest?
+- multi_person = true: How to triangulate the same persons across views?
+  How to associate them across time frames? This is done in the
+  triangulation stage.
 
-This module tries all possible triangulations of a chosen anatomical
-point. If "multi_person" mode is not used, it chooses the person for
-whom the reprojection error is smallest. Otherwise, it selects all
-persons with a reprojection error smaller than a threshold, and then
-associates them across time frames by minimizing the displacement speed.
+If multi_person = false, this module tries all possible triangulations of a chosen
+anatomical point, and chooses the person for whom the reprojection error is smallest.
+
+If multi_person = true, it computes the distance between epipolar lines (camera to
+keypoint lines) for all persons detected in all views, and selects the best correspondences.
+The computation of the affinity matrix from the distances is inspired by the EasyMocap approach.
 
 INPUTS:
 - a calibration file (.toml extension)
@@ -58,97 +62,6 @@ __status__ = "Development"
 
 ## FUNCTIONS
-def common_items_in_list(list1, list2):
-    '''
-    Do two lists have any items in common at the same index?
-    Returns True or False
-    '''
-
-    for i, j in enumerate(list1):
-        if j == list2[i]:
-            return True
-    return False
-
-
-def min_with_single_indices(L, T):
-    '''
-    Let L be a list (size s) with T associated tuple indices (size s).
-    Select the smallest values of L, considering that
-    the next smallest value cannot have the same numbers
-    in the associated tuple as any of the previous ones.
- - Example: - L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ] - T = list(it.product(range(2),range(3))) - = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)] - - - 1st smallest value: 3 with tuple (2,3), index 11 - - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]: - 20 with tuple (0,0), index 0 - - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]: - 23 with tuple (1,1), index 5 - - INPUTS: - - L: list (size s) - - T: T associated tuple indices (size s) - - OUTPUTS: - - minL: list of smallest values of L, considering constraints on tuple indices - - argminL: list of indices of smallest values of L - - T_minL: list of tuples associated with smallest values of L - ''' - - minL = [np.min(L)] - argminL = [np.argmin(L)] - T_minL = [T[argminL[0]]] - - mask_tokeep = np.array([True for t in T]) - i=0 - while mask_tokeep.any()==True: - mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T]) - if mask_tokeep.any()==True: - indicesL_tokeep = np.where(mask_tokeep)[0] - minL += [np.min(np.array(L)[indicesL_tokeep])] - argminL += [indicesL_tokeep[np.argmin(np.array(L)[indicesL_tokeep])]] - T_minL += (T[argminL[i+1]],) - i+=1 - - return minL, argminL, T_minL - - -def sort_people(Q_kpt_old, Q_kpt): - ''' - Associate persons across frames - Persons' indices are sometimes swapped when changing frame - A person is associated to another in the next frame when they are at a small distance - - INPUTS: - - Q_kpt_old: list of arrays of 3D coordinates [X, Y, Z, 1.] for the previous frame - - Q_kpt: idem Q_kpt_old, for current frame - - OUTPUT: - - Q_kpt: array with reordered persons - - personsIDs_sorted: index of reordered persons - ''' - - # Generate possible person correspondences across frames - if len(Q_kpt_old) < len(Q_kpt): - Q_kpt_old = np.concatenate((Q_kpt_old, [[0., 0., 0., 1.]]*(len(Q_kpt)-len(Q_kpt_old)))) - personsIDs_comb = sorted(list(it.product(range(len(Q_kpt_old)),range(len(Q_kpt))))) - # Compute distance between persons from one frame to another - frame_by_frame_dist = [] - for comb in personsIDs_comb: - frame_by_frame_dist += [euclidean_distance(Q_kpt_old[comb[0]][:3],Q_kpt[comb[1]][:3])] - # sort correspondences by distance - _, index_best_comb, _ = min_with_single_indices(frame_by_frame_dist, personsIDs_comb) - index_best_comb.sort() - personsIDs_sorted = np.array(personsIDs_comb)[index_best_comb][:,1] - # rearrange persons - Q_kpt = np.array(Q_kpt)[personsIDs_sorted] - - return Q_kpt, personsIDs_sorted - - def persons_combinations(json_files_framef): ''' Find all possible combinations of detected persons' ids. @@ -179,10 +92,63 @@ def persons_combinations(json_files_framef): return personsIDs_comb +def triangulate_comb(comb, coords, P_all, calib_params, config): + ''' + Triangulate 2D points and compute reprojection error for a combination of cameras. 
+
+    INPUTS:
+    - comb: list of ints: combination of persons' ids for each camera
+    - coords: array: x, y, likelihood for each camera
+    - P_all: list of arrays: projection matrices for each camera
+    - calib_params: dict: calibration parameters
+    - config: dictionary from Config.toml file
+
+    OUTPUTS:
+    - error_comb: float: reprojection error
+    - comb: list of ints: combination of persons' ids for each camera
+    - Q_comb: array: 3D coordinates of the triangulated point
+    '''
+
+    undistort_points = config.get('triangulation').get('undistort_points')
+    likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
+
+    # Replace likelihood by 0. if under likelihood_threshold
+    coords[:,2][coords[:,2] < likelihood_threshold] = 0.
+    comb[coords[:,2] == 0.] = np.nan
+
+    # Filter coords and projection_matrices containing nans
+    coords_filt = [coords[i] for i in range(len(comb)) if not np.isnan(comb[i])]
+    projection_matrices_filt = [P_all[i] for i in range(len(comb)) if not np.isnan(comb[i])]
+    if undistort_points:
+        calib_params_R_filt = [calib_params['R'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_T_filt = [calib_params['T'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_K_filt = [calib_params['K'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_dist_filt = [calib_params['dist'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+
+    # Triangulate 2D points
+    x_files_filt, y_files_filt, likelihood_files_filt = np.array(coords_filt).T
+    Q_comb = weighted_triangulation(projection_matrices_filt, x_files_filt, y_files_filt, likelihood_files_filt)
+
+    # Reprojection (iterate over the remaining cameras, not over the 4 coordinates of Q_comb)
+    if undistort_points:
+        coords_2D_kpt_calc_filt = [cv2.projectPoints(np.array(Q_comb[:-1]), calib_params_R_filt[i], calib_params_T_filt[i], calib_params_K_filt[i], calib_params_dist_filt[i])[0] for i in range(len(coords_filt))]
+        x_calc = [coords_2D_kpt_calc_filt[i][0,0,0] for i in range(len(coords_filt))]
+        y_calc = [coords_2D_kpt_calc_filt[i][0,0,1] for i in range(len(coords_filt))]
+    else:
+        x_calc, y_calc = reprojection(projection_matrices_filt, Q_comb)
+
+    # Reprojection error
+    error_comb_per_cam = []
+    for cam in range(len(x_calc)):
+        q_file = (x_files_filt[cam], y_files_filt[cam])
+        q_calc = (x_calc[cam], y_calc[cam])
+        error_comb_per_cam.append( euclidean_distance(q_file, q_calc) )
+    error_comb = np.mean(error_comb_per_cam)
+
+    return error_comb, comb, Q_comb
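For context, a minimal sketch of the weighted DLT idea behind weighted_triangulation() (a simplified stand-in, not the actual implementation from Pose2Sim/common.py): each camera contributes two rows weighted by the keypoint likelihood, and the 3D point is the null-space of the stacked system.

import numpy as np

def weighted_triangulation_sketch(P_all, x_all, y_all, w_all):
    A = []
    for P, x, y, w in zip(P_all, x_all, y_all, w_all):
        A.append(w * (x * P[2] - P[0]))  # row from the x coordinate
        A.append(w * (y * P[2] - P[1]))  # row from the y coordinate
    _, _, Vt = np.linalg.svd(np.array(A))
    Q = Vt[-1]
    return Q / Q[3]  # homogeneous [X, Y, Z, 1]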
+
+
 def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_combinations, projection_matrices, tracked_keypoint_id, calib_params):
     '''
-    - if multi_person: Choose all the combination of cameras that give a reprojection error below a threshold
-    - else: Chooses the right person among the multiple ones found by
+    Chooses the right person among the multiple ones found by
     OpenPose & excludes cameras with wrong 2d-pose estimation.
 
     1. triangulate the tracked keypoint for all possible combinations of people,
@@ -203,9 +169,7 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_c
     - comb_errors_below_thresh: list of arrays of ints
     '''
 
-    multi_person = config.get('project').get('multi_person')
-    nb_persons_to_detect = config.get('project').get('nb_persons_to_detect')
-    error_threshold_tracking = config.get('personAssociation').get('reproj_error_threshold_association')
+    error_threshold_tracking = config.get('personAssociation').get('single_person').get('reproj_error_threshold_association')
     likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
     min_cameras_for_triangulation = config.get('triangulation').get('min_cameras_for_triangulation')
     undistort_points = config.get('triangulation').get('undistort_points')
@@ -219,30 +183,23 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_c
     while error_min > error_threshold_tracking and n_cams - nb_cams_off >= min_cameras_for_triangulation:
         # Try all persons combinations
         for combination in personsIDs_combinations:
-            #  Get x,y,likelihood values from files
-            x_files, y_files,likelihood_files = [], [], []
+            #  Get coords from files
+            coords = []
             for index_cam, person_nb in enumerate(combination):
-                with open(json_files_framef[index_cam], 'r') as json_f:
-                    js = json.load(json_f)
-                    try:
-                        x_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3] )
-                        y_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3+1] )
-                        likelihood_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3+2] )
-                    except:
-                        x_files.append(np.nan)
-                        y_files.append(np.nan)
-                        likelihood_files.append(np.nan)
+                try:
+                    js = read_json(json_files_framef[index_cam])
+                    coords.append(js[int(person_nb)][tracked_keypoint_id*3:tracked_keypoint_id*3+3])
+                except:
+                    coords.append([np.nan, np.nan, np.nan])
+            coords = np.array(coords)
 
             # undistort points
             if undistort_points:
-                points = np.array(tuple(zip(x_files,y_files))).reshape(-1, 1, 2).astype('float32')
+                points = np.array(coords)[:,None,:2]
                 undistorted_points = [cv2.undistortPoints(points[i], calib_params['K'][i], calib_params['dist'][i], None, calib_params['optim_K'][i]) for i in range(n_cams)]
-                x_files = np.array([[u[i][0][0] for i in range(len(u))] for u in undistorted_points]).squeeze()
-                y_files = np.array([[u[i][0][1] for i in range(len(u))] for u in undistorted_points]).squeeze()
-
-                # Replace likelihood by 0. if under likelihood_threshold
-                likelihood_files = [0. if lik < likelihood_threshold else lik for lik in likelihood_files]
-
+                coords[:,0] = np.array([[u[i][0][0] for i in range(len(u))] for u in undistorted_points]).squeeze()
+                coords[:,1] = np.array([[u[i][0][1] for i in range(len(u))] for u in undistorted_points]).squeeze()
+
             #  For each combination of persons, create subsets with "nb_cams_off" cameras excluded
             id_cams_off = list(it.combinations(range(len(combination)), nb_cams_off))
             combinations_with_cams_off = np.array([combination.copy()]*len(id_cams_off))
             for i, id in enumerate(id_cams_off):
                 combinations_with_cams_off[i,id] = np.nan
 
             # Try all subsets
-            error_comb = []
-            Q_comb = []
+            error_comb_all, comb_all, Q_comb_all = [], [], []
             for comb in combinations_with_cams_off:
-                # Filter x, y, likelihood, projection_matrices, with subset
-                x_files_filt = [x_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                y_files_filt = [y_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                likelihood_files_filt = [likelihood_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                projection_matrices_filt = [projection_matrices[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                if undistort_points:
-                    calib_params_R_filt = [calib_params['R'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_T_filt = [calib_params['T'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_K_filt = [calib_params['K'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_dist_filt = [calib_params['dist'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-
-                # Triangulate 2D points
-                Q_comb.append(weighted_triangulation(projection_matrices_filt, x_files_filt, y_files_filt, likelihood_files_filt))
-
-                # Reprojection
-                if undistort_points:
-                    coords_2D_kpt_calc_filt = [cv2.projectPoints(np.array(Q_comb[-1][:-1]), calib_params_R_filt[i], calib_params_T_filt[i], calib_params_K_filt[i], calib_params_dist_filt[i])[0] for i in range(n_cams-nb_cams_off)]
-                    x_calc = [coords_2D_kpt_calc_filt[i][0,0,0] for i in range(n_cams-nb_cams_off)]
-                    y_calc = [coords_2D_kpt_calc_filt[i][0,0,1] for i in range(n_cams-nb_cams_off)]
-                else:
-                    x_calc, y_calc = reprojection(projection_matrices_filt, Q_comb[-1])
-
-                # Reprojection error
-                error_comb_per_cam = []
-                for cam in range(len(x_calc)):
-                    q_file = (x_files_filt[cam], y_files_filt[cam])
-                    q_calc = (x_calc[cam], y_calc[cam])
-                    error_comb_per_cam.append( euclidean_distance(q_file, q_calc) )
-                error_comb.append( np.mean(error_comb_per_cam) )
-
-            if multi_person:
-                errors_below_thresh += [e for e in error_comb if e<error_threshold_tracking]
[... a few removed lines were lost in extraction here ...]
-            if len(errors_below_thresh)>0:
-                # sort combinations by error magnitude
-                errors_below_thresh_sorted = sorted(errors_below_thresh)
-                sorted_idx = np.array([errors_below_thresh.index(e) for e in errors_below_thresh_sorted])
-                comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_idx]
-                Q_kpt = np.array(Q_kpt)[sorted_idx]
-                # remove combinations with indices used several times for the same person
-                comb_errors_below_thresh = [c.tolist() for c in comb_errors_below_thresh]
-                comb = comb_errors_below_thresh.copy()
-                comb_ok = np.array([comb[0]])
-                for i, c1 in enumerate(comb):
-                    idx_ok = np.array([not(common_items_in_list(c1, c2)) for c2 in comb[1:]])
-                    try:
-                        comb = np.array(comb[1:])[idx_ok]
-                        comb_ok = np.concatenate((comb_ok, [comb[0]]))
-                    except:
-                        break
-                sorted_pruned_idx = [i for i, x in enumerate(comb_errors_below_thresh) for c in comb_ok if np.array_equal(x,c,equal_nan=True)]
-                errors_below_thresh = np.array(errors_below_thresh_sorted)[sorted_pruned_idx].tolist()
-                comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_pruned_idx].tolist()
-                Q_kpt = Q_kpt[sorted_pruned_idx].tolist()
+                error_comb, comb, Q_comb = triangulate_comb(comb, coords, projection_matrices, calib_params, config)
+                error_comb_all.append(error_comb)
+                comb_all.append(comb)
+                Q_comb_all.append(Q_comb)
 
-                # Remove indices already used for a person
-                personsIDs_combinations = np.array([personsIDs_combinations[i] for i in range(len(personsIDs_combinations))
-                        if not np.array(
-                            [personsIDs_combinations[i,j]==comb[j] for comb in comb_errors_below_thresh for j in range(len(comb))]
-                            ).any()])
-                if len(errors_below_thresh) >= len(personsIDs_combinations) or len(errors_below_thresh) >= nb_persons_to_detect:
-                    errors_below_thresh = errors_below_thresh[:nb_persons_to_detect]
-                    comb_errors_below_thresh = comb_errors_below_thresh[:nb_persons_to_detect]
-                    Q_kpt = Q_kpt[:nb_persons_to_detect]
-                    break
+            error_min = np.nanmin(error_comb_all)
+            comb_error_min = [comb_all[np.argmin(error_comb_all)]]
+            Q_kpt = [Q_comb_all[np.argmin(error_comb_all)]]
+            if error_min < error_threshold_tracking:
+                break
 
         nb_cams_off += 1
 
-    return errors_below_thresh, comb_errors_below_thresh, Q_kpt
+    return error_min, comb_error_min, Q_kpt
 
 
-def recap_tracking(config, error, nb_cams_excluded):
+def read_json(js_file):
+    '''
+    Read OpenPose json file
+    '''
+    with open(js_file, 'r') as json_f:
+        js = json.load(json_f)
+        json_data = []
+        for people in range(len(js['people'])):
+            if len(js['people'][people]['pose_keypoints_2d']) < 3: continue
+            else:
+                json_data.append(js['people'][people]['pose_keypoints_2d'])
+    return json_data
+
+
+def compute_rays(json_coord, calib_params, cam_id):
+    '''
+    Plucker coordinates of rays from camera to each joint of a person
+    Plucker coordinates: camera to keypoint line direction (size 3),
+                         moment: origin ^ line (size 3),
+                         and additionally the keypoint confidence
+
+    INPUTS:
+    - json_coord: x, y, likelihood for a person seen from a camera (list of 3*joint_nb)
+    - calib_params: calibration parameters from retrieve_calib_params('calib.toml')
+    - cam_id: camera id (int)
+
+    OUTPUT:
+    - plucker: array. nb joints * (6 plucker coordinates + 1 likelihood)
+    '''
+
+    x = json_coord[0::3]
+    y = json_coord[1::3]
+    likelihood = json_coord[2::3]
+
+    inv_K = calib_params['inv_K'][cam_id]
+    R_mat = calib_params['R_mat'][cam_id]
+    T = calib_params['T'][cam_id]
+
+    cam_center = -R_mat.T @ T
+    plucker = []
+    for i in range(len(x)):
+        q = np.array([x[i], y[i], 1])
+        norm_Q = R_mat.T @ (inv_K @ q - T)
+
+        line = norm_Q - cam_center
+        norm_line = line/np.linalg.norm(line)
+        moment = np.cross(cam_center, norm_line)
+        plucker.append(np.concatenate([norm_line, moment, [likelihood[i]]]))
+
+    return np.array(plucker)
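A toy check of the Plücker representation used by compute_rays(): for any point p on a ray, p × direction equals the moment, which is what lets rays be compared without an explicit intersection test.

import numpy as np

direction = np.array([0., 0., 1.])        # unit direction of the ray
cam_center = np.array([1., 0., 0.])       # a point the ray passes through
moment = np.cross(cam_center, direction)  # Plucker moment
p = cam_center + 3.7*direction            # any other point on the same ray
print(np.allclose(np.cross(p, direction), moment))  # True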
+
+
+def broadcast_line_to_line_distance(p0, p1):
+    '''
+    Compute the distance between two lines in 3D space.
+
+    see: https://faculty.sites.iastate.edu/jia/files/inline-files/plucker-coordinates.pdf
+    p0 = (l0,m0), p1 = (l1,m1)
+    dist = | (l0,m0) * (l1,m1) | / || l0 x l1 ||
+    (l0,m0) * (l1,m1) = l0 @ m1 + m0 @ l1 (reciprocal product)
+
+    No need to divide by the norm of the cross product of the directions, since we
+    don't need the actual distance but whether the lines are close to intersecting or not
+    => dist = | (l0,m0) * (l1,m1) |
+
+    INPUTS:
+    - p0: array(nb_persons_detected * 1 * nb_joints * 7 coordinates)
+    - p1: array(1 * nb_persons_detected * nb_joints * 7 coordinates)
+
+    OUTPUT:
+    - dist: distances between the two lines (not normalized).
+            array(nb_persons_0 * nb_persons_1 * nb_joints)
+    '''
+
+    product = np.sum(p0[..., :3] * p1[..., 3:6], axis=-1) + np.sum(p1[..., :3] * p0[..., 3:6], axis=-1)
+    dist = np.abs(product)
+
+    return dist
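And a numeric sanity check of the reciprocal product: it vanishes for intersecting lines and grows with their separation (toy lines; the likelihood column carried by the code above is omitted).

import numpy as np

def plucker(point, direction):
    d = direction / np.linalg.norm(direction)
    return np.concatenate([d, np.cross(point, d)])

l0 = plucker(np.array([0., 0., 0.]), np.array([1., 0., 0.]))  # the x axis
l1 = plucker(np.array([0., 0., 0.]), np.array([0., 1., 0.]))  # crosses l0 at the origin
l2 = plucker(np.array([0., 0., 2.]), np.array([0., 1., 0.]))  # passes 2 m above l0
recip = lambda p0, p1: abs(p0[:3] @ p1[3:] + p1[:3] @ p0[3:])
print(recip(l0, l1), recip(l0, l2))  # 0.0 2.0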
+
+
+def compute_affinity(all_json_data_f, calib_params, cum_persons_per_view, reconstruction_error_threshold=0.1):
+    '''
+    Compute the affinity between all the people in the different views.
+
+    The affinity is defined as 1 - distance/max_distance, with distance the
+    distance between epipolar lines in each view (reciprocal product of Plucker
+    coordinates).
+
+    Another approach would be to project one epipolar line onto the other camera
+    plane and compute the line to point distance, but it is more computationally
+    intensive (simple dot product vs. projection and distance calculation).
+
+    INPUTS:
+    - all_json_data_f: list of json data. For frame f, nb_views*nb_persons*(x,y,likelihood)*nb_joints
+    - calib_params: calibration parameters from retrieve_calib_params('calib.toml')
+    - cum_persons_per_view: cumulative number of persons per view
+    - reconstruction_error_threshold: maximum distance between epipolar lines to consider a match
+
+    OUTPUT:
+    - affinity: affinity matrix between all the people in the different views.
+                (nb_views*nb_persons_per_view * nb_views*nb_persons_per_view)
+    '''
+
+    # Compute plucker coordinates for all keypoints for each person in each view
+    # pluckers_f: dims=(camera, person, joint, 7 coordinates)
+    pluckers_f = []
+    for cam_id, json_cam in enumerate(all_json_data_f):
+        pluckers = []
+        for json_coord in json_cam:
+            plucker = compute_rays(json_coord, calib_params, cam_id) # LIMIT TO 15 JOINTS? json_coord[:15*3]
+            pluckers.append(plucker)
+        pluckers = np.array(pluckers)
+        pluckers_f.append(pluckers)
+
+    # Compute affinity matrix
+    distance = np.zeros((cum_persons_per_view[-1], cum_persons_per_view[-1])) + 2*reconstruction_error_threshold
+    for compared_cam0, compared_cam1 in it.combinations(range(len(all_json_data_f)), 2):
+        # skip when no detection for a camera
+        if cum_persons_per_view[compared_cam0] == cum_persons_per_view[compared_cam0+1] \
+            or cum_persons_per_view[compared_cam1] == cum_persons_per_view[compared_cam1+1]:
+            continue
+
+        # compute distance
+        p0 = pluckers_f[compared_cam0][:,None]  # add coordinate on second dimension
+        p1 = pluckers_f[compared_cam1][None,:]  # add coordinate on first dimension
+        dist = broadcast_line_to_line_distance(p0, p1)
+        likelihood = np.sqrt(p0[..., -1] * p1[..., -1])
+        mean_weighted_dist = np.sum(dist*likelihood, axis=-1)/(1e-5 + likelihood.sum(axis=-1)) # array(nb_persons_0 * nb_persons_1)
+
+        # populate distance matrix
+        distance[cum_persons_per_view[compared_cam0]:cum_persons_per_view[compared_cam0+1], \
+                 cum_persons_per_view[compared_cam1]:cum_persons_per_view[compared_cam1+1]] \
+                 = mean_weighted_dist
+        distance[cum_persons_per_view[compared_cam1]:cum_persons_per_view[compared_cam1+1], \
+                 cum_persons_per_view[compared_cam0]:cum_persons_per_view[compared_cam0+1]] \
+                 = mean_weighted_dist.T
+
+    # compute affinity matrix and clamp it to zero when distance > reconstruction_error_threshold
+    distance[distance > reconstruction_error_threshold] = reconstruction_error_threshold
+    affinity = 1 - distance / reconstruction_error_threshold
+
+    return affinity
+
+
+def circular_constraint(cum_persons_per_view):
+    '''
+    A person can be matched only with themselves in the same view, and with any
+    person from other views
+
+    INPUT:
+    - cum_persons_per_view: cumulative number of persons per view
+
+    OUTPUT:
+    - circ_constraint: circular constraint matrix
+    '''
+
+    circ_constraint = np.identity(cum_persons_per_view[-1])
+    for i in range(len(cum_persons_per_view)-1):
+        circ_constraint[cum_persons_per_view[i]:cum_persons_per_view[i+1], cum_persons_per_view[i+1]:cum_persons_per_view[-1]] = 1
+        circ_constraint[cum_persons_per_view[i+1]:cum_persons_per_view[-1], cum_persons_per_view[i]:cum_persons_per_view[i+1]] = 1
+
+    return circ_constraint
+
+
+def SVT(matrix, threshold):
+    '''
+    Find a low-rank approximation of the matrix using Singular Value Thresholding.
+
+    INPUTS:
+    - matrix: matrix to decompose
+    - threshold: threshold for singular values
+
+    OUTPUT:
+    - matrix_thresh: low-rank approximation of the matrix
+    '''
+
+    U, s, Vt = np.linalg.svd(matrix) # decompose matrix
+    s_thresh = np.maximum(s - threshold, 0) # set smallest singular values to zero
+    matrix_thresh = U @ np.diag(s_thresh) @ Vt # recompose matrix
+
+    return matrix_thresh
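A small numeric illustration of singular value thresholding, the operation SVT() performs: a noisy rank-1 matrix is pushed back towards low rank (toy data).

import numpy as np

rng = np.random.default_rng(0)
rank1 = np.outer([1., 1., 0., 0.], [1., 1., 0., 0.])
noisy = rank1 + 0.05*rng.standard_normal((4, 4))
U, s, Vt = np.linalg.svd(noisy)
s_thresh = np.maximum(s - 0.3, 0)  # small singular values -> 0
denoised = U @ np.diag(s_thresh) @ Vt
print(np.round(np.linalg.svd(denoised)[1], 2))  # essentially one non-zero singular value left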
+
+
+def matchSVT(affinity, cum_persons_per_view, circ_constraint, max_iter = 20, w_rank = 50, tol = 1e-4, w_sparse=0.1):
+    '''
+    Find a low-rank approximation of 'affinity' while satisfying the circular constraint.
+
+    INPUTS:
+    - affinity: affinity matrix between all the people in the different views
+    - cum_persons_per_view: cumulative number of persons per view
+    - circ_constraint: circular constraint matrix
+    - max_iter: maximum number of iterations
+    - w_rank: threshold for singular values
+    - tol: tolerance for convergence
+    - w_sparse: regularization parameter
+
+    OUTPUT:
+    - new_aff: low-rank approximation of the affinity matrix
+    '''
+
+    new_aff = affinity.copy()
+    N = new_aff.shape[0]
+    index_diag = np.arange(N)
+    new_aff[index_diag, index_diag] = 0.
+    # new_aff = (new_aff + new_aff.T)/2 # symmetric by construction
+
+    Y = np.zeros_like(new_aff) # Initial deviation matrix / residual
+    W = w_sparse - new_aff # Initial sparse matrix / regularization (prevent overfitting)
+    mu = 64 # initial step size
+
+    for iter_n in range(max_iter):
+        new_aff0 = new_aff.copy()
+
+        Q = new_aff + Y*1.0/mu
+        Q = SVT(Q, w_rank/mu)
+        new_aff = Q - (W + Y)/mu
+
+        # Project X onto dimGroups
+        for i in range(len(cum_persons_per_view) - 1):
+            ind1, ind2 = cum_persons_per_view[i], cum_persons_per_view[i + 1]
+            new_aff[ind1:ind2, ind1:ind2] = 0
+
+        # Reset diagonal elements to one and ensure X is within valid range [0, 1]
+        new_aff[index_diag, index_diag] = 1.
+        new_aff[new_aff < 0] = 0
+        new_aff[new_aff > 1] = 1
+
+        # Enforce circular constraint
+        new_aff = new_aff * circ_constraint
+        new_aff = (new_aff + new_aff.T) / 2 # kept just in case X loses its symmetry during optimization
+        Y = Y + mu * (new_aff - Q)
+
+        # Compute convergence criteria: break if new_aff is close enough to Q and no evolution anymore
+        pRes = np.linalg.norm(new_aff - Q) / N # primal residual (diff between new_aff and SVT result)
+        dRes = mu * np.linalg.norm(new_aff - new_aff0) / N # dual residual (diff between new_aff and previous new_aff)
+        if pRes < tol and dRes < tol:
+            break
+        if pRes > 10 * dRes: mu = 2 * mu
+        elif dRes > 10 * pRes: mu = mu / 2
+
+    return new_aff
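To make the moving parts concrete, a hypothetical toy run of the association step (2 views with 2 persons each), assuming the helpers above are importable:

import numpy as np
from Pose2Sim.personAssociation import circular_constraint, matchSVT

cum_persons_per_view = np.array([0, 2, 4])
affinity = np.array([[1. , 0. , 0.9, 0.1],
                     [0. , 1. , 0.2, 0.8],
                     [0.9, 0.2, 1. , 0. ],
                     [0.1, 0.8, 0. , 1. ]])
circ_constraint = circular_constraint(cum_persons_per_view)
new_aff = matchSVT(affinity*circ_constraint, cum_persons_per_view, circ_constraint)
print(np.round(new_aff, 2))  # cross-view blocks pushed towards a clean one-to-one matching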
+
+
+def person_index_per_cam(affinity, cum_persons_per_view, min_cameras_for_triangulation):
+    '''
+    For each detected person, gives their index for each camera
+
+    INPUTS:
+    - affinity: affinity matrix between all the people in the different views
+    - cum_persons_per_view: cumulative number of persons per view
+    - min_cameras_for_triangulation: exclude proposals if less than N cameras see them
+
+    OUTPUT:
+    - proposals: 2D array: n_persons * n_cams
+    '''
+
+    # index of the max affinity for each group (-1 if no detection)
+    proposals = []
+    for row in range(affinity.shape[0]):
+        proposal_row = []
+        for cam in range(len(cum_persons_per_view)-1):
+            id_persons_per_view = affinity[row, cum_persons_per_view[cam]:cum_persons_per_view[cam+1]]
+            proposal_row += [np.argmax(id_persons_per_view) if (len(id_persons_per_view)>0 and max(id_persons_per_view)>0) else -1]
+        proposals.append(proposal_row)
+    proposals = np.array(proposals, dtype=float)
+
+    # remove duplicates and order
+    proposals, nb_detections = np.unique(proposals, axis=0, return_counts=True)
+    proposals = proposals[np.argsort(nb_detections)[::-1]]
+
+    # remove row if any value is the same in previous rows at same index (nan!=nan so nan ignored)
+    proposals[proposals==-1] = np.nan
+    mask = np.ones(proposals.shape[0], dtype=bool)
+    for i in range(1, len(proposals)):
+        mask[i] = ~np.any(proposals[i] == proposals[:i], axis=0).any()
+    proposals = proposals[mask]
+
+    # remove identifications if less than N cameras see them
+    nb_cams_per_person = [np.count_nonzero(~np.isnan(p)) for p in proposals]
+    proposals = np.array([p for (n,p) in zip(nb_cams_per_person, proposals) if n >= min_cameras_for_triangulation])
+
+    return proposals
+
+
+def rewrite_json_files(json_tracked_files_f, json_files_f, proposals, n_cams):
+    '''
+    Write new json files with correct association of people across cameras.
+
+    INPUTS:
+    - json_tracked_files_f: list of strings: json files to write
+    - json_files_f: list of strings: json files to read
+    - proposals: 2D array: n_persons * n_cams
+    - n_cams: int: number of cameras
+
+    OUTPUT:
+    - json files with correct association of people across cameras
+    '''
+
+    for cam in range(n_cams):
+        with open(json_tracked_files_f[cam], 'w') as json_tracked_f:
+            with open(json_files_f[cam], 'r') as json_f:
+                js = json.load(json_f)
+                js_new = js.copy()
+                js_new['people'] = []
+                for new_comb in proposals:
+                    if not np.isnan(new_comb[cam]):
+                        js_new['people'] += [js['people'][int(new_comb[cam])]]
+                    else:
+                        js_new['people'] += [{}]
+            json_tracked_f.write(json.dumps(js_new))
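Continuing the toy run above, person_index_per_cam() then yields one row per reconstructed person, holding their detection index in each view (nan if unseen):

from Pose2Sim.personAssociation import person_index_per_cam

proposals = person_index_per_cam(new_aff, cum_persons_per_view, min_cameras_for_triangulation=2)
print(proposals)  # expected [[0. 0.] [1. 1.]]: detection 0 in both views is one person, detection 1 the other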
+
+
+def recap_tracking(config, error=0, nb_cams_excluded=0):
     '''
     Print a message giving statistics on reprojection errors (in pixel and in m)
     as well as the number of cameras that had to be excluded to reach threshold
@@ -352,27 +556,39 @@
     # Read config
     project_dir = config.get('project').get('project_dir')
     session_dir = os.path.realpath(os.path.join(project_dir, '..', '..'))
-    tracked_keypoint = config.get('personAssociation').get('tracked_keypoint')
-    error_threshold_tracking = config.get('personAssociation').get('reproj_error_threshold_association')
+    multi_person = config.get('project').get('multi_person')
+    likelihood_threshold_association = config.get('personAssociation').get('likelihood_threshold_association')
+    tracked_keypoint = config.get('personAssociation').get('single_person').get('tracked_keypoint')
+    error_threshold_tracking = config.get('personAssociation').get('single_person').get('reproj_error_threshold_association')
+    reconstruction_error_threshold = config.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
+    min_affinity = config.get('personAssociation').get('multi_person').get('min_affinity')
     poseTracked_dir = os.path.join(project_dir, 'pose-associated')
     calib_dir = [os.path.join(session_dir, c) for c in os.listdir(session_dir) if 'calib' in c.lower()][0]
     calib_file = glob.glob(os.path.join(calib_dir, '*.toml'))[0] # lastly created calibration file
 
-    # Error
-    mean_error_px = np.around(np.mean(error), decimals=1)
-
-    calib = toml.load(calib_file)
-    calib_cam1 = calib[list(calib.keys())[0]]
-    fm = calib_cam1['matrix'][0][0]
-    Dm = euclidean_distance(calib_cam1['translation'], [0,0,0])
-    mean_error_mm = np.around(mean_error_px * Dm / fm * 1000, decimals=1)
-
-    # Excluded cameras
-    mean_cam_off_count = np.around(np.mean(nb_cams_excluded), decimals=2)
+    if not multi_person:
+        logging.info('\nSingle-person analysis selected.')
+        # Error
+        mean_error_px = np.around(np.mean(error), decimals=1)
+
+        calib = toml.load(calib_file)
+        calib_cam1 = calib[list(calib.keys())[0]]
+        fm = calib_cam1['matrix'][0][0]
+        Dm = euclidean_distance(calib_cam1['translation'], [0,0,0])
+        mean_error_mm = np.around(mean_error_px * Dm / fm * 1000, decimals=1)
+
+        # Excluded cameras
+        mean_cam_off_count = np.around(np.mean(nb_cams_excluded), decimals=2)
+
+        # Recap
+        logging.info(f'\n--> Mean reprojection error for {tracked_keypoint} point on all frames is {mean_error_px} px, which roughly corresponds to {mean_error_mm} mm.')
+        logging.info(f'--> On average, {mean_cam_off_count} cameras had to be excluded to reach the requested {error_threshold_tracking} px error threshold after excluding points with likelihood below {likelihood_threshold_association}.')
+
+    else:
+        logging.info('\nMulti-person analysis selected.')
+        logging.info(f'\n--> A person was reconstructed if the lines from cameras to their keypoints intersected within {reconstruction_error_threshold} m and if their affinity remained above {min_affinity} after excluding points with likelihood below {likelihood_threshold_association}.')
+        logging.info(f'--> Beware that people were sorted across cameras, but not across frames. This will be done in the triangulation stage.')
 
-    # Recap
-    logging.info(f'\n--> Mean reprojection error for {tracked_keypoint} point on all frames is {mean_error_px} px, which roughly corresponds to {mean_error_mm} mm. ')
-    logging.info(f'--> In average, {mean_cam_off_count} cameras had to be excluded to reach the demanded {error_threshold_tracking} px error threshold.')
     logging.info(f'\nTracked json files are stored in {os.path.realpath(poseTracked_dir)}.')
@@ -401,7 +617,11 @@ def track_2d_all(config):
     session_dir = os.path.realpath(os.path.join(project_dir, '..', '..'))
     multi_person = config.get('project').get('multi_person')
     pose_model = config.get('pose').get('pose_model')
-    tracked_keypoint = config.get('personAssociation').get('tracked_keypoint')
+    tracked_keypoint = config.get('personAssociation').get('single_person').get('tracked_keypoint')
+    likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
+    min_cameras_for_triangulation = config.get('triangulation').get('min_cameras_for_triangulation')
+    reconstruction_error_threshold = config.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
+    min_affinity = config.get('personAssociation').get('multi_person').get('min_affinity')
     frame_range = config.get('project').get('frame_range')
     undistort_points = config.get('triangulation').get('undistort_points')
 
@@ -414,12 +634,12 @@ def track_2d_all(config):
     poseTracked_dir = os.path.join(project_dir, 'pose-associated')
 
     if multi_person:
-        logging.info('\nMulti-person analysis selected. Note that you can set this option to false for faster runtime if you only need the main person in the scene.')
+        logging.info('\nMulti-person analysis selected. 
Note that you can set this option to false if you only need the main person in the scene.') else: logging.info('\nSingle-person analysis selected.') # projection matrix from toml calibration file - P = computeP(calib_file, undistort=undistort_points) + P_all = computeP(calib_file, undistort=undistort_points) calib_params = retrieve_calib_params(calib_file) # selection of tracked keypoint id @@ -448,15 +668,14 @@ def track_2d_all(config): except: pass json_tracked_files = [[os.path.join(poseTracked_dir, j_dir, j_file) for j_file in json_files_names[j]] for j, j_dir in enumerate(json_dirs_names)] - # person's tracking f_range = [[min([len(j) for j in json_files])] if frame_range==[] else frame_range][0] n_cams = len(json_dirs_names) error_min_tot, cameras_off_tot = [], [] # Check that camera number is consistent between calibration file and pose folders - if n_cams != len(P): + if n_cams != len(P_all): raise Exception(f'Error: The number of cameras is not consistent:\ - Found {len(P)} cameras in the calibration file,\ + Found {len(P_all)} cameras in the calibration file,\ and {n_cams} cameras based on the number of pose folders.') Q_kpt = [np.array([0., 0., 0., 1.])] @@ -464,35 +683,40 @@ def track_2d_all(config): # print(f'\nFrame {f}:') json_files_f = [json_files[c][f] for c in range(n_cams)] json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)] - - # all possible combinations of persons - personsIDs_comb = persons_combinations(json_files_f) - - # choose persons of interest and exclude cameras with bad pose estimation Q_kpt_old = Q_kpt - errors_below_thresh, comb_errors_below_thresh, Q_kpt = best_persons_and_cameras_combination(config, json_files_f, personsIDs_comb, P, tracked_keypoint_id, calib_params) - - # reID persons across frames by checking the distance from one frame to another - Q_kpt, personsIDs_sorted = sort_people(Q_kpt_old, Q_kpt) - errors_below_thresh = np.array(errors_below_thresh)[personsIDs_sorted] - comb_errors_below_thresh = np.array(comb_errors_below_thresh)[personsIDs_sorted] + + if not multi_person: + # all possible combinations of persons + personsIDs_comb = persons_combinations(json_files_f) + + # choose persons of interest and exclude cameras with bad pose estimation + error_proposals, proposals, Q_kpt = best_persons_and_cameras_combination(config, json_files_f, personsIDs_comb, P_all, tracked_keypoint_id, calib_params) + + error_min_tot.append(np.mean(error_proposals)) + cameras_off_count = np.count_nonzero([np.isnan(comb) for comb in proposals]) / len(proposals) + cameras_off_tot.append(cameras_off_count) + + else: + # read data + all_json_data_f = [] + for js_file in json_files_f: + all_json_data_f.append(read_json(js_file)) + #TODO: remove people with average likelihood < 0.3, no full torso, less than 12 joints... 
(cf filter2d in dataset/base.py L498)
+
+            # obtain proposals after computing affinity between all the people in the different views
+            persons_per_view = [0] + [len(j) for j in all_json_data_f]
+            cum_persons_per_view = np.cumsum(persons_per_view)
+            affinity = compute_affinity(all_json_data_f, calib_params, cum_persons_per_view, reconstruction_error_threshold=reconstruction_error_threshold)
+            circ_constraint = circular_constraint(cum_persons_per_view)
+            affinity = affinity * circ_constraint
+            #TODO: affinity without hand, face, feet (cf ray.py L31)
+            affinity = matchSVT(affinity, cum_persons_per_view, circ_constraint, max_iter = 20, w_rank = 50, tol = 1e-4, w_sparse=0.1)
+            affinity[affinity < min_affinity] = 0
+
+            # obtain a single person index per camera and rewrite json files accordingly
+            proposals = person_index_per_cam(affinity, cum_persons_per_view, min_cameras_for_triangulation)
+            rewrite_json_files(json_tracked_files_f, json_files_f, proposals, n_cams)
+
+    recap_tracking(config, error_min_tot, cameras_off_tot)
diff --git a/Pose2Sim/triangulation.py b/Pose2Sim/triangulation.py
--- a/Pose2Sim/triangulation.py
+++ b/Pose2Sim/triangulation.py
[... hunk partially lost in extraction: min_with_single_indices() and the beginning of the new sort_people() moved into triangulation.py; only the tail below survives ...]
+def sort_people(Q_kpt_old, Q_kpt):
+    '''
+    Associate persons across frames: a person in the current frame is matched
+    with the closest person in the previous frame.
+    '''
+    [...]
+    # rearrange persons: keep their best correspondence from the previous frame,
+    # or their previous coordinates if they are not matched in the current frame
+    Q_kpt_new, personsIDs_sorted = [], []
+    for i in range(len(Q_kpt)):
+        id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+        if len(id_in_old) > 0:
+            personsIDs_sorted += id_in_old
+            Q_kpt_new += [Q_kpt[id_in_old[0]]]
+        else:
+            personsIDs_sorted += [-1]
+            Q_kpt_new += [Q_kpt_old[i]]
+
+    return Q_kpt_new, personsIDs_sorted, associated_tuples
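A toy illustration of the behaviour sort_people() is meant to guarantee (a sketch, assuming the function is importable from Pose2Sim.triangulation):

import numpy as np
from Pose2Sim.triangulation import sort_people

prev = [np.array([0., 0., 0., 1.]), np.array([5., 0., 0., 1.])]    # persons A and B at frame f-1
curr = [np.array([5.1, 0., 0., 1.]), np.array([0.1, 0., 0., 1.])]  # indices swapped at frame f
curr_sorted, ids, _ = sort_people(prev, curr)
print(ids)  # expected [1, 0]: detections are swapped back to match the previous frame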
+
+
 def make_trc(config, Q, keypoints_names, f_range, id_person=-1):
     '''
     Make Opensim compatible trc file from a dataframe with 3D coordinates
@@ -267,7 +358,7 @@ def recap_triangulate(config, error, nb_cams_excluded, keypoints_names, cam_excl
     logging.info(f'On average, {mean_cam_excluded} cameras had to be excluded to reach these thresholds.')
     cam_excluded_count[n] = {i: v for i, v in zip(cam_names, cam_excluded_count[n].values())}
-    cam_excluded_count[n] = {i: cam_excluded_count[n][i] for i in sorted(cam_excluded_count[n].keys())}
+    cam_excluded_count[n] = {k: v for k, v in sorted(cam_excluded_count[n].items(), key=lambda item: item[1])[::-1]}
     str_cam_excluded_count = ''
     for i, (k, v) in enumerate(cam_excluded_count[n].items()):
         if i ==0:
@@ -525,7 +616,7 @@ def triangulation_from_best_cameras(config, coords_2D_kpt, coords_2D_kpt_swapped
             Q = np.array([np.nan, np.nan, np.nan])
 
     return Q, error_min, nb_cams_excluded, id_excluded_cams
-
+
 
 def extract_files_frame_f(json_tracked_files_f, keypoints_ids, nb_persons_to_detect):
     '''
@@ -584,7 +675,7 @@ def triangulate_all(config):
     INPUTS: 
     - a calibration file (.toml extension)
-    - json files for each camera with only one person of interest
+    - json files for each camera with indices matching the detected persons
     - a Config.toml file
     - a skeleton model
 
@@ -655,9 +746,7 @@ def triangulate_all(config):
     # Prep triangulation
     f_range = [[0,min([len(j) for j in json_files_names])] if frame_range==[] else frame_range][0]
     frames_nb = f_range[1]-f_range[0]
-
     nb_persons_to_detect = max([len(json.load(open(json_fname))['people']) for json_fname in json_tracked_files[0]])
-
     n_cams = len(json_dirs_names)
 
     # Check that camera number is consistent between calibration file and pose folders
@@ -667,8 +756,14 @@ def triangulate_all(config):
         and {n_cams} cameras based on the number of pose folders.')
 
     # Triangulation
+    Q = [[[np.nan]*3]*keypoints_nb for n in range(nb_persons_to_detect)]
+    Q_old = [[[np.nan]*3]*keypoints_nb for n in range(nb_persons_to_detect)]
+    error = [[] for n in range(nb_persons_to_detect)]
+    nb_cams_excluded = [[] for n in range(nb_persons_to_detect)]
+    id_excluded_cams = [[] for n in range(nb_persons_to_detect)]
     Q_tot, error_tot, nb_cams_excluded_tot,id_excluded_cams_tot = [], [], [], []
     for f in tqdm(range(frames_nb)):
+        # print(f'\nFrame {f}:')
         # Get x,y,likelihood values from files
         json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
         # print(json_tracked_files_f)
@@ -692,12 +787,19 @@ def triangulate_all(config):
                 y_files[n][likelihood_files[n] < likelihood_threshold] = np.nan
                 likelihood_files[n][likelihood_files[n] < likelihood_threshold] = np.nan
 
+        # Q_old = Q, except where Q has nan, in which case the previous Q_old value is kept
+        nan_mask = np.isnan(Q)
+        Q_old = np.where(nan_mask, Q_old, Q)
+        error_old, nb_cams_excluded_old, id_excluded_cams_old = error.copy(), nb_cams_excluded.copy(), id_excluded_cams.copy()
         Q = [[] for n in range(nb_persons_to_detect)]
         error = [[] for n in range(nb_persons_to_detect)]
         nb_cams_excluded = [[] for n in range(nb_persons_to_detect)]
         id_excluded_cams = [[] for n in range(nb_persons_to_detect)]
+
+        for n in range(nb_persons_to_detect):
             for keypoint_idx in keypoints_idx:
+            # keypoints_nb = 2
+            # for keypoint_idx in range(2):
                 # Triangulate cameras with min reprojection error
                 # print('\n', keypoints_names[keypoint_idx])
                 coords_2D_kpt = np.array( (x_files[n][:, keypoint_idx], y_files[n][:, keypoint_idx], likelihood_files[n][:, keypoint_idx]) )
@@ -709,7 +811,31 @@ def triangulate_all(config):
                 error[n].append(error_kpt)
                 nb_cams_excluded[n].append(nb_cams_excluded_kpt)
                 id_excluded_cams[n].append(id_excluded_cams_kpt)
-
+
+        if multi_person:
+            # reID persons across frames by checking the distance from one frame to another
+            # print('Q before ordering ', np.array(Q)[:,:2])
+            if f !=0:
+                Q, personsIDs_sorted, associated_tuples = sort_people(Q_old, Q)
+                # print('Q after ordering ', personsIDs_sorted, associated_tuples, np.array(Q)[:,:2])
+
+                error_sorted, nb_cams_excluded_sorted, id_excluded_cams_sorted = [], [], []
+                for i in range(len(Q)):
+                    id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+                    if len(id_in_old) > 0:
+                        personsIDs_sorted += id_in_old
+                        error_sorted += [error[id_in_old[0]]]
+                        nb_cams_excluded_sorted += [nb_cams_excluded[id_in_old[0]]]
+                        id_excluded_cams_sorted += [id_excluded_cams[id_in_old[0]]]
+                    else:
+                        personsIDs_sorted += [-1]
+                        error_sorted += [error[i]]
+                        nb_cams_excluded_sorted += [nb_cams_excluded[i]]
+                        id_excluded_cams_sorted += [id_excluded_cams[i]]
+                error, nb_cams_excluded, id_excluded_cams = error_sorted, nb_cams_excluded_sorted, id_excluded_cams_sorted
+
+        # TODO: if distance > threshold, new person
+
         # Add triangulated points, errors and excluded cameras to pandas dataframes
         Q_tot.append([np.concatenate(Q[n]) for n in range(nb_persons_to_detect)])
         error_tot.append([error[n] for n in range(nb_persons_to_detect)])
@@ -717,6 +843,13 @@ def triangulate_all(config):
         id_excluded_cams = [[id_excluded_cams[n][k] for k in range(keypoints_nb)] for n in range(nb_persons_to_detect)]
         id_excluded_cams_tot.append(id_excluded_cams)
 
+    # fill values in case a person who was not detected at first enters the frame later
+    Q_tot = [list(tpl) for tpl in zip(*it.zip_longest(*Q_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    error_tot = [list(tpl) for tpl in zip(*it.zip_longest(*error_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    nb_cams_excluded_tot = [list(tpl) for tpl in zip(*it.zip_longest(*nb_cams_excluded_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    id_excluded_cams_tot = [list(tpl) for tpl in zip(*it.zip_longest(*id_excluded_cams_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+
+    # dataframes for each person
     Q_tot = [pd.DataFrame([Q_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
     error_tot = [pd.DataFrame([error_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
     nb_cams_excluded_tot = [pd.DataFrame([nb_cams_excluded_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
diff --git a/README.md b/README.md
index 1347d6e..885d5c4 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@
 ##### N.B.: Please set undistort_points and handle_LR_swap to false for now, since they currently lead to inaccuracies. I'll try to fix it soon.
 
 > **_News_: Version 0.7:**\
-> **Multi-person analysis is now supported!**\
+> **Multi-person analysis is now supported!** The latest version is 100 times faster than the previous one, and also more robust.\
 > Team sports, combat sports, and ballroom dancing can now take advantage of Pose2Sim's full potential.\
 > **Other recently added features**: Automatic batch processing, Marker augmentation, Blender visualization.