Faster and more robust multi-person analysis (#85)
* tests synchro
* draft
* further draft
* affinity ok
* proposals okay, need to incorporate in Pose2Sim + tests
* will transfer sorting across frames in triangulation in next commit
* last tests need to be done, but it seems to work pretty well
* should all work smoothly
* update readme
* last checks
* fixed linting issues
* getting tired of being forgetful
This commit is contained in:
parent 5ddef52185
commit 19efec2723
@@ -287,9 +287,6 @@ def synchronization(config=None):
     start = time.time()
     currentDateAndTime = datetime.now()
     project_dir = os.path.realpath(config_dict.get('project').get('project_dir'))
-    seq_name = os.path.basename(project_dir)
-    frame_range = config_dict.get('project').get('frame_range')
-    frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]
 
     logging.info("\n\n---------------------------------------------------------------------")
     logging.info("Camera synchronization")
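Aside: the removed `frames` line wraps a conditional expression in a one-element list and immediately indexes it back out; if that string is ever needed again, the plain conditional expression is equivalent:

frames = "all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"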
@@ -18,9 +18,8 @@
 
 
 [project]
-# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-nb_persons_to_detect = 2 # checked only if multi_person is selected
-frame_rate = 120 # fps
+multi_person = false # If false, only the main person in scene is analyzed.
+frame_rate = 60 # fps
 frame_range = [] # For example [10,300], or [] for all frames
 ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
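The time-range conversion described in the comments above is a one-liner; an illustrative Python sketch (variable names hypothetical):

frame_rate = 60                                          # fps, from [project]
time_range = [0.1, 2.0]                                  # seconds
frame_range = [int(t * frame_rate) for t in time_range]  # -> [6, 120], i.e. time_range * frame_rate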
@@ -31,6 +30,26 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
 # Take heart, calibration is not that complicated once you get the hang of it!
 
 
+[pose]
+pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
+pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
+                        #With mediapipe: BLAZEPOSE.
+                        #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
+                        #With deeplabcut: CUSTOM. See example at the end of the file.
+# What follows has not been implemented yet
+overwrite_pose = false
+openpose_path = '' # only checked if OpenPose is used
+
+
+[synchronization]
+display_corr = true # true or false (lowercase)
+reset_sync = true # Recalculate synchronization even if already done
+# id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example: 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
+sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# limit synchronization search (to the beginning or to the end of the capture, for example)
+
+
 [calibration]
 calibration_type = 'convert' # 'convert' or 'calculate'
 
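These sections are ordinary TOML, so they can be inspected with any TOML parser; a minimal sketch assuming the `toml` package (not code from this commit):

import toml

config = toml.load('Config.toml')
print(config['project']['multi_person'])          # false -> only the main person is analyzed
print(config['synchronization']['display_corr'])  # true -> correlation figures are displayed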
@@ -90,30 +109,17 @@ calibration_type = 'convert' # 'convert' or 'calculate'
 # Coming soon!
 
 
-[pose]
-pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
-                        #With mediapipe: BLAZEPOSE.
-                        #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
-                        #With deeplabcut: CUSTOM. See example at the end of the file.
-# What follows has not been implemented yet
-overwrite_pose = false
-openpose_path = '' # only checked if OpenPose is used
-
-
-[synchronization]
-# COMING SOON!
-reset_sync = true # Recalculate synchronization even if already done
-speed_kind = 'y' # 'y' showed best performance.
-id_kpt = [10] # number from keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
-
-
 [personAssociation]
-tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-# and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-reproj_error_threshold_association = 20 # px
-likelihood_threshold_association = 0.3
+likelihood_threshold_association = 0.3
+
+[personAssociation.single_person]
+reproj_error_threshold_association = 20 # px
+tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+[personAssociation.multi_person]
+reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+min_affinity = 0.2 # affinity below which a correspondence is ignored
 
 
 [triangulation]
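min_affinity in the new [personAssociation.multi_person] table gates an affinity matrix between detections seen from different cameras; an illustrative numpy sketch of that gating (not the commit's implementation):

import numpy as np

min_affinity = 0.2                       # from [personAssociation.multi_person]
affinity = np.array([[0.9, 0.1],
                     [0.3, 0.7]])        # affinity between detections in two views
affinity[affinity < min_affinity] = 0.0  # correspondences below the threshold are ignored
print(affinity)                          # [[0.9 0. ] [0.3 0.7]]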
@@ -9,91 +9,29 @@
 # If a parameter is not found here, Pose2Sim will look for its value in the
 # Config.toml file of the level above. This way, you can set global
 # instructions for the Session and alter them for specific Participants or Trials.
 #
 # If you wish to overwrite a parameter for a specific trial or participant,
 # edit its Config.toml file by uncommenting its key (e.g., [project])
 # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
 # [filtering.butterworth] and set cut_off_frequency = 10, etc.
 
 
 
 # [project]
-# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-# nb_persons_to_detect = 2 # checked only if multi_person is selected
-# frame_rate = 60 # FPS
+# multi_person = true # If false, only the main person in scene is analyzed.
+# frame_rate = 60 # fps
 # frame_range = [] # For example [10,300], or [] for all frames
 ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
 
 # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
-# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
+## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
 
 
 ## Take heart, calibration is not that complicated once you get the hang of it!
-# [calibration]
-# calibration_type = 'convert' # 'convert' or 'calculate'
-
-# [calibration.convert]
-# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
-# [calibration.convert.qualisys]
-# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
-# [calibration.convert.optitrack] # See readme for instructions
-# [calibration.convert.vicon] # No parameter needed
-# [calibration.convert.opencap] # No parameter needed
-# [calibration.convert.easymocap] # No parameter needed
-# [calibration.convert.biocv] # No parameter needed
-# [calibration.convert.anipose] # No parameter needed
-# [calibration.convert.freemocap] # No parameter needed
-
-
-# [calibration.calculate]
-## Camera properties, theoretically need to be calculated only once in a camera lifetime
-# [calibration.calculate.intrinsics]
-# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
-# show_detection_intrinsics = true # true or false (lowercase)
-# intrinsics_extension = 'jpg' # any video or image extension
-# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
-# intrinsics_corners_nb = [4,7]
-# intrinsics_square_size = 60 # mm
-
-## Camera placements, need to be done before every session
-# [calibration.calculate.extrinsics]
-# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
-## 'board' should be large enough to be detected when laid on the floor. Not recommended.
-## 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
-## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
-
-# moving_cameras = false # Not implemented yet
-# calculate_extrinsics = true # true or false (lowercase)
-
-# [calibration.calculate.extrinsics.board]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
-# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
-
-# [calibration.calculate.extrinsics.scene]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
-## in m -> unlike for intrinsics, NOT in mm!
-# object_coords_3d = [[-2.0, 0.3, 0.0],
-#                     [-2.0, 0.0, 0.0],
-#                     [-2.0, 0.0, 0.05],
-#                     [-2.0, -0.3, 0.0],
-#                     [0.0, 0.3, 0.0],
-#                     [0.0, 0.0, 0.0],
-#                     [0.0, 0.0, 0.05],
-#                     [0.0, -0.3, 0.0]]
-
-# [calibration.calculate.extrinsics.keypoints]
-## Coming soon!
 
 
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE.
 # #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 # #With deeplabcut: CUSTOM. See example at the end of the file.
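The header comments describe a cascading lookup: a key missing at the Trial level falls back to the Participant, then the Session Config.toml. An illustrative sketch of that fallback (file paths hypothetical, assuming the `toml` package):

import toml

def lookup(keys, levels):
    # levels: config dicts ordered from Trial up to Session
    for cfg in levels:
        node = cfg
        try:
            for key in keys:
                node = node[key]
            return node    # found at this level
        except KeyError:
            continue       # not set here; look one level above
    raise KeyError(keys)

levels = [toml.load(p) for p in ('Trial/Config.toml', 'Participant/Config.toml', 'Session/Config.toml')]
frame_rate = lookup(('project', 'frame_rate'), levels)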
@@ -103,33 +41,93 @@
 
 
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # To be fetched later from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if there are several keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example: 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture, for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+#                     [-2.0, 0.0, 0.0],
+#                     [-2.0, 0.0, 0.05],
+#                     [-2.0, -0.3, 0.0],
+#                     [0.0, 0.3, 0.0],
+#                     [0.0, 0.0, 0.0],
+#                     [0.0, 0.0, 0.05],
+#                     [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!
 
 
 # [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored
 
 
 # [triangulation]
 # reorder_trc = false # only checked if multi_person analysis
 # reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation= 0.05
+# likelihood_threshold_triangulation = 0.3
 # min_cameras_for_triangulation = 2
 # interpolation = 'cubic' # linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
 # handle_LR_swap = false # Better if few cameras (e.g., less than 4) with risk of limb swapping (e.g., camera facing sagittal plane), otherwise slightly less accurate and slower
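The interpolation kinds listed under [triangulation] match scipy's 1-D interpolators; an illustrative sketch of filling a small gap (not the commit's code):

import numpy as np
from scipy.interpolate import interp1d

frames = np.array([0, 1, 2, 5, 6])            # frames where the keypoint was triangulated
values = np.array([0.0, 0.1, 0.2, 0.5, 0.6])  # one 3D coordinate of that keypoint
f = interp1d(frames, values, kind='cubic')    # 'linear', 'slinear', 'quadratic', or 'cubic'
gap = np.array([3, 4])                        # gap of 2 frames < interp_if_gap_smaller_than = 10
print(f(gap))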
@@ -141,38 +139,39 @@
 # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
 # display_figures = false # true or false (lowercase)
 
 # [filtering.butterworth]
 # order = 4
 # cut_off_frequency = 6 # Hz
 # [filtering.kalman]
-## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
 # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
 # smooth = true # should be true, unless you need real-time filtering
 # [filtering.butterworth_on_speed]
 # order = 4
 # cut_off_frequency = 10 # Hz
 # [filtering.gaussian]
 # sigma_kernel = 2 # px
 # [filtering.LOESS]
 # nb_values_used = 30 # = fraction of data used * nb frames
 # [filtering.median]
 # kernel_size = 9
 
 
 # [markerAugmentation]
-# ## Only works on BODY_25 and BODY_25B models
+## Only works on BODY_25 and BODY_25B models
 # participant_height = 1.72 # m # float if single person, list of floats if multi-person (same order as the static trials)
 # participant_mass = 70.0 # kg
 
 
 # [opensim]
 # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
 # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
 # # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
 # # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
 # opensim_bin_path = 'C:\OpenSim 4.4\bin'
 
 
 
 ## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
 ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
 ##
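The [filtering.butterworth] defaults (order 4, 6 Hz cut-off) describe a standard zero-phase low-pass filter; an illustrative scipy sketch (not the commit's code):

import numpy as np
from scipy.signal import butter, filtfilt

frame_rate = 60                    # Hz, from [project]
order, cut_off = 4, 6              # from [filtering.butterworth]
b, a = butter(order, cut_off / (frame_rate / 2), btype='low')  # cut-off normalized by Nyquist
coords = np.random.randn(200)      # one marker coordinate over 200 frames
smoothed = filtfilt(b, a, coords)  # forward-backward pass cancels phase lag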
@@ -188,65 +187,65 @@
 # name = "CHip"
 # id = "None"
 # [[pose.CUSTOM.children]]
-# id = 12
 # name = "RHip"
+# id = 12
 # [[pose.CUSTOM.children.children]]
-# id = 14
 # name = "RKnee"
+# id = 14
 # [[pose.CUSTOM.children.children.children]]
-# id = 16
 # name = "RAnkle"
+# id = 16
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 22
 # name = "RBigToe"
+# id = 22
 # [[pose.CUSTOM.children.children.children.children.children]]
-# id = 23
 # name = "RSmallToe"
+# id = 23
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 24
 # name = "RHeel"
+# id = 24
 # [[pose.CUSTOM.children]]
-# id = 11
 # name = "LHip"
+# id = 11
 # [[pose.CUSTOM.children.children]]
-# id = 13
 # name = "LKnee"
+# id = 13
 # [[pose.CUSTOM.children.children.children]]
-# id = 15
 # name = "LAnkle"
+# id = 15
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 19
 # name = "LBigToe"
+# id = 19
 # [[pose.CUSTOM.children.children.children.children.children]]
-# id = 20
 # name = "LSmallToe"
+# id = 20
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 21
 # name = "LHeel"
+# id = 21
 # [[pose.CUSTOM.children]]
-# id = 17
 # name = "Neck"
+# id = 17
 # [[pose.CUSTOM.children.children]]
-# id = 18
 # name = "Head"
+# id = 18
 # [[pose.CUSTOM.children.children.children]]
-# id = 0
 # name = "Nose"
+# id = 0
 # [[pose.CUSTOM.children.children]]
-# id = 6
 # name = "RShoulder"
+# id = 6
 # [[pose.CUSTOM.children.children.children]]
-# id = 8
 # name = "RElbow"
+# id = 8
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 10
 # name = "RWrist"
+# id = 10
 # [[pose.CUSTOM.children.children]]
-# id = 5
 # name = "LShoulder"
+# id = 5
 # [[pose.CUSTOM.children.children.children]]
-# id = 7
 # name = "LElbow"
+# id = 7
 # [[pose.CUSTOM.children.children.children.children]]
-# id = 9
 # name = "LWrist"
+# id = 9
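Once uncommented, the CUSTOM skeleton is a plain TOML tree whose [[...children]] arrays nest joints under their parent; an illustrative depth-first walk (assuming the `toml` package):

import toml

def walk(node, depth=0):
    # Print each joint and its 2D-pose column id, depth-first.
    print('  ' * depth + f"{node['name']} (id={node['id']})")
    for child in node.get('children', []):
        walk(child, depth + 1)

walk(toml.load('Config.toml')['pose']['CUSTOM'])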
@ -9,91 +9,29 @@
|
|||||||
# If a parameter is not found here, Pose2Sim will look for its value in the
|
# If a parameter is not found here, Pose2Sim will look for its value in the
|
||||||
# Config.toml file of the level above. This way, you can set global
|
# Config.toml file of the level above. This way, you can set global
|
||||||
# instructions for the Session and alter them for specific Participants or Trials.
|
# instructions for the Session and alter them for specific Participants or Trials.
|
||||||
#
|
#
|
||||||
# If you wish to overwrite a parameter for a specific trial or participant,
|
# If you wish to overwrite a parameter for a specific trial or participant,
|
||||||
# edit its Config.toml file by uncommenting its key (e.g., [project])
|
# edit its Config.toml file by uncommenting its key (e.g., [project])
|
||||||
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
|
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
|
||||||
# [filtering.butterworth] and set cut_off_frequency = 10, etc.
|
# [filtering.butterworth] and set cut_off_frequency = 10, etc.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# [project]
|
# [project]
|
||||||
# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster).
|
# multi_person = true # If false, only the main person in scene is analyzed.
|
||||||
# nb_persons_to_detect = 2 # checked only if multi_person is selected
|
# frame_rate = 60 # fps
|
||||||
# frame_rate = 60 # FPS
|
|
||||||
# frame_range = [] # For example [10,300], or [] for all frames
|
# frame_range = [] # For example [10,300], or [] for all frames
|
||||||
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
|
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
|
||||||
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
|
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
|
||||||
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
|
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
|
||||||
|
|
||||||
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
|
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
|
||||||
# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
|
## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
|
||||||
|
|
||||||
|
|
||||||
## Take heart, calibration is not that complicated once you get the hang of it!
|
## Take heart, calibration is not that complicated once you get the hang of it!
|
||||||
# [calibration]
|
|
||||||
# calibration_type = 'convert' # 'convert' or 'calculate'
|
|
||||||
|
|
||||||
# [calibration.convert]
|
|
||||||
# convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', or 'biocv'
|
|
||||||
# [calibration.convert.qualisys]
|
|
||||||
# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
|
|
||||||
# [calibration.convert.optitrack] # See readme for instructions
|
|
||||||
# [calibration.convert.vicon] # No parameter needed
|
|
||||||
# [calibration.convert.opencap] # No parameter needed
|
|
||||||
# [calibration.convert.easymocap] # No parameter needed
|
|
||||||
# [calibration.convert.biocv] # No parameter needed
|
|
||||||
# [calibration.convert.anipose] # No parameter needed
|
|
||||||
# [calibration.convert.freemocap] # No parameter needed
|
|
||||||
|
|
||||||
|
|
||||||
# [calibration.calculate]
|
|
||||||
## Camera properties, theoretically need to be calculated only once in a camera lifetime
|
|
||||||
# [calibration.calculate.intrinsics]
|
|
||||||
# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
|
|
||||||
# show_detection_intrinsics = true # true or false (lowercase)
|
|
||||||
# intrinsics_extension = 'jpg' # any video or image extension
|
|
||||||
# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
|
|
||||||
# intrinsics_corners_nb = [4,7]
|
|
||||||
# intrinsics_square_size = 60 # mm
|
|
||||||
|
|
||||||
## Camera placements, need to be done before every session
|
|
||||||
# [calibration.calculate.extrinsics]
|
|
||||||
# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
|
|
||||||
## 'board' should be large enough to be detected when laid on the floor. Not recommended.
|
|
||||||
## 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out.
|
|
||||||
## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras.
|
|
||||||
|
|
||||||
# moving_cameras = false # Not implemented yet
|
|
||||||
# calculate_extrinsics = true # true or false (lowercase)
|
|
||||||
|
|
||||||
# [calibration.calculate.extrinsics.board]
|
|
||||||
# show_reprojection_error = true # true or false (lowercase)
|
|
||||||
# extrinsics_extension = 'png' # any video or image extension
|
|
||||||
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
|
|
||||||
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
|
|
||||||
|
|
||||||
# [calibration.calculate.extrinsics.scene]
|
|
||||||
# show_reprojection_error = true # true or false (lowercase)
|
|
||||||
# extrinsics_extension = 'png' # any video or image extension
|
|
||||||
## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
|
|
||||||
## in m -> unlike for intrinsics, NOT in mm!
|
|
||||||
# object_coords_3d = [[-2.0, 0.3, 0.0],
|
|
||||||
# [-2.0 , 0.0, 0.0],
|
|
||||||
# [-2.0, 0.0, 0.05],
|
|
||||||
# [-2.0, -0.3 , 0.0],
|
|
||||||
# [0.0, 0.3, 0.0],
|
|
||||||
# [0.0, 0.0, 0.0],
|
|
||||||
# [0.0, 0.0, 0.05],
|
|
||||||
# [0.0, -0.3, 0.0]]
|
|
||||||
|
|
||||||
# [calibration.calculate.extrinsics.keypoints]
|
|
||||||
## Coming soon!
|
|
||||||
|
|
||||||
|
|
||||||
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE.
 # #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 # #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # To be looked up later from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if several keypoints are used
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture, for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera's lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2-dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+#                     [-2.0, 0.0, 0.0],
+#                     [-2.0, 0.0, 0.05],
+#                     [-2.0, -0.3, 0.0],
+#                     [0.0, 0.3, 0.0],
+#                     [0.0, 0.0, 0.0],
+#                     [0.0, 0.0, 0.05],
+#                     [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!
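The new [synchronization] keys above (display_corr, id_kpt, weights_kpt, sync_frame_range) reflect the approach of tracking one fast-moving keypoint per camera and cross-correlating its vertical speed to recover the time offset. A minimal sketch of that idea, assuming y_cam1 and y_cam2 are per-frame y-coordinates of the same keypoint in two views; this illustrates the principle, not Pose2Sim's actual implementation:

import numpy as np

def estimate_lag(y_cam1, y_cam2, frame_rate=60):
    # Frame-to-frame vertical speed, zero-meaned so the correlation peak
    # reflects the shape of the motion rather than a constant offset
    v1 = np.diff(y_cam1); v1 = v1 - v1.mean()
    v2 = np.diff(y_cam2); v2 = v2 - v2.mean()
    corr = np.correlate(v1, v2, mode='full')
    lag = int(np.argmax(corr)) - (len(v2) - 1)  # frames by which cam1 lags cam2
    return lag, lag / frame_rate                # also in seconds

# lag_frames, lag_s = estimate_lag(y1, y2)  # then shift one stream by lag_frames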
 # [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored
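min_affinity is the floor below which a cross-view correspondence is simply discarded. A toy sketch of greedy association on an affinity matrix, meant to illustrate the parameter's role rather than reproduce the exact algorithm used here:

import numpy as np

def greedy_association(affinity, min_affinity=0.2):
    # affinity[i, j]: plausibility that detection i (view A) and detection j
    # (view B) are the same person
    aff = affinity.astype(float).copy()
    pairs = []
    while np.isfinite(aff).any():
        i, j = np.unravel_index(np.argmax(aff), aff.shape)
        if aff[i, j] < min_affinity:
            break                    # everything left is too weak to trust
        pairs.append((i, j))
        aff[i, :] = -np.inf          # each detection is matched at most once
        aff[:, j] = -np.inf
    return pairs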
 # [triangulation]
 # reorder_trc = false # only checked if multi_person analysis
 # reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation = 0.05
+# likelihood_threshold_triangulation = 0.3
 # min_cameras_for_triangulation = 2
 # interpolation = 'cubic' # linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
 # handle_LR_swap = false # Better if few cameras (e.g. fewer than 4) with a risk of limb swapping (e.g. camera facing the sagittal plane), otherwise slightly less accurate and slower
@@ -139,40 +137,41 @@
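One caveat worth spelling out for interp_if_gap_smaller_than: pandas' interpolate(limit=N) still fills the first N frames of a longer gap, so leaving large gaps untouched requires an explicit run-length check. A sketch under that assumption (hypothetical helper, not Pose2Sim code; method='cubic' needs SciPy installed):

import numpy as np
import pandas as pd

def interp_small_gaps(x, max_gap=10, method='cubic'):
    s = pd.Series(x, dtype=float)
    is_nan = s.isna()
    run_id = is_nan.ne(is_nan.shift()).cumsum()        # label runs of NaN / non-NaN
    run_len = is_nan.groupby(run_id).transform('sum')  # NaN-run length at each index
    filled = s.interpolate(method=method, limit_area='inside')
    filled[is_nan & (run_len >= max_gap)] = np.nan     # keep big gaps empty
    return filled.to_numpy()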
 [filtering]
 # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
-display_figures = true # true or false (lowercase)
+display_figures = false # true or false (lowercase)

 # [filtering.butterworth]
 # order = 4
 # cut_off_frequency = 6 # Hz
 # [filtering.kalman]
-## How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
 # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
 # smooth = true # should be true, unless you need real-time filtering
 # [filtering.butterworth_on_speed]
 # order = 4
 # cut_off_frequency = 10 # Hz
 # [filtering.gaussian]
 # sigma_kernel = 2 # px
 # [filtering.LOESS]
 # nb_values_used = 30 # = fraction of data used * nb frames
 # [filtering.median]
 # kernel_size = 9
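For reference, the [filtering.butterworth] defaults above amount to a zero-phase 4th-order low-pass at 6 Hz. A minimal SciPy sketch, assuming a 1D coordinate trace and the frame_rate from [project]:

from scipy.signal import butter, filtfilt

def butterworth_smooth(x, frame_rate=60, order=4, cut_off=6):
    # Normalized cutoff = cut_off / Nyquist; filtfilt runs the filter forward
    # and backward so the smoothed trace is not phase-shifted
    b, a = butter(order, cut_off / (frame_rate / 2), btype='low')
    return filtfilt(b, a, x)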
 # [markerAugmentation]
-# ## Only works on BODY_25 and BODY_25B models
+## Only works on BODY_25 and BODY_25B models
 # participant_height = 1.72 # m # float if single person, list of floats if multi-person (same order as the static trials)
 # participant_mass = 70.0 # kg

 # [opensim]
 # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
 # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
 # # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
 # # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
 # opensim_bin_path = 'C:\OpenSim 4.4\bin'
 ## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
 ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
 ##
@@ -188,65 +187,65 @@ display_figures = true # true or false (lowercase)
# name = "CHip"
|
# name = "CHip"
|
||||||
# id = "None"
|
# id = "None"
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 12
|
|
||||||
# name = "RHip"
|
# name = "RHip"
|
||||||
|
# id = 12
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 14
|
|
||||||
# name = "RKnee"
|
# name = "RKnee"
|
||||||
|
# id = 14
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 16
|
|
||||||
# name = "RAnkle"
|
# name = "RAnkle"
|
||||||
|
# id = 16
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 22
|
|
||||||
# name = "RBigToe"
|
# name = "RBigToe"
|
||||||
|
# id = 22
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 23
|
|
||||||
# name = "RSmallToe"
|
# name = "RSmallToe"
|
||||||
|
# id = 23
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 24
|
|
||||||
# name = "RHeel"
|
# name = "RHeel"
|
||||||
|
# id = 24
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 11
|
|
||||||
# name = "LHip"
|
# name = "LHip"
|
||||||
|
# id = 11
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 13
|
|
||||||
# name = "LKnee"
|
# name = "LKnee"
|
||||||
|
# id = 13
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 15
|
|
||||||
# name = "LAnkle"
|
# name = "LAnkle"
|
||||||
|
# id = 15
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 19
|
|
||||||
# name = "LBigToe"
|
# name = "LBigToe"
|
||||||
|
# id = 19
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 20
|
|
||||||
# name = "LSmallToe"
|
# name = "LSmallToe"
|
||||||
|
# id = 20
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 21
|
|
||||||
# name = "LHeel"
|
# name = "LHeel"
|
||||||
|
# id = 21
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 17
|
|
||||||
# name = "Neck"
|
# name = "Neck"
|
||||||
|
# id = 17
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 18
|
|
||||||
# name = "Head"
|
# name = "Head"
|
||||||
|
# id = 18
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 0
|
|
||||||
# name = "Nose"
|
# name = "Nose"
|
||||||
|
# id = 0
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 6
|
|
||||||
# name = "RShoulder"
|
# name = "RShoulder"
|
||||||
|
# id = 6
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 8
|
|
||||||
# name = "RElbow"
|
# name = "RElbow"
|
||||||
|
# id = 8
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 10
|
|
||||||
# name = "RWrist"
|
# name = "RWrist"
|
||||||
|
# id = 10
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 5
|
|
||||||
# name = "LShoulder"
|
# name = "LShoulder"
|
||||||
|
# id = 5
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 7
|
|
||||||
# name = "LElbow"
|
# name = "LElbow"
|
||||||
|
# id = 7
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 9
|
|
||||||
# name = "LWrist"
|
# name = "LWrist"
|
||||||
|
# id = 9
|
||||||
|
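Because the CUSTOM node ids must match the column numbers of the 2D pose file, a quick sanity check is to walk the TOML tree depth-first and list the name/id pairs in order. A hypothetical sketch (tomllib is in the standard library from Python 3.11; the [pose.CUSTOM] layout is assumed to be as written above):

import tomllib  # on older Pythons, use the 'toml' package instead

def walk(node, out):
    # Depth-first traversal of the nested [[...children]] tables
    out.append((node.get('name'), node.get('id')))
    for child in node.get('children', []):
        walk(child, out)

with open('Config.toml', 'rb') as f:
    cfg = tomllib.load(f)
nodes = []
walk(cfg['pose']['CUSTOM'], nodes)  # root CHip first, then each limb chain
print(nodes)  # every id should equal that keypoint's column index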
@@ -9,91 +9,29 @@
 # If a parameter is not found here, Pose2Sim will look for its value in the
 # Config.toml file of the level above. This way, you can set global
 # instructions for the Session and alter them for specific Participants or Trials.
 #
 # If you wish to overwrite a parameter for a specific trial or participant,
 # edit its Config.toml file by uncommenting its key (e.g., [project])
 # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
 # [filtering.butterworth] and set cut_off_frequency = 10, etc.
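The hierarchical lookup this header describes can be pictured as a simple fallback chain over the parsed Config.toml dicts, Trial first, Session last. A sketch of the mechanism with illustrative names, not Pose2Sim's actual code:

def lookup(key_path, levels):
    # levels: parsed Config.toml dicts ordered Trial -> Participant -> Session
    for cfg in levels:
        node = cfg
        try:
            for key in key_path:
                node = node[key]
            return node              # first level that defines the key wins
        except (KeyError, TypeError):
            continue                 # not set here, fall back one level up
    raise KeyError('.'.join(key_path))

# lookup(['filtering', 'butterworth', 'cut_off_frequency'],
#        [trial_cfg, participant_cfg, session_cfg])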
 # [project]
-# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-# nb_persons_to_detect = 2 # checked only if multi_person is selected
-# frame_rate = 60 # FPS
+# multi_person = true # If false, only the main person in scene is analyzed.
+# frame_rate = 60 # fps
 # frame_range = [] # For example [10,300], or [] for all frames
 ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]

 # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
-# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
+## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
 ## Take heart, calibration is not that complicated once you get the hang of it!
-# [calibration]
-# calibration_type = 'convert' # 'convert' or 'calculate'
-
-# [calibration.convert]
-# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
-# [calibration.convert.qualisys]
-# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
-# [calibration.convert.optitrack] # See readme for instructions
-# [calibration.convert.vicon] # No parameter needed
-# [calibration.convert.opencap] # No parameter needed
-# [calibration.convert.easymocap] # No parameter needed
-# [calibration.convert.biocv] # No parameter needed
-# [calibration.convert.anipose] # No parameter needed
-# [calibration.convert.freemocap] # No parameter needed
-
-
-# [calibration.calculate]
-## Camera properties, theoretically need to be calculated only once in a camera's lifetime
-# [calibration.calculate.intrinsics]
-# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
-# show_detection_intrinsics = true # true or false (lowercase)
-# intrinsics_extension = 'jpg' # any video or image extension
-# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
-# intrinsics_corners_nb = [4,7]
-# intrinsics_square_size = 60 # mm
-
-## Camera placements, need to be done before every session
-# [calibration.calculate.extrinsics]
-# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
-## 'board' should be large enough to be detected when laid on the floor. Not recommended.
-## 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
-## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
-
-# moving_cameras = false # Not implemented yet
-# calculate_extrinsics = true # true or false (lowercase)
-
-# [calibration.calculate.extrinsics.board]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
-# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
-
-# [calibration.calculate.extrinsics.scene]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-## list of 3D coordinates to be manually labelled on images. Can also be a 2-dimensional plane.
-## in m -> unlike for intrinsics, NOT in mm!
-# object_coords_3d = [[-2.0, 0.3, 0.0],
-#                     [-2.0, 0.0, 0.0],
-#                     [-2.0, 0.0, 0.05],
-#                     [-2.0, -0.3, 0.0],
-#                     [0.0, 0.3, 0.0],
-#                     [0.0, 0.0, 0.0],
-#                     [0.0, 0.0, 0.05],
-#                     [0.0, -0.3, 0.0]]
-
-# [calibration.calculate.extrinsics.keypoints]
-## Coming soon!
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE.
 # #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 # #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # To be looked up later from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if several keypoints are used
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture, for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera's lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2-dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+#                     [-2.0, 0.0, 0.0],
+#                     [-2.0, 0.0, 0.05],
+#                     [-2.0, -0.3, 0.0],
+#                     [0.0, 0.3, 0.0],
+#                     [0.0, 0.0, 0.0],
+#                     [0.0, 0.0, 0.05],
+#                     [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!
 # [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored
 # [triangulation]
 # reorder_trc = false # only checked if multi_person analysis
 # reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation = 0.05
+# likelihood_threshold_triangulation = 0.3
 # min_cameras_for_triangulation = 2
 # interpolation = 'cubic' # linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
 # handle_LR_swap = false # Better if few cameras (e.g. fewer than 4) with a risk of limb swapping (e.g. camera facing the sagittal plane), otherwise slightly less accurate and slower
@@ -141,38 +139,39 @@
 # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
 # display_figures = false # true or false (lowercase)

 # [filtering.butterworth]
 # order = 4
 # cut_off_frequency = 6 # Hz
 # [filtering.kalman]
-## How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
 # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
 # smooth = true # should be true, unless you need real-time filtering
 # [filtering.butterworth_on_speed]
 # order = 4
 # cut_off_frequency = 10 # Hz
 # [filtering.gaussian]
 # sigma_kernel = 2 # px
 # [filtering.LOESS]
 # nb_values_used = 30 # = fraction of data used * nb frames
 # [filtering.median]
 # kernel_size = 9
 # [markerAugmentation]
-# ## Only works on BODY_25 and BODY_25B models
-# participant_height = [1.21, 1.72] # m # float if single person, list of floats if multi-person (same order as the static trials)
-# participant_mass = [25.0, 70.0] # kg
+## Only works on BODY_25 and BODY_25B models
+# participant_height = 1.72 # m # float if single person, list of floats if multi-person (same order as the static trials)
+# participant_mass = 70.0 # kg

 # [opensim]
 # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
 # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
 # # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
 # # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
 # opensim_bin_path = 'C:\OpenSim 4.4\bin'
 ## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
 ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
 ##
@@ -188,65 +187,65 @@
# name = "CHip"
|
# name = "CHip"
|
||||||
# id = "None"
|
# id = "None"
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 12
|
|
||||||
# name = "RHip"
|
# name = "RHip"
|
||||||
|
# id = 12
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 14
|
|
||||||
# name = "RKnee"
|
# name = "RKnee"
|
||||||
|
# id = 14
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 16
|
|
||||||
# name = "RAnkle"
|
# name = "RAnkle"
|
||||||
|
# id = 16
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 22
|
|
||||||
# name = "RBigToe"
|
# name = "RBigToe"
|
||||||
|
# id = 22
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 23
|
|
||||||
# name = "RSmallToe"
|
# name = "RSmallToe"
|
||||||
|
# id = 23
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 24
|
|
||||||
# name = "RHeel"
|
# name = "RHeel"
|
||||||
|
# id = 24
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 11
|
|
||||||
# name = "LHip"
|
# name = "LHip"
|
||||||
|
# id = 11
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 13
|
|
||||||
# name = "LKnee"
|
# name = "LKnee"
|
||||||
|
# id = 13
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 15
|
|
||||||
# name = "LAnkle"
|
# name = "LAnkle"
|
||||||
|
# id = 15
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 19
|
|
||||||
# name = "LBigToe"
|
# name = "LBigToe"
|
||||||
|
# id = 19
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 20
|
|
||||||
# name = "LSmallToe"
|
# name = "LSmallToe"
|
||||||
|
# id = 20
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 21
|
|
||||||
# name = "LHeel"
|
# name = "LHeel"
|
||||||
|
# id = 21
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 17
|
|
||||||
# name = "Neck"
|
# name = "Neck"
|
||||||
|
# id = 17
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 18
|
|
||||||
# name = "Head"
|
# name = "Head"
|
||||||
|
# id = 18
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 0
|
|
||||||
# name = "Nose"
|
# name = "Nose"
|
||||||
|
# id = 0
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 6
|
|
||||||
# name = "RShoulder"
|
# name = "RShoulder"
|
||||||
|
# id = 6
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 8
|
|
||||||
# name = "RElbow"
|
# name = "RElbow"
|
||||||
|
# id = 8
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 10
|
|
||||||
# name = "RWrist"
|
# name = "RWrist"
|
||||||
|
# id = 10
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 5
|
|
||||||
# name = "LShoulder"
|
# name = "LShoulder"
|
||||||
|
# id = 5
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 7
|
|
||||||
# name = "LElbow"
|
# name = "LElbow"
|
||||||
|
# id = 7
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 9
|
|
||||||
# name = "LWrist"
|
# name = "LWrist"
|
||||||
|
# id = 9
|
||||||
|
@@ -9,91 +9,29 @@
 # If a parameter is not found here, Pose2Sim will look for its value in the
 # Config.toml file of the level above. This way, you can set global
 # instructions for the Session and alter them for specific Participants or Trials.
 #
 # If you wish to overwrite a parameter for a specific trial or participant,
 # edit its Config.toml file by uncommenting its key (e.g., [project])
 # and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
 # [filtering.butterworth] and set cut_off_frequency = 10, etc.
 # [project]
-# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-# nb_persons_to_detect = 2 # checked only if multi_person is selected
-# frame_rate = 60 # FPS
+# multi_person = true # If false, only the main person in scene is analyzed.
+# frame_rate = 60 # fps
 # frame_range = [] # For example [10,300], or [] for all frames
 ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]

 # exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
-# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
+## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']

 ## Take heart, calibration is not that complicated once you get the hang of it!
-# [calibration]
-# calibration_type = 'convert' # 'convert' or 'calculate'
-
-# [calibration.convert]
-# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
-# [calibration.convert.qualisys]
-# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
-# [calibration.convert.optitrack] # See readme for instructions
-# [calibration.convert.vicon] # No parameter needed
-# [calibration.convert.opencap] # No parameter needed
-# [calibration.convert.easymocap] # No parameter needed
-# [calibration.convert.biocv] # No parameter needed
-# [calibration.convert.anipose] # No parameter needed
-# [calibration.convert.freemocap] # No parameter needed
-
-
-# [calibration.calculate]
-## Camera properties, theoretically need to be calculated only once in a camera's lifetime
-# [calibration.calculate.intrinsics]
-# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
-# show_detection_intrinsics = true # true or false (lowercase)
-# intrinsics_extension = 'jpg' # any video or image extension
-# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
-# intrinsics_corners_nb = [4,7]
-# intrinsics_square_size = 60 # mm
-
-## Camera placements, need to be done before every session
-# [calibration.calculate.extrinsics]
-# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
-## 'board' should be large enough to be detected when laid on the floor. Not recommended.
-## 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
-## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
-
-# moving_cameras = false # Not implemented yet
-# calculate_extrinsics = true # true or false (lowercase)
-
-# [calibration.calculate.extrinsics.board]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
-# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
-
-# [calibration.calculate.extrinsics.scene]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-## list of 3D coordinates to be manually labelled on images. Can also be a 2-dimensional plane.
-## in m -> unlike for intrinsics, NOT in mm!
-# object_coords_3d = [[-2.0, 0.3, 0.0],
-#                     [-2.0, 0.0, 0.0],
-#                     [-2.0, 0.0, 0.05],
-#                     [-2.0, -0.3, 0.0],
-#                     [0.0, 0.3, 0.0],
-#                     [0.0, 0.0, 0.0],
-#                     [0.0, 0.0, 0.05],
-#                     [0.0, -0.3, 0.0]]
-
-# [calibration.calculate.extrinsics.keypoints]
-## Coming soon!
 # [pose]
 # pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE.
 # #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
 # #With deeplabcut: CUSTOM. See example at the end of the file.
@@ -103,33 +41,93 @@
 # [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
 # reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # To be looked up later from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if several keypoints are used
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture, for example)
+
+
+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera's lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates in the scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2-dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+#                     [-2.0, 0.0, 0.0],
+#                     [-2.0, 0.0, 0.05],
+#                     [-2.0, -0.3, 0.0],
+#                     [0.0, 0.3, 0.0],
+#                     [0.0, 0.0, 0.0],
+#                     [0.0, 0.0, 0.05],
+#                     [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!
 # [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored
 # [triangulation]
 # reorder_trc = false # only checked if multi_person analysis
 # reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation = 0.05
+# likelihood_threshold_triangulation = 0.3
 # min_cameras_for_triangulation = 2
 # interpolation = 'cubic' # linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
 # interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
 # show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
 # handle_LR_swap = false # Better if few cameras (e.g. fewer than 4) with a risk of limb swapping (e.g. camera facing the sagittal plane), otherwise slightly less accurate and slower
@@ -141,38 +139,39 @@
 # type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
 # display_figures = false # true or false (lowercase)

 # [filtering.butterworth]
 # order = 4
 # cut_off_frequency = 6 # Hz
 # [filtering.kalman]
-## How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements) than previous data (process assuming constant acceleration)?
 # trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
 # smooth = true # should be true, unless you need real-time filtering
 # [filtering.butterworth_on_speed]
 # order = 4
 # cut_off_frequency = 10 # Hz
 # [filtering.gaussian]
 # sigma_kernel = 2 # px
 # [filtering.LOESS]
 # nb_values_used = 30 # = fraction of data used * nb frames
 # [filtering.median]
 # kernel_size = 9
 [markerAugmentation]
-# ## Only works on BODY_25 and BODY_25B models
+## Only works on BODY_25 and BODY_25B models
 participant_height = 1.21 # m # float if single person, list of floats if multi-person (same order as the static trials)
 participant_mass = 25.0 # kg

 # [opensim]
 # static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
 # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
 # # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
 # # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
 # opensim_bin_path = 'C:\OpenSim 4.4\bin'
 ## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
 ## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
 ##
@@ -188,65 +187,65 @@ participant_mass = 25.0 # kg
# name = "CHip"
|
# name = "CHip"
|
||||||
# id = "None"
|
# id = "None"
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 12
|
|
||||||
# name = "RHip"
|
# name = "RHip"
|
||||||
|
# id = 12
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 14
|
|
||||||
# name = "RKnee"
|
# name = "RKnee"
|
||||||
|
# id = 14
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 16
|
|
||||||
# name = "RAnkle"
|
# name = "RAnkle"
|
||||||
|
# id = 16
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 22
|
|
||||||
# name = "RBigToe"
|
# name = "RBigToe"
|
||||||
|
# id = 22
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 23
|
|
||||||
# name = "RSmallToe"
|
# name = "RSmallToe"
|
||||||
|
# id = 23
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 24
|
|
||||||
# name = "RHeel"
|
# name = "RHeel"
|
||||||
|
# id = 24
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 11
|
|
||||||
# name = "LHip"
|
# name = "LHip"
|
||||||
|
# id = 11
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 13
|
|
||||||
# name = "LKnee"
|
# name = "LKnee"
|
||||||
|
# id = 13
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 15
|
|
||||||
# name = "LAnkle"
|
# name = "LAnkle"
|
||||||
|
# id = 15
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 19
|
|
||||||
# name = "LBigToe"
|
# name = "LBigToe"
|
||||||
|
# id = 19
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 20
|
|
||||||
# name = "LSmallToe"
|
# name = "LSmallToe"
|
||||||
|
# id = 20
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 21
|
|
||||||
# name = "LHeel"
|
# name = "LHeel"
|
||||||
|
# id = 21
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 17
|
|
||||||
# name = "Neck"
|
# name = "Neck"
|
||||||
|
# id = 17
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 18
|
|
||||||
# name = "Head"
|
# name = "Head"
|
||||||
|
# id = 18
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 0
|
|
||||||
# name = "Nose"
|
# name = "Nose"
|
||||||
|
# id = 0
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 6
|
|
||||||
# name = "RShoulder"
|
# name = "RShoulder"
|
||||||
|
# id = 6
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 8
|
|
||||||
# name = "RElbow"
|
# name = "RElbow"
|
||||||
|
# id = 8
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 10
|
|
||||||
# name = "RWrist"
|
# name = "RWrist"
|
||||||
|
# id = 10
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 5
|
|
||||||
# name = "LShoulder"
|
# name = "LShoulder"
|
||||||
|
# id = 5
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 7
|
|
||||||
# name = "LElbow"
|
# name = "LElbow"
|
||||||
|
# id = 7
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 9
|
|
||||||
# name = "LWrist"
|
# name = "LWrist"
|
||||||
|
# id = 9
|
||||||
|
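The node-id rule above is easy to check programmatically. A minimal sketch (assuming Python 3.11+ tomllib and an uncommented [pose.CUSTOM] section; this is not Pose2Sim's own loader) that walks such a nested TOML tree and lists each node's (name, id) pair, so the ids can be compared against the 2D pose file columns:

import tomllib

def walk_skeleton(node, pairs=None):
    '''Depth-first traversal of a CUSTOM skeleton dict with nested "children" lists.'''
    pairs = [] if pairs is None else pairs
    pairs.append((node['name'], node['id']))
    for child in node.get('children', []):
        walk_skeleton(child, pairs)
    return pairs

with open('Config.toml', 'rb') as f:    # hypothetical path to a config with [pose.CUSTOM] uncommented
    config = tomllib.load(f)
print(walk_skeleton(config['pose']['CUSTOM']))  # e.g. [('CHip', 'None'), ('RHip', 12), ('RKnee', 14), ...]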
@ -9,91 +9,29 @@
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global
# instructions for the Session and alter them for specific Participants or Trials.
#
# If you wish to overwrite a parameter for a specific trial or participant,
# edit its Config.toml file by uncommenting its key (e.g., [project])
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
# [filtering.butterworth] and set cut_off_frequency = 10, etc.



# [project]
-# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-# nb_persons_to_detect = 2 # checked only if multi_person is selected
-# frame_rate = 60 # FPS
+# multi_person = true # If false, only the main person in scene is analyzed.
+# frame_rate = 60 # fps
# frame_range = [] # For example [10,300], or [] for all frames
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]

# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
-# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
+## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']


## Take heart, calibration is not that complicated once you get the hang of it!
-# [calibration]
-# calibration_type = 'convert' # 'convert' or 'calculate'
-
-# [calibration.convert]
-# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
-# [calibration.convert.qualisys]
-# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
-# [calibration.convert.optitrack] # See readme for instructions
-# [calibration.convert.vicon] # No parameter needed
-# [calibration.convert.opencap] # No parameter needed
-# [calibration.convert.easymocap] # No parameter needed
-# [calibration.convert.biocv] # No parameter needed
-# [calibration.convert.anipose] # No parameter needed
-# [calibration.convert.freemocap] # No parameter needed
-
-
-# [calibration.calculate]
-## Camera properties, theoretically need to be calculated only once in a camera lifetime
-# [calibration.calculate.intrinsics]
-# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
-# show_detection_intrinsics = true # true or false (lowercase)
-# intrinsics_extension = 'jpg' # any video or image extension
-# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
-# intrinsics_corners_nb = [4,7]
-# intrinsics_square_size = 60 # mm
-
-## Camera placements, need to be done before every session
-# [calibration.calculate.extrinsics]
-# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
-## 'board' should be large enough to be detected when laid on the floor. Not recommended.
-## 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
-## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
-
-# moving_cameras = false # Not implemented yet
-# calculate_extrinsics = true # true or false (lowercase)
-
-# [calibration.calculate.extrinsics.board]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
-# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
-
-# [calibration.calculate.extrinsics.scene]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
-## in m -> unlike for intrinsics, NOT in mm!
-# object_coords_3d = [[-2.0, 0.3, 0.0],
-# [-2.0 , 0.0, 0.0],
-# [-2.0, 0.0, 0.05],
-# [-2.0, -0.3 , 0.0],
-# [0.0, 0.3, 0.0],
-# [0.0, 0.0, 0.0],
-# [0.0, 0.0, 0.05],
-# [0.0, -0.3, 0.0]]
-
-# [calibration.calculate.extrinsics.keypoints]
-## Coming soon!


# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
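As a sanity check of the time-range arithmetic in the [project] comments above (plain Python, not part of the config itself):

frame_rate = 60                                   # fps
time_range = [0.1, 2.0]                           # seconds
frame_range = [int(t * frame_rate) for t in time_range]
print(frame_range)                                # [6, 120]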
@ -103,33 +41,93 @@


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # For later: look the number up from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if there are several keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)


+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+# [-2.0 , 0.0, 0.0],
+# [-2.0, 0.0, 0.05],
+# [-2.0, -0.3 , 0.0],
+# [0.0, 0.3, 0.0],
+# [0.0, 0.0, 0.0],
+# [0.0, 0.0, 0.05],
+# [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!


# [personAssociation]
-# # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored


# [triangulation]
# reorder_trc = false # only checked if multi_person analysis
# reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation= 0.05
+# likelihood_threshold_triangulation= 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
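The interpolation settings above only make sense together: fill small gaps, leave big ones. A minimal sketch of that behavior (this mirrors the intent of interp_if_gap_smaller_than; it is an assumption, not Pose2Sim's actual implementation):

import numpy as np
import pandas as pd

def interpolate_small_gaps(col, kind='linear', max_gap=10):
    '''Interpolate NaN runs in a 1D series; leave runs of max_gap frames or more untouched.'''
    col = pd.Series(col, dtype=float)
    filled = col.interpolate(method=kind)
    is_nan = col.isna()
    run_id = (is_nan != is_nan.shift()).cumsum()        # label consecutive runs of equal values
    run_len = is_nan.groupby(run_id).transform('sum')   # NaN-run length at each index (0 outside gaps)
    filled[is_nan & (run_len >= max_gap)] = np.nan      # restore the big gaps
    return filled

print(interpolate_small_gaps([0., 1., np.nan, 3., np.nan, np.nan, np.nan, 7.], max_gap=3).tolist())
# [0.0, 1.0, 2.0, 3.0, nan, nan, nan, 7.0]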
@ -141,38 +139,39 @@
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = false # true or false (lowercase)

# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
-## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9


[markerAugmentation]
-# ## Only works on BODY_25 and BODY_25B models
+## Only works on BODY_25 and BODY_25B models
participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials)
participant_mass = 70.0 # kg


# [opensim]
# static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
# # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial'];
# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'



## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
##
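One way to read the trust_ratio comment above: if measurements are trusted trust_ratio times more than the constant-acceleration process, the process noise covariance is scaled up by roughly that factor relative to the measurement noise. A minimal sketch under that assumption (illustrative only, not Pose2Sim's actual filter code):

import numpy as np

def kalman_matrices(dt=1/60, trust_ratio=100, measurement_noise=1.0):
    '''Constant-acceleration model for one coordinate; only position is measured.'''
    F = np.array([[1., dt, dt**2/2],
                  [0., 1., dt],
                  [0., 0., 1.]])                   # state transition for (pos, vel, acc)
    H = np.array([[1., 0., 0.]])                   # measurement picks out position
    R = np.array([[measurement_noise]])            # measurement noise covariance
    # trust_ratio ~= process_noise / measurement_noise, as in the comment above
    Q = np.eye(3) * measurement_noise * trust_ratio
    return F, H, Q, R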
@ -188,65 +187,65 @@ participant_mass = 70.0 # kg
# name = "CHip"
# id = "None"
# [[pose.CUSTOM.children]]
-# id = 12
# name = "RHip"
+# id = 12
# [[pose.CUSTOM.children.children]]
-# id = 14
# name = "RKnee"
+# id = 14
# [[pose.CUSTOM.children.children.children]]
-# id = 16
# name = "RAnkle"
+# id = 16
# [[pose.CUSTOM.children.children.children.children]]
-# id = 22
# name = "RBigToe"
+# id = 22
# [[pose.CUSTOM.children.children.children.children.children]]
-# id = 23
# name = "RSmallToe"
+# id = 23
# [[pose.CUSTOM.children.children.children.children]]
-# id = 24
# name = "RHeel"
+# id = 24
# [[pose.CUSTOM.children]]
-# id = 11
# name = "LHip"
+# id = 11
# [[pose.CUSTOM.children.children]]
-# id = 13
# name = "LKnee"
+# id = 13
# [[pose.CUSTOM.children.children.children]]
-# id = 15
# name = "LAnkle"
+# id = 15
# [[pose.CUSTOM.children.children.children.children]]
-# id = 19
# name = "LBigToe"
+# id = 19
# [[pose.CUSTOM.children.children.children.children.children]]
-# id = 20
# name = "LSmallToe"
+# id = 20
# [[pose.CUSTOM.children.children.children.children]]
-# id = 21
# name = "LHeel"
+# id = 21
# [[pose.CUSTOM.children]]
-# id = 17
# name = "Neck"
+# id = 17
# [[pose.CUSTOM.children.children]]
-# id = 18
# name = "Head"
+# id = 18
# [[pose.CUSTOM.children.children.children]]
-# id = 0
# name = "Nose"
+# id = 0
# [[pose.CUSTOM.children.children]]
-# id = 6
# name = "RShoulder"
+# id = 6
# [[pose.CUSTOM.children.children.children]]
-# id = 8
# name = "RElbow"
+# id = 8
# [[pose.CUSTOM.children.children.children.children]]
-# id = 10
# name = "RWrist"
+# id = 10
# [[pose.CUSTOM.children.children]]
-# id = 5
# name = "LShoulder"
+# id = 5
# [[pose.CUSTOM.children.children.children]]
-# id = 7
# name = "LElbow"
+# id = 7
# [[pose.CUSTOM.children.children.children.children]]
-# id = 9
# name = "LWrist"
+# id = 9
@ -9,91 +9,29 @@
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global
# instructions for the Session and alter them for specific Participants or Trials.
#
# If you wish to overwrite a parameter for a specific trial or participant,
# edit its Config.toml file by uncommenting its key (e.g., [project])
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
# [filtering.butterworth] and set cut_off_frequency = 10, etc.



[project]
-multi_person = true # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
-nb_persons_to_detect = 2 # checked only if multi_person is selected
-# frame_rate = 60 # FPS
+multi_person = true # If false, only the main person in scene is analyzed.
+# frame_rate = 60 # fps
# frame_range = [] # For example [10,300], or [] for all frames
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]

# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
-# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
+## e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']


## Take heart, calibration is not that complicated once you get the hang of it!
-# [calibration]
-# calibration_type = 'convert' # 'convert' or 'calculate'
-
-# [calibration.convert]
-# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
-# [calibration.convert.qualisys]
-# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
-# [calibration.convert.optitrack] # See readme for instructions
-# [calibration.convert.vicon] # No parameter needed
-# [calibration.convert.opencap] # No parameter needed
-# [calibration.convert.easymocap] # No parameter needed
-# [calibration.convert.biocv] # No parameter needed
-# [calibration.convert.anipose] # No parameter needed
-# [calibration.convert.freemocap] # No parameter needed
-
-
-# [calibration.calculate]
-## Camera properties, theoretically need to be calculated only once in a camera lifetime
-# [calibration.calculate.intrinsics]
-# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
-# show_detection_intrinsics = true # true or false (lowercase)
-# intrinsics_extension = 'jpg' # any video or image extension
-# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
-# intrinsics_corners_nb = [4,7]
-# intrinsics_square_size = 60 # mm
-
-## Camera placements, need to be done before every session
-# [calibration.calculate.extrinsics]
-# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
-## 'board' should be large enough to be detected when laid on the floor. Not recommended.
-## 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
-## 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
-
-# moving_cameras = false # Not implemented yet
-# calculate_extrinsics = true # true or false (lowercase)
-
-# [calibration.calculate.extrinsics.board]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
-# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
-
-# [calibration.calculate.extrinsics.scene]
-# show_reprojection_error = true # true or false (lowercase)
-# extrinsics_extension = 'png' # any video or image extension
-## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
-## in m -> unlike for intrinsics, NOT in mm!
-# object_coords_3d = [[-2.0, 0.3, 0.0],
-# [-2.0 , 0.0, 0.0],
-# [-2.0, 0.0, 0.05],
-# [-2.0, -0.3 , 0.0],
-# [0.0, 0.3, 0.0],
-# [0.0, 0.0, 0.0],
-# [0.0, 0.0, 0.05],
-# [0.0, -0.3, 0.0]]
-
-# [calibration.calculate.extrinsics.keypoints]
-## Coming soon!


# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
-# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
+# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
@ -103,33 +41,93 @@ nb_persons_to_detect = 2 # checked only if multi_person is selected


# [synchronization]
-## COMING SOON!
+# display_corr = true # true or false (lowercase)
# reset_sync = true # Recalculate synchronization even if already done
-# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
-# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
-## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
-# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
-# vmax = 20 # px/s
-# cam1_nb = 4
-# cam2_nb = 3
-# id_kpt = [9,10] # For later: look the number up from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-# weights_kpt = [1,1] # Only taken into account if there are several keypoints
+# # id_kpt = [10] # keypoint ID, to be found in skeleton.py. Example 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
+# # weights_kpt = [1] # Only taken into account if you have several keypoints (Currently only one keypoint is supported).
+# sync_frame_range = [] # For example [0,150], or [] for all frames (default)
+# # limit synchronization search (to the beginning or to the end of the capture for example)


+# [calibration]
+# calibration_type = 'convert' # 'convert' or 'calculate'
+
+# [calibration.convert]
+# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
+# [calibration.convert.qualisys]
+# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
+# [calibration.convert.optitrack] # See readme for instructions
+# [calibration.convert.vicon] # No parameter needed
+# [calibration.convert.opencap] # No parameter needed
+# [calibration.convert.easymocap] # No parameter needed
+# [calibration.convert.biocv] # No parameter needed
+# [calibration.convert.anipose] # No parameter needed
+# [calibration.convert.freemocap] # No parameter needed
+
+
+# [calibration.calculate]
+# # Camera properties, theoretically need to be calculated only once in a camera lifetime
+# [calibration.calculate.intrinsics]
+# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
+# show_detection_intrinsics = true # true or false (lowercase)
+# intrinsics_extension = 'jpg' # any video or image extension
+# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1)
+# intrinsics_corners_nb = [4,7]
+# intrinsics_square_size = 60 # mm
+
+# # Camera placements, need to be done before every session
+# [calibration.calculate.extrinsics]
+# calculate_extrinsics = true # true or false (lowercase)
+# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
+# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
+# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
+# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
+# moving_cameras = false # Not implemented yet
+
+# [calibration.calculate.extrinsics.board]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
+# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
+
+# [calibration.calculate.extrinsics.scene]
+# show_reprojection_error = true # true or false (lowercase)
+# extrinsics_extension = 'png' # any video or image extension
+# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
+# # in m -> unlike for intrinsics, NOT in mm!
+# object_coords_3d = [[-2.0, 0.3, 0.0],
+# [-2.0 , 0.0, 0.0],
+# [-2.0, 0.0, 0.05],
+# [-2.0, -0.3 , 0.0],
+# [0.0, 0.3, 0.0],
+# [0.0, 0.0, 0.0],
+# [0.0, 0.0, 0.05],
+# [0.0, -0.3, 0.0]]
+
+# [calibration.calculate.extrinsics.keypoints]
+# # Coming soon!


# [personAssociation]
-# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
-## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
-# reproj_error_threshold_association = 20 # px
-# likelihood_threshold_association = 0.05
+# likelihood_threshold_association = 0.3
+
+# [personAssociation.single_person]
+# reproj_error_threshold_association = 20 # px
+# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
+# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
+
+# [personAssociation.multi_person]
+# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
+# min_affinity = 0.2 # affinity below which a correspondence is ignored


[triangulation]
-reorder_trc = true # only checked if multi_person analysis
+reorder_trc = false # only checked if multi_person analysis
# reproj_error_threshold_triangulation = 15 # px
-# likelihood_threshold_triangulation= 0.05
+# likelihood_threshold_triangulation= 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
-## 'none' if you don't want to interpolate missing points
+# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
@ -141,22 +139,22 @@ reorder_trc = true # only checked if multi_person analysis
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = false # true or false (lowercase)

# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
-## How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
+# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9


[markerAugmentation]
@ -167,12 +165,13 @@ participant_mass = [25.0, 70.0] # kg

# [opensim]
# static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
# # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial'];
# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'



## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
##
|
|||||||
# name = "CHip"
|
# name = "CHip"
|
||||||
# id = "None"
|
# id = "None"
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 12
|
|
||||||
# name = "RHip"
|
# name = "RHip"
|
||||||
|
# id = 12
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 14
|
|
||||||
# name = "RKnee"
|
# name = "RKnee"
|
||||||
|
# id = 14
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 16
|
|
||||||
# name = "RAnkle"
|
# name = "RAnkle"
|
||||||
|
# id = 16
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 22
|
|
||||||
# name = "RBigToe"
|
# name = "RBigToe"
|
||||||
|
# id = 22
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 23
|
|
||||||
# name = "RSmallToe"
|
# name = "RSmallToe"
|
||||||
|
# id = 23
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 24
|
|
||||||
# name = "RHeel"
|
# name = "RHeel"
|
||||||
|
# id = 24
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 11
|
|
||||||
# name = "LHip"
|
# name = "LHip"
|
||||||
|
# id = 11
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 13
|
|
||||||
# name = "LKnee"
|
# name = "LKnee"
|
||||||
|
# id = 13
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 15
|
|
||||||
# name = "LAnkle"
|
# name = "LAnkle"
|
||||||
|
# id = 15
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 19
|
|
||||||
# name = "LBigToe"
|
# name = "LBigToe"
|
||||||
|
# id = 19
|
||||||
# [[pose.CUSTOM.children.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children.children]]
|
||||||
# id = 20
|
|
||||||
# name = "LSmallToe"
|
# name = "LSmallToe"
|
||||||
|
# id = 20
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 21
|
|
||||||
# name = "LHeel"
|
# name = "LHeel"
|
||||||
|
# id = 21
|
||||||
# [[pose.CUSTOM.children]]
|
# [[pose.CUSTOM.children]]
|
||||||
# id = 17
|
|
||||||
# name = "Neck"
|
# name = "Neck"
|
||||||
|
# id = 17
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 18
|
|
||||||
# name = "Head"
|
# name = "Head"
|
||||||
|
# id = 18
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 0
|
|
||||||
# name = "Nose"
|
# name = "Nose"
|
||||||
|
# id = 0
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 6
|
|
||||||
# name = "RShoulder"
|
# name = "RShoulder"
|
||||||
|
# id = 6
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 8
|
|
||||||
# name = "RElbow"
|
# name = "RElbow"
|
||||||
|
# id = 8
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 10
|
|
||||||
# name = "RWrist"
|
# name = "RWrist"
|
||||||
|
# id = 10
|
||||||
# [[pose.CUSTOM.children.children]]
|
# [[pose.CUSTOM.children.children]]
|
||||||
# id = 5
|
|
||||||
# name = "LShoulder"
|
# name = "LShoulder"
|
||||||
|
# id = 5
|
||||||
# [[pose.CUSTOM.children.children.children]]
|
# [[pose.CUSTOM.children.children.children]]
|
||||||
# id = 7
|
|
||||||
# name = "LElbow"
|
# name = "LElbow"
|
||||||
|
# id = 7
|
||||||
# [[pose.CUSTOM.children.children.children.children]]
|
# [[pose.CUSTOM.children.children.children.children]]
|
||||||
# id = 9
|
|
||||||
# name = "LWrist"
|
# name = "LWrist"
|
||||||
|
# id = 9
|
||||||
|
@ -15,7 +15,7 @@

Usage:
python -m json_display_without_img -j json_folder -W 1920 -H 1080
-python -m json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 - 30
+python -m json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 -f 30
import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'<json_folder>', image_width=1920, image_height = 1080)
'''
@ -60,7 +60,7 @@ def json_display_without_img_func(**args):

Usage:
json_display_without_img -j json_folder -W 1920 -H 1080
-json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080
+json_display_without_img -j json_folder -o output_img_folder -d True -s True -W 1920 -H 1080 -f 30
import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'<json_folder>', image_width=1920, image_height = 1080)
'''

@ -114,10 +114,10 @@ def interpolate_nans(col, kind):

def plot_time_lagged_cross_corr(camx, camy, ax):
    pearson_r = [camx.corr(camy.shift(lag)) for lag in range(-2*fps, 2*fps)] # lag from -2 s to +2 s
-    offset = int(np.floor(len(pearson_r)*2)-np.argmax(pearson_r))
+    offset = int(np.floor(len(pearson_r)/2)-np.argmax(pearson_r))
    max_corr = np.max(pearson_r)
    ax.plot(list(range(-2*fps, 2*fps)), pearson_r)
-    ax.axvline(np.ceil(len(pearson_r)*2)-2*fps,color='k',linestyle='--')
+    ax.axvline(np.ceil(len(pearson_r)/2)-2*fps,color='k',linestyle='--')
    ax.axvline(np.argmax(pearson_r)-2*fps,color='r',linestyle='--',label='Peak synchrony')
    plt.annotate(f'Max correlation={np.round(max_corr,2)}', xy=(0.05, 0.9), xycoords='axes fraction')
    ax.set(title=f'Offset = {offset} frames', xlabel='Offset (frames)',ylabel='Pearson r')
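The fix above matters because with lags running from -2*fps to 2*fps, the zero-lag entry sits at index len(pearson_r)/2, not len(pearson_r)*2. A standalone sanity check of the corrected formula (synthetic data, not the project's):

import numpy as np

fps = 30
lags = np.arange(-2*fps, 2*fps)         # 120 lags; lags[60] == 0
pearson_r = -np.abs(lags - 15.0)        # synthetic correlation curve peaking at lag +15
offset = int(np.floor(len(pearson_r)/2) - np.argmax(pearson_r))
print(offset)                           # -15; the old *2 version would return 165, which exceeds the lag range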
@ -156,7 +156,6 @@ with open(os.path.join(pose_dir, 'coords'), 'wb') as fp:
#############################


-
# Vertical speed
df_speed = []
for i in range(len(json_dirs)):
@ -199,6 +198,41 @@ else:
    raise ValueError('wrong values for id_kpt or weights_kpt')


+# camx = df_speed[1][16]
+# camy = df_speed[2][16]
+# camx = df_speed[1][10]
+# camy = df_speed[2][10]
+# camx = df_speed[1].sum(axis=1)
+# camy = df_speed[2].sum(axis=1)
+# camx.plot()
+# camy.plot()
+# plt.show()
+
+for i in range(25):
+    df_coords[1].iloc[:,i*2+1].plot(label='1')
+    df_coords[2].iloc[:,i*2+1].plot(label='2')
+    plt.title(i)
+    plt.legend()
+    plt.show()
+
+for i in range(25):
+    df_speed[1].iloc[:,i].plot(label='1')
+    df_speed[2].iloc[:,i].plot(label='2')
+    plt.title(i)
+    plt.legend()
+    plt.show()
+
+for i in range(4):
+    abs(df_speed[i]).sum(axis=1).plot(label=i)
+plt.legend()
+plt.show()
+
+df_speed[0].plot() # --> remove janky points
+plt.show()


f, ax = plt.subplots(2,1)
# speed
camx.plot(ax=ax[0], label = f'cam {cam1_nb}')
@ -12,6 +12,7 @@ Functions shared between modules, and other utilities

## INIT
import toml
+import json
import numpy as np
import re
import cv2
@ -37,6 +38,58 @@ __status__ = "Development"
|
|||||||
|
|
||||||
|
|
||||||
## FUNCTIONS
|
## FUNCTIONS
|
||||||
|
def common_items_in_list(list1, list2):
|
||||||
|
'''
|
||||||
|
Do two lists have any items in common at the same index?
|
||||||
|
Returns True or False
|
||||||
|
'''
|
||||||
|
|
||||||
|
for i, j in enumerate(list1):
|
||||||
|
if j == list2[i]:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def bounding_boxes(js_file, margin_percent=0.1, around='extremities'):
|
||||||
|
'''
|
||||||
|
Compute the bounding boxes of the people in the json file.
|
||||||
|
Either around the extremities (with a margin)
|
||||||
|
or around the center of the person (with a margin).
|
||||||
|
|
||||||
|
INPUTS:
|
||||||
|
- js_file: json file
|
||||||
|
- margin_percent: margin around the person
|
||||||
|
- around: 'extremities' or 'center'
|
||||||
|
|
||||||
|
OUTPUT:
|
||||||
|
- bounding_boxes: list of bounding boxes [x_min, y_min, x_max, y_max]
|
||||||
|
'''
|
||||||
|
|
||||||
|
bounding_boxes = []
|
||||||
|
with open(js_file, 'r') as json_f:
|
||||||
|
js = json.load(json_f)
|
||||||
|
for people in range(len(js['people'])):
|
||||||
|
if len(js['people'][people]['pose_keypoints_2d']) < 3: continue
|
||||||
|
else:
|
||||||
|
x = js['people'][people]['pose_keypoints_2d'][0::3]
|
||||||
|
y = js['people'][people]['pose_keypoints_2d'][1::3]
|
||||||
|
x_min, x_max = min(x), max(x)
|
||||||
|
y_min, y_max = min(y), max(y)
|
||||||
|
|
||||||
|
if around == 'extremities':
|
||||||
|
dx = (x_max - x_min) * margin_percent
|
||||||
|
dy = (y_max - y_min) * margin_percent
|
||||||
|
bounding_boxes.append([x_min-dx, y_min-dy, x_max+dx, y_max+dy])
|
||||||
|
|
||||||
|
elif around == 'center':
|
||||||
|
x_mean, y_mean = np.mean(x), np.mean(y)
|
||||||
|
x_size = (x_max - x_min) * (1 + margin_percent)
|
||||||
|
y_size = (y_max - y_min) * (1 + margin_percent)
|
||||||
|
bounding_boxes.append([x_mean - x_size/2, y_mean - y_size/2, x_mean + x_size/2, y_mean + y_size/2])
|
||||||
|
|
||||||
|
return bounding_boxes
|
||||||
|
|
||||||
|
|
||||||
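Hypothetical usage of the bounding_boxes() helper added above, on a minimal OpenPose-style file (the file name, keypoint values, and Pose2Sim.common import path are illustrative assumptions):

import json

person = {'pose_keypoints_2d': [100., 200., 0.9, 150., 260., 0.8, 120., 300., 0.7]}  # x, y, confidence triplets
with open('cam01_0000_keypoints.json', 'w') as f:   # made-up file name
    json.dump({'people': [person]}, f)

from Pose2Sim.common import bounding_boxes          # assumed import path
print(bounding_boxes('cam01_0000_keypoints.json', margin_percent=0.1, around='extremities'))
# [[95.0, 190.0, 155.0, 310.0]]: min/max of x and y, padded by 10% of the box size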
def retrieve_calib_params(calib_file):
    '''
    Compute projection matrices from toml calibration file.
@ -48,6 +101,7 @@ def retrieve_calib_params(calib_file):
    - S: (h,w) vectors as list of 2x1 arrays
    - K: intrinsic matrices as list of 3x3 arrays
    - dist: distortion vectors as list of 4x1 arrays
+    - inv_K: inverse intrinsic matrices as list of 3x3 arrays
    - optim_K: intrinsic matrices for undistorting points as list of 3x3 arrays
    - R: rotation Rodrigues vectors as list of 3x1 arrays
    - T: translation vectors as list of 3x1 arrays
@ -55,16 +109,18 @@ def retrieve_calib_params(calib_file):

    calib = toml.load(calib_file)

-    S, K, dist, optim_K, R, T = [], [], [], [], [], []
+    S, K, dist, optim_K, inv_K, R, R_mat, T = [], [], [], [], [], [], [], []
    for c, cam in enumerate(calib.keys()):
        if cam != 'metadata':
            S.append(np.array(calib[cam]['size']))
            K.append(np.array(calib[cam]['matrix']))
            dist.append(np.array(calib[cam]['distortions']))
            optim_K.append(cv2.getOptimalNewCameraMatrix(K[c], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0])
+            inv_K.append(np.linalg.inv(K[c]))
            R.append(np.array(calib[cam]['rotation']))
+            R_mat.append(cv2.Rodrigues(R[c])[0])
            T.append(np.array(calib[cam]['translation']))
-    calib_params = {'S': S, 'K': K, 'dist': dist, 'optim_K': optim_K, 'R': R, 'T': T}
+    calib_params = {'S': S, 'K': K, 'dist': dist, 'inv_K': inv_K, 'optim_K': optim_K, 'R': R, 'R_mat': R_mat, 'T': T}

    return calib_params
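A sketch of why inv_K and R_mat are worth caching (illustrative, not a function from Pose2Sim; it assumes the OpenCV convention x_cam = R_mat @ x_world + T): back-projecting a 2D detection to a viewing ray in world coordinates only needs these precomputed matrices.

import numpy as np

def pixel_to_world_ray(u, v, inv_K, R_mat, T):
    '''Camera center and unit ray direction in the world frame for pixel (u, v).'''
    ray_cam = inv_K @ np.array([u, v, 1.0])   # ray direction in the camera frame
    ray_world = R_mat.T @ ray_cam             # rotate the direction into the world frame
    cam_center = -R_mat.T @ T                 # camera position in the world frame
    return cam_center, ray_world / np.linalg.norm(ray_world)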
@ -8,13 +8,17 @@
###########################################################################

Openpose detects all people in the field of view.
-Which is the one of interest?
+- multi_person = false: Which is the one of interest?
+- multi_person = true: How to triangulate the same persons across views?
+  How to associate them across time frames? Done in the triangulation stage.

-This module tries all possible triangulations of a chosen anatomical
-point. If "multi_person" mode is not used, it chooses the person for
-whom the reprojection error is smallest. Otherwise, it selects all
-persons with a reprojection error smaller than a threshold, and then
-associates them across time frames by minimizing the displacement speed.
+If multi_person = false, this module tries all possible triangulations of a chosen
+anatomical point, and chooses the person for whom the reprojection error is smallest.
+
+If multi_person = true, it computes the distance between epipolar lines (camera to
+keypoint lines) for all persons detected in all views, and selects the best correspondences.
+The computation of the affinity matrix from the distance is inspired by the EasyMocap approach.

INPUTS:
- a calibration file (.toml extension)
@@ -58,97 +62,6 @@ __status__ = "Development"

 ## FUNCTIONS
-def common_items_in_list(list1, list2):
-    '''
-    Do two lists have any items in common at the same index?
-    Returns True or False
-    '''
-
-    for i, j in enumerate(list1):
-        if j == list2[i]:
-            return True
-    return False
-
-
-def min_with_single_indices(L, T):
-    '''
-    Let L be a list (size s) with T associated tuple indices (size s).
-    Select the smallest values of L, considering that
-    the next smallest value cannot have the same numbers
-    in the associated tuple as any of the previous ones.
-
-    Example:
-    L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
-    T = list(it.product(range(3),range(4)))
-      = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
-
-    - 1st smallest value: 3 with tuple (2,3), index 11
-    - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
-      20 with tuple (0,0), index 0
-    - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
-      23 with tuple (1,1), index 5
-
-    INPUTS:
-    - L: list (size s)
-    - T: T associated tuple indices (size s)
-
-    OUTPUTS:
-    - minL: list of smallest values of L, considering constraints on tuple indices
-    - argminL: list of indices of smallest values of L
-    - T_minL: list of tuples associated with smallest values of L
-    '''
-
-    minL = [np.min(L)]
-    argminL = [np.argmin(L)]
-    T_minL = [T[argminL[0]]]
-
-    mask_tokeep = np.array([True for t in T])
-    i=0
-    while mask_tokeep.any()==True:
-        mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
-        if mask_tokeep.any()==True:
-            indicesL_tokeep = np.where(mask_tokeep)[0]
-            minL += [np.min(np.array(L)[indicesL_tokeep])]
-            argminL += [indicesL_tokeep[np.argmin(np.array(L)[indicesL_tokeep])]]
-            T_minL += (T[argminL[i+1]],)
-            i+=1
-
-    return minL, argminL, T_minL
-
-
-def sort_people(Q_kpt_old, Q_kpt):
-    '''
-    Associate persons across frames.
-    Persons' indices are sometimes swapped when changing frame.
-    A person is associated to another in the next frame when they are at a small distance.
-
-    INPUTS:
-    - Q_kpt_old: list of arrays of 3D coordinates [X, Y, Z, 1.] for the previous frame
-    - Q_kpt: idem Q_kpt_old, for current frame
-
-    OUTPUT:
-    - Q_kpt: array with reordered persons
-    - personsIDs_sorted: index of reordered persons
-    '''
-
-    # Generate possible person correspondences across frames
-    if len(Q_kpt_old) < len(Q_kpt):
-        Q_kpt_old = np.concatenate((Q_kpt_old, [[0., 0., 0., 1.]]*(len(Q_kpt)-len(Q_kpt_old))))
-    personsIDs_comb = sorted(list(it.product(range(len(Q_kpt_old)),range(len(Q_kpt)))))
-    # Compute distance between persons from one frame to another
-    frame_by_frame_dist = []
-    for comb in personsIDs_comb:
-        frame_by_frame_dist += [euclidean_distance(Q_kpt_old[comb[0]][:3],Q_kpt[comb[1]][:3])]
-    # sort correspondences by distance
-    _, index_best_comb, _ = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
-    index_best_comb.sort()
-    personsIDs_sorted = np.array(personsIDs_comb)[index_best_comb][:,1]
-    # rearrange persons
-    Q_kpt = np.array(Q_kpt)[personsIDs_sorted]
-
-    return Q_kpt, personsIDs_sorted
 def persons_combinations(json_files_framef):
     '''
     Find all possible combinations of detected persons' ids.
@@ -179,10 +92,63 @@ def persons_combinations(json_files_framef):
     return personsIDs_comb

+def triangulate_comb(comb, coords, P_all, calib_params, config):
+    '''
+    Triangulate 2D points and compute the reprojection error for a combination of cameras.
+
+    INPUTS:
+    - comb: list of ints: combination of persons' ids for each camera
+    - coords: array: x, y, likelihood for each camera
+    - P_all: list of arrays: projection matrices for each camera
+    - calib_params: dict: calibration parameters
+    - config: dictionary from Config.toml file
+
+    OUTPUTS:
+    - error_comb: float: reprojection error
+    - comb: list of ints: combination of persons' ids for each camera
+    - Q_comb: array: 3D coordinates of the triangulated point
+    '''
+
+    undistort_points = config.get('triangulation').get('undistort_points')
+    likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
+
+    # Replace likelihood by 0. if under likelihood_threshold
+    coords[:,2][coords[:,2] < likelihood_threshold] = 0.
+    comb[coords[:,2] == 0.] = np.nan
+
+    # Filter coords and projection matrices containing nans
+    coords_filt = [coords[i] for i in range(len(comb)) if not np.isnan(comb[i])]
+    projection_matrices_filt = [P_all[i] for i in range(len(comb)) if not np.isnan(comb[i])]
+    if undistort_points:
+        calib_params_R_filt = [calib_params['R'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_T_filt = [calib_params['T'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_K_filt = [calib_params['K'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+        calib_params_dist_filt = [calib_params['dist'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
+
+    # Triangulate 2D points
+    x_files_filt, y_files_filt, likelihood_files_filt = np.array(coords_filt).T
+    Q_comb = weighted_triangulation(projection_matrices_filt, x_files_filt, y_files_filt, likelihood_files_filt)
+
+    # Reprojection (one reprojected point per camera kept in this subset)
+    if undistort_points:
+        coords_2D_kpt_calc_filt = [cv2.projectPoints(np.array(Q_comb[:-1]), calib_params_R_filt[i], calib_params_T_filt[i], calib_params_K_filt[i], calib_params_dist_filt[i])[0] for i in range(len(projection_matrices_filt))]
+        x_calc = [coords_2D_kpt_calc_filt[i][0,0,0] for i in range(len(projection_matrices_filt))]
+        y_calc = [coords_2D_kpt_calc_filt[i][0,0,1] for i in range(len(projection_matrices_filt))]
+    else:
+        x_calc, y_calc = reprojection(projection_matrices_filt, Q_comb)
+
+    # Reprojection error
+    error_comb_per_cam = []
+    for cam in range(len(x_calc)):
+        q_file = (x_files_filt[cam], y_files_filt[cam])
+        q_calc = (x_calc[cam], y_calc[cam])
+        error_comb_per_cam.append( euclidean_distance(q_file, q_calc) )
+    error_comb = np.mean(error_comb_per_cam)
+
+    return error_comb, comb, Q_comb
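triangulate_comb delegates the actual solve to weighted_triangulation from Pose2Sim.common. A hedged sketch of what such a likelihood-weighted DLT plausibly looks like (the helper name and toy cameras below are invented; only the call shape mirrors the code above):

    import numpy as np

    def weighted_triangulation_sketch(P_all, x, y, w):
        '''P_all: list of 3x4 projection matrices; x, y, w: per-camera coords and likelihoods.'''
        A = []
        for P, xi, yi, wi in zip(P_all, x, y, w):
            A.append(wi * (xi * P[2] - P[0]))  # two rows per camera of the homogeneous system A @ Q = 0
            A.append(wi * (yi * P[2] - P[1]))
        _, _, Vt = np.linalg.svd(np.array(A))
        Q = Vt[-1]
        return Q / Q[3]                        # homogeneous [X, Y, Z, 1.]

    # two toy cameras looking at the point (0, 0, 5)
    K = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])
    P0 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
    P1 = K @ np.hstack([np.eye(3), np.array([[-1.], [0.], [0.]])])  # shifted by 1 m
    X = np.array([0., 0., 5., 1.])
    uv0, uv1 = P0 @ X, P1 @ X
    print(weighted_triangulation_sketch([P0, P1],
                                        [uv0[0]/uv0[2], uv1[0]/uv1[2]],
                                        [uv0[1]/uv0[2], uv1[1]/uv1[2]],
                                        [1., 1.]))   # ~ [0, 0, 5, 1]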

 def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_combinations, projection_matrices, tracked_keypoint_id, calib_params):
     '''
-    - if multi_person: Choose all the combination of cameras that give a reprojection error below a threshold
-    - else: Chooses the right person among the multiple ones found by
+    Chooses the right person among the multiple ones found by
     OpenPose & excludes cameras with wrong 2d-pose estimation.

     1. triangulate the tracked keypoint for all possible combinations of people,
@@ -203,9 +169,7 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_combinations, projection_matrices, tracked_keypoint_id, calib_params):
     - comb_errors_below_thresh: list of arrays of ints
     '''

-    multi_person = config.get('project').get('multi_person')
-    nb_persons_to_detect = config.get('project').get('nb_persons_to_detect')
-    error_threshold_tracking = config.get('personAssociation').get('reproj_error_threshold_association')
+    error_threshold_tracking = config.get('personAssociation').get('single_person').get('reproj_error_threshold_association')
     likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
     min_cameras_for_triangulation = config.get('triangulation').get('min_cameras_for_triangulation')
     undistort_points = config.get('triangulation').get('undistort_points')
@@ -219,30 +183,23 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_combinations, projection_matrices, tracked_keypoint_id, calib_params):
     while error_min > error_threshold_tracking and n_cams - nb_cams_off >= min_cameras_for_triangulation:
         # Try all persons combinations
         for combination in personsIDs_combinations:
-            # Get x,y,likelihood values from files
-            x_files, y_files, likelihood_files = [], [], []
+            # Get coords from files
+            coords = []
             for index_cam, person_nb in enumerate(combination):
-                with open(json_files_framef[index_cam], 'r') as json_f:
-                    js = json.load(json_f)
-                    try:
-                        x_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3] )
-                        y_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3+1] )
-                        likelihood_files.append( js['people'][int(person_nb)]['pose_keypoints_2d'][tracked_keypoint_id*3+2] )
-                    except:
-                        x_files.append(np.nan)
-                        y_files.append(np.nan)
-                        likelihood_files.append(np.nan)
+                try:
+                    js = read_json(json_files_framef[index_cam])
+                    coords.append(js[int(person_nb)][tracked_keypoint_id*3:tracked_keypoint_id*3+3])
+                except:
+                    coords.append([np.nan, np.nan, np.nan])
+            coords = np.array(coords)

             # undistort points
             if undistort_points:
-                points = np.array(tuple(zip(x_files,y_files))).reshape(-1, 1, 2).astype('float32')
+                points = np.array(coords)[:,None,:2]
                 undistorted_points = [cv2.undistortPoints(points[i], calib_params['K'][i], calib_params['dist'][i], None, calib_params['optim_K'][i]) for i in range(n_cams)]
-                x_files = np.array([[u[i][0][0] for i in range(len(u))] for u in undistorted_points]).squeeze()
-                y_files = np.array([[u[i][0][1] for i in range(len(u))] for u in undistorted_points]).squeeze()
-
-            # Replace likelihood by 0. if under likelihood_threshold
-            likelihood_files = [0. if lik < likelihood_threshold else lik for lik in likelihood_files]
+                coords[:,0] = np.array([[u[i][0][0] for i in range(len(u))] for u in undistorted_points]).squeeze()
+                coords[:,1] = np.array([[u[i][0][1] for i in range(len(u))] for u in undistorted_points]).squeeze()

             # For each persons combination, create subsets with "nb_cams_off" cameras excluded
             id_cams_off = list(it.combinations(range(len(combination)), nb_cams_off))
             combinations_with_cams_off = np.array([combination.copy()]*len(id_cams_off))
@@ -250,91 +207,338 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_combinations, projection_matrices, tracked_keypoint_id, calib_params):
                 combinations_with_cams_off[i,id] = np.nan

             # Try all subsets
-            error_comb = []
-            Q_comb = []
+            error_comb_all, comb_all, Q_comb_all = [], [], []
             for comb in combinations_with_cams_off:
-                # Filter x, y, likelihood, projection_matrices, with subset
-                x_files_filt = [x_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                y_files_filt = [y_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                likelihood_files_filt = [likelihood_files[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                projection_matrices_filt = [projection_matrices[i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                if undistort_points:
-                    calib_params_R_filt = [calib_params['R'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_T_filt = [calib_params['T'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_K_filt = [calib_params['K'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-                    calib_params_dist_filt = [calib_params['dist'][i] for i in range(len(comb)) if not np.isnan(comb[i])]
-
-                # Triangulate 2D points
-                Q_comb.append(weighted_triangulation(projection_matrices_filt, x_files_filt, y_files_filt, likelihood_files_filt))
-
-                # Reprojection
-                if undistort_points:
-                    coords_2D_kpt_calc_filt = [cv2.projectPoints(np.array(Q_comb[-1][:-1]), calib_params_R_filt[i], calib_params_T_filt[i], calib_params_K_filt[i], calib_params_dist_filt[i])[0] for i in range(n_cams-nb_cams_off)]
-                    x_calc = [coords_2D_kpt_calc_filt[i][0,0,0] for i in range(n_cams-nb_cams_off)]
-                    y_calc = [coords_2D_kpt_calc_filt[i][0,0,1] for i in range(n_cams-nb_cams_off)]
-                else:
-                    x_calc, y_calc = reprojection(projection_matrices_filt, Q_comb[-1])
-
-                # Reprojection error
-                error_comb_per_cam = []
-                for cam in range(len(x_calc)):
-                    q_file = (x_files_filt[cam], y_files_filt[cam])
-                    q_calc = (x_calc[cam], y_calc[cam])
-                    error_comb_per_cam.append( euclidean_distance(q_file, q_calc) )
-                error_comb.append( np.mean(error_comb_per_cam) )
-
-            if multi_person:
-                errors_below_thresh += [e for e in error_comb if e<error_threshold_tracking]
-                comb_errors_below_thresh += [combinations_with_cams_off[error_comb.index(e)] for e in error_comb if e<error_threshold_tracking]
-                Q_kpt += [Q_comb[error_comb.index(e)] for e in error_comb if e<error_threshold_tracking]
-            else:
-                error_min = np.nanmin(error_comb)
-                errors_below_thresh = [error_min]
-                comb_errors_below_thresh = [combinations_with_cams_off[np.argmin(error_comb)]]
-                Q_kpt = [Q_comb[np.argmin(error_comb)]]
-                if errors_below_thresh[0] < error_threshold_tracking:
-                    break
-
-        if multi_person:
-            if len(errors_below_thresh)>0:
-                # sort combinations by error magnitude
-                errors_below_thresh_sorted = sorted(errors_below_thresh)
-                sorted_idx = np.array([errors_below_thresh.index(e) for e in errors_below_thresh_sorted])
-                comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_idx]
-                Q_kpt = np.array(Q_kpt)[sorted_idx]
-                # remove combinations with indices used several times for the same person
-                comb_errors_below_thresh = [c.tolist() for c in comb_errors_below_thresh]
-                comb = comb_errors_below_thresh.copy()
-                comb_ok = np.array([comb[0]])
-                for i, c1 in enumerate(comb):
-                    idx_ok = np.array([not(common_items_in_list(c1, c2)) for c2 in comb[1:]])
-                    try:
-                        comb = np.array(comb[1:])[idx_ok]
-                        comb_ok = np.concatenate((comb_ok, [comb[0]]))
-                    except:
-                        break
-                sorted_pruned_idx = [i for i, x in enumerate(comb_errors_below_thresh) for c in comb_ok if np.array_equal(x,c,equal_nan=True)]
-                errors_below_thresh = np.array(errors_below_thresh_sorted)[sorted_pruned_idx].tolist()
-                comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_pruned_idx].tolist()
-                Q_kpt = Q_kpt[sorted_pruned_idx].tolist()
-
-            # Remove indices already used for a person
-            personsIDs_combinations = np.array([personsIDs_combinations[i] for i in range(len(personsIDs_combinations))
-                if not np.array(
-                    [personsIDs_combinations[i,j]==comb[j] for comb in comb_errors_below_thresh for j in range(len(comb))]
-                ).any()])
-            if len(errors_below_thresh) >= len(personsIDs_combinations) or len(errors_below_thresh) >= nb_persons_to_detect:
-                errors_below_thresh = errors_below_thresh[:nb_persons_to_detect]
-                comb_errors_below_thresh = comb_errors_below_thresh[:nb_persons_to_detect]
-                Q_kpt = Q_kpt[:nb_persons_to_detect]
-                break
+                error_comb, comb, Q_comb = triangulate_comb(comb, coords, projection_matrices, calib_params, config)
+                error_comb_all.append(error_comb)
+                comb_all.append(comb)
+                Q_comb_all.append(Q_comb)
+
+            error_min = np.nanmin(error_comb_all)
+            comb_error_min = [comb_all[np.argmin(error_comb_all)]]
+            Q_kpt = [Q_comb_all[np.argmin(error_comb_all)]]
+            if error_min < error_threshold_tracking:
+                break

         nb_cams_off += 1

-    return errors_below_thresh, comb_errors_below_thresh, Q_kpt
+    return error_min, comb_error_min, Q_kpt
-def recap_tracking(config, error, nb_cams_excluded):
+def read_json(js_file):
+    '''
+    Read an OpenPose json file.
+    '''
+
+    with open(js_file, 'r') as json_f:
+        js = json.load(json_f)
+        json_data = []
+        for people in range(len(js['people'])):
+            if len(js['people'][people]['pose_keypoints_2d']) < 3: continue
+            else:
+                json_data.append(js['people'][people]['pose_keypoints_2d'])
+    return json_data
+def compute_rays(json_coord, calib_params, cam_id):
+    '''
+    Plucker coordinates of rays from the camera to each joint of a person.
+    Plucker coordinates: camera-to-keypoint line direction (size 3);
+    moment: origin ^ line (size 3); additionally, confidence.
+
+    INPUTS:
+    - json_coord: x, y, likelihood for a person seen from a camera (list of 3*joint_nb)
+    - calib_params: calibration parameters from retrieve_calib_params('calib.toml')
+    - cam_id: camera id (int)
+
+    OUTPUT:
+    - plucker: array. nb joints * (6 Plucker coordinates + 1 likelihood)
+    '''
+
+    x = json_coord[0::3]
+    y = json_coord[1::3]
+    likelihood = json_coord[2::3]
+
+    inv_K = calib_params['inv_K'][cam_id]
+    R_mat = calib_params['R_mat'][cam_id]
+    T = calib_params['T'][cam_id]
+
+    cam_center = -R_mat.T @ T
+    plucker = []
+    for i in range(len(x)):
+        q = np.array([x[i], y[i], 1])
+        norm_Q = R_mat.T @ (inv_K @ q - T)
+
+        line = norm_Q - cam_center
+        norm_line = line/np.linalg.norm(line)
+        moment = np.cross(cam_center, norm_line)
+        plucker.append(np.concatenate([norm_line, moment, [likelihood[i]]]))
+
+    return np.array(plucker)
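Two quick sanity checks on this parameterization (toy points; the helper name is invented): the direction and moment of a Plucker line are orthogonal, and the reciprocal product used as a distance below vanishes for rays that truly intersect:

    import numpy as np

    def plucker_from_two_points(a, b):
        '''Plucker (direction, moment) of the line through points a and b.'''
        direction = (b - a) / np.linalg.norm(b - a)
        moment = np.cross(a, direction)  # origin ^ line
        return direction, moment

    # two rays that intersect at (1, 1, 1)
    l0, m0 = plucker_from_two_points(np.array([0., 0., 0.]), np.array([1., 1., 1.]))
    l1, m1 = plucker_from_two_points(np.array([2., 0., 0.]), np.array([1., 1., 1.]))

    print(np.dot(l0, m0))                   # ~0: direction and moment are orthogonal
    print(np.dot(l0, m1) + np.dot(m0, l1))  # ~0: reciprocal product of intersecting lines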

+def broadcast_line_to_line_distance(p0, p1):
+    '''
+    Compute the distance between two lines in 3D space.
+
+    See: https://faculty.sites.iastate.edu/jia/files/inline-files/plucker-coordinates.pdf
+    p0 = (l0,m0), p1 = (l1,m1)
+    dist = | (l0,m0) * (l1,m1) | / || l0 x l1 ||
+    with (l0,m0) * (l1,m1) = l0 @ m1 + m0 @ l1 (reciprocal product)
+
+    No need to divide by the norm of the cross product of the directions, since we
+    don't need the actual distance, only whether the lines are close to intersecting
+    => dist = | (l0,m0) * (l1,m1) |
+
+    INPUTS:
+    - p0: array(nb_persons_detected * 1 * nb_joints * 7 coordinates)
+    - p1: array(1 * nb_persons_detected * nb_joints * 7 coordinates)
+
+    OUTPUT:
+    - dist: distances between the two lines (not normalized).
+      array(nb_persons_0 * nb_persons_1 * nb_joints)
+    '''
+
+    product = np.sum(p0[..., :3] * p1[..., 3:6], axis=-1) + np.sum(p1[..., :3] * p0[..., 3:6], axis=-1)
+    dist = np.abs(product)
+
+    return dist

+def compute_affinity(all_json_data_f, calib_params, cum_persons_per_view, reconstruction_error_threshold=0.1):
+    '''
+    Compute the affinity between all the people in the different views.
+
+    The affinity is defined as 1 - distance/max_distance, with distance the
+    distance between epipolar lines in each view (reciprocal product of Plucker
+    coordinates).
+
+    Another approach would be to project one epipolar line onto the other camera
+    plane and compute the line-to-point distance, but it is more computationally
+    intensive (simple dot product vs. projection and distance calculation).
+
+    INPUTS:
+    - all_json_data_f: list of json data. For frame f, nb_views*nb_persons*(x,y,likelihood)*nb_joints
+    - calib_params: calibration parameters from retrieve_calib_params('calib.toml')
+    - cum_persons_per_view: cumulative number of persons per view
+    - reconstruction_error_threshold: maximum distance between epipolar lines to consider a match
+
+    OUTPUT:
+    - affinity: affinity matrix between all the people in the different views.
+      (nb_views*nb_persons_per_view * nb_views*nb_persons_per_view)
+    '''
+
+    # Compute Plucker coordinates for all keypoints for each person in each view
+    # pluckers_f: dims=(camera, person, joint, 7 coordinates)
+    pluckers_f = []
+    for cam_id, json_cam in enumerate(all_json_data_f):
+        pluckers = []
+        for json_coord in json_cam:
+            plucker = compute_rays(json_coord, calib_params, cam_id) # LIMIT TO 15 JOINTS? json_coord[:15*3]
+            pluckers.append(plucker)
+        pluckers = np.array(pluckers)
+        pluckers_f.append(pluckers)
+
+    # Compute affinity matrix
+    distance = np.zeros((cum_persons_per_view[-1], cum_persons_per_view[-1])) + 2*reconstruction_error_threshold
+    for compared_cam0, compared_cam1 in it.combinations(range(len(all_json_data_f)), 2):
+        # skip when no detection for a camera
+        if cum_persons_per_view[compared_cam0] == cum_persons_per_view[compared_cam0+1] \
+            or cum_persons_per_view[compared_cam1] == cum_persons_per_view[compared_cam1+1]:
+            continue
+
+        # compute distance
+        p0 = pluckers_f[compared_cam0][:,None] # add axis on second dimension
+        p1 = pluckers_f[compared_cam1][None,:] # add axis on first dimension
+        dist = broadcast_line_to_line_distance(p0, p1)
+        likelihood = np.sqrt(p0[..., -1] * p1[..., -1])
+        mean_weighted_dist = np.sum(dist*likelihood, axis=-1)/(1e-5 + likelihood.sum(axis=-1)) # array(nb_persons_0 * nb_persons_1)
+
+        # populate distance matrix
+        distance[cum_persons_per_view[compared_cam0]:cum_persons_per_view[compared_cam0+1], \
+                 cum_persons_per_view[compared_cam1]:cum_persons_per_view[compared_cam1+1]] = mean_weighted_dist
+        distance[cum_persons_per_view[compared_cam1]:cum_persons_per_view[compared_cam1+1], \
+                 cum_persons_per_view[compared_cam0]:cum_persons_per_view[compared_cam0+1]] = mean_weighted_dist.T
+
+    # compute affinity matrix and clamp it to zero when distance > reconstruction_error_threshold
+    distance[distance > reconstruction_error_threshold] = reconstruction_error_threshold
+    affinity = 1 - distance / reconstruction_error_threshold
+
+    return affinity

+def circular_constraint(cum_persons_per_view):
+    '''
+    A person can be matched only with themselves in the same view, and with any
+    person from other views.
+
+    INPUT:
+    - cum_persons_per_view: cumulative number of persons per view
+
+    OUTPUT:
+    - circ_constraint: circular constraint matrix
+    '''
+
+    circ_constraint = np.identity(cum_persons_per_view[-1])
+    for i in range(len(cum_persons_per_view)-1):
+        circ_constraint[cum_persons_per_view[i]:cum_persons_per_view[i+1], cum_persons_per_view[i+1]:cum_persons_per_view[-1]] = 1
+        circ_constraint[cum_persons_per_view[i+1]:cum_persons_per_view[-1], cum_persons_per_view[i]:cum_persons_per_view[i+1]] = 1
+
+    return circ_constraint
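For example, with two persons in view 0 and one in view 1 (so cum_persons_per_view = [0, 2, 3]), a toy rerun of the loop above gives:

    import numpy as np

    # Within-view off-diagonal entries stay 0 (a person cannot match another
    # person from its own view); cross-view entries are allowed (1).
    cum = [0, 2, 3]
    circ = np.identity(cum[-1])
    for i in range(len(cum)-1):
        circ[cum[i]:cum[i+1], cum[i+1]:cum[-1]] = 1
        circ[cum[i+1]:cum[-1], cum[i]:cum[i+1]] = 1
    print(circ)
    # [[1. 0. 1.]
    #  [0. 1. 1.]
    #  [1. 1. 1.]]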

+def SVT(matrix, threshold):
+    '''
+    Find a low-rank approximation of the matrix using Singular Value Thresholding.
+
+    INPUTS:
+    - matrix: matrix to decompose
+    - threshold: threshold for singular values
+
+    OUTPUT:
+    - matrix_thresh: low-rank approximation of the matrix
+    '''
+
+    U, s, Vt = np.linalg.svd(matrix) # decompose matrix
+    s_thresh = np.maximum(s - threshold, 0) # soft-threshold: shrink all singular values, zeroing the smallest
+    matrix_thresh = U @ np.diag(s_thresh) @ Vt # recompose matrix
+
+    return matrix_thresh
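A small self-contained illustration of the thresholding (invented rank-1 data plus noise, not taken from the commit):

    import numpy as np

    rng = np.random.default_rng(0)
    u = np.ones((4, 1))
    signal = u @ u.T                              # rank 1, singular values [4, 0, 0, 0]
    noisy = signal + 0.1 * rng.standard_normal((4, 4))

    U, s, Vt = np.linalg.svd(noisy)
    s_thresh = np.maximum(s - 0.5, 0)             # threshold 0.5 wipes out the noise directions
    print(np.round(s, 2))                         # one large value, three small ones
    print(np.round(U @ np.diag(s_thresh) @ Vt, 1))  # close to the all-ones matrix (scaled down)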

+def matchSVT(affinity, cum_persons_per_view, circ_constraint, max_iter = 20, w_rank = 50, tol = 1e-4, w_sparse = 0.1):
+    '''
+    Find a low-rank approximation of 'affinity' while satisfying the circular constraint.
+
+    INPUTS:
+    - affinity: affinity matrix between all the people in the different views
+    - cum_persons_per_view: cumulative number of persons per view
+    - circ_constraint: circular constraint matrix
+    - max_iter: maximum number of iterations
+    - w_rank: threshold for singular values
+    - tol: tolerance for convergence
+    - w_sparse: regularization parameter
+
+    OUTPUT:
+    - new_aff: low-rank approximation of the affinity matrix
+    '''
+
+    new_aff = affinity.copy()
+    N = new_aff.shape[0]
+    index_diag = np.arange(N)
+    new_aff[index_diag, index_diag] = 0.
+    # new_aff = (new_aff + new_aff.T)/2 # symmetric by construction
+
+    Y = np.zeros_like(new_aff) # initial deviation matrix / residual
+    W = w_sparse - new_aff # initial sparse matrix / regularization (prevents overfitting)
+    mu = 64 # initial step size
+
+    for iter in range(max_iter):
+        new_aff0 = new_aff.copy()
+
+        Q = new_aff + Y*1.0/mu
+        Q = SVT(Q, w_rank/mu)
+        new_aff = Q - (W + Y)/mu
+
+        # Project X onto dimGroups
+        for i in range(len(cum_persons_per_view) - 1):
+            ind1, ind2 = cum_persons_per_view[i], cum_persons_per_view[i + 1]
+            new_aff[ind1:ind2, ind1:ind2] = 0
+
+        # Reset diagonal elements to one and ensure X is within valid range [0, 1]
+        new_aff[index_diag, index_diag] = 1.
+        new_aff[new_aff < 0] = 0
+        new_aff[new_aff > 1] = 1
+
+        # Enforce circular constraint
+        new_aff = new_aff * circ_constraint
+        new_aff = (new_aff + new_aff.T) / 2 # kept just in case X loses its symmetry during optimization
+        Y = Y + mu * (new_aff - Q)
+
+        # Compute convergence criteria: break if new_aff is close enough to Q and not evolving anymore
+        pRes = np.linalg.norm(new_aff - Q) / N # primal residual (diff between new_aff and SVT result)
+        dRes = mu * np.linalg.norm(new_aff - new_aff0) / N # dual residual (diff between new_aff and previous new_aff)
+        if pRes < tol and dRes < tol:
+            break
+        if pRes > 10 * dRes: mu = 2 * mu
+        elif dRes > 10 * pRes: mu = mu / 2
+
+    return new_aff
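For orientation, a hedged, toy-sized run of the association chain (affinity, circular constraint, matchSVT, affinity cut); it assumes the functions from this patch are in scope, and the parameter values are arbitrary:

    import numpy as np

    cum_persons_per_view = np.array([0, 2, 3])   # 2 persons in view 0, 1 in view 1
    affinity = np.array([[1.0, 0.0, 0.9],
                         [0.0, 1.0, 0.2],
                         [0.9, 0.2, 1.0]])       # person 0 of view 0 matches person 0 of view 1

    circ = circular_constraint(cum_persons_per_view)
    aff_clean = matchSVT(affinity, cum_persons_per_view, circ, max_iter=20, w_rank=2, tol=1e-4, w_sparse=0.1)
    aff_clean[aff_clean < 0.4] = 0               # min_affinity cut, as in track_2d_all below
    print(np.round(aff_clean, 2))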

+def person_index_per_cam(affinity, cum_persons_per_view, min_cameras_for_triangulation):
+    '''
+    For each detected person, gives their index for each camera.
+
+    INPUTS:
+    - affinity: affinity matrix between all the people in the different views
+    - min_cameras_for_triangulation: exclude proposals if less than N cameras see them
+
+    OUTPUT:
+    - proposals: 2D array: n_persons * n_cams
+    '''
+
+    # index of the max affinity for each group (-1 if no detection)
+    proposals = []
+    for row in range(affinity.shape[0]):
+        proposal_row = []
+        for cam in range(len(cum_persons_per_view)-1):
+            id_persons_per_view = affinity[row, cum_persons_per_view[cam]:cum_persons_per_view[cam+1]]
+            proposal_row += [np.argmax(id_persons_per_view) if (len(id_persons_per_view)>0 and max(id_persons_per_view)>0) else -1]
+        proposals.append(proposal_row)
+    proposals = np.array(proposals, dtype=float)
+
+    # remove duplicates and order by number of detections
+    proposals, nb_detections = np.unique(proposals, axis=0, return_counts=True)
+    proposals = proposals[np.argsort(nb_detections)[::-1]]
+
+    # remove a row if any value is the same as in a previous row at the same index (nan != nan, so nans are ignored)
+    proposals[proposals==-1] = np.nan
+    mask = np.ones(proposals.shape[0], dtype=bool)
+    for i in range(1, len(proposals)):
+        mask[i] = ~np.any(proposals[i] == proposals[:i], axis=0).any()
+    proposals = proposals[mask]
+
+    # remove identifications if less than N cameras see them
+    nb_cams_per_person = [np.count_nonzero(~np.isnan(p)) for p in proposals]
+    proposals = np.array([p for (n,p) in zip(nb_cams_per_person, proposals) if n >= min_cameras_for_triangulation])
+
+    return proposals
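To make the proposals layout concrete — a hedged toy example with invented values; nan marks a camera that does not see the person:

    import numpy as np

    # Each proposals row is one reconstructed person; each column is one camera.
    # The value is that person's index in the camera's json file, nan if unseen.
    proposals = np.array([[0., 1., 0.],
                          [1., np.nan, 1.]])
    # Row 0: detection 0 in cams 0 and 2, detection 1 in cam 1.
    # Row 1: detection 1 in cams 0 and 2, not detected in cam 1 -> an empty {}
    # entry is written for that camera by rewrite_json_files below.
    for person, row in enumerate(proposals):
        cams_seen = np.flatnonzero(~np.isnan(row))
        print(f'person {person}: seen by cameras {cams_seen.tolist()}')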

+def rewrite_json_files(json_tracked_files_f, json_files_f, proposals, n_cams):
+    '''
+    Write new json files with correct association of people across cameras.
+
+    INPUTS:
+    - json_tracked_files_f: list of strings: json files to write
+    - json_files_f: list of strings: json files to read
+    - proposals: 2D array: n_persons * n_cams
+    - n_cams: int: number of cameras
+
+    OUTPUT:
+    - json files with correct association of people across cameras
+    '''
+
+    for cam in range(n_cams):
+        with open(json_tracked_files_f[cam], 'w') as json_tracked_f:
+            with open(json_files_f[cam], 'r') as json_f:
+                js = json.load(json_f)
+                js_new = js.copy()
+                js_new['people'] = []
+                for new_comb in proposals:
+                    if not np.isnan(new_comb[cam]):
+                        js_new['people'] += [js['people'][int(new_comb[cam])]]
+                    else:
+                        js_new['people'] += [{}]
+            json_tracked_f.write(json.dumps(js_new))
+def recap_tracking(config, error=0, nb_cams_excluded=0):
     '''
     Print a message giving statistics on reprojection errors (in pixel and in m)
     as well as the number of cameras that had to be excluded to reach threshold
@@ -352,27 +556,39 @@ def recap_tracking(config, error, nb_cams_excluded):
     # Read config
     project_dir = config.get('project').get('project_dir')
     session_dir = os.path.realpath(os.path.join(project_dir, '..', '..'))
-    tracked_keypoint = config.get('personAssociation').get('tracked_keypoint')
-    error_threshold_tracking = config.get('personAssociation').get('reproj_error_threshold_association')
+    multi_person = config.get('project').get('multi_person')
+    likelihood_threshold_association = config.get('personAssociation').get('likelihood_threshold_association')
+    tracked_keypoint = config.get('personAssociation').get('single_person').get('tracked_keypoint')
+    error_threshold_tracking = config.get('personAssociation').get('single_person').get('reproj_error_threshold_association')
+    reconstruction_error_threshold = config.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
+    min_affinity = config.get('personAssociation').get('multi_person').get('min_affinity')
     poseTracked_dir = os.path.join(project_dir, 'pose-associated')
     calib_dir = [os.path.join(session_dir, c) for c in os.listdir(session_dir) if 'calib' in c.lower()][0]
     calib_file = glob.glob(os.path.join(calib_dir, '*.toml'))[0] # lastly created calibration file

-    # Error
-    mean_error_px = np.around(np.mean(error), decimals=1)
-    calib = toml.load(calib_file)
-    calib_cam1 = calib[list(calib.keys())[0]]
-    fm = calib_cam1['matrix'][0][0]
-    Dm = euclidean_distance(calib_cam1['translation'], [0,0,0])
-    mean_error_mm = np.around(mean_error_px * Dm / fm * 1000, decimals=1)
-
-    # Excluded cameras
-    mean_cam_off_count = np.around(np.mean(nb_cams_excluded), decimals=2)
-
-    # Recap
-    logging.info(f'\n--> Mean reprojection error for {tracked_keypoint} point on all frames is {mean_error_px} px, which roughly corresponds to {mean_error_mm} mm. ')
-    logging.info(f'--> In average, {mean_cam_off_count} cameras had to be excluded to reach the demanded {error_threshold_tracking} px error threshold.')
+    if not multi_person:
+        logging.info('\nSingle-person analysis selected.')
+        # Error
+        mean_error_px = np.around(np.mean(error), decimals=1)
+        calib = toml.load(calib_file)
+        calib_cam1 = calib[list(calib.keys())[0]]
+        fm = calib_cam1['matrix'][0][0]
+        Dm = euclidean_distance(calib_cam1['translation'], [0,0,0])
+        mean_error_mm = np.around(mean_error_px * Dm / fm * 1000, decimals=1)
+
+        # Excluded cameras
+        mean_cam_off_count = np.around(np.mean(nb_cams_excluded), decimals=2)
+
+        # Recap
+        logging.info(f'\n--> Mean reprojection error for {tracked_keypoint} point on all frames is {mean_error_px} px, which roughly corresponds to {mean_error_mm} mm. ')
+        logging.info(f'--> On average, {mean_cam_off_count} cameras had to be excluded to reach the demanded {error_threshold_tracking} px error threshold after excluding points with likelihood below {likelihood_threshold_association}.')
+
+    else:
+        logging.info('\nMulti-person analysis selected.')
+        logging.info(f'\n--> A person was reconstructed if the lines from cameras to their keypoints intersected within {reconstruction_error_threshold} m and if the calculated affinity stayed above {min_affinity} after excluding points with likelihood below {likelihood_threshold_association}.')
+        logging.info(f'--> Beware that people were sorted across cameras, but not across frames. This will be done in the triangulation stage.')

     logging.info(f'\nTracked json files are stored in {os.path.realpath(poseTracked_dir)}.')
@@ -401,7 +617,11 @@ def track_2d_all(config):
     session_dir = os.path.realpath(os.path.join(project_dir, '..', '..'))
     multi_person = config.get('project').get('multi_person')
     pose_model = config.get('pose').get('pose_model')
-    tracked_keypoint = config.get('personAssociation').get('tracked_keypoint')
+    tracked_keypoint = config.get('personAssociation').get('single_person').get('tracked_keypoint')
+    likelihood_threshold = config.get('personAssociation').get('likelihood_threshold_association')
+    min_cameras_for_triangulation = config.get('triangulation').get('min_cameras_for_triangulation')
+    reconstruction_error_threshold = config.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
+    min_affinity = config.get('personAssociation').get('multi_person').get('min_affinity')
     frame_range = config.get('project').get('frame_range')
     undistort_points = config.get('triangulation').get('undistort_points')
@@ -414,12 +634,12 @@ def track_2d_all(config):
     poseTracked_dir = os.path.join(project_dir, 'pose-associated')

     if multi_person:
-        logging.info('\nMulti-person analysis selected. Note that you can set this option to false for faster runtime if you only need the main person in the scene.')
+        logging.info('\nMulti-person analysis selected. Note that you can set this option to false if you only need the main person in the scene.')
     else:
         logging.info('\nSingle-person analysis selected.')

     # projection matrix from toml calibration file
-    P = computeP(calib_file, undistort=undistort_points)
+    P_all = computeP(calib_file, undistort=undistort_points)
     calib_params = retrieve_calib_params(calib_file)

     # selection of tracked keypoint id
@@ -448,15 +668,14 @@ def track_2d_all(config):
     except: pass
     json_tracked_files = [[os.path.join(poseTracked_dir, j_dir, j_file) for j_file in json_files_names[j]] for j, j_dir in enumerate(json_dirs_names)]

-    # person's tracking
     f_range = [[min([len(j) for j in json_files])] if frame_range==[] else frame_range][0]
     n_cams = len(json_dirs_names)
     error_min_tot, cameras_off_tot = [], []

     # Check that camera number is consistent between calibration file and pose folders
-    if n_cams != len(P):
+    if n_cams != len(P_all):
         raise Exception(f'Error: The number of cameras is not consistent:\
-            Found {len(P)} cameras in the calibration file,\
+            Found {len(P_all)} cameras in the calibration file,\
             and {n_cams} cameras based on the number of pose folders.')

     Q_kpt = [np.array([0., 0., 0., 1.])]
@@ -464,35 +683,40 @@ def track_2d_all(config):
         # print(f'\nFrame {f}:')
         json_files_f = [json_files[c][f] for c in range(n_cams)]
         json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]

-        # all possible combinations of persons
-        personsIDs_comb = persons_combinations(json_files_f)
-
-        # choose persons of interest and exclude cameras with bad pose estimation
         Q_kpt_old = Q_kpt
-        errors_below_thresh, comb_errors_below_thresh, Q_kpt = best_persons_and_cameras_combination(config, json_files_f, personsIDs_comb, P, tracked_keypoint_id, calib_params)
-
-        # reID persons across frames by checking the distance from one frame to another
-        Q_kpt, personsIDs_sorted = sort_people(Q_kpt_old, Q_kpt)
-        errors_below_thresh = np.array(errors_below_thresh)[personsIDs_sorted]
-        comb_errors_below_thresh = np.array(comb_errors_below_thresh)[personsIDs_sorted]
+        if not multi_person:
+            # all possible combinations of persons
+            personsIDs_comb = persons_combinations(json_files_f)
+
+            # choose persons of interest and exclude cameras with bad pose estimation
+            error_proposals, proposals, Q_kpt = best_persons_and_cameras_combination(config, json_files_f, personsIDs_comb, P_all, tracked_keypoint_id, calib_params)
+
+            error_min_tot.append(np.mean(error_proposals))
+            cameras_off_count = np.count_nonzero([np.isnan(comb) for comb in proposals]) / len(proposals)
+            cameras_off_tot.append(cameras_off_count)
+
+        else:
+            # read data
+            all_json_data_f = []
+            for js_file in json_files_f:
+                all_json_data_f.append(read_json(js_file))
+            #TODO: remove people with average likelihood < 0.3, no full torso, less than 12 joints... (cf filter2d in dataset/base.py L498)
+
+            # obtain proposals after computing affinity between all the people in the different views
+            persons_per_view = [0] + [len(j) for j in all_json_data_f]
+            cum_persons_per_view = np.cumsum(persons_per_view)
+            affinity = compute_affinity(all_json_data_f, calib_params, cum_persons_per_view, reconstruction_error_threshold=reconstruction_error_threshold)
+            circ_constraint = circular_constraint(cum_persons_per_view)
+            affinity = affinity * circ_constraint
+            #TODO: affinity without hand, face, feet (cf ray.py L31)
+            affinity = matchSVT(affinity, cum_persons_per_view, circ_constraint, max_iter = 20, w_rank = 50, tol = 1e-4, w_sparse = 0.1)
+            affinity[affinity < min_affinity] = 0
+            proposals = person_index_per_cam(affinity, cum_persons_per_view, min_cameras_for_triangulation)

         # rewrite json files with a single or multiple persons of interest
-        error_min_tot.append(np.mean(errors_below_thresh))
-        cameras_off_count = np.count_nonzero([np.isnan(comb) for comb in comb_errors_below_thresh]) / len(comb_errors_below_thresh)
-        cameras_off_tot.append(cameras_off_count)
-        for cam in range(n_cams):
-            with open(json_tracked_files_f[cam], 'w') as json_tracked_f:
-                with open(json_files_f[cam], 'r') as json_f:
-                    js = json.load(json_f)
-                    js_new = js.copy()
-                    js_new['people'] = []
-                    for new_comb in comb_errors_below_thresh:
-                        if not np.isnan(new_comb[cam]):
-                            js_new['people'] += [js['people'][int(new_comb[cam])]]
-                        else:
-                            js_new['people'] += [{}]
-                json_tracked_f.write(json.dumps(js_new))
+        rewrite_json_files(json_tracked_files_f, json_files_f, proposals, n_cams)

     # recap message
     recap_tracking(config, error_min_tot, cameras_off_tot)
@@ -1,18 +1,10 @@
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-from scipy import signal
-from scipy import interpolate
-import json
-import os
-import fnmatch
-import pickle as pk
-import re
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-

 '''
 #########################################
-## Synchronize cameras                 ##
+## SYNCHRONIZE CAMERAS                 ##
 #########################################

 Steps undergone in this script
@@ -25,64 +17,90 @@ import re
 '''

-############
-# FUNCTIONS#
-############
+## INIT
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from scipy import signal
+from scipy import interpolate
+import json
+import os
+import fnmatch
+import pickle as pk
+import re
+
+from Pose2Sim.filtering import loess_filter_1d
+
+
+## AUTHORSHIP INFORMATION
+__author__ = "HunMin Kim, David Pagnon"
+__copyright__ = "Copyright 2021, Pose2Sim"
+__credits__ = ["David Pagnon"]
+__license__ = "BSD 3-Clause License"
+__version__ = '0.7'
+__maintainer__ = "David Pagnon"
+__email__ = "contact@david-pagnon.com"
+__status__ = "Development"


-def convert_json2csv(json_dir):
-    """
+# FUNCTIONS
+def convert_json2pandas(json_dir):
+    '''
     Convert JSON files in a directory to a pandas DataFrame.

-    Args:
-        json_dir (str): The directory path containing the JSON files.
-
-    Returns:
-        pandas.DataFrame: A DataFrame containing the coordinates extracted from the JSON files.
-    """
+    INPUTS:
+    - json_dir: str. The directory path containing the JSON files.
+
+    OUTPUT:
+    - df_json_coords: dataframe. Extracted coordinates in a pandas dataframe.
+    '''
+
     json_files_names = fnmatch.filter(os.listdir(os.path.join(json_dir)), '*.json') # modified ( 'json' to '*.json' )
-    json_files_names.sort(key=lambda name: int(re.search(r'(\d+)_keypoints\.json', name).group(1)))
+    json_files_names.sort(key=lambda name: int(re.search(r'(\d+)\.json', name).group(1)))
     json_files_path = [os.path.join(json_dir, j_f) for j_f in json_files_names]
     json_coords = []
     for i, j_p in enumerate(json_files_path):
-        # if i in range(frames)
         with open(j_p) as j_f:
             try:
                 json_data = json.load(j_f)['people'][0]['pose_keypoints_2d']
             except:
                 print(f'No person found in {os.path.basename(json_dir)}, frame {i}')
                 json_data = [0]*75
         json_coords.append(json_data)
     df_json_coords = pd.DataFrame(json_coords)
     return df_json_coords
 def drop_col(df, col_nb):
-    """
+    '''
     Drops every nth column from a DataFrame.

-    Parameters:
-        df (pandas.DataFrame): The DataFrame from which columns will be dropped.
-        col_nb (int): The column number to drop.
-
-    Returns:
-        pandas.DataFrame: The DataFrame with dropped columns.
-    """
+    INPUTS:
+    - df: dataframe. The DataFrame from which columns will be dropped.
+    - col_nb: int. The column number to drop.
+
+    OUTPUT:
+    - dataframe: DataFrame with dropped columns.
+    '''
+
     idx_col = list(range(col_nb-1, df.shape[1], col_nb))
     df_dropped = df.drop(idx_col, axis=1)
     df_dropped.columns = range(df_dropped.columns.size)
     return df_dropped
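For instance, on an invented OpenPose-style row (x, y, likelihood triplets), dropping every 3rd column keeps the x, y pairs — a quick rerun of the same indexing:

    import pandas as pd

    df = pd.DataFrame([[100, 200, 0.9, 110, 210, 0.8]])  # x0, y0, c0, x1, y1, c1
    idx_col = list(range(2, df.shape[1], 3))              # columns 2 and 5: the confidences
    df_dropped = df.drop(idx_col, axis=1)
    df_dropped.columns = range(df_dropped.columns.size)
    print(df_dropped)                                     # [[100, 200, 110, 210]]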
 def speed_vert(df, axis='y'):
-    """
+    '''
     Calculate the vertical speed of a DataFrame along a specified axis.

-    Parameters:
-        df (DataFrame): The input DataFrame.
-        axis (str): The axis along which to calculate the speed. Default is 'y'.
-
-    Returns:
-        DataFrame: The DataFrame containing the vertical speed values.
-    """
+    INPUTS:
+    - df: dataframe. DataFrame of 2D coordinates.
+    - axis: str. The axis along which to calculate the speed. Default is 'y'.
+
+    OUTPUT:
+    - DataFrame: The DataFrame containing the vertical speed values.
+    '''
+
     axis_dict = {'x':0, 'y':1, 'z':2}
     df_diff = df.diff()
     df_diff = df_diff.fillna(df_diff.iloc[1]*2)
@@ -91,45 +109,58 @@ def speed_vert(df, axis='y'):
     return df_vert_speed

-def interpolate_nans(col, kind):
+def speed_2D(df):
+    '''
+    Calculate the 2D speed of a DataFrame.
+
+    INPUTS:
+    - df: dataframe. DataFrame of 2D coordinates.
+
+    OUTPUT:
+    - DataFrame: The DataFrame containing the 2D speed values.
+    '''
+
+    df_diff = df.diff()
+    df_diff = df_diff.fillna(df_diff.iloc[1]*2)
+    df_2Dspeed = pd.DataFrame([np.sqrt(df_diff.loc[:,2*k]**2 + df_diff.loc[:,2*k+1]**2) for k in range(int(df_diff.shape[1]/2))]).T
+    return df_2Dspeed
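A toy rerun of the diff-and-backfill pattern shared by speed_vert and speed_2D (invented y coordinates):

    import pandas as pd

    # df.diff() gives per-frame deltas; the NaN in the first row is backfilled
    # with twice the second delta, mirroring the functions above.
    df = pd.DataFrame({'y': [0.0, 1.0, 3.0, 6.0]})
    df_diff = df.diff()
    df_diff = df_diff.fillna(df_diff.iloc[1]*2)
    print(df_diff['y'].tolist())  # [2.0, 1.0, 2.0, 3.0]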

+def interpolate_zeros_nans(col, kind):
     '''
     Interpolate missing points (of value nan)

     INPUTS
-    - col pandas column of coordinates
-    - kind 'linear', 'slinear', 'quadratic', 'cubic'. Default 'cubic'
+    - col: pandas column of coordinates
+    - kind: 'linear', 'slinear', 'quadratic', 'cubic'. Default 'cubic'

     OUTPUT
-    - col_interp interpolated pandas column
+    - col_interp: interpolated pandas column
     '''

-    idx = col.index
-    idx_good = np.where(np.isfinite(col))[0] #index of non zeros
-    if len(idx_good) == 10: return col
-    # idx_notgood = np.delete(np.arange(len(col)), idx_good)
-
-    if not kind: # 'linear', 'slinear', 'quadratic', 'cubic'
-        f_interp = interpolate.interp1d(idx_good, col[idx_good], kind='cubic', bounds_error=False)
-    else:
-        f_interp = interpolate.interp1d(idx_good, col[idx_good], kind=kind, bounds_error=False) # modified
-    col_interp = np.where(np.isfinite(col), col, f_interp(idx)) #replace nans with interpolated values
-    col_interp = np.where(np.isfinite(col_interp), col_interp, np.nanmean(col_interp)) #replace remaining nans
-
-    return col_interp #, idx_notgood
+    mask = ~(np.isnan(col) | col.eq(0)) # True for valid samples (neither nan nor zero)
+    idx_good = np.where(mask)[0]
+    try:
+        f_interp = interpolate.interp1d(idx_good, col[idx_good], kind=kind, bounds_error=False)
+        col_interp = np.where(mask, col, f_interp(col.index))
+        return col_interp
+    except:
+        print('No good values to interpolate')
+        return col
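A hedged, minimal rerun of the same masking and interpolation on an invented series (zeros and nans both count as missing):

    import numpy as np
    import pandas as pd
    from scipy import interpolate

    col = pd.Series([1.0, 0.0, 3.0, np.nan, 5.0])
    mask = ~(np.isnan(col) | col.eq(0))       # zeros are OpenPose's "not detected"
    idx_good = np.where(mask)[0]
    f_interp = interpolate.interp1d(idx_good, col[idx_good], kind='linear', bounds_error=False)
    col_interp = np.where(mask, col, f_interp(col.index))
    print(col_interp)  # [1. 2. 3. 4. 5.]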

 def find_highest_wrist_position(df_coords, wrist_index):
-    """
+    '''
     Find the frame with the highest wrist position in a list of coordinate DataFrames.
     The highest wrist position frame is used for finding the fastest frame.

-    Args:
-        df_coords (list): List of coordinate DataFrames.
-        wrist_index (int): The index of the wrist in the keypoint list.
-
-    Returns:
-        list: The index of the frame with the highest wrist position.
-    """
+    INPUT:
+    - df_coords (list): List of coordinate DataFrames.
+    - wrist_index (int): The index of the wrist in the keypoint list.
+
+    OUTPUT:
+    - list: The index of the frame with the highest wrist position.
+    '''
+
     start_frames = []
     min_y_coords = []
@@ -149,20 +180,22 @@ def find_highest_wrist_position(df_coords, wrist_index):

     return start_frames, min_y_coords


 def find_motion_end(df_coords, wrist_index, start_frame, lowest_y, fps):
-    """
+    '''
     Find the frame where the hands-down movement ends.
     The hands-down movement is defined as the time when the wrist moves down from the highest position.

-    Args:
-        df_coord (DataFrame): The coordinate DataFrame of the reference camera.
-        wrist_index (int): The index of the wrist in the keypoint list.
-        start_frame (int): The frame where the hands down movement starts.
-        fps (int): The frame rate of the cameras in Hz.
-
-    Returns:
-        int: The index of the frame where hands down movement ends.
-    """
+    INPUT:
+    - df_coord (DataFrame): The coordinate DataFrame of the reference camera.
+    - wrist_index (int): The index of the wrist in the keypoint list.
+    - start_frame (int): The frame where the hands-down movement starts.
+    - fps (int): The frame rate of the cameras in Hz.
+
+    OUTPUT:
+    - int: The index of the frame where the hands-down movement ends.
+    '''
+
     y_col_index = wrist_index * 2 + 1
     wrist_y_values = df_coords.iloc[:, y_col_index].values # wrist y-coordinates
     highest_y_value = lowest_y
@ -181,20 +214,21 @@ def find_motion_end(df_coords, wrist_index, start_frame, lowest_y, fps):
|
|||||||
|
|
||||||
return time
|
return time
|
||||||
|
|
||||||
|
|
||||||
def find_fastest_frame(df_speed_list):
-    """
+    '''
    Find the frame with the highest speed in a list of speed DataFrames.
    The fastest frame should be located after the highest wrist position frame.

-    Args:
+    INPUT:
-        df_speed_list (list): List of speed DataFrames.
+    - df_speed_list (list): List of speed DataFrames.
-        df_speed (DataFrame): The speed DataFrame of the reference camera.
+    - df_speed (DataFrame): The speed DataFrame of the reference camera.
-        fps (int): The frame rate of the cameras in Hz.
+    - fps (int): The frame rate of the cameras in Hz.
-        lag_time (float): The time lag in seconds.
+    - lag_time (float): The time lag in seconds.

-    Returns:
+    OUTPUT:
-        int: The index of the frame with the highest speed.
+    - int: The index of the frame with the highest speed.
-    """
+    '''

    for speed_series in df_speed_list:
        max_speed = speed_series.abs().max()
@ -205,32 +239,26 @@ def find_fastest_frame(df_speed_list):
    return max_speed_index, max_speed
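The loop body is elided by the hunk above; as a hedged sketch of the idea rather than the exact implementation, the fastest frame of a speed Series can be read off with idxmax on the absolute values:

``` python
import pandas as pd

speed_series = pd.Series([0.5, -7.2, 3.1, 6.0])
max_speed = speed_series.abs().max()           # 7.2
max_speed_index = speed_series.abs().idxmax()  # 1
print(max_speed_index, max_speed)
```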
-def plot_time_lagged_cross_corr(camx, camy, ax, fps, lag_time, camx_max_speed_index, camy_max_speed_index):
+def plot_time_lagged_cross_corr(camx, camy, ax, fps, lag_time):
-    """
+    '''
    Calculate and plot the max correlation between two cameras with a time lag.
    How it works:
    1. Reference camera is camx and the other is camy. (Reference camera should record last. If not, the offset will be positive.)
    2. The initial shift applied to camy to match camx is calculated.
    3. Additionally shift camy by max_lag frames to find the max correlation.

-    Args:
+    INPUT:
-        camx (pandas.Series): The speed series of the reference camera.
+    - camx: pd.Series. Speed series of the reference camera.
-        camy (pandas.Series): The speed series of the other camera.
+    - camy: pd.Series. Speed series of the other camera.
-        ax (matplotlib.axes.Axes): The axes to plot the correlation.
+    - ax: plt.axis. Plot correlation on second axis.
-        fps (int): The frame rate of the cameras in Hz.
+    - fps: int. Framerate of the cameras in Hz.
-        lag_time (float): The time lag in seconds.
+    - lag_time: float. Time lag in seconds.
-        camx_max_speed_index (int): The index of the frame with the highest speed in camx.
-        camy_max_speed_index (int): The index of the frame with the highest speed in camy.

-    Returns:
+    OUTPUT:
-        int: The offset value to apply to synchronize the cameras.
+    - offset: int. Offset value to apply to synchronize the cameras.
-        float: The maximum correlation value.
+    - max_corr: float. Maximum correlation value.
-    """
+    '''

-    # Initial shift of camy to match camx
-    # initial_shift = -(camy_max_speed_index - camx_max_speed_index) + fps
-    # camy = camy.shift(initial_shift).dropna()

    max_lag = int(fps * lag_time)
    pearson_r = []
    lags = range(-max_lag, 1)
@ -238,7 +266,6 @@ def plot_time_lagged_cross_corr(camx, camy, ax, fps, lag_time, camx_max_speed_in
    for lag in lags:
        if lag < 0:
            shifted_camy = camy.shift(lag).dropna() # shift the camy segment by lag
            corr = camx.corr(shifted_camy) # calculate the correlation between the camx segment and the shifted camy segment
        elif lag == 0:
            corr = camx.corr(camy)
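For intuition about the shift-and-correlate loop, here is a self-contained sketch with synthetic speed signals (the Gaussian pulses and the 30-frame offset are made up for the example):

``` python
import numpy as np
import pandas as pd

fps, lag_time = 60, 1
t = np.arange(300)
pulse = lambda c: np.exp(-((t - c) / 5.0) ** 2)
camx = pd.Series(pulse(150))   # reference camera (started recording last)
camy = pd.Series(pulse(180))   # other camera, started 30 frames earlier

max_lag = int(fps * lag_time)
lags = range(-max_lag, 1)
pearson_r = [camx.corr(camy.shift(lag)) for lag in lags]

offset = list(lags)[int(np.nanargmax(pearson_r))]
print(offset)  # -30: drop camy's first 30 frames to synchronize
```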
@ -265,19 +292,19 @@ def plot_time_lagged_cross_corr(camx, camy, ax, fps, lag_time, camx_max_speed_in

def apply_offset(offset, json_dirs, reset_sync, cam1_nb, cam2_nb):
-    """
+    '''
    Apply the offset to synchronize the cameras.
    Offset is always applied to the second camera.
    The offset will always be negative if the first camera is the last to start recording.
    Delete the camy json files from initial frame to offset frame.

-    Args:
+    INPUT:
-        offset (int): The offset value to apply to synchronize the cameras.
+    - offset (int): The offset value to apply to synchronize the cameras.
-        json_dirs (list): List of directories containing the JSON files for each camera.
+    - json_dirs (list): List of directories containing the JSON files for each camera.
-        reset_sync (bool): Whether to reset the synchronization by deleting the .del files.
+    - reset_sync (bool): Whether to reset the synchronization by deleting the .del files.
-        cam1_nb (int): The number of the reference camera.
+    - cam1_nb (int): The number of the reference camera.
-        cam2_nb (int): The number of the other camera.
+    - cam2_nb (int): The number of the other camera.
-    """
+    '''

    if offset == 0:
        print(f"Cams {cam1_nb} and {cam2_nb} are already synchronized. No offset applied.")
@ -300,59 +327,60 @@ def apply_offset(offset, json_dirs, reset_sync, cam1_nb, cam2_nb):
        os.rename(os.path.join(json_dir_to_offset, json_files[i]), os.path.join(json_dir_to_offset, json_files[i] + '.del'))
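The .del renaming scheme (renaming instead of deleting, so a later run with reset_sync can restore the files) can be sketched as follows; the directory layout, file names, and offset are illustrative only:

``` python
import os, tempfile

# Hypothetical layout: drop the first |offset| frames of cam2 by renaming its json files
json_dir_to_offset = tempfile.mkdtemp()
for f in range(5):
    open(os.path.join(json_dir_to_offset, f'cam2_{f:06d}.json'), 'w').close()

offset = -3   # cam2 started 3 frames before the reference camera
json_files = sorted(f for f in os.listdir(json_dir_to_offset) if f.endswith('.json'))
for i in range(abs(offset)):
    src = os.path.join(json_dir_to_offset, json_files[i])
    os.rename(src, src + '.del')   # reversible: a reset_sync pass can rename them back

print(sorted(os.listdir(json_dir_to_offset)))
```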
def synchronize_cams_all(config_dict):
+    '''
+    '''
-    #############
-    # CONSTANTS #
-    #############

    # get parameters from Config.toml
    project_dir = config_dict.get('project').get('project_dir')
    pose_dir = os.path.realpath(os.path.join(project_dir, 'pose'))
    fps = config_dict.get('project').get('frame_rate') # frame rate of the cameras (Hz)
    reset_sync = config_dict.get('synchronization').get('reset_sync') # Start synchronization over each time it is run
+    id_kpt = 4
+    weights_kpt = 1
+    filter_order = 4
+    filter_cutoff = 6
+    vmax = 20 # px/s

-    # Vertical speeds (on 'Y')
-    speed_kind = config_dict.get('synchronization').get('speed_kind') # this maybe fixed in the future
-    id_kpt = config_dict.get('synchronization').get('id_kpt') # get the numbers from the keypoint names in skeleton.py: 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4 ; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
-    weights_kpt = config_dict.get('synchronization').get('weights_kpt') # only considered if there are multiple keypoints.

-    ######################################
-    # 0. CONVERTING JSON FILES TO PANDAS #
-    ######################################

-    # Also filter, and then save the filtered data
+    # List json files
    pose_listdirs_names = next(os.walk(pose_dir))[1]
    pose_listdirs_names.sort(key=lambda name: int(re.search(r'(\d+)', name).group(1)))
    json_dirs_names = [k for k in pose_listdirs_names if 'json' in k]
    json_dirs = [os.path.join(pose_dir, j_d) for j_d in json_dirs_names] # list of json directories in pose_dir
+    cam_nb = len(json_dirs)

-    # keypoints coordinates
+    # Extract, interpolate, and filter keypoint coordinates
    df_coords = []
+    b, a = signal.butter(filter_order/2, filter_cutoff/(fps/2), 'low', analog = False)
    for i, json_dir in enumerate(json_dirs):
-        df_coords.append(convert_json2csv(json_dir))
+        df_coords.append(convert_json2pandas(json_dir))
        df_coords[i] = drop_col(df_coords[i],3) # drop likelihood
+        df_coords[i] = df_coords[i].apply(interpolate_zeros_nans, axis=0, args = ['cubic'])
+        df_coords[i] = df_coords[i].apply(loess_filter_1d, axis=0, args = [30])
+        df_coords[i] = pd.DataFrame(signal.filtfilt(b, a, df_coords[i], axis=0))
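A note on the new filtering step: half of filter_order is passed to signal.butter because signal.filtfilt runs the filter forward and backward, which doubles the effective order. A minimal sketch of the same design on synthetic data:

``` python
import numpy as np
from scipy import signal

fps, filter_order, filter_cutoff = 60, 4, 6
# Half the order, because filtfilt applies the filter twice (forward + backward)
b, a = signal.butter(filter_order / 2, filter_cutoff / (fps / 2), 'low', analog=False)

noisy = np.sin(np.linspace(0, 10, 600)) + 0.1 * np.random.randn(600)
smooth = signal.filtfilt(b, a, noisy)   # zero-phase low-pass at 6 Hz
```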
-    ## To save it and reopen it if needed
+    # Save keypoint coordinates to pickle
    with open(os.path.join(pose_dir, 'coords'), 'wb') as fp:
        pk.dump(df_coords, fp)
-    with open(os.path.join(pose_dir, 'coords'), 'rb') as fp:
+    # with open(os.path.join(pose_dir, 'coords'), 'rb') as fp:
-        df_coords = pk.load(fp)
+    #     df_coords = pk.load(fp)

-    #############################
-    # 1. COMPUTING SPEEDS #
-    #############################
-    # Vitesse verticale
+    # Compute vertical speed
    df_speed = []
-    for i in range(len(json_dirs)):
+    for i in range(cam_nb):
-        if speed_kind == 'y':
-            df_speed.append(speed_vert(df_coords[i]))
+        df_speed.append(speed_vert(df_coords[i]))
+        # df_speed[i] = df_speed[i].where(abs(df_speed[i])<vmax, other=np.nan) # replaces by nan if jumps in speed
+        # df_speed[i] = df_speed[i].apply(interpolate_nans, axis=0, args = ['cubic'])

+    # Frame with maximum of the sum of absolute speeds
+    max_speed_frame = []
+    for i in range(cam_nb):
+        max_speed_frame += [np.argmax(abs(df_speed[i].sum(axis=1)))]

    #############################################
    # 2. PLOTTING PAIRED CORRELATIONS OF SPEEDS #
@ -368,26 +396,31 @@ def synchronize_cams_all(config_dict):
    lowest_frames, lowest_y_coords = find_highest_wrist_position(df_coords, id_kpt)

    # set reference camera
-    ref_cam_nb = 0
+    nb_frames_per_cam = [len(d) for d in df_speed]
+    ref_cam_id = nb_frames_per_cam.index(min(nb_frames_per_cam))

    max_speeds = []

-    for cam_nb in range(1, len(json_dirs)):
+    cam_list = list(range(cam_nb))
+    cam_list.pop(ref_cam_id)
+    for cam_id in cam_list:
        # find the highest wrist position for each camera
-        camx_start_frame = lowest_frames[ref_cam_nb]
+        camx_start_frame = lowest_frames[ref_cam_id]
-        camy_start_frame = lowest_frames[cam_nb]
+        camy_start_frame = lowest_frames[cam_id]

-        camx_lowest_y = lowest_y_coords[ref_cam_nb]
+        camx_lowest_y = lowest_y_coords[ref_cam_id]
-        camy_lowest_y = lowest_y_coords[cam_nb]
+        camy_lowest_y = lowest_y_coords[cam_id]

-        camx_time = find_motion_end(df_coords[ref_cam_nb], id_kpt[0], camx_start_frame, camx_lowest_y, fps)
+        camx_time = find_motion_end(df_coords[ref_cam_id], id_kpt[0], camx_start_frame, camx_lowest_y, fps)
-        camy_time = find_motion_end(df_coords[cam_nb], id_kpt[0], camy_start_frame, camy_lowest_y, fps)
+        camy_time = find_motion_end(df_coords[cam_id], id_kpt[0], camy_start_frame, camy_lowest_y, fps)

        camx_end_frame = camx_start_frame + int(camx_time * fps)
        camy_end_frame = camy_start_frame + int(camy_time * fps)

-        camx_segment = df_speed[ref_cam_nb].iloc[camx_start_frame:camx_end_frame+1, id_kpt[0]]
+        camx_segment = df_speed[ref_cam_id].iloc[camx_start_frame:camx_end_frame+1, id_kpt[0]]
-        camy_segment = df_speed[cam_nb].iloc[camy_start_frame:camy_end_frame+1, id_kpt[0]]
+        camy_segment = df_speed[cam_id].iloc[camy_start_frame:camy_end_frame+1, id_kpt[0]]

        # Find the fastest speed and the frame
        camx_max_speed_index, camx_max_speed = find_fastest_frame([camx_segment])
@ -410,15 +443,15 @@ def synchronize_cams_all(config_dict):
        camy_end_frame = camy_max_speed_index + (fps) * (lag_time)

        if len(id_kpt) == 1 and id_kpt[0] != 'all':
-            camx = df_speed[ref_cam_nb].iloc[camx_start_frame:camx_end_frame+1, id_kpt[0]]
+            camx = df_speed[ref_cam_id].iloc[camx_start_frame:camx_end_frame+1, id_kpt[0]]
-            camy = df_speed[cam_nb].iloc[camy_start_frame:camy_end_frame+1, id_kpt[0]]
+            camy = df_speed[cam_id].iloc[camy_start_frame:camy_end_frame+1, id_kpt[0]]
        elif id_kpt == ['all']:
-            camx = df_speed[ref_cam_nb].iloc[camx_start_frame:camx_end_frame+1].sum(axis=1)
+            camx = df_speed[ref_cam_id].iloc[camx_start_frame:camx_end_frame+1].sum(axis=1)
-            camy = df_speed[cam_nb].iloc[camy_start_frame:camy_end_frame+1].sum(axis=1)
+            camy = df_speed[cam_id].iloc[camy_start_frame:camy_end_frame+1].sum(axis=1)
        elif len(id_kpt) > 1 and len(id_kpt) == len(weights_kpt):
            dict_id_weights = {i:w for i, w in zip(id_kpt, weights_kpt)}
-            camx = df_speed[ref_cam_nb] @ pd.Series(dict_id_weights).reindex(df_speed[ref_cam_nb].columns, fill_value=0)
+            camx = df_speed[ref_cam_id] @ pd.Series(dict_id_weights).reindex(df_speed[ref_cam_id].columns, fill_value=0)
-            camy = df_speed[cam_nb] @ pd.Series(dict_id_weights).reindex(df_speed[cam_nb].columns, fill_value=0)
+            camy = df_speed[cam_id] @ pd.Series(dict_id_weights).reindex(df_speed[cam_id].columns, fill_value=0)
            camx = camx.iloc[camx_start_frame:camx_end_frame+1]
            camy = camy.iloc[camy_start_frame:camy_end_frame+1]
        else:
@ -431,8 +464,8 @@ def synchronize_cams_all(config_dict):
        f, ax = plt.subplots(2,1)

        # speed
-        camx.plot(ax=ax[0], label = f'cam {ref_cam_nb+1}')
+        camx.plot(ax=ax[0], label = f'cam {ref_cam_id+1}')
-        camy.plot(ax=ax[0], label = f'cam {cam_nb+1}')
+        camy.plot(ax=ax[0], label = f'cam {cam_id+1}')
        ax[0].set(xlabel='Frame',ylabel='Speed (px/frame)')
        ax[0].legend()

@ -440,9 +473,9 @@ def synchronize_cams_all(config_dict):
        offset, max_corr = plot_time_lagged_cross_corr(camx, camy, ax[1], fps, lag_time, camx_max_speed_index, camy_max_speed_index)
        f.tight_layout()
        plt.show()
-        print(f'Using number{id_kpt} keypoint, synchronized camera {ref_cam_nb+1} and camera {cam_nb+1}, with an offset of {offset} and a max correlation of {max_corr}.')
+        print(f'Using number{id_kpt} keypoint, synchronized camera {ref_cam_id+1} and camera {cam_id+1}, with an offset of {offset} and a max correlation of {max_corr}.')

        # apply offset
-        apply_offset(offset, json_dirs, reset_sync, ref_cam_nb, cam_nb)
+        apply_offset(offset, json_dirs, reset_sync, ref_cam_id, cam_id)
@ -18,7 +18,8 @@ cameras are removed than a predefined minimum, triangulation is skipped for
the point and this frame. In the end, missing values are interpolated.

In case of multiple subjects detection, make sure you first run the
-personAssociation module.
+personAssociation module. It will then associate people across frames by
+measuring the frame-by-frame distance between them.

INPUTS:
- a calibration file (.toml extension)
@ -108,6 +109,96 @@ def interpolate_zeros_nans(col, *args):
    return col_interp
+def min_with_single_indices(L, T):
+    '''
+    Let L be a list (size s) with T associated tuple indices (size s).
+    Select the smallest values of L, considering that
+    the next smallest value cannot have the same numbers
+    in the associated tuple as any of the previous ones.
+
+    Example:
+    L = [ 20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3 ]
+    T = list(it.product(range(3),range(4)))
+      = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3)]
+
+    - 1st smallest value: 3 with tuple (2,3), index 11
+    - 2nd smallest value when excluding indices (2,.) and (.,3), i.e. [(0,0),(0,1),(0,2),X,(1,0),(1,1),(1,2),X,X,X,X,X]:
+      20 with tuple (0,0), index 0
+    - 3rd smallest value when excluding [X,X,X,X,X,(1,1),(1,2),X,X,X,X,X]:
+      23 with tuple (1,1), index 5
+
+    INPUTS:
+    - L: list (size s)
+    - T: T associated tuple indices (size s)
+
+    OUTPUTS:
+    - minL: list of smallest values of L, considering constraints on tuple indices
+    - argminL: list of indices of smallest values of L
+    - T_minL: list of tuples associated with smallest values of L
+    '''
+
+    minL = [np.nanmin(L)]
+    argminL = [np.nanargmin(L)]
+    T_minL = [T[argminL[0]]]
+
+    mask_tokeep = np.array([True for t in T])
+    i=0
+    while mask_tokeep.any()==True:
+        mask_tokeep = mask_tokeep & np.array([t[0]!=T_minL[i][0] and t[1]!=T_minL[i][1] for t in T])
+        if mask_tokeep.any()==True:
+            indicesL_tokeep = np.where(mask_tokeep)[0]
+            minL += [np.nanmin(np.array(L)[indicesL_tokeep]) if not np.isnan(np.array(L)[indicesL_tokeep]).all() else np.nan]
+            argminL += [indicesL_tokeep[np.nanargmin(np.array(L)[indicesL_tokeep])] if not np.isnan(minL[-1]) else indicesL_tokeep[0]]
+            T_minL += (T[argminL[i+1]],)
+        i+=1
+
+    return np.array(minL), np.array(argminL), np.array(T_minL)
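Running the docstring's example through the function reproduces its walk-through (a sketch assuming the function above is in scope):

``` python
import itertools as it
import numpy as np

L = [20, 27, 51, 33, 43, 23, 37, 24, 4, 68, 84, 3]
T = list(it.product(range(3), range(4)))   # 12 (row, col) index pairs

minL, argminL, T_minL = min_with_single_indices(L, T)
print(minL)     # [ 3 20 23]
print(argminL)  # [11  0  5]
print(T_minL)   # [[2 3] [0 0] [1 1]]
```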
+def sort_people(Q_kpt_old, Q_kpt):
+    '''
+    Associate persons across frames
+    Persons' indices are sometimes swapped when changing frame
+    A person is associated to another in the next frame when they are at a small distance
+
+    INPUTS:
+    - Q_kpt_old: list of arrays of 3D coordinates [X, Y, Z, 1.] for the previous frame
+    - Q_kpt: idem Q_kpt_old, for current frame
+
+    OUTPUT:
+    - Q_kpt_new: array with reordered persons
+    - personsIDs_sorted: index of reordered persons
+    '''
+
+    # Generate possible person correspondences across frames
+    if len(Q_kpt_old) < len(Q_kpt):
+        Q_kpt_old = np.concatenate((Q_kpt_old, [[0., 0., 0., 1.]]*(len(Q_kpt)-len(Q_kpt_old))))
+    personsIDs_comb = sorted(list(it.product(range(len(Q_kpt_old)),range(len(Q_kpt)))))
+
+    # Compute distance between persons from one frame to another
+    frame_by_frame_dist = []
+    for comb in personsIDs_comb:
+        frame_by_frame_dist += [euclidean_distance(Q_kpt_old[comb[0]][:3],Q_kpt[comb[1]][:3])]
+
+    # sort correspondences by distance
+    minL, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
+    # print('Distances :', minL)
+
+    # associate 3D points to same index across frames, nan if no correspondence
+    Q_kpt_new, personsIDs_sorted = [], []
+    for i in range(len(Q_kpt_old)):
+        id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+        # print('id_in_old ', i, id_in_old)
+        if len(id_in_old) > 0:
+            personsIDs_sorted += id_in_old
+            Q_kpt_new += [Q_kpt[id_in_old[0]]]
+        else:
+            personsIDs_sorted += [-1]
+            Q_kpt_new += [Q_kpt_old[i]]
+
+    return Q_kpt_new, personsIDs_sorted, associated_tuples
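A toy run of sort_people, assuming the module's euclidean_distance helper and min_with_single_indices are in scope; two persons swap detection indices between frames and are re-associated by proximity:

``` python
import numpy as np

# Hypothetical single-point persons as [X, Y, Z, 1.] arrays
person_A = np.array([0., 0., 0., 1.])
person_B = np.array([5., 5., 5., 1.])

Q_prev = [person_A, person_B]                  # frame f-1
Q_curr = [person_B + [.1, .1, .1, 0.],         # frame f: detections come back swapped
          person_A + [.1, .1, .1, 0.]]

Q_sorted, ids, tuples = sort_people(Q_prev, Q_curr)
print(ids)  # [1, 0]: previous person 0 matches current detection 1, and vice versa
```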
def make_trc(config, Q, keypoints_names, f_range, id_person=-1):
    '''
    Make Opensim compatible trc file from a dataframe with 3D coordinates
@ -267,7 +358,7 @@ def recap_triangulate(config, error, nb_cams_excluded, keypoints_names, cam_excl
    logging.info(f'On average, {mean_cam_excluded} cameras had to be excluded to reach these thresholds.')

    cam_excluded_count[n] = {i: v for i, v in zip(cam_names, cam_excluded_count[n].values())}
-    cam_excluded_count[n] = {i: cam_excluded_count[n][i] for i in sorted(cam_excluded_count[n].keys())}
+    cam_excluded_count[n] = {k: v for k, v in sorted(cam_excluded_count[n].items(), key=lambda item: item[1])[::-1]}
    str_cam_excluded_count = ''
    for i, (k, v) in enumerate(cam_excluded_count[n].items()):
        if i ==0:
@ -525,7 +616,7 @@ def triangulation_from_best_cameras(config, coords_2D_kpt, coords_2D_kpt_swapped
            Q = np.array([np.nan, np.nan, np.nan])

    return Q, error_min, nb_cams_excluded, id_excluded_cams

def extract_files_frame_f(json_tracked_files_f, keypoints_ids, nb_persons_to_detect):
    '''
@ -584,7 +675,7 @@ def triangulate_all(config):

    INPUTS:
    - a calibration file (.toml extension)
-    - json files for each camera with only one person of interest
+    - json files for each camera with indices matching the detected persons
    - a Config.toml file
    - a skeleton model
@ -655,9 +746,7 @@ def triangulate_all(config):
    # Prep triangulation
    f_range = [[0,min([len(j) for j in json_files_names])] if frame_range==[] else frame_range][0]
    frames_nb = f_range[1]-f_range[0]

    nb_persons_to_detect = max([len(json.load(open(json_fname))['people']) for json_fname in json_tracked_files[0]])

    n_cams = len(json_dirs_names)

    # Check that camera number is consistent between calibration file and pose folders
@ -667,8 +756,14 @@ def triangulate_all(config):
        and {n_cams} cameras based on the number of pose folders.')

    # Triangulation
+    Q = [[[np.nan]*3]*keypoints_nb for n in range(nb_persons_to_detect)]
+    Q_old = [[[np.nan]*3]*keypoints_nb for n in range(nb_persons_to_detect)]
+    error = [[] for n in range(nb_persons_to_detect)]
+    nb_cams_excluded = [[] for n in range(nb_persons_to_detect)]
+    id_excluded_cams = [[] for n in range(nb_persons_to_detect)]
    Q_tot, error_tot, nb_cams_excluded_tot,id_excluded_cams_tot = [], [], [], []
    for f in tqdm(range(frames_nb)):
+        # print(f'\nFrame {f}:')
        # Get x,y,likelihood values from files
        json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
        # print(json_tracked_files_f)
@ -692,12 +787,19 @@ def triangulate_all(config):
            y_files[n][likelihood_files[n] < likelihood_threshold] = np.nan
            likelihood_files[n][likelihood_files[n] < likelihood_threshold] = np.nan

+        # Q_old = Q except when it has nan, otherwise it takes the Q_old value
+        nan_mask = np.isnan(Q)
+        Q_old = np.where(nan_mask, Q_old, Q)
+        error_old, nb_cams_excluded_old, id_excluded_cams_old = error.copy(), nb_cams_excluded.copy(), id_excluded_cams.copy()
        Q = [[] for n in range(nb_persons_to_detect)]
        error = [[] for n in range(nb_persons_to_detect)]
        nb_cams_excluded = [[] for n in range(nb_persons_to_detect)]
        id_excluded_cams = [[] for n in range(nb_persons_to_detect)]

        for n in range(nb_persons_to_detect):
            for keypoint_idx in keypoints_idx:
+            # keypoints_nb = 2
+            # for keypoint_idx in range(2):
                # Triangulate cameras with min reprojection error
                # print('\n', keypoints_names[keypoint_idx])
                coords_2D_kpt = np.array( (x_files[n][:, keypoint_idx], y_files[n][:, keypoint_idx], likelihood_files[n][:, keypoint_idx]) )
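In isolation, the Q_old update above is a nan-aware carry-forward: it keeps the last valid triangulated position of each person, so that sort_people always has a usable anchor. A reduced sketch:

``` python
import numpy as np

Q_old = np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])            # last known positions
Q     = np.array([[1.1, 1.1, 1.1], [np.nan, np.nan, np.nan]])   # person 1 lost this frame

nan_mask = np.isnan(Q)
Q_old = np.where(nan_mask, Q_old, Q)   # take Q where valid, else keep Q_old
print(Q_old)  # [[1.1 1.1 1.1] [2.  2.  2. ]]
```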
@ -709,7 +811,31 @@ def triangulate_all(config):
                error[n].append(error_kpt)
                nb_cams_excluded[n].append(nb_cams_excluded_kpt)
                id_excluded_cams[n].append(id_excluded_cams_kpt)

+        if multi_person:
+            # reID persons across frames by checking the distance from one frame to another
+            # print('Q before ordering ', np.array(Q)[:,:2])
+            if f != 0:
+                Q, personsIDs_sorted, associated_tuples = sort_people(Q_old, Q)
+                # print('Q after ordering ', personsIDs_sorted, associated_tuples, np.array(Q)[:,:2])
+
+                error_sorted, nb_cams_excluded_sorted, id_excluded_cams_sorted = [], [], []
+                for i in range(len(Q)):
+                    id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
+                    if len(id_in_old) > 0:
+                        personsIDs_sorted += id_in_old
+                        error_sorted += [error[id_in_old[0]]]
+                        nb_cams_excluded_sorted += [nb_cams_excluded[id_in_old[0]]]
+                        id_excluded_cams_sorted += [id_excluded_cams[id_in_old[0]]]
+                    else:
+                        personsIDs_sorted += [-1]
+                        error_sorted += [error[i]]
+                        nb_cams_excluded_sorted += [nb_cams_excluded[i]]
+                        id_excluded_cams_sorted += [id_excluded_cams[i]]
+                error, nb_cams_excluded, id_excluded_cams = error_sorted, nb_cams_excluded_sorted, id_excluded_cams_sorted
+
+        # TODO: if distance > threshold, new person

        # Add triangulated points, errors and excluded cameras to pandas dataframes
        Q_tot.append([np.concatenate(Q[n]) for n in range(nb_persons_to_detect)])
        error_tot.append([error[n] for n in range(nb_persons_to_detect)])
@ -717,6 +843,13 @@ def triangulate_all(config):
    id_excluded_cams = [[id_excluded_cams[n][k] for k in range(keypoints_nb)] for n in range(nb_persons_to_detect)]
    id_excluded_cams_tot.append(id_excluded_cams)

+    # fill values for if a person that was not initially detected has entered the frame
+    Q_tot = [list(tpl) for tpl in zip(*it.zip_longest(*Q_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    error_tot = [list(tpl) for tpl in zip(*it.zip_longest(*error_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    nb_cams_excluded_tot = [list(tpl) for tpl in zip(*it.zip_longest(*nb_cams_excluded_tot, fillvalue=[np.nan]*keypoints_nb*3))]
+    id_excluded_cams_tot = [list(tpl) for tpl in zip(*it.zip_longest(*id_excluded_cams_tot, fillvalue=[np.nan]*keypoints_nb*3))]
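The zip_longest transpose-pad-transpose trick pads the frames recorded before a late-entering person appeared, so that every frame ends up with one entry per person. A reduced sketch with made-up values:

``` python
import itertools as it
import numpy as np

keypoints_nb = 1
# Frame-major list: frame 0 saw one person, frames 1 and 2 saw two
Q_tot = [[[0.0, 0.0, 0.0]],
         [[0.1, 0.1, 0.1], [9.0, 9.0, 9.0]],
         [[0.2, 0.2, 0.2], [9.1, 9.1, 9.1]]]

# Transpose to person-major, pad missing entries with nan rows, transpose back
Q_tot = [list(tpl) for tpl in zip(*it.zip_longest(*Q_tot, fillvalue=[np.nan]*keypoints_nb*3))]
print(Q_tot[0])  # [[0.0, 0.0, 0.0], [nan, nan, nan]]: person 2 padded in frame 0
```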
+    # dataframes for each person
    Q_tot = [pd.DataFrame([Q_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
    error_tot = [pd.DataFrame([error_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
    nb_cams_excluded_tot = [pd.DataFrame([nb_cams_excluded_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
README.md
@ -17,7 +17,7 @@
##### N.B.: Please set undistort_points and handle_LR_swap to false for now, since they currently lead to inaccuracies. I'll try to fix it soon.

> **_News_: Version 0.7:**\
-> **Multi-person analysis is now supported!**\
+> **Multi-person analysis is now supported!** The latest version is 100 times faster than the one before, and also more robust.\
> Team sports, combat sports, and ballroom dancing can now take advantage of Pose2Sim full potential.\
> **Other recently added features**: Automatic batch processing, Marker augmentation, Blender visualization.
<!-- Incidentally, right/left limb swapping is now handled, which is useful if few cameras are used;\
@ -307,27 +307,34 @@ All AlphaPose models are supported (HALPE_26, HALPE_68, HALPE_136, COCO_133, COC
> _**Cameras need to be synchronized, so that 2D points correspond to the same position across cameras.**_\
***N.B.:** Skip this step if your cameras are already synchronized.*

+Open an Anaconda prompt or a terminal in a `Session`, `Participant`, or `Trial` folder.\
+Type `ipython`.

``` python
from Pose2Sim import Pose2Sim
Pose2Sim.synchronization()
```

The reference camera (usually cam1) should be the last one to start recording.\
-Set fps, id_kpt, weight_kpt, reset_sync in Config.toml.\
**How to get a perfect sync point**
-1. Set cameras position where they can see person wrist clearly.
+1. Position the cameras so that they can see `id_kpt` (default: `RWrist`) clearly.
2. Press the record buttons; the camera whose button is pressed last becomes the reference camera.
3. Walk to a proper location (see 1).
4. Raise your hands.
5. Bring your hands down quickly.

+Check the printed output. If the results are not satisfying, try relaxing the constraints in the [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/S00_Demo_Session/Config.toml) file.

Alternatively, use a flashlight or a clap to synchronize them. GoPro cameras can also be synchronized with a timecode, by GPS (outdoors) or with a remote control (slightly less reliable).

</br>

## Camera calibration
> _**Calculate camera intrinsic properties and extrinsic locations and positions.\
-> Convert a preexisting calibration file, or calculate intrinsic and extrinsic parameters from scratch.**_ \
+> Convert a preexisting calibration file, or calculate intrinsic and extrinsic parameters from scratch.**_

Open an Anaconda prompt or a terminal in a `Session`, `Participant`, or `Trial` folder.\
Type `ipython`.