Delete Pose2Sim/Demo/S01_Empty_Session/S01_P01_Participant1 directory

HunMinKim 2024-01-09 20:33:26 +09:00 committed by GitHub
parent f97c3911d2
commit 3d201d2d44
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 0 additions and 996 deletions

@@ -1,249 +0,0 @@
###############################################################################
## PROJECT PARAMETERS ##
###############################################################################
# Configure your project parameters here.
#
# IMPORTANT:
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global
# instructions for the Session and alter them for specific Participants or Trials.
#
# If you wish to overwrite a parameter for a specific trial or participant,
# edit its Config.toml file by uncommenting its key (e.g., [project])
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
# [filtering.butterworth] and set cut_off_frequency = 10, etc.
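## For instance, a hypothetical trial-level Config.toml that only overrides the frame range
## and the Butterworth cut-off could contain nothing more than:
##     [project]
##     frame_range = [10,300]
##     [filtering.butterworth]
##     cut_off_frequency = 10
## Every other parameter is then inherited from the Participant- and Session-level files.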
# [project]
# frame_rate = 60 # FPS
# frame_range = [] # For example [10,300], or [] for all frames
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
# participant_height = 1.7 # m
# participant_mass = 70 # kg
## Only taken into account if pose_model is 'BODY_25_AUGMENTED' # Coming soon!
## Take heart, calibration is not that complicated once you get the hang of it!
# [calibration]
# calibration_type = 'convert' # 'convert' or 'calculate'
# [calibration.convert]
# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
# [calibration.convert.qualisys]
# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
# [calibration.convert.optitrack] # See readme for instructions
# [calibration.convert.vicon] # No parameter needed
# [calibration.convert.opencap] # No parameter needed
# [calibration.convert.easymocap] # No parameter needed
# [calibration.convert.biocv] # No parameter needed
# [calibration.convert.anipose] # No parameter needed
# [calibration.convert.freemocap] # No parameter needed
# [calibration.calculate]
## Camera properties, theoretically need to be calculated only once in a camera lifetime
# [calibration.calculate.intrinsics]
# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
# show_detection_intrinsics = true # true or false (lowercase)
# intrinsics_extension = 'jpg' # any video or image extension
# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be < 1)
# intrinsics_corners_nb = [4,7]
# intrinsics_square_size = 60 # mm
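## (Assuming the usual OpenCV convention, the corner counts refer to inner corners, so a board
## with 5 x 8 printed squares gives intrinsics_corners_nb = [4,7]; intrinsics_square_size is the
## side length of one square, in mm.)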
## Camera placements, need to be done before every session
# [calibration.calculate.extrinsics]
# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
## 'board' should be large enough to be detected when laid on the floor. Not recommended.
## 'scene' involves manually clicking points of known coordinates in the scene. Usually more accurate if the points are spread out.
## 'keypoints' uses automatic pose estimation of a person freely walking and waving their arms in the scene. Slightly less accurate, and requires synchronized cameras.
# moving_cameras = false # Not implemented yet
# calculate_extrinsics = true # true or false (lowercase)
# [calibration.calculate.extrinsics.board]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
# [calibration.calculate.extrinsics.scene]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
## in m -> unlike for intrinsics, NOT in mm!
# object_coords_3d = [[-2.0, 0.3, 0.0],
# [-2.0 , 0.0, 0.0],
# [-2.0, 0.0, 0.05],
# [-2.0, -0.3 , 0.0],
# [0.0, 0.3, 0.0],
# [0.0, 0.0, 0.0],
# [0.0, 0.0, 0.05],
# [0.0, -0.3, 0.0]]
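## (For instance, the eight example points above could be tape marks measured on the floor along
## two parallel lines 2 m apart, with a small 5 cm-high object at two of them; any rigid set of
## points whose 3D coordinates you have measured will do.)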
# [calibration.calculate.extrinsics.keypoints]
## Coming soon!
# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
# BODY_25_AUGMENTED # Coming soon!
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
## What follows has not been implemented yet
# overwrite_pose = false
# openpose_path = '' # only checked if OpenPose is used
# [synchronization]
## COMING SOON!
# reset_sync = true # Recalculate synchronization even if already done
# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
# vmax = 20 # px/s
# cam1_nb = 4
# cam2_nb = 3
# id_kpt = [9,10] # Later, look up the id from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
# weights_kpt = [1,1] # Only taken into account if several keypoints are used
# [personAssociation]
# single_person = true # false for multi-person analysis (not supported yet), true to triangulate only the main person in the scene.
# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
# reproj_error_threshold_association = 20 # px
# likelihood_error_threshold_association = 0.2
# [triangulation]
# reproj_error_threshold_triangulation = 15 # px
# likelihood_threshold_triangulation = 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
## 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = true # Better with few cameras (e.g. fewer than 4) and a risk of limb swapping (e.g. a camera facing the sagittal plane); otherwise slightly less accurate and slower
# undistort_points = false # Better if the image is distorted (parallel lines appear curved near the edges, or at least one distortion parameter > 10^-2), but unnecessary (and slightly slower) if distortion is low
# make_c3d = false # save triangulated data in c3d format in addition to trc # Coming soon!
# [filtering]
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = true # true or false (lowercase)
# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
## How much more do you trust the triangulation results (measurements) than the previous data (process, assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9
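## A minimal illustration (independent of Pose2Sim's own implementation) of what the Butterworth
## settings above correspond to in scipy, for a 60 fps trial; `coords` stands for a 1-D array of
## one marker coordinate over time:
##     from scipy.signal import butter, filtfilt
##     b, a = butter(N=4, Wn=6, btype='low', fs=60)  # order = 4, cut_off_frequency = 6 Hz
##     smoothed = filtfilt(b, a, coords)             # zero-phase (forward-backward) filtering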
# [opensim]
# static_trial = ['S01_P01_Participant1/S01_P01_T00_StaticTrial']
# # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
# # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'
## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
##
## If you want to perform inverse kinematics, you will also need to create an OpenSim model
## and add to its markerset the location where you expect the triangulated keypoints to be detected.
##
## In this example, CUSTOM reproduces the BODY_25B skeleton (default skeletons are stored in skeletons.py).
## You can create as many custom skeletons as you want, just add them further down and rename them.
##
## Check your model hierarchy with: for pre, _, node in RenderTree(model):
## print(f'{pre}{node.name} id={node.id}')
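## A minimal sketch of doing this from the Config.toml itself (assuming the `toml` and `anytree`
## packages, that the [pose.CUSTOM] section below has been uncommented, and with dict_to_node as
## a hypothetical helper, not part of Pose2Sim):
##     import toml
##     from anytree import Node, RenderTree
##     def dict_to_node(d, parent=None):
##         node = Node(d['name'], id=d.get('id'), parent=parent)
##         for child in d.get('children', []):
##             dict_to_node(child, parent=node)
##         return node
##     model = dict_to_node(toml.load('Config.toml')['pose']['CUSTOM'])
##     for pre, _, node in RenderTree(model):
##         print(f'{pre}{node.name} id={node.id}')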
# [pose.CUSTOM]
# name = "CHip"
# id = "None"
# [[pose.CUSTOM.children]]
# id = 12
# name = "RHip"
# [[pose.CUSTOM.children.children]]
# id = 14
# name = "RKnee"
# [[pose.CUSTOM.children.children.children]]
# id = 16
# name = "RAnkle"
# [[pose.CUSTOM.children.children.children.children]]
# id = 22
# name = "RBigToe"
# [[pose.CUSTOM.children.children.children.children.children]]
# id = 23
# name = "RSmallToe"
# [[pose.CUSTOM.children.children.children.children]]
# id = 24
# name = "RHeel"
# [[pose.CUSTOM.children]]
# id = 11
# name = "LHip"
# [[pose.CUSTOM.children.children]]
# id = 13
# name = "LKnee"
# [[pose.CUSTOM.children.children.children]]
# id = 15
# name = "LAnkle"
# [[pose.CUSTOM.children.children.children.children]]
# id = 19
# name = "LBigToe"
# [[pose.CUSTOM.children.children.children.children.children]]
# id = 20
# name = "LSmallToe"
# [[pose.CUSTOM.children.children.children.children]]
# id = 21
# name = "LHeel"
# [[pose.CUSTOM.children]]
# id = 17
# name = "Neck"
# [[pose.CUSTOM.children.children]]
# id = 18
# name = "Head"
# [[pose.CUSTOM.children.children.children]]
# id = 0
# name = "Nose"
# [[pose.CUSTOM.children.children]]
# id = 6
# name = "RShoulder"
# [[pose.CUSTOM.children.children.children]]
# id = 8
# name = "RElbow"
# [[pose.CUSTOM.children.children.children.children]]
# id = 10
# name = "RWrist"
# [[pose.CUSTOM.children.children]]
# id = 5
# name = "LShoulder"
# [[pose.CUSTOM.children.children.children]]
# id = 7
# name = "LElbow"
# [[pose.CUSTOM.children.children.children.children]]
# id = 9
# name = "LWrist"

@@ -1,249 +0,0 @@
###############################################################################
## PROJECT PARAMETERS ##
###############################################################################
# Configure your project parameters here.
#
# IMPORTANT:
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global
# instructions for the Session and alter them for specific Participants or Trials.
#
# If you wish to overwrite a parameter for a specific trial or participant,
# edit its Config.toml file by uncommenting its key (e.g., [project])
# and editing its value (e.g., frame_range = [10,300]). Or else, uncomment
# [filtering.butterworth] and set cut_off_frequency = 10, etc.
# [project]
# frame_rate = 60 # FPS
# frame_range = [] # For example [10,300], or [] for all frames
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
# participant_height = 1.7 # m
# participant_mass = 70 # kg
## Only taken into account if pose_model is 'BODY_25_AUGMENTED' # Coming soon!
## Take heart, calibration is not that complicated once you get the hang of it!
# [calibration]
# calibration_type = 'convert' # 'convert' or 'calculate'
# [calibration.convert]
# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', or 'biocv'
# [calibration.convert.qualisys]
# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
# [calibration.convert.optitrack] # See readme for instructions
# [calibration.convert.vicon] # No parameter needed
# [calibration.convert.opencap] # No parameter needed
# [calibration.convert.easymocap] # No parameter needed
# [calibration.convert.biocv] # No parameter needed
# [calibration.convert.anipose] # No parameter needed
# [calibration.convert.freemocap] # No parameter needed
# [calibration.calculate]
## Camera properties, theoretically need to be calculated only once in a camera lifetime
# [calibration.calculate.intrinsics]
# overwrite_intrinsics = false # overwrite (or not) if they have already been calculated?
# show_detection_intrinsics = true # true or false (lowercase)
# intrinsics_extension = 'jpg' # any video or image extension
# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be < 1)
# intrinsics_corners_nb = [4,7]
# intrinsics_square_size = 60 # mm
## Camera placements, need to be done before every session
# [calibration.calculate.extrinsics]
# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
## 'board' should be large enough to be detected when laid on the floor. Not recommended.
## 'scene' involves manually clicking points of known coordinates in the scene. Usually more accurate if the points are spread out.
## 'keypoints' uses automatic pose estimation of a person freely walking and waving their arms in the scene. Slightly less accurate, and requires synchronized cameras.
# moving_cameras = false # Not implemented yet
# calculate_extrinsics = true # true or false (lowercase)
# [calibration.calculate.extrinsics.board]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
# [calibration.calculate.extrinsics.scene]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
## list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
## in m -> unlike for intrinsics, NOT in mm!
# object_coords_3d = [[-2.0, 0.3, 0.0],
# [-2.0 , 0.0, 0.0],
# [-2.0, 0.0, 0.05],
# [-2.0, -0.3 , 0.0],
# [0.0, 0.3, 0.0],
# [0.0, 0.0, 0.0],
# [0.0, 0.0, 0.05],
# [0.0, -0.3, 0.0]]
# [calibration.calculate.extrinsics.keypoints]
## Coming soon!
# [pose]
# pose_framework = 'openpose' # 'openpose', 'mediapipe', 'alphapose', 'deeplabcut'
# pose_model = 'BODY_25B' #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII,
# BODY_25_AUGMENTED # Coming soon!
# #With mediapipe: BLAZEPOSE.
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133.
# #With deeplabcut: CUSTOM. See example at the end of the file.
## What follows has not been implemented yet
# overwrite_pose = false
# openpose_path = '' # only checked if OpenPose is used
# [synchronization]
## COMING SOON!
# reset_sync = true # Recalculate synchronization even if already done
# frames = [2850,3490] # Frames to use for synchronization, should point to a moment with fast motion.
# cut_off_frequency = 10 # cut-off frequency for a 4th order low-pass Butterworth filter
## Vertical speeds (on X, Y, or Z axis, or 2D speeds)
# speed_kind = 'y' # 'x', 'y', 'z', or '2D'
# vmax = 20 # px/s
# cam1_nb = 4
# cam2_nb = 3
# id_kpt = [9,10] # Later, look up the id from the keypoint name in skeleton.py. 'RWrist' BLAZEPOSE 16, BODY_25B 10, BODY_25 4; 'LWrist' BLAZEPOSE 15, BODY_25B 9, BODY_25 7
# weights_kpt = [1,1] # Only taken into account if several keypoints are used
# [personAssociation]
# single_person = true # false for multi-person analysis (not supported yet), true to triangulate only the main person in the scene.
# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
# reproj_error_threshold_association = 20 # px
# likelihood_error_threshold_association = 0.2
# [triangulation]
# reproj_error_threshold_triangulation = 15 # px
# likelihood_threshold_triangulation = 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
## 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = true # Better with few cameras (e.g. fewer than 4) and a risk of limb swapping (e.g. a camera facing the sagittal plane); otherwise slightly less accurate and slower
# undistort_points = false # Better if the image is distorted (parallel lines appear curved near the edges, or at least one distortion parameter > 10^-2), but unnecessary (and slightly slower) if distortion is low
# make_c3d = false # save triangulated data in c3d format in addition to trc # Coming soon!
[filtering]
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
display_figures = true # true or false (lowercase)
# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
## How much more do you trust the triangulation results (measurements) than the previous data (process, assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9
# [opensim]
# static_trial = ['S01_P01_Participant1/S01_P01_T00_StaticTrial']
# # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # At the Participant level, specify the name of the static trial folder, e.g. ['S00_P00_T00_StaticTrial'];
# # At the Session level, add the participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'
## CUSTOM skeleton, if you trained your own DeepLabCut model for example.
## Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
##
## If you want to perform inverse kinematics, you will also need to create an OpenSim model
## and add to its markerset the location where you expect the triangulated keypoints to be detected.
##
## In this example, CUSTOM reproduces the BODY_25B skeleton (default skeletons are stored in skeletons.py).
## You can create as many custom skeletons as you want, just add them further down and rename them.
##
## Check your model hierarchy with: for pre, _, node in RenderTree(model):
## print(f'{pre}{node.name} id={node.id}')
# [pose.CUSTOM]
# name = "CHip"
# id = "None"
# [[pose.CUSTOM.children]]
# id = 12
# name = "RHip"
# [[pose.CUSTOM.children.children]]
# id = 14
# name = "RKnee"
# [[pose.CUSTOM.children.children.children]]
# id = 16
# name = "RAnkle"
# [[pose.CUSTOM.children.children.children.children]]
# id = 22
# name = "RBigToe"
# [[pose.CUSTOM.children.children.children.children.children]]
# id = 23
# name = "RSmallToe"
# [[pose.CUSTOM.children.children.children.children]]
# id = 24
# name = "RHeel"
# [[pose.CUSTOM.children]]
# id = 11
# name = "LHip"
# [[pose.CUSTOM.children.children]]
# id = 13
# name = "LKnee"
# [[pose.CUSTOM.children.children.children]]
# id = 15
# name = "LAnkle"
# [[pose.CUSTOM.children.children.children.children]]
# id = 19
# name = "LBigToe"
# [[pose.CUSTOM.children.children.children.children.children]]
# id = 20
# name = "LSmallToe"
# [[pose.CUSTOM.children.children.children.children]]
# id = 21
# name = "LHeel"
# [[pose.CUSTOM.children]]
# id = 17
# name = "Neck"
# [[pose.CUSTOM.children.children]]
# id = 18
# name = "Head"
# [[pose.CUSTOM.children.children.children]]
# id = 0
# name = "Nose"
# [[pose.CUSTOM.children.children]]
# id = 6
# name = "RShoulder"
# [[pose.CUSTOM.children.children.children]]
# id = 8
# name = "RElbow"
# [[pose.CUSTOM.children.children.children.children]]
# id = 10
# name = "RWrist"
# [[pose.CUSTOM.children.children]]
# id = 5
# name = "LShoulder"
# [[pose.CUSTOM.children.children.children]]
# id = 7
# name = "LElbow"
# [[pose.CUSTOM.children.children.children.children]]
# id = 9
# name = "LWrist"
