fixed calib in multi_person

This commit is contained in:
davidpagnon 2024-07-17 13:49:12 +02:00
parent e5dd81d94d
commit e270c6d1d8
27 changed files with 865 additions and 22 deletions

View File

@ -0,0 +1,269 @@
###############################################################################
## PROJECT PARAMETERS ##
###############################################################################
# Configure your project parameters here.
#
# IMPORTANT:
# If a parameter is not found here, Pose2Sim will look for its value in the
# Config.toml file of the level above. This way, you can set global batch
# instructions and alter them for specific trials.
#
# If you wish to overwrite a parameter for a specific trial, edit
# its Config.toml file by uncommenting its key (e.g., [project])
# and editing its value (e.g., frame_range = [10,300]). Also try
# uncommenting [filtering.butterworth] and set cut_off_frequency = 10, etc.
[project]
multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # Only used for marker augmentation
participant_mass = 70.0 # kg # Only used for marker augmentation and scaling
frame_rate = 'auto' # fps # int or 'auto'. If 'auto', finds from video (or defaults to 60 fps if you work with images)
frame_range = [] # For example [10,300], or [] for all frames.
## If cameras are not synchronized, designates the frame range of the camera with the shortest recording time
## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
[pose]
vid_img_extension = 'mp4' # any video or image extension
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
# /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
#With mediapipe: BLAZEPOSE
#With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133
#With deeplabcut: CUSTOM. See example at the end of the file
mode = 'balanced' # 'lightweight', 'balanced', 'performance'
det_frequency = 100 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames).
# Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
display_detection = true
overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
[synchronization]
display_sync_plots = true # true or false (lowercase)
keypoints_to_consider = ['RWrist'] # 'all' if all points should be considered, for example if the participant did not perform any particular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
# ['RWrist', 'RElbow'] list of keypoint names if you want to specify the keypoints to consider.
approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)
# [10.0, 2.0, 8.0, 11.0] list of times in seconds, one value per camera if you want to specify the approximate time of a clear vertical event by one person standing alone in the scene
time_range_around_maxspeed = 2.0 # Search for best correlation in the range [approx_time_maxspeed - time_range_around_maxspeed, approx_time_maxspeed + time_range_around_maxspeed]
likelihood_threshold = 0.4 # Keypoints whose likelihood is below likelihood_threshold are filtered out
filter_cutoff = 6 # time series are smoothed to get coherent time-lagged correlation
filter_order = 4
# Take heart, calibration is not that complicated once you get the hang of it!
[calibration]
calibration_type = 'convert' # 'convert' or 'calculate'
[calibration.convert]
convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
[calibration.convert.qualisys]
binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
[calibration.convert.optitrack] # See readme for instructions
[calibration.convert.vicon] # No parameter needed
[calibration.convert.opencap] # No parameter needed
[calibration.convert.easymocap] # No parameter needed
[calibration.convert.biocv] # No parameter needed
[calibration.convert.anipose] # No parameter needed
[calibration.convert.freemocap] # No parameter needed
[calibration.calculate]
# Camera properties, theoretically need to be calculated only once in a camera lifetime
[calibration.calculate.intrinsics]
overwrite_intrinsics = false # set to false if you don't want to recalculate intrinsic parameters
show_detection_intrinsics = true # true or false (lowercase)
intrinsics_extension = 'jpg' # any video or image extension
extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
intrinsics_corners_nb = [4,7]
intrinsics_square_size = 60 # mm
# Camera placements, need to be done before every session
[calibration.calculate.extrinsics]
calculate_extrinsics = true # true or false (lowercase)
extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
# 'board' should be large enough to be detected when laid on the floor. Not recommended.
# 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
# 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
moving_cameras = false # Not implemented yet
[calibration.calculate.extrinsics.board]
show_reprojection_error = true # true or false (lowercase)
extrinsics_extension = 'png' # any video or image extension
extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
[calibration.calculate.extrinsics.scene]
show_reprojection_error = true # true or false (lowercase)
extrinsics_extension = 'png' # any video or image extension
# list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
# in m -> unlike for intrinsics, NOT in mm!
object_coords_3d = [[-2.0, 0.3, 0.0],
[-2.0 , 0.0, 0.0],
[-2.0, 0.0, 0.05],
[-2.0, -0.3 , 0.0],
[0.0, 0.3, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.05],
[0.0, -0.3, 0.0]]
[calibration.calculate.extrinsics.keypoints]
# Coming soon!
[personAssociation]
likelihood_threshold_association = 0.3
[personAssociation.single_person]
reproj_error_threshold_association = 20 # px
tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
# and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' or 'RShoulder')
[personAssociation.multi_person]
reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
min_affinity = 0.2 # affinity below which a correspondence is ignored
[triangulation]
reorder_trc = false # only checked if multi_person analysis
reproj_error_threshold_triangulation = 15 # px
likelihood_threshold_triangulation = 0.3
min_cameras_for_triangulation = 2
interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
# 'none' if you don't want to interpolate missing points
interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
undistort_points = false # Better if distorted image (parallel lines curvy on the edge or at least one param > 10^-2), but unnecessary (and slightly slower) if distortions are low
make_c3d = true # save triangulated data in c3d format in addition to trc
[filtering]
type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
display_figures = false # true or false (lowercase) # fails when run multiple times https://github.com/superjax/plotWindow/issues/7
make_c3d = true # also save triangulated data in c3d format
[filtering.butterworth]
order = 4
cut_off_frequency = 6 # Hz
[filtering.kalman]
# How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
smooth = true # should be true, unless you need real-time filtering
[filtering.butterworth_on_speed]
order = 4
cut_off_frequency = 10 # Hz
[filtering.gaussian]
sigma_kernel = 2 #px
[filtering.LOESS]
nb_values_used = 30 # = fraction of data used * nb frames
[filtering.median]
kernel_size = 9
[markerAugmentation]
## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
make_c3d = true # save triangulated data in c3d format in addition to trc
[opensim]
static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
# # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial'];
# # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
opensim_bin_path = 'C:\OpenSim 4.4\bin'
# CUSTOM skeleton, if you trained your own model from DeepLabCut or MMPose for example.
# Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
#
# If you want to perform inverse kinematics, you will also need to create an OpenSim model
# and add to its markerset the location where you expect the triangulated keypoints to be detected.
#
# In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
# You can create as many custom skeletons as you want, just add them further down and rename them.
#
# Check your model hierarchy with: for pre, _, node in RenderTree(model):
# print(f'{pre}{node.name} id={node.id}')
[pose.CUSTOM]
name = "CHip"
id = "19"
[[pose.CUSTOM.children]]
name = "RHip"
id = 12
[[pose.CUSTOM.children.children]]
name = "RKnee"
id = 14
[[pose.CUSTOM.children.children.children]]
name = "RAnkle"
id = 16
[[pose.CUSTOM.children.children.children.children]]
name = "RBigToe"
id = 21
[[pose.CUSTOM.children.children.children.children.children]]
name = "RSmallToe"
id = 23
[[pose.CUSTOM.children.children.children.children]]
name = "RHeel"
id = 25
[[pose.CUSTOM.children]]
name = "LHip"
id = 11
[[pose.CUSTOM.children.children]]
name = "LKnee"
id = 13
[[pose.CUSTOM.children.children.children]]
name = "LAnkle"
id = 15
[[pose.CUSTOM.children.children.children.children]]
name = "LBigToe"
id = 20
[[pose.CUSTOM.children.children.children.children.children]]
name = "LSmallToe"
id = 22
[[pose.CUSTOM.children.children.children.children]]
name = "LHeel"
id = 24
[[pose.CUSTOM.children]]
name = "Neck"
id = 18
[[pose.CUSTOM.children.children]]
name = "Head"
id = 17
[[pose.CUSTOM.children.children.children]]
name = "Nose"
id = 0
[[pose.CUSTOM.children.children]]
name = "RShoulder"
id = 6
[[pose.CUSTOM.children.children.children]]
name = "RElbow"
id = 8
[[pose.CUSTOM.children.children.children.children]]
name = "RWrist"
id = 10
[[pose.CUSTOM.children.children]]
name = "LShoulder"
id = 5
[[pose.CUSTOM.children.children.children]]
name = "LElbow"
id = 7
[[pose.CUSTOM.children.children.children.children]]
name = "LWrist"
id = 9

View File

@ -0,0 +1,269 @@
# ###############################################################################
# ## PROJECT PARAMETERS ##
# ###############################################################################
# # Configure your project parameters here.
# #
# # IMPORTANT:
# # If a parameter is not found here, Pose2Sim will look for its value in the
# # Config.toml file of the level above. This way, you can set global batch
# # instructions and alter them for specific trials.
# #
# # If you wish to overwrite a parameter for a specific trial, edit
# # its Config.toml file by uncommenting its key (e.g., [project])
# # and editing its value (e.g., frame_range = [10,300]). Also try
# # uncommenting [filtering.butterworth] and set cut_off_frequency = 10, etc.
# [project]
# multi_person = false # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it runs much faster).
# participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # Only used for marker augmentation
# participant_mass = 70.0 # kg # Only used for marker augmentation and scaling
# frame_rate = 'auto' # fps # int or 'auto'. If 'auto', finds from video (or defaults to 60 fps if you work with images)
# frame_range = [] # For example [10,300], or [] for all frames.
# ## If cameras are not synchronized, designates the frame range of the camera with the shortest recording time
# ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
# ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
# ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# # e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
# [pose]
# vid_img_extension = 'mp4' # any video or image extension
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
# # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133
# #With deeplabcut: CUSTOM. See example at the end of the file
# mode = 'balanced' # 'lightweight', 'balanced', 'performance'
# det_frequency = 100 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames).
# # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
# tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
# overwrite_pose = true # set to false if you don't want to recalculate pose estimation when it has already been done
# display_detection = true
# save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
# output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
# [synchronization]
# display_sync_plots = true # true or false (lowercase)
# keypoints_to_consider = ['RWrist'] # 'all' if all points should be considered, for example if the participant did not perform any particular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
# # ['RWrist', 'RElbow'] list of keypoint names if you want to specify the keypoints to consider.
# approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)
# # [10.0, 2.0, 8.0, 11.0] list of times in seconds, one value per camera if you want to specify the approximate time of a clear vertical event by one person standing alone in the scene
# time_range_around_maxspeed = 2.0 # Search for best correlation in the range [approx_time_maxspeed - time_range_around_maxspeed, approx_time_maxspeed + time_range_around_maxspeed]
# likelihood_threshold = 0.4 # Keypoints whose likelihood is below likelihood_threshold are filtered out
# filter_cutoff = 6 # time series are smoothed to get coherent time-lagged correlation
# filter_order = 4
# # Take heart, calibration is not that complicated once you get the hang of it!
# [calibration]
# calibration_type = 'convert' # 'convert' or 'calculate'
# [calibration.convert]
# convert_from = 'qualisys' # 'qualisys', 'optitrack', 'vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
# [calibration.convert.qualisys]
# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
# [calibration.convert.optitrack] # See readme for instructions
# [calibration.convert.vicon] # No parameter needed
# [calibration.convert.opencap] # No parameter needed
# [calibration.convert.easymocap] # No parameter needed
# [calibration.convert.biocv] # No parameter needed
# [calibration.convert.anipose] # No parameter needed
# [calibration.convert.freemocap] # No parameter needed
# [calibration.calculate]
# # Camera properties, theoretically need to be calculated only once in a camera lifetime
# [calibration.calculate.intrinsics]
# overwrite_intrinsics = false # set to false if you don't want to recalculate intrinsic parameters
# show_detection_intrinsics = true # true or false (lowercase)
# intrinsics_extension = 'jpg' # any video or image extension
# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
# intrinsics_corners_nb = [4,7]
# intrinsics_square_size = 60 # mm
# # Camera placements, need to be done before every session
# [calibration.calculate.extrinsics]
# calculate_extrinsics = true # true or false (lowercase)
# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
# # 'scene' involves manually clicking any point of known coordinates on scene. Usually more accurate if points are spread out.
# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
# moving_cameras = false # Not implemented yet
# [calibration.calculate.extrinsics.board]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
# [calibration.calculate.extrinsics.scene]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
# # in m -> unlike for intrinsics, NOT in mm!
# object_coords_3d = [[-2.0, 0.3, 0.0],
# [-2.0 , 0.0, 0.0],
# [-2.0, 0.0, 0.05],
# [-2.0, -0.3 , 0.0],
# [0.0, 0.3, 0.0],
# [0.0, 0.0, 0.0],
# [0.0, 0.0, 0.05],
# [0.0, -0.3, 0.0]]
# [calibration.calculate.extrinsics.keypoints]
# # Coming soon!
# [personAssociation]
# likelihood_threshold_association = 0.3
# [personAssociation.single_person]
# reproj_error_threshold_association = 20 # px
# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' or 'RShoulder')
# [personAssociation.multi_person]
# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
# min_affinity = 0.2 # affinity below which a correspondence is ignored
# [triangulation]
# reorder_trc = false # only checked if multi_person analysis
# reproj_error_threshold_triangulation = 15 # px
# likelihood_threshold_triangulation = 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
# handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
# undistort_points = false # Better if distorted image (parallel lines curvy on the edge or at least one param > 10^-2), but unnecessary (and slightly slower) if distortions are low
# make_c3d = true # save triangulated data in c3d format in addition to trc
# [filtering]
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = false # true or false (lowercase) # fails when run multiple times https://github.com/superjax/plotWindow/issues/7
# make_c3d = true # also save triangulated data in c3d format
# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9
# [markerAugmentation]
# ## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
# ## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
# ## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
# make_c3d = false # save triangulated data in c3d format in addition to trc
# [opensim]
# static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
# # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial'];
# # # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'
# # CUSTOM skeleton, if you trained your own model from DeepLabCut or MMPose for example.
# # Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
# #
# # If you want to perform inverse kinematics, you will also need to create an OpenSim model
# # and add to its markerset the location where you expect the triangulated keypoints to be detected.
# #
# # In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
# # You can create as many custom skeletons as you want, just add them further down and rename them.
# #
# # Check your model hierarchy with: for pre, _, node in RenderTree(model):
# # print(f'{pre}{node.name} id={node.id}')
# [pose.CUSTOM]
# name = "CHip"
# id = "19"
# [[pose.CUSTOM.children]]
# name = "RHip"
# id = 12
# [[pose.CUSTOM.children.children]]
# name = "RKnee"
# id = 14
# [[pose.CUSTOM.children.children.children]]
# name = "RAnkle"
# id = 16
# [[pose.CUSTOM.children.children.children.children]]
# name = "RBigToe"
# id = 21
# [[pose.CUSTOM.children.children.children.children.children]]
# name = "RSmallToe"
# id = 23
# [[pose.CUSTOM.children.children.children.children]]
# name = "RHeel"
# id = 25
# [[pose.CUSTOM.children]]
# name = "LHip"
# id = 11
# [[pose.CUSTOM.children.children]]
# name = "LKnee"
# id = 13
# [[pose.CUSTOM.children.children.children]]
# name = "LAnkle"
# id = 15
# [[pose.CUSTOM.children.children.children.children]]
# name = "LBigToe"
# id = 20
# [[pose.CUSTOM.children.children.children.children.children]]
# name = "LSmallToe"
# id = 22
# [[pose.CUSTOM.children.children.children.children]]
# name = "LHeel"
# id = 24
# [[pose.CUSTOM.children]]
# name = "Neck"
# id = 18
# [[pose.CUSTOM.children.children]]
# name = "Head"
# id = 17
# [[pose.CUSTOM.children.children.children]]
# name = "Nose"
# id = 0
# [[pose.CUSTOM.children.children]]
# name = "RShoulder"
# id = 6
# [[pose.CUSTOM.children.children.children]]
# name = "RElbow"
# id = 8
# [[pose.CUSTOM.children.children.children.children]]
# name = "RWrist"
# id = 10
# [[pose.CUSTOM.children.children]]
# name = "LShoulder"
# id = 5
# [[pose.CUSTOM.children.children.children]]
# name = "LElbow"
# id = 7
# [[pose.CUSTOM.children.children.children.children]]
# name = "LWrist"
# id = 9

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,269 @@
# ###############################################################################
# ## PROJECT PARAMETERS ##
# ###############################################################################
# # Configure your project parameters here.
# #
# # IMPORTANT:
# # If a parameter is not found here, Pose2Sim will look for its value in the
# # Config.toml file of the level above. This way, you can set global batch
# # instructions and alter them for specific trials.
# #
# # If you wish to overwrite a parameter for a specific trial, edit
# # its Config.toml file by uncommenting its key (e.g., [project])
# # and editing its value (e.g., frame_range = [10,300]). Also try
# # uncommenting [filtering.butterworth] and set cut_off_frequency = 10, etc.
[project]
multi_person = true # true for trials with multiple participants. If false, only the main person in scene is analyzed (and it run much faster).
participant_height = [1.72, 1.40] # m # float if single person, list of float if multi-person (same order as the Static trials) # Only used for marker augmentation
participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and scaling
# frame_rate = 'auto' # fps # int or 'auto'. If 'auto', finds from video (or defaults to 60 fps if you work with images)
# frame_range = [] # For example [10,300], or [] for all frames.
# ## If cameras are not synchronized, designates the frame range of the camera with the shortest recording time
# ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
# ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
# ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
# exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
# # e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
# [pose]
# vid_img_extension = 'mp4' # any video or image extension
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
# # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
# #With mediapipe: BLAZEPOSE
# #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133
# #With deeplabcut: CUSTOM. See example at the end of the file
# mode = 'balanced' # 'lightweight', 'balanced', 'performance'
# det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
# # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
# tracking = true # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background
# overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
# display_detection = true
# save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
# output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
[synchronization]
# display_sync_plots = true # true or false (lowercase)
keypoints_to_consider = 'all' # 'all' if all points should be considered, for example if the participant did not perform any particicular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
# # ['RWrist', 'RElbow'] list of keypoint names if you want to specify the keypoints to consider.
# approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)
# # [10.0, 2.0, 8.0, 11.0] list of times in seconds, one value per camera if you want to specify the approximate time of a clear vertical event by one person standing alone in the scene
# time_range_around_maxspeed = 2.0 # Search for best correlation in the range [approx_time_maxspeed - time_range_around_maxspeed, approx_time_maxspeed + time_range_around_maxspeed]
# likelihood_threshold = 0.4 # Keypoints whose likelihood is below likelihood_threshold are filtered out
# filter_cutoff = 6 # time series are smoothed to get coherent time-lagged correlation
# filter_order = 4
# # Take heart, calibration is not that complicated once you get the hang of it!
# [calibration]
# calibration_type = 'convert' # 'convert' or 'calculate'
# [calibration.convert]
# convert_from = 'qualisys' # 'qualisys', 'optitrack', vicon', 'opencap', 'easymocap', 'biocv', 'anipose', or 'freemocap'
# [calibration.convert.qualisys]
# binning_factor = 1 # Usually 1, except when filming in 540p where it usually is 2
# [calibration.convert.optitrack] # See readme for instructions
# [calibration.convert.vicon] # No parameter needed
# [calibration.convert.opencap] # No parameter needed
# [calibration.convert.easymocap] # No parameter needed
# [calibration.convert.biocv] # No parameter needed
# [calibration.convert.anipose] # No parameter needed
# [calibration.convert.freemocap] # No parameter needed
# [calibration.calculate]
# # Camera properties, theoretically need to be calculated only once in a camera lifetime
# [calibration.calculate.intrinsics]
# overwrite_intrinsics = false # set to false if you don't want to recalculate intrinsic parameters
# show_detection_intrinsics = true # true or false (lowercase)
# intrinsics_extension = 'jpg' # any video or image extension
# extract_every_N_sec = 1 # if video, extract frames every N seconds (can be <1 )
# intrinsics_corners_nb = [4,7]
# intrinsics_square_size = 60 # mm
# # Camera placements, need to be done before every session
# [calibration.calculate.extrinsics]
# calculate_extrinsics = true # true or false (lowercase)
# extrinsics_method = 'scene' # 'board', 'scene', 'keypoints'
# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
# # 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate if points are spread out.
# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slighlty less accurate, requires synchronized cameras.
# moving_cameras = false # Not implemented yet
# [calibration.calculate.extrinsics.board]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
# [calibration.calculate.extrinsics.scene]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
# # in m -> unlike for intrinsics, NOT in mm!
# object_coords_3d = [[-2.0, 0.3, 0.0],
# [-2.0 , 0.0, 0.0],
# [-2.0, 0.0, 0.05],
# [-2.0, -0.3 , 0.0],
# [0.0, 0.3, 0.0],
# [0.0, 0.0, 0.0],
# [0.0, 0.0, 0.05],
# [0.0, -0.3, 0.0]]
# [calibration.calculate.extrinsics.keypoints]
# # Coming soon!
# [personAssociation]
# likelihood_threshold_association = 0.3
# [personAssociation.single_person]
# reproj_error_threshold_association = 20 # px
# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' or 'RShoulder')
# [personAssociation.multi_person]
# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
# min_affinity = 0.2 # affinity below which a correspondence is ignored
# [triangulation]
# reorder_trc = false # only checked if multi_person analysis
# reproj_error_threshold_triangulation = 15 # px
# likelihood_threshold_triangulation= 0.3
# min_cameras_for_triangulation = 2
# interpolation = 'cubic' #linear, slinear, quadratic, cubic, or none
# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# handle_LR_swap = false # Better if few cameras (eg less than 4) with risk of limb swapping (eg camera facing sagittal plane), otherwise slightly less accurate and slower
# undistort_points = false # Better if distorted image (parallel lines curvy on the edge or at least one param > 10^-2), but unnecessary (and slightly slower) if distortions are low
# make_c3d = false # save triangulated data in c3d format in addition to trc
# [filtering]
# type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
# display_figures = false # true or false (lowercase) # fails when run multiple times https://github.com/superjax/plotWindow/issues/7
# make_c3d = false # also save triangulated data in c3d format
# [filtering.butterworth]
# order = 4
# cut_off_frequency = 6 # Hz
# [filtering.kalman]
# # How much more do you trust triangulation results (measurements), than previous data (process assuming constant acceleration)?
# trust_ratio = 100 # = measurement_trust/process_trust ~= process_noise/measurement_noise
# smooth = true # should be true, unless you need real-time filtering
# [filtering.butterworth_on_speed]
# order = 4
# cut_off_frequency = 10 # Hz
# [filtering.gaussian]
# sigma_kernel = 2 #px
# [filtering.LOESS]
# nb_values_used = 30 # = fraction of data used * nb frames
# [filtering.median]
# kernel_size = 9
# [markerAugmentation]
# ## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
# ## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
# ## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
# make_c3d = false # save triangulated data in c3d format in addition to trc
# [opensim]
# static_trial = ['S00_P00_Participant/S00_P00_T00_StaticTrial']
# # # If this Config.toml file is at the Trial level, set to true or false (lowercase);
# # # At the Participant level, specify the name of the static trial folder name, e.g. ['S00_P00_T00_StaticTrial'];
# # # At the Session level, add participant subdirectory, e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P01_Participant/S00_P00_T00_StaticTrial']
# opensim_bin_path = 'C:\OpenSim 4.4\bin'
# # CUSTOM skeleton, if you trained your own model from DeepLabCut or MMPose for example.
# # Make sure the node ids correspond to the column numbers of the 2D pose file, starting from zero.
# #
# # If you want to perform inverse kinematics, you will also need to create an OpenSim model
# # and add to its markerset the location where you expect the triangulated keypoints to be detected.
# #
# # In this example, CUSTOM reproduces the HALPE_26 skeleton (default skeletons are stored in skeletons.py).
# # You can create as many custom skeletons as you want, just add them further down and rename them.
# #
# # Check your model hierarchy with: for pre, _, node in RenderTree(model):
# # print(f'{pre}{node.name} id={node.id}')
# [pose.CUSTOM]
# name = "CHip"
# id = "19"
# [[pose.CUSTOM.children]]
# name = "RHip"
# id = 12
# [[pose.CUSTOM.children.children]]
# name = "RKnee"
# id = 14
# [[pose.CUSTOM.children.children.children]]
# name = "RAnkle"
# id = 16
# [[pose.CUSTOM.children.children.children.children]]
# name = "RBigToe"
# id = 21
# [[pose.CUSTOM.children.children.children.children.children]]
# name = "RSmallToe"
# id = 23
# [[pose.CUSTOM.children.children.children.children]]
# name = "RHeel"
# id = 25
# [[pose.CUSTOM.children]]
# name = "LHip"
# id = 11
# [[pose.CUSTOM.children.children]]
# name = "LKnee"
# id = 13
# [[pose.CUSTOM.children.children.children]]
# name = "LAnkle"
# id = 15
# [[pose.CUSTOM.children.children.children.children]]
# name = "LBigToe"
# id = 20
# [[pose.CUSTOM.children.children.children.children.children]]
# name = "LSmallToe"
# id = 22
# [[pose.CUSTOM.children.children.children.children]]
# name = "LHeel"
# id = 24
# [[pose.CUSTOM.children]]
# name = "Neck"
# id = 18
# [[pose.CUSTOM.children.children]]
# name = "Head"
# id = 17
# [[pose.CUSTOM.children.children.children]]
# name = "Nose"
# id = 0
# [[pose.CUSTOM.children.children]]
# name = "RShoulder"
# id = 6
# [[pose.CUSTOM.children.children.children]]
# name = "RElbow"
# id = 8
# [[pose.CUSTOM.children.children.children.children]]
# name = "RWrist"
# id = 10
# [[pose.CUSTOM.children.children]]
# name = "LShoulder"
# id = 5
# [[pose.CUSTOM.children.children.children]]
# name = "LElbow"
# id = 7
# [[pose.CUSTOM.children.children.children.children]]
# name = "LWrist"
# id = 9

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,38 @@
<?xml version='1.0' encoding='ASCII'?>
<calibration created="sometimes ago" longArmEnd="none" longArmMiddle="none" maximumFrames="none" qtm-version="none" shortArmEnd="none" source="calibration_qca_rotated.qca.txt" type="regular" wandLength="none">
<results min-max-diff="0." std-dev="0."/>
<cameras>
<camera active="1" avg-residual="0.402000" model="none" point-count="999999999" serial="cam01" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="0.5536380477336265" r12="0.8046719867383512" r13="0.2144479094331626" r21="-0.4345300743838753" r22="0.05946489349975602" r23="0.8986921279821063" r31="0.7104002579937896" r32="-0.5907342212870501" r33="0.3825762057979294" x="1460.2323709212087" y="-1909.1590482454608" z="1896.5058524092062"/>
<intrinsic centerPointU="34110.316406" centerPointV="60680.792969" focalLengthU="107599.671875" focalLengthV="107588.828125" focallength="9.314096596679686" radialDistortion1="-0.046183" radialDistortion2="0.139983" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000608" tangentalDistortion2="0.00069"/>
</camera>
<camera active="1" avg-residual="0.444749" model="none" point-count="999999999" serial="cam02" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="-0.1966832217091926" r12="0.979523227309506" r13="-0.043011131806342306" r21="-0.31478107085300017" r22="-0.02153908797812718" r23="0.9489198834051846" r31="0.9285626460991909" r32="0.20017570994064315" r33="0.3125717476340885" x="2582.0136248568124" y="706.5662881637625" z="1690.9818366061595"/>
<intrinsic centerPointU="34207.652344" centerPointV="61646.457031" focalLengthU="107118.695313" focalLengthV="107123.023438" focallength="9.272462063031563" radialDistortion1="-0.047847" radialDistortion2="0.136786" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000972" tangentalDistortion2="0.000291"/>
</camera>
<camera active="1" avg-residual="0.450323" model="none" point-count="999999999" serial="cam03" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="-0.740641130147863" r12="-0.6704153360235378" r13="-0.044654154988568895" r21="0.25251118212268814" r22="-0.3393170985114385" r23="0.9061467925015139" r31="-0.6226466246887545" r32="0.6598539110153292" r33="0.4205995683324882" x="-3216.86586729648" y="2231.1786368869416" z="2088.19103027083"/>
<intrinsic centerPointU="32845.335938" centerPointV="61120.328125" focalLengthU="107622.296875" focalLengthV="107616.632813" focallength="9.316055073242188" radialDistortion1="-0.046705" radialDistortion2="0.137622" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="-0.000542" tangentalDistortion2="-0.000517"/>
</camera>
<camera active="1" avg-residual="0.504772" model="none" point-count="999999999" serial="cam04" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="0.2586331447643361" r12="-0.875095795920923" r13="-0.40904308378315923" r21="0.49590579729377593" r22="-0.24310718522469382" r23="0.8336524076031634" r31="-0.8289670298870364" r32="-0.4184569804097585" r33="0.3710895025551282" x="-3758.720480260634" y="-1415.6654017496533" z="1881.7941041388026"/>
<intrinsic centerPointU="34566.796875" centerPointV="61697.9375" focalLengthU="107215.039063" focalLengthV="107213.070313" focallength="9.280801818890938" radialDistortion1="-0.047633" radialDistortion2="0.134667" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000277" tangentalDistortion2="0.000199"/>
</camera>
</cameras>
</calibration>

View File

@ -153,7 +153,7 @@ make_c3d = false # save triangulated data in c3d format in addition to trc
[filtering]
type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
display_figures = false # true or false (lowercase)
display_figures = false # true or false (lowercase) # fails when run multiple times https://github.com/superjax/plotWindow/issues/7
make_c3d = false # also save triangulated data in c3d format
[filtering.butterworth]
@ -178,7 +178,7 @@ make_c3d = false # also save triangulated data in c3d format
## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
make_c3d = false # save triangulated data in c3d format in addition to trc
make_c3d = true # save triangulated data in c3d format in addition to trc
[opensim]

View File

@ -2,37 +2,37 @@
<calibration created="sometimes ago" longArmEnd="none" longArmMiddle="none" maximumFrames="none" qtm-version="none" shortArmEnd="none" source="calibration_qca_rotated.qca.txt" type="regular" wandLength="none">
<results min-max-diff="0." std-dev="0."/>
<cameras>
<camera active="1" avg-residual="0.402000" model="none" point-count="999999999" serial="cam_01" viewrotation="0">
<camera active="1" avg-residual="0.402000" model="none" point-count="999999999" serial="cam01" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="0.5536380477336265" r12="0.8046719867383512" r13="0.2144479094331626" r21="-0.4345300743838753" r22="0.05946489349975602" r23="0.8986921279821063" r31="0.7104002579937896" r32="-0.5907342212870501" r33="0.3825762057979294" x="1460.2323709212087" y="-1909.1590482454608" z="1896.5058524092062"/>
<intrinsic centerPointU="35134.316406" centerPointV="61960.792969" focalLengthU="107599.671875" focalLengthV="107588.828125" focallength="9.314096596679686" radialDistortion1="-0.046183" radialDistortion2="0.139983" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000608" tangentalDistortion2="0.00069"/>
<intrinsic centerPointU="34110.316406" centerPointV="60680.792969" focalLengthU="107599.671875" focalLengthV="107588.828125" focallength="9.314096596679686" radialDistortion1="-0.046183" radialDistortion2="0.139983" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000608" tangentalDistortion2="0.00069"/>
</camera>
<camera active="1" avg-residual="0.444749" model="none" point-count="999999999" serial="cam_02" viewrotation="0">
<camera active="1" avg-residual="0.444749" model="none" point-count="999999999" serial="cam02" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="-0.1966832217091926" r12="0.979523227309506" r13="-0.043011131806342306" r21="-0.31478107085300017" r22="-0.02153908797812718" r23="0.9489198834051846" r31="0.9285626460991909" r32="0.20017570994064315" r33="0.3125717476340885" x="2582.0136248568124" y="706.5662881637625" z="1690.9818366061595"/>
<intrinsic centerPointU="35359.652344" centerPointV="62158.457031" focalLengthU="107118.695313" focalLengthV="107123.023438" focallength="9.272462063031563" radialDistortion1="-0.047847" radialDistortion2="0.136786" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000972" tangentalDistortion2="0.000291"/>
<intrinsic centerPointU="34207.652344" centerPointV="61646.457031" focalLengthU="107118.695313" focalLengthV="107123.023438" focallength="9.272462063031563" radialDistortion1="-0.047847" radialDistortion2="0.136786" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000972" tangentalDistortion2="0.000291"/>
</camera>
<camera active="1" avg-residual="0.450323" model="none" point-count="999999999" serial="cam_03" viewrotation="0">
<camera active="1" avg-residual="0.450323" model="none" point-count="999999999" serial="cam03" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="-0.740641130147863" r12="-0.6704153360235378" r13="-0.044654154988568895" r21="0.25251118212268814" r22="-0.3393170985114385" r23="0.9061467925015139" r31="-0.6226466246887545" r32="0.6598539110153292" r33="0.4205995683324882" x="-3216.86586729648" y="2231.1786368869416" z="2088.19103027083"/>
<intrinsic centerPointU="34893.335938" centerPointV="61440.328125" focalLengthU="107622.296875" focalLengthV="107616.632813" focallength="9.316055073242188" radialDistortion1="-0.046705" radialDistortion2="0.137622" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="-0.000542" tangentalDistortion2="-0.000517"/>
<intrinsic centerPointU="32845.335938" centerPointV="61120.328125" focalLengthU="107622.296875" focalLengthV="107616.632813" focallength="9.316055073242188" radialDistortion1="-0.046705" radialDistortion2="0.137622" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="-0.000542" tangentalDistortion2="-0.000517"/>
</camera>
<camera active="1" avg-residual="0.504772" model="none" point-count="999999999" serial="cam_04" viewrotation="0">
<camera active="1" avg-residual="0.504772" model="none" point-count="999999999" serial="cam04" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
<fov_video_max bottom="1919" left="0" right="1087" top="0"/>
<transform r11="0.2586331447643361" r12="-0.875095795920923" r13="-0.40904308378315923" r21="0.49590579729377593" r22="-0.24310718522469382" r23="0.8336524076031634" r31="-0.8289670298870364" r32="-0.4184569804097585" r33="0.3710895025551282" x="-3758.720480260634" y="-1415.6654017496533" z="1881.7941041388026"/>
<intrinsic centerPointU="35014.796875" centerPointV="61697.9375" focalLengthU="107215.039063" focalLengthV="107213.070313" focallength="9.280801818890938" radialDistortion1="-0.047633" radialDistortion2="0.134667" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000277" tangentalDistortion2="0.000199"/>
<intrinsic centerPointU="34566.796875" centerPointV="61697.9375" focalLengthU="107215.039063" focalLengthV="107213.070313" focallength="9.280801818890938" radialDistortion1="-0.047633" radialDistortion2="0.134667" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000277" tangentalDistortion2="0.000199"/>
</camera>
</cameras>
</calibration>

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -153,7 +153,7 @@ make_c3d = true # save triangulated data in c3d format in addition to trc
[filtering]
type = 'butterworth' # butterworth, kalman, gaussian, LOESS, median, butterworth_on_speed
display_figures = false # true or false (lowercase)
display_figures = true # true or false (lowercase) # fails when run multiple times https://github.com/superjax/plotWindow/issues/7
make_c3d = true # also save triangulated data in c3d format
[filtering.butterworth]
@ -178,7 +178,7 @@ make_c3d = true # also save triangulated data in c3d format
## Requires the following markers: ["Neck", "RShoulder", "LShoulder", "RHip", "LHip", "RKnee", "LKnee",
## "RAnkle", "LAnkle", "RHeel", "LHeel", "RSmallToe", "LSmallToe",
## "RBigToe", "LBigToe", "RElbow", "LElbow", "RWrist", "LWrist"]
make_c3d = false # save triangulated data in c3d format in addition to trc
make_c3d = true # save triangulated data in c3d format in addition to trc
[opensim]

View File

@ -2,7 +2,7 @@
<calibration created="sometimes ago" longArmEnd="none" longArmMiddle="none" maximumFrames="none" qtm-version="none" shortArmEnd="none" source="calibration_qca_rotated.qca.txt" type="regular" wandLength="none">
<results min-max-diff="0." std-dev="0."/>
<cameras>
<camera active="1" avg-residual="0.402000" model="none" point-count="999999999" serial="cam_01" viewrotation="0">
<camera active="1" avg-residual="0.402000" model="none" point-count="999999999" serial="cam01" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
@ -10,7 +10,7 @@
<transform r11="0.5536380477336265" r12="0.8046719867383512" r13="0.2144479094331626" r21="-0.4345300743838753" r22="0.05946489349975602" r23="0.8986921279821063" r31="0.7104002579937896" r32="-0.5907342212870501" r33="0.3825762057979294" x="1460.2323709212087" y="-1909.1590482454608" z="1896.5058524092062"/>
<intrinsic centerPointU="34110.316406" centerPointV="60680.792969" focalLengthU="107599.671875" focalLengthV="107588.828125" focallength="9.314096596679686" radialDistortion1="-0.046183" radialDistortion2="0.139983" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000608" tangentalDistortion2="0.00069"/>
</camera>
<camera active="1" avg-residual="0.444749" model="none" point-count="999999999" serial="cam_02" viewrotation="0">
<camera active="1" avg-residual="0.444749" model="none" point-count="999999999" serial="cam02" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
@ -18,7 +18,7 @@
<transform r11="-0.1966832217091926" r12="0.979523227309506" r13="-0.043011131806342306" r21="-0.31478107085300017" r22="-0.02153908797812718" r23="0.9489198834051846" r31="0.9285626460991909" r32="0.20017570994064315" r33="0.3125717476340885" x="2582.0136248568124" y="706.5662881637625" z="1690.9818366061595"/>
<intrinsic centerPointU="34207.652344" centerPointV="61646.457031" focalLengthU="107118.695313" focalLengthV="107123.023438" focallength="9.272462063031563" radialDistortion1="-0.047847" radialDistortion2="0.136786" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="0.000972" tangentalDistortion2="0.000291"/>
</camera>
<camera active="1" avg-residual="0.450323" model="none" point-count="999999999" serial="cam_03" viewrotation="0">
<camera active="1" avg-residual="0.450323" model="none" point-count="999999999" serial="cam03" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>
@ -26,7 +26,7 @@
<transform r11="-0.740641130147863" r12="-0.6704153360235378" r13="-0.044654154988568895" r21="0.25251118212268814" r22="-0.3393170985114385" r23="0.9061467925015139" r31="-0.6226466246887545" r32="0.6598539110153292" r33="0.4205995683324882" x="-3216.86586729648" y="2231.1786368869416" z="2088.19103027083"/>
<intrinsic centerPointU="32845.335938" centerPointV="61120.328125" focalLengthU="107622.296875" focalLengthV="107616.632813" focallength="9.316055073242188" radialDistortion1="-0.046705" radialDistortion2="0.137622" radialDistortion3="0.000000" sensorMaxU="69568" sensorMaxV="122816" sensorMinU="0.000000" sensorMinV="0.000000" skew="0.000000" tangentalDistortion1="-0.000542" tangentalDistortion2="-0.000517"/>
</camera>
<camera active="1" avg-residual="0.504772" model="none" point-count="999999999" serial="cam_04" viewrotation="0">
<camera active="1" avg-residual="0.504772" model="none" point-count="999999999" serial="cam04" viewrotation="0">
<fov_marker bottom="1919" left="0" right="1087" top="0"/>
<fov_marker_max bottom="1919" left="0" right="1087" top="0"/>
<fov_video bottom="1919" left="0" right="1087" top="0"/>

View File

@ -271,11 +271,8 @@ def augmentTRC(config_dict):
# Save c3d
if make_c3d:
print(pathOutputTRCFile)
convert_to_c3d(pathOutputTRCFile)
logging.info(f'Augmented trc files have been converted to c3d.')
logging.info('\n')
return min_y_pos

View File

@ -27,7 +27,7 @@ and lens distortions are better taken into account.\ -->
`Pose2Sim` provides a workflow for 3D markerless kinematics, as an alternative to marker-based motion capture methods. It aims to provide a free tool to obtain research-grade results from consumer-grade equipment. Any combination of phone, webcam, GoPro, etc. can be used.
Pose2Sim stands for "OpenPose to OpenSim", as it originally used OpenPose inputs (2D keypoints coordinates obtained from multiple videos) and lead to an [OpenSim](https://opensim.stanford.edu/) result (full-body 3D joint angles). Pose estimation is now performed with more recent models from [RTMPose](https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose). OpenPose and other options are kept as legacy options.
**Pose2Sim** stands for "OpenPose to OpenSim", as it originally used *OpenPose* inputs (2D keypoints coordinates) from multiple videos and lead to an [OpenSim](https://opensim.stanford.edu/) result (full-body 3D joint angles). Pose estimation is now performed with more recent models from [RTMPose](https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose). OpenPose and other models are kept as legacy options.
If you can only use one single camera and don't mind losing some accuracy, please consider using [Sports2D](https://github.com/davidpagnon/Sports2D).
@ -63,6 +63,7 @@ If you can only use one single camera and don't mind losing some accuracy, pleas
3. [Demonstration Part-2: Obtain 3D joint angles with OpenSim](#demonstration-part-2-obtain-3d-joint-angles-with-opensim)
4. [Demonstration Part-3 (optional): Visualize your results with Blender](#demonstration-part-3-optional-visualize-your-results-with-blender)
5. [Demonstration Part-4 (optional): Try multi-person analysis](#demonstration-part-4-optional-try-multi-person-analysis)
6. [Demonstration Part-5 (optional): Try batch processing](#demonstration-part-5-optional-try-batch-processing)
2. [Use on your own data](#use-on-your-own-data)
1. [Setting your project up](#setting-your-project-up)
1. [Retrieve the folder structure](#retrieve-the-folder-structure)

View File

@ -1,6 +1,6 @@
[metadata]
name = pose2sim
version = 0.9.5
version = 0.9.6
author = David Pagnon
author_email = contact@david-pagnon.com
description = Perform a markerless kinematic analysis from multiple calibrated views as a unified workflow from an OpenPose input to an OpenSim result.