# # If a parameter is not found here, Pose2Sim will look for its value in the
# # Config.toml file of the level above. This way, you can set global batch
# # instructions and alter them for specific trials.
# #
# # If you wish to overwrite a parameter for a specific trial, edit
# # its Config.toml file by uncommenting its key (e.g., [project])
# # and editing its value (e.g., frame_range = [10,300]). Also try
# # uncommenting [filtering.butterworth] and set cut_off_frequency = 10, etc.
# [project]
# multi_person = false # true for trials with multiple participants. If false, only the main person in the scene is analyzed (and it runs much faster).
# participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # Only used for marker augmentation
# participant_mass = 70.0 # kg # Only used for marker augmentation and scaling
# frame_rate = 'auto' # fps # int or 'auto'. If 'auto', finds from video (or defaults to 60 fps if you work with images)
# frame_range = [] # For example [10,300], or [] for all frames.
# ## If cameras are not synchronized, designates the frame range of the camera with the shortest recording time
# ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
# ## For example, if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate, set frame_range = [6, 120].
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face), COCO_17 (body) # # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
# det_frequency = 100 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames).
# # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
# save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
# output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
# [synchronization]
# display_sync_plots = true # true or false (lowercase)
# keypoints_to_consider = ['RWrist'] # 'all' if all points should be considered, for example if the participant did not perform any particular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
# time_range_around_maxspeed = 2.0 # Search for best correlation in the range [approx_time_maxspeed - time_range_around_maxspeed, approx_time_maxspeed + time_range_around_maxspeed]
# likelihood_threshold = 0.4 # Keypoints whose likelihood is below likelihood_threshold are filtered out
# filter_cutoff = 6 # time series are smoothed to get coherent time-lagged correlation
# filter_order = 4
# # Take heart, calibration is not that complicated once you get the hang of it!
# [calibration]
# calibration_type = 'convert' # 'convert' or 'calculate'
# # 'board' should be large enough to be detected when laid on the floor. Not recommended.
# # 'scene' involves manually clicking points of known coordinates in the scene. Usually more accurate if points are spread out.
# # 'keypoints' uses automatic pose estimation of a person freely walking and waving arms in the scene. Slightly less accurate, requires synchronized cameras.
# moving_cameras = false # Not implemented yet
# [calibration.calculate.extrinsics.board]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# extrinsics_corners_nb = [4,7] # [H,W] rather than [w,h]
# extrinsics_square_size = 60 # mm # [h,w] if square is actually a rectangle
# [calibration.calculate.extrinsics.scene]
# show_reprojection_error = true # true or false (lowercase)
# extrinsics_extension = 'png' # any video or image extension
# # list of 3D coordinates to be manually labelled on images. Can also be a 2 dimensional plane.
# # in m -> unlike for intrinsics, NOT in mm!
# object_coords_3d = [[-2.0, 0.3, 0.0],
# [-2.0 , 0.0, 0.0],
# [-2.0, 0.0, 0.05],
# [-2.0, -0.3 , 0.0],
# [0.0, 0.3, 0.0],
# [0.0, 0.0, 0.0],
# [0.0, 0.0, 0.05],
# [0.0, -0.3, 0.0]]
# [calibration.calculate.extrinsics.keypoints]
# # Coming soon!
# [personAssociation]
# likelihood_threshold_association = 0.3
# [personAssociation.single_person]
# reproj_error_threshold_association = 20 # px
# tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
# # and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' or 'RShoulder')
# [personAssociation.multi_person]
# reconstruction_error_threshold = 0.1 # 0.1 = 10 cm
# min_affinity = 0.2 # affinity below which a correspondence is ignored
# # 'none' if you don't want to interpolate missing points
# interp_if_gap_smaller_than = 10 # do not interpolate bigger gaps
# show_interp_indices = true # true or false (lowercase). For each keypoint, return the frames that need to be interpolated
# fill_large_gaps_with = 'last_value' # 'last_value', 'nan', or 'zeros'
# handle_LR_swap = false # Better if few cameras (e.g., less than 4) with risk of limb swapping (e.g., camera facing sagittal plane), otherwise slightly less accurate and slower
# undistort_points = false # Better if distorted image (parallel lines curvy on the edge or at least one param > 10^-2), but unnecessary (and slightly slower) if distortions are low
# make_c3d = true # save triangulated data in c3d format in addition to trc
# use_augmentation = true # true or false (lowercase) # Set to true if you want to use the model with augmented markers
# right_left_symmetry = true # true or false (lowercase) # Set to false only if you have good reasons to think the participant is not symmetrical (e.g. prosthetic limb)
# remove_individual_scaling_setup = true # true or false (lowercase) # If true, the individual scaling setup files are removed to avoid cluttering
# remove_individual_IK_setup = true # true or false (lowercase) # If true, the individual IK setup files are removed to avoid cluttering