From e21c763614490a76f50badc7bbab3d614ff084e8 Mon Sep 17 00:00:00 2001 From: davidpagnon Date: Wed, 10 Jul 2024 14:51:34 +0200 Subject: [PATCH] minor edits --- Pose2Sim/Demo_MultiPerson/Config.toml | 22 +++++++++++----------- Pose2Sim/Demo_SinglePerson/Config.toml | 22 +++++++++++----------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Pose2Sim/Demo_MultiPerson/Config.toml b/Pose2Sim/Demo_MultiPerson/Config.toml index 2a6f01e..807bf4a 100644 --- a/Pose2Sim/Demo_MultiPerson/Config.toml +++ b/Pose2Sim/Demo_MultiPerson/Config.toml @@ -34,17 +34,17 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['< [pose] vid_img_extension = 'mp4' # any video or image extension -pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body) - # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed - #With MMPose: HALPE_26 (default), COCO_133, COCO_17, CUSTOM. See example at the end of the file - #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII - #With mediapipe: BLAZEPOSE - #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133 - #With deeplabcut: CUSTOM. See example at the end of the file -mode = 'balanced' # 'lightweight', 'balanced', 'performance' -det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (still run keypoint detection on all frame). - #Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. -tracking = true # Gives consistent person ID across frames. 
Slightly slower but might facilitate synchronization if other people are in the background +pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) + # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed + #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file + #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII + #With mediapipe: BLAZEPOSE + #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133 + #With deeplabcut: CUSTOM. See example at the end of the file +mode = 'performance' # 'lightweight', 'balanced', 'performance' +det_frequency = 1 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames). + # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. +tracking = false # Gives consistent person ID across frames. 
Slightly slower but might facilitate synchronization if other people are in the background display_detection = true save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images'] output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now diff --git a/Pose2Sim/Demo_SinglePerson/Config.toml b/Pose2Sim/Demo_SinglePerson/Config.toml index 4de5aec..952edbd 100644 --- a/Pose2Sim/Demo_SinglePerson/Config.toml +++ b/Pose2Sim/Demo_SinglePerson/Config.toml @@ -34,17 +34,17 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['< [pose] vid_img_extension = 'mp4' # any video or image extension -pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body) - # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed - #With MMPose: HALPE_26 (default), COCO_133, COCO_17, CUSTOM. See example at the end of the file - #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII - #With mediapipe: BLAZEPOSE - #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133 - #With deeplabcut: CUSTOM. See example at the end of the file -mode = 'balanced' # 'lightweight', 'balanced', 'performance' -det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (still run keypoint detection on all frame). - #Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. -tracking = true # Gives consistent person ID across frames. 
Slightly slower but might facilitate synchronization if other people are in the background +pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) + # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed + #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file + #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII + #With mediapipe: BLAZEPOSE + #With alphapose: HALPE_26, HALPE_68, HALPE_136, COCO_133 + #With deeplabcut: CUSTOM. See example at the end of the file +mode = 'performance' # 'lightweight', 'balanced', 'performance' +det_frequency = 1 # Run person detection only every N frames, and in between track previously detected bounding boxes (keypoint detection is still run on all frames). + # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate. +tracking = false # Gives consistent person ID across frames. Slightly slower but might facilitate synchronization if other people are in the background display_detection = true save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images'] output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now