From 3a31bd05fd090b9d38724a5401b87b1085b03da3 Mon Sep 17 00:00:00 2001
From: davidpagnon
Date: Tue, 22 Oct 2024 11:08:29 +0200
Subject: [PATCH] Clearer config on pose estimation

---
 Pose2Sim/Demo_Batch/Config.toml         | 3 +--
 Pose2Sim/Demo_Batch/Trial_1/Config.toml | 3 +--
 Pose2Sim/Demo_Batch/Trial_2/Config.toml | 3 +--
 Pose2Sim/Demo_MultiPerson/Config.toml   | 3 +--
 Pose2Sim/Demo_SinglePerson/Config.toml  | 3 +--
 Pose2Sim/poseEstimation.py              | 6 +++---
 README.md                               | 8 ++++----
 7 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/Pose2Sim/Demo_Batch/Config.toml b/Pose2Sim/Demo_Batch/Config.toml
index 2f28228..79ff213 100644
--- a/Pose2Sim/Demo_Batch/Config.toml
+++ b/Pose2Sim/Demo_Batch/Config.toml
@@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
 
 [pose]
 vid_img_extension = 'mp4' # any video or image extension
-pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
-                        # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
+pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
                         #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
                         #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
                         #With mediapipe: BLAZEPOSE
diff --git a/Pose2Sim/Demo_Batch/Trial_1/Config.toml b/Pose2Sim/Demo_Batch/Trial_1/Config.toml
index b4bfacd..0098cb1 100644
--- a/Pose2Sim/Demo_Batch/Trial_1/Config.toml
+++ b/Pose2Sim/Demo_Batch/Trial_1/Config.toml
@@ -34,8 +34,7 @@
 
 # [pose]
 # vid_img_extension = 'mp4' # any video or image extension
-# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
-# # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
+# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face), COCO_17 (body) # # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
 # #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
 # #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE
diff --git a/Pose2Sim/Demo_Batch/Trial_2/Config.toml b/Pose2Sim/Demo_Batch/Trial_2/Config.toml
index f01cf4f..ca1257e 100644
--- a/Pose2Sim/Demo_Batch/Trial_2/Config.toml
+++ b/Pose2Sim/Demo_Batch/Trial_2/Config.toml
@@ -34,8 +34,7 @@ participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and sca
 
 # [pose]
 # vid_img_extension = 'mp4' # any video or image extension
-# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
-# # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
+# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face), COCO_17 (body) # # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
 # #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
 # #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
 # #With mediapipe: BLAZEPOSE
diff --git a/Pose2Sim/Demo_MultiPerson/Config.toml b/Pose2Sim/Demo_MultiPerson/Config.toml
index 2f2602e..8c985ef 100644
--- a/Pose2Sim/Demo_MultiPerson/Config.toml
+++ b/Pose2Sim/Demo_MultiPerson/Config.toml
@@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
 
 [pose]
 vid_img_extension = 'mp4' # any video or image extension
-pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
-                        # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
+pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
                         #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
                         #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
                         #With mediapipe: BLAZEPOSE
diff --git a/Pose2Sim/Demo_SinglePerson/Config.toml b/Pose2Sim/Demo_SinglePerson/Config.toml
index 60f2d4a..0ce5614 100644
--- a/Pose2Sim/Demo_SinglePerson/Config.toml
+++ b/Pose2Sim/Demo_SinglePerson/Config.toml
@@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
 
 [pose]
 vid_img_extension = 'mp4' # any video or image extension
-pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
-                        # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
+pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
                         #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
                         #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
                         #With mediapipe: BLAZEPOSE
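To make the clarified comment concrete, here is how the `[pose]` section of a demo `Config.toml` would read after this patch when whole-body output is needed. This is a hedged sketch built only from the keys shown in the hunks above; it is not part of the patch itself, and all values are illustrative:

```toml
[pose]
vid_img_extension = 'mp4' # any video or image extension
# COCO_133 adds hands and face: required for wrist motion, but slower
# and slightly less accurate on body keypoints than the default HALPE_26.
pose_model = 'COCO_133'
```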
diff --git a/Pose2Sim/poseEstimation.py b/Pose2Sim/poseEstimation.py
index c327e86..c583dae 100644
--- a/Pose2Sim/poseEstimation.py
+++ b/Pose2Sim/poseEstimation.py
@@ -11,7 +11,7 @@
     write the results to JSON files, videos, and/or images.
     Results can optionally be displayed in real time.
 
-    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
+    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands, face), COCO_17 (body)
     Supported modes: lightweight, balanced, performance
     (edit paths at rtmlib/tools/solutions if you need other detection or pose models)
 
@@ -345,7 +345,7 @@ def rtm_estimator(config_dict):
     write the results to JSON files, videos, and/or images.
     Results can optionally be displayed in real time.
 
-    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
+    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands, face), COCO_17 (body)
     Supported modes: lightweight, balanced, performance
     (edit paths at rtmlib/tools/solutions if you need other detection or pose models)
 
@@ -435,7 +435,7 @@ def rtm_estimator(config_dict):
         logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
     elif pose_model.upper() == 'COCO_133':
         ModelClass = Wholebody
-        logging.info(f"Using COCO_133 model (body, feet, hands) for pose estimation.")
+        logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
     elif pose_model.upper() == 'COCO_17':
         ModelClass = Body # 17 keypoints (coco17)
         logging.info(f"Using COCO_17 model (body) for pose estimation.")
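The docstring above names the three RTMLib modes. Assuming the `mode` key that the demo `Config.toml` files expose next to `pose_model` (that key is not touched by this patch, so treat this as a sketch), the speed/accuracy trade-off would be picked like this:

```toml
[pose]
pose_model = 'HALPE_26'
# 'lightweight' is fastest, 'performance' is most accurate,
# 'balanced' sits in between.
mode = 'balanced'
```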
diff --git a/README.md b/README.md
index 9097633..f735949 100644
--- a/README.md
+++ b/README.md
@@ -350,10 +350,10 @@ Pose2Sim.poseEstimation()
 
-*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\
 *N.B.:* Pose estimation can be run in `lightweight`, `balanced`, or `performance` mode.\
-*N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked inbetween (keypoint detection is still performed on all frames).\
-*N.B.:* Activating `tracking` will attempt to give consistent IDs to the same persons across frames, which might facilitate synchronization if other people are in the background.
+*N.B.:* The `pose_model` with body, feet, hands, and face is required for wrist motion but is much slower and slightly less accurate on body keypoints.\
+*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\
+*N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked in between (keypoint detection is still performed on all frames).
 
@@ -536,7 +536,7 @@ If you already have a calibration file, set `calibration_type` to `convert`
 
 ### Synchronization
 
-> _**2D points can be triangulated only if they represent the same body position across view: therefore, cameras need to be synchronized.**_\
+> _**2D points can be triangulated only if they represent the same body position across all cameras: therefore, views need to be synchronized.**_\
 ***N.B.:** Skip this step if your cameras are natively synchronized.*
 
 Open an Anaconda prompt or a terminal in a `Session` or `Trial` folder.\
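As a concrete reading of the `det_frequency` note above: with 60 fps footage, the following (illustrative) value would run the person detector once per second, track bounding boxes in between, and still estimate keypoints on every frame. The placement of `det_frequency` under `[pose]` is assumed here, since the README quotes only the key name:

```toml
[pose]
pose_model = 'HALPE_26'
# Detect people only every 60th frame; bounding boxes are tracked
# in between, and keypoints are still estimated on all frames.
det_frequency = 60
```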