From f6c49e011eaff7f34ef8522712d61934a17a7b34 Mon Sep 17 00:00:00 2001 From: davidpagnon Date: Thu, 24 Oct 2024 02:48:25 +0200 Subject: [PATCH] ROCM support provided that this is merged on RTMlib: https://github.com/Tau-J/rtmlib/issues/38 --- Pose2Sim/Demo_Batch/Trial_1/Config.toml | 2 +- Pose2Sim/Demo_Batch/Trial_2/Config.toml | 2 +- Pose2Sim/poseEstimation.py | 4 ++++ README.md | 4 ++-- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Pose2Sim/Demo_Batch/Trial_1/Config.toml b/Pose2Sim/Demo_Batch/Trial_1/Config.toml index a9d7c4b..2861d7f 100644 --- a/Pose2Sim/Demo_Batch/Trial_1/Config.toml +++ b/Pose2Sim/Demo_Batch/Trial_1/Config.toml @@ -34,7 +34,7 @@ # [pose] # vid_img_extension = 'mp4' # any video or image extension -# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) # # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed +# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) # #/!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed # #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. 
See CUSTOM example at the end of the file # #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII diff --git a/Pose2Sim/Demo_Batch/Trial_2/Config.toml b/Pose2Sim/Demo_Batch/Trial_2/Config.toml index 5617cd7..ef0ae48 100644 --- a/Pose2Sim/Demo_Batch/Trial_2/Config.toml +++ b/Pose2Sim/Demo_Batch/Trial_2/Config.toml @@ -34,7 +34,7 @@ participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and sca # [pose] # vid_img_extension = 'mp4' # any video or image extension -# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) # # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed +# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body) # #/!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed # #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. 
See CUSTOM example at the end of the file # #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII diff --git a/Pose2Sim/poseEstimation.py b/Pose2Sim/poseEstimation.py index ea355ea..9e9aed8 100644 --- a/Pose2Sim/poseEstimation.py +++ b/Pose2Sim/poseEstimation.py @@ -408,6 +408,10 @@ def rtm_estimator(config_dict): device = 'cuda' backend = 'onnxruntime' logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.") + elif torch.cuda.is_available() and 'ROCMExecutionProvider' in ort.get_available_providers(): + device = 'rocm' + backend = 'onnxruntime' + logging.info(f"\nValid ROCm installation found: using ONNXRuntime backend with GPU.") else: raise except: diff --git a/README.md b/README.md index 431c2c8..41804b9 100644 --- a/README.md +++ b/README.md @@ -352,7 +352,7 @@ Pose2Sim.poseEstimation() *N.B.:* Pose estimation can be run in `lightweight`, `balanced`, or `performance` mode.\ *N.B.:* The `pose_model` with body, feet, hands, and face is required for wrist motion but is much slower and slightly less accurate on body keypoints.\ -*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\ +*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or ROCm with AMD GPUs, or MPS with macOS), otherwise the `CPU` will be used with OpenVINO backend.\ *N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked inbetween (keypoint detection is still performed on all frames). 
@@ -537,7 +537,7 @@ If you already have a calibration file, set `calibration_type` type to `convert` ### Synchronization > _**2D points can be triangulated only if they represent the same body position across all cameras: therefore, views need to be synchronized.**_\ -For each camera, this computes mean vertical speed for the chosen keypoints, and finds the time offset for which their correlation is highest. +For each camera, the algorithm computes mean vertical speed for the chosen keypoints, and synchronizes by finding the time offset for which the correlation is highest. >***N.B.:** Skip this step if your cameras are natively synchronized.*