Clearer config on pose estimation
This commit is contained in:
parent
fbfa62ff56
commit
3a31bd05fd
@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
|
|||||||
|
|
||||||
[pose]
|
[pose]
|
||||||
vid_img_extension = 'mp4' # any video or image extension
|
vid_img_extension = 'mp4' # any video or image extension
|
||||||
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
|
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
||||||
# /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
|
|
||||||
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
||||||
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
||||||
#With mediapipe: BLAZEPOSE
|
#With mediapipe: BLAZEPOSE
|
||||||
|
@ -34,8 +34,7 @@
|
|||||||
|
|
||||||
# [pose]
|
# [pose]
|
||||||
# vid_img_extension = 'mp4' # any video or image extension
|
# vid_img_extension = 'mp4' # any video or image extension
|
||||||
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
|
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
||||||
# # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
|
|
||||||
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
||||||
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
||||||
# #With mediapipe: BLAZEPOSE
|
# #With mediapipe: BLAZEPOSE
|
||||||
|
@ -34,8 +34,7 @@ participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and sca
|
|||||||
|
|
||||||
# [pose]
|
# [pose]
|
||||||
# vid_img_extension = 'mp4' # any video or image extension
|
# vid_img_extension = 'mp4' # any video or image extension
|
||||||
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
|
# pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
||||||
# # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
|
|
||||||
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
# #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
||||||
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
# #With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
||||||
# #With mediapipe: BLAZEPOSE
|
# #With mediapipe: BLAZEPOSE
|
||||||
|
@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
|
|||||||
|
|
||||||
[pose]
|
[pose]
|
||||||
vid_img_extension = 'mp4' # any video or image extension
|
vid_img_extension = 'mp4' # any video or image extension
|
||||||
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
|
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
||||||
# /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
|
|
||||||
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
||||||
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
||||||
#With mediapipe: BLAZEPOSE
|
#With mediapipe: BLAZEPOSE
|
||||||
|
@ -34,8 +34,7 @@ exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<
|
|||||||
|
|
||||||
[pose]
|
[pose]
|
||||||
vid_img_extension = 'mp4' # any video or image extension
|
vid_img_extension = 'mp4' # any video or image extension
|
||||||
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
|
pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands, face. Slower and slightly less accurate on body keypoints but required for wrist motion), COCO_17 (body) # /!\ Only RTMPose is natively embedded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
|
||||||
# /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the files if needed
|
|
||||||
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
#With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
|
||||||
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
#With openpose: BODY_25B, BODY_25, BODY_135, COCO, MPII
|
||||||
#With mediapipe: BLAZEPOSE
|
#With mediapipe: BLAZEPOSE
|
||||||
|
@ -11,7 +11,7 @@
|
|||||||
write the results to JSON files, videos, and/or images.
|
write the results to JSON files, videos, and/or images.
|
||||||
Results can optionally be displayed in real time.
|
Results can optionally be displayed in real time.
|
||||||
|
|
||||||
Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
|
Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands, face), COCO_17 (body)
|
||||||
Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you
|
Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you
|
||||||
need nother detection or pose models)
|
need another detection or pose models)
|
||||||
|
|
||||||
@ -345,7 +345,7 @@ def rtm_estimator(config_dict):
|
|||||||
write the results to JSON files, videos, and/or images.
|
write the results to JSON files, videos, and/or images.
|
||||||
Results can optionally be displayed in real time.
|
Results can optionally be displayed in real time.
|
||||||
|
|
||||||
Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
|
Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands, face), COCO_17 (body)
|
||||||
Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you
|
Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you
|
||||||
need nother detection or pose models)
|
need another detection or pose models)
|
||||||
|
|
||||||
@ -435,7 +435,7 @@ def rtm_estimator(config_dict):
|
|||||||
logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
|
logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
|
||||||
elif pose_model.upper() == 'COCO_133':
|
elif pose_model.upper() == 'COCO_133':
|
||||||
ModelClass = Wholebody
|
ModelClass = Wholebody
|
||||||
logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
|
logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
|
||||||
elif pose_model.upper() == 'COCO_17':
|
elif pose_model.upper() == 'COCO_17':
|
||||||
ModelClass = Body # 26 keypoints(halpe26)
|
ModelClass = Body # 26 keypoints(halpe26)
|
||||||
logging.info(f"Using COCO_17 model (body) for pose estimation.")
|
logging.info(f"Using COCO_17 model (body) for pose estimation.")
|
||||||
|
@ -350,10 +350,10 @@ Pose2Sim.poseEstimation()
|
|||||||
|
|
||||||
</br>
|
</br>
|
||||||
|
|
||||||
*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\
|
|
||||||
*N.B.:* Pose estimation can be run in `lightweight`, `balanced`, or `performance` mode.\
|
*N.B.:* Pose estimation can be run in `lightweight`, `balanced`, or `performance` mode.\
|
||||||
*N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked inbetween (keypoint detection is still performed on all frames).\
|
*N.B.:* The `pose_model` with body, feet, hands, and face is required for wrist motion but is much slower and slightly less accurate on body keypoints.\
|
||||||
*N.B.:* Activating `tracking` will attempt to give consistent IDs to the same persons across frames, which might facilitate synchronization if other people are in the background.
|
*N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\
|
||||||
|
*N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked in between (keypoint detection is still performed on all frames).
|
||||||
|
|
||||||
<img src="Content/Pose2D.png" width="760">
|
<img src="Content/Pose2D.png" width="760">
|
||||||
|
|
||||||
@ -536,7 +536,7 @@ If you already have a calibration file, set `calibration_type` type to `convert`
|
|||||||
|
|
||||||
### Synchronization
|
### Synchronization
|
||||||
|
|
||||||
> _**2D points can be triangulated only if they represent the same body position across view: therefore, cameras need to be synchronized.**_\
|
> _**2D points can be triangulated only if they represent the same body position across all views: therefore, cameras need to be synchronized.**_\
|
||||||
***N.B.:** Skip this step if your cameras are natively synchronized.*
|
***N.B.:** Skip this step if your cameras are natively synchronized.*
|
||||||
|
|
||||||
Open an Anaconda prompt or a terminal in a `Session` or `Trial` folder.\
|
Open an Anaconda prompt or a terminal in a `Session` or `Trial` folder.\
|
||||||
|
Loading…
Reference in New Issue
Block a user