Compare commits
1 commit
main...experiment
Commit 3d6dbfc6d5
@@ -29,11 +29,17 @@ frame_range = [] # For example [10,300], or [] for all frames.
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
 
+save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
+
 exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
 # e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
 
 [pose]
-vid_img_extension = 'mp4' # any video or image extension
+# Webcam parameters
+webcam_ids = 0 # your webcam id 0, or [0, 1, ...] (0 is default)
+input_size = [1280, 720] # [W, H]. Lower resolution will be faster but less precise.
+
+vid_img_extension = 'mp4' # any video or image extension # 'webcam' for webcam
 pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
 # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
 #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
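The frame_range comments above describe a time-to-frames conversion. A minimal Python sketch of that arithmetic (illustrative only, not part of Config.toml):

```python
# Convert a time range in seconds to a frame range at a given frame rate
# (mirrors the "frame_range = [0.1, 2.0]*frame_rate = [6, 120]" comment above).
time_range = [0.1, 2.0]   # seconds
frame_rate = 60           # fps
frame_range = [int(t * frame_rate) for t in time_range]
assert frame_range == [6, 120]
```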
@@ -44,9 +50,8 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-display_detection = false
+show_realtime_results = false
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
-save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
 
 
@@ -45,7 +45,7 @@
 # det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
 # # overwrite_pose = true # set to false if you don't want to recalculate pose estimation when it has already been done
-# display_detection = true
+# show_realtime_results = true
 # save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 # output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
 
@@ -45,7 +45,7 @@ participant_mass = [70.0, 63.5] # kg # Only used for marker augmentation and sca
 # det_frequency = 100 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
 # # overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
-# display_detection = true
+# show_realtime_results = true
 # save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 # output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
 
@@ -29,11 +29,17 @@ frame_range = [] # For example [10,300], or [] for all frames.
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
 
+save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
+
 exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
 # e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
 
 [pose]
-vid_img_extension = 'mp4' # any video or image extension
+# Webcam parameters
+webcam_ids = 0 # your webcam id 0, or [0, 1, ...] (0 is default)
+input_size = [1280, 720] # [W, H]. Lower resolution will be faster but less precise.
+
+vid_img_extension = 'mp4' # any video or image extension # 'webcam' for webcam
 pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
 # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
 #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
@@ -44,9 +50,8 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-display_detection = true
+show_realtime_results = true
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
-save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
 
 
@@ -22,18 +22,24 @@ multi_person = false # true for trials with multiple participants. If false, onl
 participant_height = 1.72 # m # float if single person, list of float if multi-person (same order as the Static trials) # Only used for marker augmentation
 participant_mass = 70.0 # kg # Only used for marker augmentation and scaling
 
 frame_rate = 'auto' # fps # int or 'auto'. If 'auto', finds from video (or defaults to 60 fps if you work with images)
 frame_range = [] # For example [10,300], or [] for all frames.
 ## If cameras are not synchronized, designates the frame range of the camera with the shortest recording time
 ## N.B.: If you want a time range instead, use frame_range = time_range * frame_rate
 ## For example if you want to analyze from 0.1 to 2 seconds with a 60 fps frame rate,
 ## frame_range = [0.1, 2.0]*frame_rate = [6, 120]
 
+save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
+
 exclude_from_batch = [] # List of trials to be excluded from batch analysis, ['<participant_dir/trial_dir>', 'etc'].
 # e.g. ['S00_P00_Participant/S00_P00_T00_StaticTrial', 'S00_P00_Participant/S00_P00_T01_BalancingTrial']
 
 [pose]
-vid_img_extension = 'mp4' # any video or image extension
+# Webcam parameters
+webcam_ids = 0 # your webcam id 0, or [0, 1, ...] (0 is default)
+input_size = [1280, 720] # [W, H]. Lower resolution will be faster but less precise.
+
+vid_img_extension = 'mp4' # any video or image extension # 'webcam' for webcam
 pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_133 (body, feet, hands), COCO_17 (body)
 # /!\ Only RTMPose is natively embeded in Pose2Sim. For all other pose estimation methods, you will have to run them yourself, and then refer to the documentation to convert the output files if needed
 #With MMPose: HALPE_26, COCO_133, COCO_17, CUSTOM. See CUSTOM example at the end of the file
@@ -44,9 +50,8 @@ pose_model = 'HALPE_26' #With RTMLib: HALPE_26 (body and feet, default), COCO_1
 mode = 'balanced' # 'lightweight', 'balanced', 'performance'
 det_frequency = 1 # Run person detection only every N frames, and inbetween track previously detected bounding boxes (keypoint detection is still run on all frames).
 # Equal to or greater than 1, can be as high as you want in simple uncrowded cases. Much faster, but might be less accurate.
-display_detection = true
+show_realtime_results = true
 overwrite_pose = false # set to false if you don't want to recalculate pose estimation when it has already been done
-save_video = 'to_video' # 'to_video' or 'to_images', 'none', or ['to_video', 'to_images']
 output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a list of them # /!\ only 'openpose' is supported for now
 
 
@@ -52,6 +52,8 @@ from copy import deepcopy
 import logging, logging.handlers
 from datetime import datetime
 
+from Sports2D.Utilities.config import setup_logging
+
 
 ## AUTHORSHIP INFORMATION
 __author__ = "David Pagnon"
@@ -65,15 +67,6 @@ __status__ = "Development"
 
 
 ## FUNCTIONS
-def setup_logging(session_dir):
-    '''
-    Create logging file and stream handlers
-    '''
-
-    logging.basicConfig(format='%(message)s', level=logging.INFO,
-        handlers = [logging.handlers.TimedRotatingFileHandler(os.path.join(session_dir, 'logs.txt'), when='D', interval=7), logging.StreamHandler()])
-
-
 def recursive_update(dict_to_update, dict_with_new_values):
     '''
     Update nested dictionaries without overwriting existing keys in any level of nesting
@@ -118,7 +118,7 @@ class TestWorkflow(unittest.TestCase):
         os.chdir(project_dir)
         config_dict.get("project").update({"project_dir":project_dir})
         config_dict.get("pose").update({"mode":'lightweight'})
-        config_dict.get("pose").update({"display_detection":False})
+        config_dict.get("pose").update({"show_realtime_results":False})
         config_dict.get("synchronization").update({"display_sync_plots":False})
         config_dict['filtering']['display_figures'] = False
 
@@ -145,7 +145,7 @@ class TestWorkflow(unittest.TestCase):
         os.chdir(project_dir)
         config_dict.get("project").update({"project_dir":project_dir})
         config_dict.get("pose").update({"mode":'lightweight'})
-        config_dict.get("pose").update({"display_detection":False})
+        config_dict.get("pose").update({"show_realtime_results":False})
         config_dict.get("synchronization").update({"display_sync_plots":False})
         config_dict['filtering']['display_figures'] = False
 
@@ -45,7 +45,7 @@ from anytree import PreOrderIter
 import opensim
 
 from Pose2Sim.common import natural_sort_key, euclidean_distance, trimmed_mean
-from Pose2Sim.skeletons import *
+from Sports2D.Utilities.skeletons import *
 
 
 
@@ -123,15 +123,15 @@ def get_model_path(model_name, osim_setup_dir):
         pose_model_file = 'Model_Pose2Sim_Body135.osim'
     elif model_name == 'BLAZEPOSE':
         pose_model_file = 'Model_Pose2Sim_Blazepose.osim'
-    elif model_name == 'HALPE_26':
+    elif model_name in ('HALPE_26', 'BODY_WITH_FEET'):
         pose_model_file = 'Model_Pose2Sim_Halpe26.osim'
-    elif model_name == 'HALPE_68' or model_name == 'HALPE_136':
+    elif model_name in ('HALPE_68', 'HALPE_136'):
         pose_model_file = 'Model_Pose2Sim_Halpe68_136.osim'
-    elif model_name == 'COCO_133':
+    elif model_name in ('COCO_133', 'WHOLE_BODY'):
         pose_model_file = 'Model_Pose2Sim_Coco133.osim'
     # elif model_name == 'COCO' or model_name == 'MPII':
     #     pose_model_file = 'Model_Pose2Sim_Coco.osim'
-    elif model_name == 'COCO_17':
+    elif model_name in ('COCO_17', 'BODY'):
        pose_model_file = 'Model_Pose2Sim_Coco17.osim'
     elif model_name == 'LSTM':
         pose_model_file = 'Model_Pose2Sim_LSTM.osim'
@@ -163,15 +163,15 @@ def get_scaling_setup(model_name, osim_setup_dir):
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Body135.xml'
     elif model_name == 'BLAZEPOSE':
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Blazepose.xml'
-    elif model_name == 'HALPE_26':
+    elif model_name in ('HALPE_26', 'BODY_WITH_FEET'):
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Halpe26.xml'
-    elif model_name == 'HALPE_68' or model_name == 'HALPE_136':
+    elif model_name in ('HALPE_68', 'HALPE_136'):
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Halpe68_136.xml'
-    elif model_name == 'COCO_133':
+    elif model_name in ('COCO_133', 'WHOLE_BODY'):
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Coco133.xml'
     # elif model_name == 'COCO' or model_name == 'MPII':
     #     scaling_setup_file = 'Scaling_Setup_Pose2Sim_Coco.xml'
-    elif model_name == 'COCO_17':
+    elif model_name in ('COCO_17', 'BODY'):
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_Coco17.xml'
     elif model_name == 'LSTM':
         scaling_setup_file = 'Scaling_Setup_Pose2Sim_LSTM.xml'
@@ -203,15 +203,15 @@ def get_IK_Setup(model_name, osim_setup_dir):
         ik_setup_file = 'IK_Setup_Pose2Sim_Body135.xml'
     elif model_name == 'BLAZEPOSE':
         ik_setup_file = 'IK_Setup_Pose2Sim_Blazepose.xml'
-    elif model_name == 'HALPE_26':
+    elif model_name in ('HALPE_26', 'BODY_WITH_FEET'):
         ik_setup_file = 'IK_Setup_Pose2Sim_Halpe26.xml'
-    elif model_name == 'HALPE_68' or model_name == 'HALPE_136':
+    elif model_name in ('HALPE_68', 'HALPE_136'):
         ik_setup_file = 'IK_Setup_Pose2Sim_Halpe68_136.xml'
-    elif model_name == 'COCO_133':
+    elif model_name in ('COCO_133', 'WHOLE_BODY'):
         ik_setup_file = 'IK_Setup_Pose2Sim_Coco133.xml'
     # elif model_name == 'COCO' or model_name == 'MPII':
     #     ik_setup_file = 'IK_Setup_Pose2Sim_Coco.xml'
-    elif model_name == 'COCO_17':
+    elif model_name in ('COCO_17', 'BODY'):
         ik_setup_file = 'IK_Setup_Pose2Sim_Coco17.xml'
     elif model_name == 'LSTM':
         ik_setup_file = 'IK_Setup_Pose2Sim_LSTM.xml'
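The three lookup functions above now accept the generic model names used elsewhere in the pipeline ('BODY_WITH_FEET', 'WHOLE_BODY', 'BODY') alongside the skeleton-specific ones. A minimal sketch of the alias idea (illustrative only; `ALIASES` and `canonical_model_name` are not part of the repository):

```python
# Hypothetical helper: map generic model names onto the canonical skeleton names
# that the .osim/.xml setup files are keyed on.
ALIASES = {
    'BODY_WITH_FEET': 'HALPE_26',
    'WHOLE_BODY': 'COCO_133',
    'BODY': 'COCO_17',
}

def canonical_model_name(model_name: str) -> str:
    # Normalize case, then resolve a generic alias to its skeleton name.
    name = model_name.upper()
    return ALIASES.get(name, name)

assert canonical_model_name('body_with_feet') == 'HALPE_26'
assert canonical_model_name('COCO_133') == 'COCO_133'
```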
@ -47,7 +47,7 @@ import logging
|
|||||||
|
|
||||||
from Pose2Sim.common import retrieve_calib_params, computeP, weighted_triangulation, \
|
from Pose2Sim.common import retrieve_calib_params, computeP, weighted_triangulation, \
|
||||||
reprojection, euclidean_distance, sort_stringlist_by_last_number
|
reprojection, euclidean_distance, sort_stringlist_by_last_number
|
||||||
from Pose2Sim.skeletons import *
|
from Sports2D.Utilities.skeletons import *
|
||||||
|
|
||||||
|
|
||||||
## AUTHORSHIP INFORMATION
|
## AUTHORSHIP INFORMATION
|
||||||
@ -638,7 +638,7 @@ def associate_all(config_dict):
|
|||||||
# if single trial
|
# if single trial
|
||||||
session_dir = session_dir if 'Config.toml' in os.listdir(session_dir) else os.getcwd()
|
session_dir = session_dir if 'Config.toml' in os.listdir(session_dir) else os.getcwd()
|
||||||
multi_person = config_dict.get('project').get('multi_person')
|
multi_person = config_dict.get('project').get('multi_person')
|
||||||
pose_model = config_dict.get('pose').get('pose_model')
|
pose_model = config_dict.get('pose').get('pose_model').upper()
|
||||||
tracked_keypoint = config_dict.get('personAssociation').get('single_person').get('tracked_keypoint')
|
tracked_keypoint = config_dict.get('personAssociation').get('single_person').get('tracked_keypoint')
|
||||||
min_cameras_for_triangulation = config_dict.get('triangulation').get('min_cameras_for_triangulation')
|
min_cameras_for_triangulation = config_dict.get('triangulation').get('min_cameras_for_triangulation')
|
||||||
reconstruction_error_threshold = config_dict.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
|
reconstruction_error_threshold = config_dict.get('personAssociation').get('multi_person').get('reconstruction_error_threshold')
|
||||||
|
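The added `.upper()` call normalizes the configured value before it is compared against the skeleton names, so lower-case entries in Config.toml still match. A one-line illustration (assumption: users may type the model name in lower case):

```python
# 'halpe_26' written in Config.toml still resolves to the HALPE_26 skeleton after normalization.
assert 'halpe_26'.upper() == 'HALPE_26'
```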
@@ -36,13 +36,19 @@ import os
 import glob
 import json
 import logging
-import itertools as it
-from tqdm import tqdm
-import numpy as np
 import cv2
+import time
+import threading
+import queue
+import numpy as np
+from datetime import datetime
+from pathlib import Path
+from tqdm import tqdm
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
-from rtmlib import PoseTracker, Body, Wholebody, BodyWithFeet, draw_skeleton
-from Pose2Sim.common import natural_sort_key, min_with_single_indices, euclidean_distance
+from rtmlib import draw_skeleton
+from Sports2D.Utilities.config import setup_pose_tracker, setup_capture_directories
+from Sports2D.Utilities.video_management import track_people
 
 
 ## AUTHORSHIP INFORMATION
@@ -57,6 +63,471 @@ __status__ = "Development"
 
 
 ## FUNCTIONS
+def rtm_estimator(config_dict):
+    '''
+    Estimate pose from webcams, video files, or a folder of images, and write the results to JSON files, videos, and/or images.
+    Results can optionally be displayed in real-time.
+
+    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
+    Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you need another detection or pose models)
+
+    Optionally gives consistent person ID across frames (slower but good for 2D analysis)
+    Optionally runs detection every n frames and in between tracks points (faster but less accurate).
+
+    If a valid CUDA installation is detected, uses the GPU with the ONNXRuntime backend. Otherwise, uses the CPU with the OpenVINO backend.
+
+    INPUTS:
+    - videos or image folders from the video directory
+    - a Config.toml file
+
+    OUTPUTS:
+    - JSON files with the detected keypoints and confidence scores in the OpenPose format
+    - Optionally, videos and/or image files with the detected keypoints
+    '''
+
+    # Read config
+    output_dir = config_dict['project']['project_dir']
+    source_dir = os.path.join(output_dir, 'videos')
+    pose_dir = os.path.join(output_dir, 'pose')
+
+    show_realtime_results = config_dict['pose'].get('show_realtime_results', False)
+
+    vid_img_extension = config_dict['pose']['vid_img_extension']
+    webcam_ids = config_dict['pose'].get('webcam_ids', [])
+
+    overwrite_pose = config_dict['pose'].get('overwrite_pose', False)
+
+    # Check if pose estimation has already been done
+    if os.path.exists(pose_dir) and not overwrite_pose:
+        logging.info('Skipping pose estimation as it has already been done. Set overwrite_pose to true in Config.toml if you want to run it again.')
+        return
+    elif overwrite_pose:
+        logging.info('Overwriting previous pose estimation.')
+
+    logging.info('Estimating pose...')
+
+    # Prepare list of sources (webcams, videos, image folders)
+    sources = []
+
+    if vid_img_extension == 'webcam':
+        sources.extend({'type': 'webcam', 'id': cam_id, 'path': cam_id} for cam_id in (webcam_ids if isinstance(webcam_ids, list) else [webcam_ids]))
+    else:
+        video_files = [str(f) for f in Path(source_dir).rglob('*' + vid_img_extension) if f.is_file()]
+        sources.extend({'type': 'video', 'id': idx, 'path': video_path} for idx, video_path in enumerate(video_files))
+        image_dirs = [str(f) for f in Path(source_dir).iterdir() if f.is_dir()]
+        sources.extend({'type': 'images', 'id': idx, 'path': folder} for idx, folder in enumerate(image_dirs, start=len(video_files)))
+
+    if not sources:
+        raise FileNotFoundError(f'No Webcams or no media files found in {source_dir}.')
+
+    process_functions = {}
+    for source in sources:
+        if source['type'] == 'webcam':
+            process_functions[source['id']] = process_single_frame
+        else:
+            process_functions[source['id']] = process_single_frame
+
+    logging.info(f'Processing sources: {sources}')
+
+    # Initialize pose trackers
+    pose_trackers = {source['id']: setup_pose_tracker(config_dict['pose']['det_frequency'], config_dict['pose']['mode'], config_dict['pose']['pose_model']) for source in sources}
+
+    # Create display queue
+    display_queue = queue.Queue()
+
+    # Initialize streams
+    stream_manager = StreamManager(sources, config_dict, pose_trackers, display_queue, output_dir, process_functions)
+    stream_manager.start()
+
+    # Start display thread only if show_realtime_results is True
+    display_thread = None
+    if show_realtime_results:
+        input_size = config_dict['pose'].get('input_size', (640, 480))
+        display_thread = CombinedDisplayThread(sources, input_size, display_queue)
+        display_thread.start()
+
+    try:
+        while not stream_manager.stopped:
+            if display_thread and display_thread.stopped:
+                break
+            time.sleep(0.1)
+    except KeyboardInterrupt:
+        logging.info("Processing interrupted by user.")
+    finally:
+        stream_manager.stop()
+        if display_thread:
+            display_thread.stop()
+            display_thread.join()
+
+
+def process_single_frame(config_dict, frame, source_id, frame_idx, output_dirs, pose_tracker, multi_person, save_video, save_images, show_realtime_results, out_vid, output_format):
+    '''
+    Processes a single frame from a source.
+
+    Args:
+        config_dict (dict): Configuration dictionary.
+        frame (ndarray): Frame image.
+        source_id (int): Source ID.
+        frame_idx (int): Frame index.
+        output_dirs (tuple): Output directories.
+        pose_tracker: Pose tracker object.
+        multi_person (bool): Whether to track multiple persons.
+        output_format (str): Output format.
+        save_video (bool): Whether to save the output video.
+        save_images (bool): Whether to save output images.
+        show_realtime_results (bool): Whether to display results in real time.
+        out_vid (cv2.VideoWriter): Video writer object.
+
+    Returns:
+        tuple: (source_id, img_show, out_vid)
+    '''
+
+    output_dir, output_dir_name, img_output_dir, json_output_dir, output_video_path = output_dirs
+
+    # Perform pose estimation on the frame
+    keypoints, scores = pose_tracker(frame)
+
+    # Tracking people IDs across frames (if needed)
+    keypoints, scores, _ = track_people(
+        keypoints, scores, multi_person, None, None, pose_tracker
+    )
+
+    if 'openpose' in output_format:
+        json_file_path = os.path.join(json_output_dir, f'{output_dir_name}_{frame_idx:06d}.json')
+        save_to_openpose(json_file_path, keypoints, scores)
+
+    # Draw skeleton on the frame
+    img_show = draw_skeleton(frame.copy(), keypoints, scores, kpt_thr=0.1)
+
+    # Save video and images
+    if save_video and out_vid:
+        out_vid.write(img_show)
+
+    if save_images:
+        cv2.imwrite(os.path.join(img_output_dir, f'{output_dir_name}_{frame_idx:06d}.jpg'), img_show)
+
+    return source_id, img_show, out_vid
+
+
+class CombinedDisplayThread(threading.Thread):
+    '''
+    Thread for displaying combined images to avoid thread-safety issues with OpenCV.
+    '''
+    def __init__(self, sources, input_size, display_queue):
+        super().__init__(daemon=True)
+        self.display_queue = display_queue
+        self.stopped = False
+        self.sources = sources
+        self.input_size = input_size
+        self.window_name = "Combined Feeds"
+        self.grid_size = self.calculate_grid_size(len(sources))
+        self.img_placeholder = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
+        self.frames = {source['id']: self.get_placeholder_frame(source['id'], 'Not Connected') for source in sources}
+        self.source_ids = [source['id'] for source in self.sources]
+
+    def run(self):
+        while not self.stopped:
+            try:
+                frames_dict = self.display_queue.get(timeout=0.1)
+                self.frames.update({source_id: frames_dict.get(source_id, self.frames[source_id]) for source_id in self.source_ids})
+                self.display_combined_image()
+            except queue.Empty:
+                continue
+
+    def display_combined_image(self):
+        combined_image = self.combine_frames()
+        if combined_image is not None:
+            cv2.imshow(self.window_name, combined_image)
+            if cv2.waitKey(1) & 0xFF in [ord('q'), 27]:
+                logging.info("Display window closed by user.")
+                self.stopped = True
+
+    def combine_frames(self):
+        resized_frames = [cv2.resize(frame, self.input_size) if frame.shape[:2] != self.input_size else frame
+                          for frame in (self.frames.get(source_id, self.img_placeholder) for source_id in self.source_ids)]
+        rows = [np.hstack(resized_frames[i:i + self.grid_size[1]]) for i in range(0, len(resized_frames), self.grid_size[1])]
+        return np.vstack(rows)
+
+    def calculate_grid_size(self, num_sources):
+        cols = int(np.ceil(np.sqrt(num_sources)))
+        rows = int(np.ceil(num_sources / cols))
+        return (rows, cols)
+
+    def stop(self):
+        self.stopped = True
+
+    def get_placeholder_frame(self, source_id, message):
+        return cv2.putText(self.img_placeholder, f'Source {source_id}: {message}', (50, self.input_size[1] // 2),
+                           cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
+
+    def __del__(self):
+        cv2.destroyAllWindows()
+
+
+class StreamManager:
+    def __init__(self, sources, config_dict, pose_trackers, display_queue, output_dir, process_functions):
+        self.sources = sources
+        self.config_dict = config_dict
+        self.pose_trackers = pose_trackers
+        self.display_queue = display_queue
+        self.output_dir = output_dir
+        self.process_functions = process_functions
+        self.executor = ThreadPoolExecutor(max_workers=len(sources) * 2)
+        self.active_streams = set()
+        self.streams, self.outputs, self.out_videos = {}, {}, {}
+        self.initialize_streams_and_outputs()
+        self.stopped = False
+
+    def initialize_streams_and_outputs(self):
+        for source in self.sources:
+            stream = GenericStream(source, self.config_dict)
+            self.streams[source['id']] = stream
+            self.outputs[source['id']] = setup_capture_directories(
+                source['path'], self.output_dir, 'to_images' in self.config_dict['project'].get('save_video', []))
+            self.out_videos[source['id']] = None
+            self.active_streams.add(source['id'])
+
+    def start(self):
+        for source in self.sources:
+            self.streams[source['id']].start()
+        threading.Thread(target=self.process_streams, daemon=True).start()
+
+    def process_streams(self):
+        logging.info("Starting stream processing")
+        while not self.stopped and any(stream.stopped is False for stream in self.streams.values()):
+            frames = {}
+            for source_id, stream in self.streams.items():
+                if not stream.stopped:
+                    frame_idx, frame = stream.read()
+                    if frame is not None:
+                        frames[source_id] = (frame_idx, frame)
+            if frames:
+                futures = {self.executor.submit(self.process_frame, source_id, frame_idx, frame): source_id
+                           for source_id, (frame_idx, frame) in frames.items()}
+                self.handle_future_results(futures)
+            else:
+                logging.info("No frame currently available, waiting...")
+                time.sleep(0.1)
+
+    def process_frame(self, source_id, frame_idx, frame):
+        logging.info(f"Processing frame {frame_idx} from source {source_id}")
+        process_function = self.process_functions.get(source_id)
+        return process_function(self.config_dict,
+                                frame,
+                                source_id,
+                                frame_idx,
+                                self.outputs[source_id],
+                                self.pose_trackers[source_id],
+                                self.config_dict['project'].get('multi_person'),
+                                'to_video' in self.config_dict['project'].get('save_video', []),
+                                'to_images' in self.config_dict['project'].get('save_video', []),
+                                self.config_dict['pose'].get('show_realtime_results', False),
+                                self.out_videos.get(source_id),
+                                self.config_dict['pose'].get('output_format', 'openpose'))
+
+    def handle_future_results(self, futures):
+        for future in as_completed(futures):
+            source_id = futures[future]
+            try:
+                result = future.result()
+                source_id, processed_frame, out_vid = result
+                self.display_queue.put({source_id: processed_frame})
+            except Exception as e:
+                logging.error(f"Error processing frame from source {source_id}: {e}")
+
+    def initialize_video_writer(self, img_show, source_id):
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        H, W = img_show.shape[:2]
+        fps = self.config_dict['pose'].get('fps', 30)
+        output_video_path = f"{self.outputs[source_id]}/{source_id}_output.mp4"
+        return cv2.VideoWriter(output_video_path, fourcc, fps, (W, H))
+
+    def stop(self):
+        self.stopped = True
+        for stream in self.streams.values():
+            stream.stop()
+        self.executor.shutdown()
+        for out_vid in self.out_videos.values():
+            if out_vid is not None:
+                out_vid.release()
+
+
+class GenericStream(threading.Thread):
+    def __init__(self, source, config_dict):
+        super().__init__(daemon=True)
+        self.source = source
+        self.input_size = config_dict['pose'].get('input_size', (640, 480))
+        self.image_extension = config_dict['pose']['vid_img_extension']
+        self.frame_ranges = self.parse_frame_ranges(config_dict['project'].get('frame_range', []))
+        self.stopped = False
+        self.frame_queue = queue.Queue()
+        self.lock = threading.Lock()
+        self.frame_idx = 0
+        self.total_frames = 0
+        self.cap = None
+        self.image_files = []
+        self.image_index = 0
+        self.pbar = None
+
+    def parse_frame_ranges(self, frame_ranges):
+        if len(frame_ranges) == 2 and all(isinstance(x, int) for x in frame_ranges):
+            start_frame, end_frame = frame_ranges
+            return set(range(start_frame, end_frame + 1))
+        elif len(frame_ranges) == 0:
+            return set(range(0, int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))))
+        else:
+            return set(frame_ranges)
+
+    def run(self):
+        if self.source['type'] == 'webcam':
+            self.setup_webcam()
+        elif self.source['type'] == 'video':
+            self.open_video()
+        elif self.source['type'] == 'images':
+            self.load_images()
+        else:
+            logging.error(f"Unknown source type: {self.source['type']}")
+            self.stopped = True
+            return
+
+        while not self.stopped:
+            frame = self.capture_frame()
+            if frame is not None:
+                frame = cv2.resize(frame, self.input_size)
+                self.frame_queue.put((self.frame_idx, frame))
+                if self.pbar:
+                    self.pbar.update(1)
+                self.frame_idx += 1
+            else:
+                time.sleep(0.1)
+
+    def setup_webcam(self):
+        self.open_webcam()
+        time.sleep(1)  # Give time for the webcam to initialize
+
+    def open_video(self):
+        self.cap = cv2.VideoCapture(self.source['path'])
+        if not self.cap.isOpened():
+            logging.error(f"Cannot open video file {self.source['path']}")
+            self.stopped = True
+            return
+        self.total_frames = len(self.frame_ranges)
+        self.setup_progress_bar()
+
+    def load_images(self):
+        path_pattern = os.path.join(self.source['path'], f'*{self.image_extension}')
+        self.image_files = sorted(glob.glob(path_pattern))
+        self.total_frames = len(self.image_files)
+        self.setup_progress_bar()
+
+    def capture_frame(self):
+        frame = None
+        if self.source['type'] == 'webcam':
+            frame = self.read_webcam_frame()
+        elif self.source['type'] == 'video':
+            if self.frame_ranges and self.frame_idx not in self.frame_ranges:
+                self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_idx + 1)
+                self.frame_idx += 1
+                return None
+
+            ret, frame = self.cap.read()
+            if not ret:
+                logging.info(f"End of video {self.source['path']}")
+                self.stopped = True
+                if self.pbar:
+                    self.pbar.close()
+                return None
+        elif self.source['type'] == 'images' and self.image_index < len(self.image_files):
+            frame = cv2.imread(self.image_files[self.image_index])
+            self.image_index += 1
+
+        return frame
+
+    def open_webcam(self):
+        self.connected = False
+        try:
+            self.cap = cv2.VideoCapture(int(self.source['id']), cv2.CAP_DSHOW)
+            if self.cap.isOpened():
+                self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.input_size[0])
+                self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.input_size[1])
+                logging.info(f"Webcam {self.source['id']} opened.")
+                self.connected = True
+            else:
+                logging.error(f"Cannot open webcam {self.source['id']}.")
+                self.cap = None
+        except Exception as e:
+            logging.error(f"Exception occurred while opening webcam {self.source['id']}: {e}")
+            self.cap = None
+
+    def read_webcam_frame(self):
+        if self.cap is None or not self.cap.isOpened():
+            logging.warning(f"Webcam {self.source['id']} not opened. Attempting to open...")
+            self.open_webcam()
+            if self.cap is None or not self.cap.isOpened():
+                with self.lock:
+                    self.frame = None
+                return None
+        ret, frame = self.cap.read()
+        if not ret or frame is None:
+            logging.warning(f"Failed to read frame from webcam {self.source['id']}.")
+            self.cap.release()
+            self.cap = None
+            with self.lock:
+                self.frame = None
+            return None
+        return frame
+
+    def read(self):
+        try:
+            return self.frame_queue.get(timeout=0.1)
+        except queue.Empty:
+            return None, None
+
+    def stop(self):
+        self.stopped = True
+        if self.cap:
+            self.cap.release()
+        if self.pbar:
+            self.pbar.close()
+
+    def setup_progress_bar(self):
+        self.pbar = tqdm(total=self.total_frames, desc=f'Processing {os.path.basename(str(self.source["path"]))}', position=self.source['id'])
+
+
+def setup_capture_directories(source_path, output_dir, save_images):
+    '''
+    Set up output directories for saving images and JSON files.
+
+    Returns:
+        tuple: (output_dir, output_dir_name, img_output_dir, json_output_dir, output_video_path)
+    '''
+    if isinstance(source_path, int):
+        # Handle webcam source
+        current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
+        output_dir_name = f'webcam{source_path}_{current_date}'
+    else:
+        output_dir_name = os.path.basename(os.path.splitext(str(source_path))[0])
+
+    # Define the full path for the output directory
+    output_dir_full = os.path.abspath(os.path.join(output_dir, "pose"))
+
+    # Create output directories if they do not exist
+    if not os.path.isdir(output_dir_full):
+        os.makedirs(output_dir_full)
+
+    # Prepare directories for images and JSON outputs
+    img_output_dir = os.path.join(output_dir_full, f'{output_dir_name}_img')
+    json_output_dir = os.path.join(output_dir_full, f'{output_dir_name}_json')
+    if save_images and not os.path.isdir(img_output_dir):
+        os.makedirs(img_output_dir)
+    if not os.path.isdir(json_output_dir):
+        os.makedirs(json_output_dir)
+
+    # Define the path for the output video file
+    output_video_path = os.path.join(output_dir_full, f'{output_dir_name}_pose.mp4')
+
+    return output_dir, output_dir_name, img_output_dir, json_output_dir, output_video_path
+
+
 def save_to_openpose(json_file_path, keypoints, scores):
     '''
     Save the keypoints and scores to a JSON file in the OpenPose format
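For orientation, a minimal driving sketch of the new `rtm_estimator()` entry point. The dictionary keys mirror the fields read by the function above; the module path and the concrete values are assumptions for illustration, not part of this commit:

```python
# Sketch only: assumes Pose2Sim (with its Sports2D and rtmlib dependencies) is installed
# and that ./my_trial/videos contains the input videos or image folders.
from Pose2Sim.poseEstimation import rtm_estimator  # module path assumed from context

config_dict = {
    'project': {
        'project_dir': './my_trial',
        'multi_person': False,
        'frame_range': [],
        'save_video': ['to_video'],
    },
    'pose': {
        'vid_img_extension': 'mp4',   # 'webcam' to stream from webcam_ids instead
        'webcam_ids': 0,
        'input_size': [1280, 720],
        'pose_model': 'HALPE_26',
        'mode': 'balanced',
        'det_frequency': 1,
        'show_realtime_results': False,
        'overwrite_pose': False,
        'output_format': 'openpose',
    },
}

rtm_estimator(config_dict)
```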
@ -72,419 +543,26 @@ def save_to_openpose(json_file_path, keypoints, scores):
|
|||||||
|
|
||||||
# Prepare keypoints with confidence scores for JSON output
|
# Prepare keypoints with confidence scores for JSON output
|
||||||
nb_detections = len(keypoints)
|
nb_detections = len(keypoints)
|
||||||
# print('results: ', keypoints, scores)
|
|
||||||
detections = []
|
detections = []
|
||||||
for i in range(nb_detections): # nb of detected people
|
for i in range(nb_detections): # Number of detected people
|
||||||
keypoints_with_confidence_i = []
|
keypoints_with_confidence_i = []
|
||||||
for kp, score in zip(keypoints[i], scores[i]):
|
for kp, score in zip(keypoints[i], scores[i]):
|
||||||
keypoints_with_confidence_i.extend([kp[0].item(), kp[1].item(), score.item()])
|
keypoints_with_confidence_i.extend([kp[0].item(), kp[1].item(), score.item()])
|
||||||
detections.append({
|
detections.append({
|
||||||
"person_id": [-1],
|
"person_id": [-1],
|
||||||
"pose_keypoints_2d": keypoints_with_confidence_i,
|
"pose_keypoints_2d": keypoints_with_confidence_i,
|
||||||
"face_keypoints_2d": [],
|
"face_keypoints_2d": [],
|
||||||
"hand_left_keypoints_2d": [],
|
"hand_left_keypoints_2d": [],
|
||||||
"hand_right_keypoints_2d": [],
|
"hand_right_keypoints_2d": [],
|
||||||
"pose_keypoints_3d": [],
|
"pose_keypoints_3d": [],
|
||||||
"face_keypoints_3d": [],
|
"face_keypoints_3d": [],
|
||||||
"hand_left_keypoints_3d": [],
|
"hand_left_keypoints_3d": [],
|
||||||
"hand_right_keypoints_3d": []
|
"hand_right_keypoints_3d": []
|
||||||
})
|
})
|
||||||
|
|
||||||
# Create JSON output structure
|
# Create JSON output structure
|
||||||
json_output = {"version": 1.3, "people": detections}
|
json_output = {"version": 1.3, "people": detections}
|
||||||
|
|
||||||
# Save JSON output for each frame
|
# Save JSON output for each frame
|
||||||
json_output_dir = os.path.abspath(os.path.join(json_file_path, '..'))
|
|
||||||
if not os.path.isdir(json_output_dir): os.makedirs(json_output_dir)
|
|
||||||
with open(json_file_path, 'w') as json_file:
|
with open(json_file_path, 'w') as json_file:
|
||||||
json.dump(json_output, json_file)
|
json.dump(json_output, json_file)
|
||||||
|
|
||||||
|
|
||||||
def sort_people_sports2d(keyptpre, keypt, scores):
|
|
||||||
'''
|
|
||||||
Associate persons across frames (Pose2Sim method)
|
|
||||||
Persons' indices are sometimes swapped when changing frame
|
|
||||||
A person is associated to another in the next frame when they are at a small distance
|
|
||||||
|
|
||||||
N.B.: Requires min_with_single_indices and euclidian_distance function (see common.py)
|
|
||||||
|
|
||||||
INPUTS:
|
|
||||||
- keyptpre: array of shape K, L, M with K the number of detected persons,
|
|
||||||
L the number of detected keypoints, M their 2D coordinates
|
|
||||||
- keypt: idem keyptpre, for current frame
|
|
||||||
- score: array of shape K, L with K the number of detected persons,
|
|
||||||
L the confidence of detected keypoints
|
|
||||||
|
|
||||||
OUTPUTS:
|
|
||||||
- sorted_prev_keypoints: array with reordered persons with values of previous frame if current is empty
|
|
||||||
- sorted_keypoints: array with reordered persons
|
|
||||||
- sorted_scores: array with reordered scores
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Generate possible person correspondences across frames
|
|
||||||
if len(keyptpre) < len(keypt):
|
|
||||||
keyptpre = np.concatenate((keyptpre, np.full((len(keypt)-len(keyptpre), keypt.shape[1], 2), np.nan)))
|
|
||||||
if len(keypt) < len(keyptpre):
|
|
||||||
keypt = np.concatenate((keypt, np.full((len(keyptpre)-len(keypt), keypt.shape[1], 2), np.nan)))
|
|
||||||
scores = np.concatenate((scores, np.full((len(keyptpre)-len(scores), scores.shape[1]), np.nan)))
|
|
||||||
personsIDs_comb = sorted(list(it.product(range(len(keyptpre)), range(len(keypt)))))
|
|
||||||
|
|
||||||
# Compute distance between persons from one frame to another
|
|
||||||
frame_by_frame_dist = []
|
|
||||||
for comb in personsIDs_comb:
|
|
||||||
frame_by_frame_dist += [euclidean_distance(keyptpre[comb[0]],keypt[comb[1]])]
|
|
||||||
frame_by_frame_dist = np.mean(frame_by_frame_dist, axis=1)
|
|
||||||
|
|
||||||
# Sort correspondences by distance
|
|
||||||
_, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
|
|
||||||
|
|
||||||
# Associate points to same index across frames, nan if no correspondence
|
|
||||||
sorted_keypoints, sorted_scores = [], []
|
|
||||||
for i in range(len(keyptpre)):
|
|
||||||
id_in_old = associated_tuples[:,1][associated_tuples[:,0] == i].tolist()
|
|
||||||
if len(id_in_old) > 0:
|
|
||||||
sorted_keypoints += [keypt[id_in_old[0]]]
|
|
||||||
sorted_scores += [scores[id_in_old[0]]]
|
|
||||||
else:
|
|
||||||
sorted_keypoints += [keypt[i]]
|
|
||||||
sorted_scores += [scores[i]]
|
|
||||||
sorted_keypoints, sorted_scores = np.array(sorted_keypoints), np.array(sorted_scores)
|
|
||||||
|
|
||||||
# Keep track of previous values even when missing for more than one frame
|
|
||||||
sorted_prev_keypoints = np.where(np.isnan(sorted_keypoints) & ~np.isnan(keyptpre), keyptpre, sorted_keypoints)
|
|
||||||
|
|
||||||
return sorted_prev_keypoints, sorted_keypoints, sorted_scores
|
|
||||||
|
|
||||||
|
|
||||||
def process_video(video_path, pose_tracker, output_format, save_video, save_images, display_detection, frame_range, multi_person):
    '''
    Estimate pose from a video file

    INPUTS:
    - video_path: str. Path to the input video file
    - pose_tracker: PoseTracker. Initialized pose tracker object from RTMLib
    - output_format: str. Output format for the pose estimation results ('openpose', 'mmpose', 'deeplabcut')
    - save_video: bool. Whether to save the output video
    - save_images: bool. Whether to save the output images
    - display_detection: bool. Whether to show real-time visualization
    - frame_range: list. Range of frames to process
    - multi_person: bool. Whether to sort people across frames so that person IDs stay consistent

    OUTPUTS:
    - JSON files with the detected keypoints and confidence scores in the OpenPose format
    - if save_video: Video file with the detected keypoints and confidence scores drawn on the frames
    - if save_images: Image files with the detected keypoints and confidence scores drawn on the frames
    '''

    try:
        cap = cv2.VideoCapture(video_path)
        cap.read()
        if not cap.read()[0]:
            raise
    except:
        raise NameError(f"{video_path} is not a video. Images must be put in one subdirectory per camera.")

    pose_dir = os.path.abspath(os.path.join(video_path, '..', '..', 'pose'))
    if not os.path.isdir(pose_dir): os.makedirs(pose_dir)
    video_name_wo_ext = os.path.splitext(os.path.basename(video_path))[0]
    json_output_dir = os.path.join(pose_dir, f'{video_name_wo_ext}_json')
    output_video_path = os.path.join(pose_dir, f'{video_name_wo_ext}_pose.mp4')
    img_output_dir = os.path.join(pose_dir, f'{video_name_wo_ext}_img')

    if save_video:  # Set up video writer
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for the output video
        fps = cap.get(cv2.CAP_PROP_FPS)  # Get the frame rate from the raw video
        W, H = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # Get the width and height from the raw video
        out = cv2.VideoWriter(output_video_path, fourcc, fps, (W, H))  # Create the output video file

    if display_detection:
        cv2.namedWindow(f"Pose Estimation {os.path.basename(video_path)}", cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)

    frame_idx = 0
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    f_range = [total_frames] if frame_range == [] else frame_range
    with tqdm(total=total_frames, desc=f'Processing {os.path.basename(video_path)}') as pbar:
        while cap.isOpened():
            # print('\nFrame ', frame_idx)
            success, frame = cap.read()
            if not success:
                break

            if frame_idx in range(*f_range):
                # Perform pose estimation on the frame
                keypoints, scores = pose_tracker(frame)

                # Tracking people IDs across frames
                if multi_person:
                    if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
                    prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores)

                # Save to json
                if 'openpose' in output_format:
                    json_file_path = os.path.join(json_output_dir, f'{video_name_wo_ext}_{frame_idx:06d}.json')
                    save_to_openpose(json_file_path, keypoints, scores)

                # Draw skeleton on the frame
                if display_detection or save_video or save_images:
                    img_show = frame.copy()
                    img_show = draw_skeleton(img_show, keypoints, scores, kpt_thr=0.1)  # maybe change this value if 0.1 is too low

                if display_detection:
                    cv2.imshow(f"Pose Estimation {os.path.basename(video_path)}", img_show)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                if save_video:
                    out.write(img_show)

                if save_images:
                    if not os.path.isdir(img_output_dir): os.makedirs(img_output_dir)
                    cv2.imwrite(os.path.join(img_output_dir, f'{video_name_wo_ext}_{frame_idx:06d}.jpg'), img_show)

            frame_idx += 1
            pbar.update(1)

    cap.release()
    if save_video:
        out.release()
        logging.info(f"--> Output video saved to {output_video_path}.")
    if save_images:
        logging.info(f"--> Output images saved to {img_output_dir}.")
    if display_detection:
        cv2.destroyAllWindows()

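# Illustrative usage sketch (not part of the original module): processing one video with an
# already-initialized RTMLib tracker. The path and settings below are placeholder values; the
# tracker arguments simply mirror the ones used in rtm_estimator() further down.
#
# from rtmlib import PoseTracker, BodyWithFeet
# tracker = PoseTracker(BodyWithFeet, det_frequency=100, mode='balanced',
#                       backend='openvino', device='cpu', tracking=False, to_openpose=False)
# process_video('Demo/videos/cam01.mp4', tracker, 'openpose',
#               save_video=True, save_images=False, display_detection=False,
#               frame_range=[], multi_person=True)
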
def process_images(image_folder_path, vid_img_extension, pose_tracker, output_format, fps, save_video, save_images, display_detection, frame_range, multi_person):
    '''
    Estimate pose from a folder of images

    INPUTS:
    - image_folder_path: str. Path to the input image folder
    - vid_img_extension: str. Extension of the image files
    - pose_tracker: PoseTracker. Initialized pose tracker object from RTMLib
    - output_format: str. Output format for the pose estimation results ('openpose', 'mmpose', 'deeplabcut')
    - fps: int. Frame rate used for the output video if save_video is True
    - save_video: bool. Whether to save the output video
    - save_images: bool. Whether to save the output images
    - display_detection: bool. Whether to show real-time visualization
    - frame_range: list. Range of frames to process
    - multi_person: bool. Whether to sort people across frames so that person IDs stay consistent

    OUTPUTS:
    - JSON files with the detected keypoints and confidence scores in the OpenPose format
    - if save_video: Video file with the detected keypoints and confidence scores drawn on the frames
    - if save_images: Image files with the detected keypoints and confidence scores drawn on the frames
    '''

    pose_dir = os.path.abspath(os.path.join(image_folder_path, '..', '..', 'pose'))
    if not os.path.isdir(pose_dir): os.makedirs(pose_dir)
    json_output_dir = os.path.join(pose_dir, f'{os.path.basename(image_folder_path)}_json')
    output_video_path = os.path.join(pose_dir, f'{os.path.basename(image_folder_path)}_pose.mp4')
    img_output_dir = os.path.join(pose_dir, f'{os.path.basename(image_folder_path)}_img')

    image_files = glob.glob(os.path.join(image_folder_path, '*' + vid_img_extension))
    image_files = sorted(image_files, key=natural_sort_key)

    if save_video:  # Set up video writer
        logging.warning(f'Using a frame rate of {fps} fps for the output video.')
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for the output video
        W, H = cv2.imread(image_files[0]).shape[:2][::-1]  # Get the width and height from the first image (assuming all images have the same size)
        out = cv2.VideoWriter(output_video_path, fourcc, fps, (W, H))  # Create the output video file

    if display_detection:
        cv2.namedWindow(f"Pose Estimation {os.path.basename(image_folder_path)}", cv2.WINDOW_NORMAL)

    f_range = [len(image_files)] if frame_range == [] else frame_range
    for frame_idx, image_file in enumerate(tqdm(image_files, desc=f'\nProcessing {os.path.basename(img_output_dir)}')):
        if frame_idx in range(*f_range):
            try:
                frame = cv2.imread(image_file)
            except:
                raise NameError(f"{image_file} is not an image. Videos must be put in the video directory, not in subdirectories.")

            # Perform pose estimation on the image
            keypoints, scores = pose_tracker(frame)

            # Tracking people IDs across frames
            if multi_person:
                if 'prev_keypoints' not in locals(): prev_keypoints = keypoints
                prev_keypoints, keypoints, scores = sort_people_sports2d(prev_keypoints, keypoints, scores)

            # Save to json
            if 'openpose' in output_format:
                json_file_path = os.path.join(json_output_dir, f"{os.path.splitext(os.path.basename(image_file))[0]}_{frame_idx:06d}.json")
                save_to_openpose(json_file_path, keypoints, scores)

            # Draw skeleton on the image
            if display_detection or save_video or save_images:
                img_show = frame.copy()
                img_show = draw_skeleton(img_show, keypoints, scores, kpt_thr=0.1)  # maybe change this value if 0.1 is too low

            if display_detection:
                cv2.imshow(f"Pose Estimation {os.path.basename(image_folder_path)}", img_show)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            if save_video:
                out.write(img_show)

            if save_images:
                if not os.path.isdir(img_output_dir): os.makedirs(img_output_dir)
                cv2.imwrite(os.path.join(img_output_dir, f'{os.path.splitext(os.path.basename(image_file))[0]}_{frame_idx:06d}.png'), img_show)

    if save_video:
        out.release()
        logging.info(f"--> Output video saved to {output_video_path}.")
    if save_images:
        logging.info(f"--> Output images saved to {img_output_dir}.")
    if display_detection:
        cv2.destroyAllWindows()

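# Layout expected by the two functions above (sketch; 'cam01' is a placeholder name):
#
#   <project_dir>/
#       videos/
#           cam01.mp4          # one video file per camera, or
#           cam01/             # one image folder per camera (files with vid_img_extension)
#       pose/                  # created automatically next to 'videos'
#           cam01_json/        # OpenPose-format JSON files, one per frame
#           cam01_pose.mp4     # written if save_video
#           cam01_img/         # written if save_images
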
def rtm_estimator(config_dict):
    '''
    Estimate pose from a video file or a folder of images and
    write the results to JSON files, videos, and/or images.
    Results can optionally be displayed in real time.

    Supported models: HALPE_26 (default, body and feet), COCO_133 (body, feet, hands), COCO_17 (body)
    Supported modes: lightweight, balanced, performance (edit paths at rtmlib/tools/solutions if you
    need another detection or pose model)

    Optionally gives consistent person IDs across frames (slower but good for 2D analysis)
    Optionally runs detection every n frames and in between tracks points (faster but less accurate).

    If a valid CUDA installation is detected, uses the GPU with the ONNXRuntime backend. Otherwise,
    uses the CPU with the OpenVINO backend.

    INPUTS:
    - videos or image folders from the video directory
    - a Config.toml file

    OUTPUTS:
    - JSON files with the detected keypoints and confidence scores in the OpenPose format
    - Optionally, videos and/or image files with the detected keypoints
    '''

    # Read config
    project_dir = config_dict['project']['project_dir']
    # if batch
    session_dir = os.path.realpath(os.path.join(project_dir, '..'))
    # if single trial
    session_dir = session_dir if 'Config.toml' in os.listdir(session_dir) else os.getcwd()
    frame_range = config_dict.get('project').get('frame_range')
    multi_person = config_dict.get('project').get('multi_person')
    video_dir = os.path.join(project_dir, 'videos')
    pose_dir = os.path.join(project_dir, 'pose')

    pose_model = config_dict['pose']['pose_model']
    mode = config_dict['pose']['mode']  # lightweight, balanced, performance
    vid_img_extension = config_dict['pose']['vid_img_extension']

    output_format = config_dict['pose']['output_format']
    save_video = 'to_video' in config_dict['pose']['save_video']
    save_images = 'to_images' in config_dict['pose']['save_video']
    display_detection = config_dict['pose']['display_detection']
    overwrite_pose = config_dict['pose']['overwrite_pose']
    det_frequency = config_dict['pose']['det_frequency']

    # Determine frame rate
    video_files = glob.glob(os.path.join(video_dir, '*' + vid_img_extension))
    frame_rate = config_dict.get('project').get('frame_rate')
    if frame_rate == 'auto':
        try:
            cap = cv2.VideoCapture(video_files[0])
            if not cap.isOpened():
                raise FileNotFoundError(f'Error: Could not open {video_files[0]}. Check that the file exists.')
            frame_rate = cap.get(cv2.CAP_PROP_FPS)
            if frame_rate == 0:
                frame_rate = 30
                logging.warning(f'Error: Could not retrieve frame rate from {video_files[0]}. Defaulting to 30 fps.')
        except:
            frame_rate = 30

    # If CUDA is available, use it with the ONNXRuntime backend; else use the CPU with OpenVINO
    try:
        import torch
        import onnxruntime as ort
        if torch.cuda.is_available() and 'CUDAExecutionProvider' in ort.get_available_providers():
            device = 'cuda'
            backend = 'onnxruntime'
            logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
        elif torch.cuda.is_available() and 'ROCMExecutionProvider' in ort.get_available_providers():
            device = 'rocm'
            backend = 'onnxruntime'
            logging.info(f"\nValid ROCM installation found: using ONNXRuntime backend with GPU.")
        else:
            raise
    except:
        try:
            import onnxruntime as ort
            if 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
                device = 'mps'
                backend = 'onnxruntime'
                logging.info(f"\nValid MPS installation found: using ONNXRuntime backend with GPU.")
            else:
                raise
        except:
            device = 'cpu'
            backend = 'openvino'
            logging.info(f"\nNo valid CUDA installation found: using OpenVINO backend with CPU.")

    if det_frequency > 1:
        logging.info(f'Inference run only every {det_frequency} frames. In between, pose estimation tracks previously detected points.')
    elif det_frequency == 1:
        logging.info(f'Inference run on every single frame.')
    else:
        raise ValueError(f"Invalid det_frequency: {det_frequency}. Must be an integer greater than or equal to 1.")

    # Select the appropriate model based on the model_type
    if pose_model.upper() == 'HALPE_26':
        ModelClass = BodyWithFeet
        logging.info(f"Using HALPE_26 model (body and feet) for pose estimation.")
    elif pose_model.upper() == 'COCO_133':
        ModelClass = Wholebody
        logging.info(f"Using COCO_133 model (body, feet, hands, and face) for pose estimation.")
    elif pose_model.upper() == 'COCO_17':
        ModelClass = Body  # 17 keypoints
        logging.info(f"Using COCO_17 model (body) for pose estimation.")
    else:
        raise ValueError(f"Invalid model_type: {pose_model}. Must be 'HALPE_26', 'COCO_133', or 'COCO_17'. Use another network (MMPose, DeepLabCut, OpenPose, AlphaPose, BlazePose...) and convert the output files if you need another model. See documentation.")
    logging.info(f'Mode: {mode}.\n')

    # Initialize the pose tracker
    pose_tracker = PoseTracker(
        ModelClass,
        det_frequency=det_frequency,
        mode=mode,
        backend=backend,
        device=device,
        tracking=False,
        to_openpose=False)

    logging.info('\nEstimating pose...')
    try:
        pose_listdirs_names = next(os.walk(pose_dir))[1]
        os.listdir(os.path.join(pose_dir, pose_listdirs_names[0]))[0]
        if not overwrite_pose:
            logging.info('Skipping pose estimation as it has already been done. Set overwrite_pose to true in Config.toml if you want to run it again.')
        else:
            logging.info('Overwriting previous pose estimation. Set overwrite_pose to false in Config.toml if you want to keep the previous results.')
            raise

    except:
        video_files = glob.glob(os.path.join(video_dir, '*' + vid_img_extension))
        if len(video_files) > 0:
            # Process video files
            logging.info(f'Found video files with extension {vid_img_extension}.')
            for video_path in video_files:
                pose_tracker.reset()
                process_video(video_path, pose_tracker, output_format, save_video, save_images, display_detection, frame_range, multi_person)

        else:
            # Process image folders
            logging.info(f'Found image folders with extension {vid_img_extension}.')
            image_folders = [f for f in os.listdir(video_dir) if os.path.isdir(os.path.join(video_dir, f))]
            for image_folder in image_folders:
                pose_tracker.reset()
                image_folder_path = os.path.join(video_dir, image_folder)
                process_images(image_folder_path, vid_img_extension, pose_tracker, output_format, frame_rate, save_video, save_images, display_detection, frame_range, multi_person)

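# Illustrative usage sketch (not part of the original module): running pose estimation on a
# single trial from a Config.toml loaded into a dict. tomllib (standard library, Python 3.11+)
# is only one way the dict could be obtained; Pose2Sim normally handles config loading itself.
#
# import tomllib
# with open('Config.toml', 'rb') as f:
#     config_dict = tomllib.load(f)
# config_dict['project']['project_dir'] = '.'   # hypothetical: point at the trial directory
# rtm_estimator(config_dict)
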
@ -1,484 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-


'''
###########################################################################
## SKELETONS DEFINITIONS                                                 ##
###########################################################################

The definition and hierarchy of the following skeletons are available:
- OpenPose BODY_25B, BODY_25, BODY_135, COCO, MPII
- Mediapipe BLAZEPOSE
- AlphaPose HALPE_26, HALPE_68, HALPE_136, COCO_133, COCO, MPII
  (for COCO and MPII, AlphaPose must be run with the flag "--format cmu")
- DeepLabCut CUSTOM: the skeleton will be defined in Config.toml

N.B.: Not all face and hand keypoints are reported in the skeleton architecture,
since some are redundant for the orientation of some bodies.

N.B.: The corresponding OpenSim model files are provided in the "Pose2Sim/Empty project" folder.
If you wish to use any other, you will need to adjust the markerset in the .osim model file,
as well as in the scaling and IK setup files.
'''

## INIT
from anytree import Node


## AUTHORSHIP INFORMATION
__author__ = "David Pagnon"
__copyright__ = "Copyright 2021, Pose2Sim"
__credits__ = ["David Pagnon"]
__license__ = "BSD 3-Clause License"
__version__ = "0.9.4"
__maintainer__ = "David Pagnon"
__email__ = "contact@david-pagnon.com"
__status__ = "Development"


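# Illustrative example (not part of the original file): the skeletons defined below are plain
# anytree trees, so they can be traversed with anytree's iterators, e.g. to list the keypoint
# names and ids of a model such as HALPE_26 (defined further down):
#
# from anytree import PreOrderIter
# for node in PreOrderIter(HALPE_26):
#     print(node.name, node.id)
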
'''BODY_25B (full-body without hands, experimental, from OpenPose)
https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/blob/master/experimental_models/README.md'''
BODY_25B = Node("CHip", id=None, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=22, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=24),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=19, children=[
                    Node("LSmallToe", id=20),
                ]),
                Node("LHeel", id=21),
            ]),
        ]),
    ]),
    Node("Neck", id=17, children=[
        Node("Head", id=18, children=[
            Node("Nose", id=0),
        ]),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9),
            ]),
        ]),
    ]),
])


'''BODY_25 (full-body without hands, standard, from OpenPose)
https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models'''
BODY_25 = Node("CHip", id=8, children=[
    Node("RHip", id=9, children=[
        Node("RKnee", id=10, children=[
            Node("RAnkle", id=11, children=[
                Node("RBigToe", id=22, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=24),
            ]),
        ]),
    ]),
    Node("LHip", id=12, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=14, children=[
                Node("LBigToe", id=19, children=[
                    Node("LSmallToe", id=20),
                ]),
                Node("LHeel", id=21),
            ]),
        ]),
    ]),
    Node("Neck", id=1, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=2, children=[
            Node("RElbow", id=3, children=[
                Node("RWrist", id=4),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=6, children=[
                Node("LWrist", id=7),
            ]),
        ]),
    ]),
])


'''BODY_135 (full-body with hands and face, experimental, from OpenPose)
https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/blob/master/experimental_models/README.md'''
BODY_135 = Node("CHip", id=None, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=22, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=24),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=19, children=[
                    Node("LSmallToe", id=20),
                ]),
                Node("LHeel", id=21),
            ]),
        ]),
    ]),
    Node("Neck", id=17, children=[
        Node("Head", id=18, children=[
            Node("Nose", id=0),
        ]),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10, children=[
                    Node("RThumb", id=48),
                    Node("RIndex", id=51),
                    Node("RPinky", id=63),
                ]),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9, children=[
                    Node("LThumb", id=27),
                    Node("LIndex", id=30),
                    Node("LPinky", id=42),
                ]),
            ]),
        ]),
    ]),
])


'''BLAZEPOSE (full-body with simplified hand and foot, from mediapipe)
https://google.github.io/mediapipe/solutions/pose'''
BLAZEPOSE = Node("root", id=None, children=[
    Node("right_hip", id=24, children=[
        Node("right_knee", id=26, children=[
            Node("right_ankle", id=28, children=[
                Node("right_heel", id=30),
                Node("right_foot_index", id=32),
            ]),
        ]),
    ]),
    Node("left_hip", id=23, children=[
        Node("left_knee", id=25, children=[
            Node("left_ankle", id=27, children=[
                Node("left_heel", id=29),
                Node("left_foot_index", id=31),
            ]),
        ]),
    ]),
    Node("nose", id=0, children=[
        Node("right_eye", id=5),
        Node("left_eye", id=2),
    ]),
    Node("right_shoulder", id=12, children=[
        Node("right_elbow", id=14, children=[
            Node("right_wrist", id=16, children=[
                Node("right_pinky", id=18),
                Node("right_index", id=20),
                Node("right_thumb", id=22),
            ]),
        ]),
    ]),
    Node("left_shoulder", id=11, children=[
        Node("left_elbow", id=13, children=[
            Node("left_wrist", id=15, children=[
                Node("left_pinky", id=17),
                Node("left_index", id=19),
                Node("left_thumb", id=21),
            ]),
        ]),
    ]),
])


'''HALPE_26 (full-body without hands, from AlphaPose, MMPose, etc.)
https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md
https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
HALPE_26 = Node("Hip", id=19, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=21, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=25),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=20, children=[
                    Node("LSmallToe", id=22),
                ]),
                Node("LHeel", id=24),
            ]),
        ]),
    ]),
    Node("Neck", id=18, children=[
        Node("Head", id=17, children=[
            Node("Nose", id=0),
        ]),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9),
            ]),
        ]),
    ]),
])


'''HALPE_68 (full-body with hands without face, from AlphaPose, MMPose, etc.)
https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md'''
HALPE_68 = Node("Hip", id=19, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=21, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=25),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=20, children=[
                    Node("LSmallToe", id=22),
                ]),
                Node("LHeel", id=24),
            ]),
        ]),
    ]),
    Node("Neck", id=18, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10, children=[
                    Node("RThumb", id=49),
                    Node("RIndex", id=52),
                    Node("RPinky", id=64),
                ]),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9, children=[
                    Node("LThumb", id=28),
                    Node("LIndex", id=31),
                    Node("LPinky", id=43),
                ])
            ]),
        ]),
    ]),
])


'''HALPE_136 (full-body with hands and face, from AlphaPose, MMPose, etc.)
https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md'''
HALPE_136 = Node("Hip", id=19, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=21, children=[
                    Node("RSmallToe", id=23),
                ]),
                Node("RHeel", id=25),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=20, children=[
                    Node("LSmallToe", id=22),
                ]),
                Node("LHeel", id=24),
            ]),
        ]),
    ]),
    Node("Neck", id=18, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10, children=[
                    Node("RThumb", id=117),
                    Node("RIndex", id=120),
                    Node("RPinky", id=132),
                ]),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9, children=[
                    Node("LThumb", id=96),
                    Node("LIndex", id=99),
                    Node("LPinky", id=111),
                ])
            ]),
        ]),
    ]),
])


'''COCO_133 (full-body with hands and face, from AlphaPose, MMPose, etc.)
https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md
https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
COCO_133 = Node("CHip", id=None, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16, children=[
                Node("RBigToe", id=20, children=[
                    Node("RSmallToe", id=21),
                ]),
                Node("RHeel", id=22),
            ]),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15, children=[
                Node("LBigToe", id=17, children=[
                    Node("LSmallToe", id=18),
                ]),
                Node("LHeel", id=19),
            ]),
        ]),
    ]),
    Node("Neck", id=None, children=[
        Node("Nose", id=0, children=[
            Node("REye", id=2),
            Node("LEye", id=1),
        ]),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10, children=[
                    Node("RThumb", id=114),
                    Node("RIndex", id=117),
                    Node("RPinky", id=129),
                ]),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9, children=[
                    Node("LThumb", id=93),
                    Node("LIndex", id=96),
                    Node("LPinky", id=108),
                ])
            ]),
        ]),
    ]),
])


'''COCO (full-body without hands and feet, from OpenPose, AlphaPose, OpenPifPaf, YOLO-pose, MMPose, etc.)
https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models'''
COCO = Node("CHip", id=None, children=[
    Node("RHip", id=8, children=[
        Node("RKnee", id=9, children=[
            Node("RAnkle", id=10),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=12, children=[
            Node("LAnkle", id=13),
        ]),
    ]),
    Node("Neck", id=1, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=2, children=[
            Node("RElbow", id=3, children=[
                Node("RWrist", id=4),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=6, children=[
                Node("LWrist", id=7),
            ]),
        ]),
    ]),
])


'''MPII (full-body without hands and feet, from OpenPose, AlphaPose, OpenPifPaf, YOLO-pose, MMPose, etc.)
https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models'''
MPII = Node("CHip", id=14, children=[
    Node("RHip", id=8, children=[
        Node("RKnee", id=9, children=[
            Node("RAnkle", id=10),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=12, children=[
            Node("LAnkle", id=13),
        ]),
    ]),
    Node("Neck", id=1, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=2, children=[
            Node("RElbow", id=3, children=[
                Node("RWrist", id=4),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=6, children=[
                Node("LWrist", id=7),
            ]),
        ]),
    ]),
])


'''COCO_17 (full-body without hands and feet, from OpenPose, AlphaPose, OpenPifPaf, YOLO-pose, MMPose, etc.)
https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
COCO_17 = Node("CHip", id=None, children=[
    Node("RHip", id=12, children=[
        Node("RKnee", id=14, children=[
            Node("RAnkle", id=16),
        ]),
    ]),
    Node("LHip", id=11, children=[
        Node("LKnee", id=13, children=[
            Node("LAnkle", id=15),
        ]),
    ]),
    Node("Neck", id=None, children=[
        Node("Nose", id=0),
        Node("RShoulder", id=6, children=[
            Node("RElbow", id=8, children=[
                Node("RWrist", id=10),
            ]),
        ]),
        Node("LShoulder", id=5, children=[
            Node("LElbow", id=7, children=[
                Node("LWrist", id=9),
            ]),
        ]),
    ]),
])

@ -50,7 +50,7 @@ from anytree.importer import DictImporter
import logging

from Pose2Sim.common import sort_stringlist_by_last_number, bounding_boxes
from Pose2Sim.skeletons import *
from Sports2D.Utilities.skeletons import *


## AUTHORSHIP INFORMATION

@ -586,7 +586,7 @@ def synchronize_cams_all(config_dict):
# Get parameters from Config.toml
project_dir = config_dict.get('project').get('project_dir')
pose_dir = os.path.realpath(os.path.join(project_dir, 'pose'))
pose_model = config_dict.get('pose').get('pose_model')
pose_model = config_dict.get('pose').get('pose_model').upper()
multi_person = config_dict.get('project').get('multi_person')
fps = config_dict.get('project').get('frame_rate')
frame_range = config_dict.get('project').get('frame_range')

@ -756,4 +756,4 @@ def synchronize_cams_all(config_dict):
json_offset_name = ''.join(j_split)
shutil.copy(os.path.join(pose_dir, os.path.basename(j_dir), j_file), os.path.join(sync_dir, os.path.basename(j_dir), json_offset_name))

logging.info(f'Synchronized json files saved in {sync_dir}.')

@ -53,7 +53,7 @@ import logging
from Pose2Sim.common import retrieve_calib_params, computeP, weighted_triangulation, \
reprojection, euclidean_distance, sort_stringlist_by_last_number, \
min_with_single_indices, zup2yup, convert_to_c3d
from Pose2Sim.skeletons import *
from Sports2D.Utilities.skeletons import *


## AUTHORSHIP INFORMATION

@ -692,7 +692,7 @@ def triangulate_all(config_dict):
# if single trial
session_dir = session_dir if 'Config.toml' in os.listdir(session_dir) else os.getcwd()
multi_person = config_dict.get('project').get('multi_person')
pose_model = config_dict.get('pose').get('pose_model')
pose_model = config_dict.get('pose').get('pose_model').upper()
frame_range = config_dict.get('project').get('frame_range')
likelihood_threshold = config_dict.get('triangulation').get('likelihood_threshold_triangulation')
interpolation_kind = config_dict.get('triangulation').get('interpolation')