fixed batch processing

davidpagnon 2024-07-17 16:50:14 +02:00
parent e270c6d1d8
commit 2bc68d4d59
19 changed files with 232 additions and 185 deletions

Binary files changed (image previews not shown):
- Added: Content/P2S_calibration.png (15 KiB), Content/P2S_filtering.png (14 KiB), Content/synchro.jpg (49 KiB), plus five further images (11 KiB, 23 KiB, 50 KiB, 17 KiB, 91 KiB)
- Removed: six images (20 KiB, 9.1 KiB, 3.3 MiB, 18 KiB, 31 KiB, 576 KiB)
View File

@@ -52,7 +52,7 @@ output_format = 'openpose' # 'openpose', 'mmpose', 'deeplabcut', 'none' or a lis
 [synchronization]
-display_sync_plots = true # true or false (lowercase)
+display_sync_plots = false # true or false (lowercase)
 keypoints_to_consider = ['RWrist'] # 'all' if all points should be considered, for example if the participant did not perform any particular sharp movement. In this case, the capture needs to be 5-10 seconds long at least
 # ['RWrist', 'RElbow'] list of keypoint names if you want to specify the keypoints to consider.
 approx_time_maxspeed = 'auto' # 'auto' if you want to consider the whole capture (default, slower if long sequences)

View File

@@ -104,7 +104,7 @@ def determine_level(config_dir):
     len_paths = [len(root.split(os.sep)) for root,dirs,files in os.walk(config_dir) if 'Config.toml' in files]
     if len_paths == []:
-        raise FileNotFoundError('Please run Pose2Sim from a Session, Participant, or Trial directory.')
+        raise FileNotFoundError('You need a Config.toml file in each trial or root folder.')
     level = max(len_paths) - min(len_paths) + 1
     return level
@@ -172,7 +172,7 @@ def calibration(config=None):
     config_dict = config_dicts[0]
     try:
         session_dir = os.path.realpath([os.getcwd() if level==2 else os.path.join(os.getcwd(), '..')][0])
-        [os.path.join(session_dir, c) for c in os.listdir(session_dir) if 'calib' in c.lower()][0]
+        [os.path.join(session_dir, c) for c in os.listdir(session_dir) if 'calib' in c.lower() and not c.lower().endswith('.py')][0]
     except:
         session_dir = os.path.realpath(os.getcwd())
     config_dict.get("project").update({"project_dir":session_dir})
@@ -183,17 +183,17 @@ def calibration(config=None):
     # Run calibration
     calib_dir = [os.path.join(session_dir, c) for c in os.listdir(session_dir) if os.path.isdir(os.path.join(session_dir, c)) and 'calib' in c.lower()][0]
-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info("Camera calibration")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nCalibration directory: {calib_dir}")
+    logging.info(f"Calibration directory: {calib_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     start = time.time()
     calibrate_cams_all(config_dict)
     end = time.time()

-    logging.info(f'\nCalibration took {end-start:.2f} s.')
+    logging.info(f'\nCalibration took {end-start:.2f} s.\n')


 def poseEstimation(config=None):
@@ -228,17 +228,17 @@ def poseEstimation(config=None):
     frame_range = config_dict.get('project').get('frame_range')
     frames = ["all frames" if not frame_range else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info(f"Pose estimation for {seq_name}, for {frames}.")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}")
+    logging.info(f"Project directory: {project_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     rtm_estimator(config_dict)

     end = time.time()
     elapsed = end - start
-    logging.info(f'\nPose estimation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nPose estimation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def synchronization(config=None):
@@ -253,7 +253,7 @@ def synchronization(config=None):
     # Import the function
     from Pose2Sim.synchronization import synchronize_cams_all

-    # Determine the level at which the function is called (session:3, participant:2, trial:1)
+    # Determine the level at which the function is called (root:2, trial:1)
     level, config_dicts = read_config_files(config)

     if type(config)==dict:
@@ -272,17 +272,17 @@ def synchronization(config=None):
     currentDateAndTime = datetime.now()
     project_dir = os.path.realpath(config_dict.get('project').get('project_dir'))

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info("Camera synchronization")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}")
+    logging.info(f"Project directory: {project_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     synchronize_cams_all(config_dict)

     end = time.time()
     elapsed = end-start
-    logging.info(f'\nSynchronization took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nSynchronization took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def personAssociation(config=None):
@@ -297,7 +297,7 @@ def personAssociation(config=None):
     from Pose2Sim.personAssociation import track_2d_all

-    # Determine the level at which the function is called (session:3, participant:2, trial:1)
+    # Determine the level at which the function is called (root:2, trial:1)
     level, config_dicts = read_config_files(config)

     if type(config)==dict:
@@ -319,17 +319,17 @@ def personAssociation(config=None):
     frame_range = config_dict.get('project').get('frame_range')
     frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info(f"Associating persons for {seq_name}, for {frames}.")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}")
+    logging.info(f"Project directory: {project_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     track_2d_all(config_dict)

     end = time.time()
     elapsed = end-start
-    logging.info(f'\nAssociating persons took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nAssociating persons took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def triangulation(config=None):
@@ -343,7 +343,7 @@ def triangulation(config=None):
     from Pose2Sim.triangulation import triangulate_all

-    # Determine the level at which the function is called (session:3, participant:2, trial:1)
+    # Determine the level at which the function is called (root:2, trial:1)
     level, config_dicts = read_config_files(config)

     if type(config)==dict:
@@ -365,17 +365,17 @@ def triangulation(config=None):
     frame_range = config_dict.get('project').get('frame_range')
     frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info(f"Triangulation of 2D points for {seq_name}, for {frames}.")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}")
+    logging.info(f"Project directory: {project_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     triangulate_all(config_dict)

     end = time.time()
     elapsed = end-start
-    logging.info(f'\nTriangulation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nTriangulation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def filtering(config=None):
@@ -389,7 +389,7 @@ def filtering(config=None):
     from Pose2Sim.filtering import filter_all

-    # Determine the level at which the function is called (session:3, participant:2, trial:1)
+    # Determine the level at which the function is called (root:2, trial:1)
     level, config_dicts = read_config_files(config)

     if type(config)==dict:
@@ -410,14 +410,16 @@ def filtering(config=None):
     frame_range = config_dict.get('project').get('frame_range')
     frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info(f"Filtering 3D coordinates for {seq_name}, for {frames}.")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}\n")
+    logging.info(f"Project directory: {project_dir}\n")
+    logging.info("---------------------------------------------------------------------\n")

     filter_all(config_dict)

+    logging.info('\n')
+

 def markerAugmentation(config=None):
     '''
@@ -449,17 +451,17 @@ def markerAugmentation(config=None):
     frame_range = config_dict.get('project').get('frame_range')
     frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    logging.info("\n\n---------------------------------------------------------------------")
+    logging.info("\n---------------------------------------------------------------------")
     logging.info(f"Augmentation process for {seq_name}, for {frames}.")
     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    logging.info("---------------------------------------------------------------------")
-    logging.info(f"\nProject directory: {project_dir}\n")
+    logging.info(f"Project directory: {project_dir}")
+    logging.info("---------------------------------------------------------------------\n")

     augmentTRC(config_dict)

     end = time.time()
     elapsed = end-start
-    logging.info(f'\nMarker augmentation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nMarker augmentation took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def opensimProcessing(config=None):
@@ -477,7 +479,7 @@ def opensimProcessing(config=None):
     # # TODO
     # from Pose2Sim.opensimProcessing import opensim_processing_all

-    # # Determine the level at which the function is called (session:3, participant:2, trial:1)
+    # # Determine the level at which the function is called (root:2, trial:1)
     # level, config_dicts = read_config_files(config)

     # if type(config)==dict:
@@ -499,23 +501,23 @@ def opensimProcessing(config=None):
     #     frame_range = config_dict.get('project').get('frame_range')
     #     frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]

-    #     logging.info("\n\n---------------------------------------------------------------------")
+    #     logging.info("\n---------------------------------------------------------------------")
     #     # if static_file in project_dir:
     #     #     logging.info(f"Scaling model with <STATIC TRC FILE>.")
     #     # else:
     #     #     logging.info(f"Running inverse kinematics <MOTION TRC FILE>.")
     #     logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-    #     logging.info("---------------------------------------------------------------------")
-    #     logging.info(f"\nOpenSim output directory: {project_dir}")
+    #     logging.info(f"OpenSim output directory: {project_dir}")
+    #     logging.info("---------------------------------------------------------------------\n")

     #     opensim_processing_all(config_dict)

     #     end = time.time()
     #     elapsed = end-start
     #     # if static_file in project_dir:
-    #     #     logging.info(f'Model scaling took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    #     #     logging.info(f'Model scaling took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')
     #     # else:
-    #     #     logging.info(f'Inverse kinematics took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    #     #     logging.info(f'Inverse kinematics took {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')


 def runAll(config=None, do_calibration=True, do_poseEstimation=True, do_synchronization=True, do_personAssociation=True, do_triangulation=True, do_filtering=True, do_markerAugmentation=True, do_opensimProcessing=True):
@@ -524,82 +526,101 @@ def runAll(config=None, do_calibration=True, do_poseEstimation=True, do_synchronization=True, do_personAssociation=True, do_triangulation=True, do_filtering=True, do_markerAugmentation=True, do_opensimProcessing=True):
     and may even lead to worse results. Think carefully before running all.
     '''

-    # Determine the level at which the function is called (session:3, participant:2, trial:1)
-    level, config_dicts = read_config_files(config)
-
-    if type(config)==dict:
-        config_dict = config_dicts[0]
-        if config_dict.get('project').get('project_dir') == None:
-            raise ValueError('Please specify the project directory in config_dict:\n \
-                             config_dict.get("project").update({"project_dir":"<YOUR_TRIAL_DIRECTORY>"})')
-
     # Set up logging
+    level, config_dicts = read_config_files(config)
     session_dir = os.path.realpath(os.path.join(config_dicts[0].get('project').get('project_dir'), '..'))
     setup_logging(session_dir)

-    # Batch process all trials
-    for config_dict in config_dicts:
-        start = time.time()
-        currentDateAndTime = datetime.now()
-        project_dir = os.path.realpath(config_dict.get('project').get('project_dir'))
-        seq_name = os.path.basename(project_dir)
-        frame_range = config_dict.get('project').get('frame_range')
-        frames = ["all frames" if frame_range == [] else f"frames {frame_range[0]} to {frame_range[1]}"][0]
-
-        logging.info("\n\n=====================================================================")
-        logging.info(f"RUNNING ALL FOR {seq_name}, FOR {frames}.")
-        logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
-        logging.info("=====================================================================")
-        logging.info(f"\nProject directory: {project_dir}\n")
+    currentDateAndTime = datetime.now()
+    start = time.time()
+
+    logging.info("\n\n=====================================================================")
+    logging.info(f"RUNNING ALL.")
+    logging.info(f"On {currentDateAndTime.strftime('%A %d. %B %Y, %H:%M:%S')}")
+    logging.info(f"Project directory: {session_dir}\n")
+    logging.info("=====================================================================")

     if do_calibration:
-        logging.info('\nRUNNING CALIBRATION...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running calibration...')
+        logging.info("=====================================================================")
         calibration(config)
     else:
-        logging.info('\nSKIPPING CALIBRATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping calibration.')
+        logging.info("=====================================================================")

     if do_poseEstimation:
-        logging.info('\nRUNNING POSE ESTIMATION...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running pose estimation...')
+        logging.info("=====================================================================")
         poseEstimation(config)
     else:
-        logging.info('\nSKIPPING POSE ESTIMATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping pose estimation.')
+        logging.info("=====================================================================")

     if do_synchronization:
-        logging.info('\nRUNNING SYNCHRONIZATION...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running synchronization...')
+        logging.info("=====================================================================")
         synchronization(config)
     else:
-        logging.info('\nSKIPPING SYNCHRONIZATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping synchronization.')
+        logging.info("=====================================================================")

     if do_personAssociation:
-        logging.info('\nRUNNING PERSON ASSOCIATION...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running person association...')
+        logging.info("=====================================================================")
         personAssociation(config)
     else:
-        logging.info('\nSKIPPING PERSON ASSOCIATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping person association.')
+        logging.info("=====================================================================")

     if do_triangulation:
-        logging.info('\nRUNNING TRIANGULATION...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running triangulation...')
+        logging.info("=====================================================================")
         triangulation(config)
     else:
-        logging.info('\nSKIPPING TRIANGULATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping triangulation.')
+        logging.info("=====================================================================")

     if do_filtering:
-        logging.info('\nRUNNING FILTERING...')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running filtering...')
+        logging.info("=====================================================================")
         filtering(config)
     else:
-        logging.info('\nSKIPPING FILTERING.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping filtering.')
+        logging.info("=====================================================================")

     if do_markerAugmentation:
-        logging.info('\nRUNNING MARKER AUGMENTATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Running marker augmentation.')
+        logging.info("=====================================================================")
         markerAugmentation(config)
     else:
-        logging.info('\nSKIPPING MARKER AUGMENTATION.')
+        logging.info("\n\n=====================================================================")
+        logging.info('Skipping marker augmentation.')
+        logging.info("\n\n=====================================================================")

     # if do_opensimProcessing:
-    #     logging.info('\nRUNNING OPENSIM PROCESSING.')
+    #     logging.info("\n\n=====================================================================")
+    #     logging.info('Running opensim processing.')
+    #     logging.info("=====================================================================")
     #     opensimProcessing(config)
     # else:
-    #     logging.info('\nSKIPPING OPENSIM PROCESSING.')
+    #     logging.info("\n\n=====================================================================")
+    #     logging.info('Skipping opensim processing.')
+    #     logging.info("=====================================================================")

     end = time.time()
     elapsed = end-start
-    logging.info(f'\nRUNNING ALL FUNCTIONS TOOK {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.')
+    logging.info(f'\nRUNNING ALL FUNCTIONS TOOK {time.strftime("%Hh%Mm%Ss", time.gmtime(elapsed))}.\n')
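A hedged usage sketch of the reworked `runAll` (the demo folder names are those shipped with Pose2Sim; setting `project_dir` and `overwrite_pose` on the dict mirrors what the updated tests below do):

``` python
# Sketch: full pipeline on a batch session, then on a single trial with a
# stage skipped. Assumes the Pose2Sim demo folders are on disk.
import os
import toml
from Pose2Sim import Pose2Sim

# Batch: run from the root folder; each trial's Config.toml overrides the root one.
os.chdir('Demo_Batch')
Pose2Sim.runAll()

# Single trial: pass a config dict and toggle stages via the do_* flags.
os.chdir('../Demo_SinglePerson')
config_dict = toml.load('Config.toml')
config_dict.get('project').update({'project_dir': '.'})
config_dict.get('pose').update({'overwrite_pose': False})  # reuse existing 2D poses
Pose2Sim.runAll(config_dict, do_synchronization=False)
```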

View File

@@ -66,7 +66,7 @@ class TestWorkflow(unittest.TestCase):
     @patch('builtins.input', return_value='no') # Mock input() to return 'no'
     def test_workflow(self, mock_input):
         '''
-        SINGLE-PERSON and MULTI-PERSON:
+        SINGLE-PERSON, MULTI-PERSON, BATCH PROCESSING:
         - calibration
         - pose estimation
         - synchronization
@@ -74,6 +74,7 @@ class TestWorkflow(unittest.TestCase):
         - triangulation
         - filtering
         - marker augmentation
+        - run all

         N.B.: Calibration from scene dimensions is not tested, as it requires the
         user to click points on the image.
@@ -86,9 +87,9 @@ class TestWorkflow(unittest.TestCase):
         '''

-        ##################
+        ###################
         # SINGLE-PERSON #
-        ##################
+        ###################

         project_dir = '../Demo_SinglePerson'
         config_dict = toml.load(os.path.join(project_dir, 'Config.toml'))
@@ -110,10 +111,13 @@ class TestWorkflow(unittest.TestCase):
         Pose2Sim.markerAugmentation(config_dict)
         # Pose2Sim.kinematics(config_dict)

+        config_dict.get("pose").update({"overwrite_pose":False})
+        Pose2Sim.runAll(config_dict)
+
-        ##################
+        ####################
         # MULTI-PERSON #
-        ##################
+        ####################

         project_dir = '../Demo_MultiPerson'
         config_dict = toml.load(os.path.join(project_dir, 'Config.toml'))
@@ -134,6 +138,19 @@ class TestWorkflow(unittest.TestCase):
         Pose2Sim.markerAugmentation(config_dict)
         # Pose2Sim.kinematics(config_dict)

+        config_dict.get("pose").update({"overwrite_pose":False})
+        Pose2Sim.runAll(config_dict)
+
+
+        ####################
+        # BATCH PROCESSING #
+        ####################
+
+        project_dir = '../Demo_Batch'
+        os.chdir(project_dir)
+        Pose2Sim.runAll()
+

 if __name__ == '__main__':
     unittest.main()
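To exercise the new batch path in isolation, the workflow test can be invoked directly; a sketch, assuming the module is importable as `tests` from the working directory:

``` python
# Sketch: run only the workflow test (assumes tests.py and the demo
# folders are reachable from the current directory).
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName('tests.TestWorkflow.test_workflow')
unittest.TextTestRunner(verbosity=2).run(suite)
```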

View File

@@ -266,7 +266,7 @@ def synchronize_cams_all(config_dict):
     # Warning if multi_person
     if multi_person:
-        logging.warning('\nYou set your project as a multi-person one: make sure you set `approx_time_maxspeed` and `time_range_around_maxspeed` at times where one single persons are in the scene, you you may get inaccurate results.')
+        logging.warning('\nYou set your project as a multi-person one: make sure you set `approx_time_maxspeed` and `time_range_around_maxspeed` at times where one single person is in the scene, or you may get inaccurate results.')
         do_synchro = input('Do you want to continue? (y/n)')
         if do_synchro.lower() not in ["y","yes"]:
             logging.warning('Synchronization cancelled.')

README.md (151 changed lines)

View File

@@ -18,7 +18,7 @@
 > **_News_: Version 0.9:**\
 > **Pose estimation with RTMPose is now included in Pose2Sim!**\
-> **Other recently added features**: Automatic camera synchronization, multi-person analysis, Blender visualization, Marker augmentation.
+> **Other recently added features**: Automatic camera synchronization, multi-person analysis, Blender visualization, Marker augmentation, Batch processing.
 <!-- Incidentally, right/left limb swapping is now handled, which is useful if few cameras are used;\
 and lens distortions are better taken into account.\ -->
 > To upgrade, type `pip install pose2sim --upgrade` (note that you need Python 3.9 or higher).
@@ -65,9 +65,7 @@ If you can only use one single camera and don't mind losing some accuracy, pleas
    5. [Demonstration Part-4 (optional): Try multi-person analysis](#demonstration-part-4-optional-try-multi-person-analysis)
    6. [Demonstration Part-5 (optional): Try batch processing](#demonstration-part-5-optional-try-batch-processing)
 2. [Use on your own data](#use-on-your-own-data)
-   1. [Setting your project up](#setting-your-project-up)
-      1. [Retrieve the folder structure](#retrieve-the-folder-structure)
-      2. [Single Trial vs. Batch processing](#single-trial-vs-batch-processing)
+   1. [Setting up your project](#setting-up-your-project)
    2. [2D pose estimation](#2d-pose-estimation)
       1. [With RTMPose (default)](#with-rtmpose-default)
       2. [With MMPose (coming soon)](#with-mmpose-coming-soon)
@@ -167,12 +165,19 @@ Pose2Sim.markerAugmentation()
 ```
 3D results are stored as .trc files in each trial folder in the `pose-3d` directory.

-*N.B.:* Default parameters have been provided in [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) but can be edited.
 </br>

-__*GO FURTHER:*__\
-Try the calibration tool by changing `calibration_type` to `calculate` instead of `convert` in [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) (more info [there](#calculate-from-scratch)).
+**Note:**
+- Default parameters have been provided in [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) but can be edited.
+- You can run all stages at once:
+``` python
+from Pose2Sim import Pose2Sim
+Pose2Sim.runAll(do_calibration=True, do_poseEstimation=True, do_synchronization=True, do_personAssociation=True, do_triangulation=True, do_filtering=True, do_markerAugmentation=True, do_opensimProcessing=True)
+```
+- Try the calibration tool by changing `calibration_type` to `calculate` instead of `convert` in [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) (more info [there](#calculate-from-scratch)).
+</br>

 <br/>
@@ -225,14 +230,13 @@ https://github.com/perfanalytics/pose2sim/assets/54667644/5d7c858f-7e46-40c1-928
 Go to the Multi-participant Demo folder: `cd <path>\Pose2Sim\Demo_MultiPerson`. \
 Type `ipython`, and try the following code:
 ``` python
 from Pose2Sim import Pose2Sim
-Pose2Sim.personAssociation()
-Pose2Sim.triangulation()
-Pose2Sim.filtering()
-Pose2Sim.markerAugmentation()
+Pose2Sim.runAll(do_synchronization=False) # Synchronization possible, but tricky with multiple persons
 ```
 One .trc file per participant will be generated and stored in the `pose-3d` directory.\
 You can then run OpenSim scaling and inverse kinematics for each resulting .trc file as in [Demonstration Part-2](#demonstration-part-2-obtain-3d-joint-angles-with-opensim).\
 You can also visualize your results with Blender as in [Demonstration Part-3](#demonstration-part-3-optional-visualize-your-results-with-blender).
@@ -241,67 +245,48 @@ You can also visualize your results with Blender as in [Demonstration Part-3](#d
 Set *[triangulation]* `reorder_trc = true` if you need to run OpenSim and to match the generated .trc files with the static trials.\
 Make sure that the order of *[markerAugmentation]* `participant_height` and `participant_mass` matches the order of the static trials.

-*N.B.:* Note that in the case of our floating ghost participant, marker augmentation may worsen the results. See [Marker augmentation](#marker-augmentation) for instruction on when and when not to use it.
+<br/>
+
+## Demonstration Part-5 (optional): Try batch processing
+
+> _**Run numerous analyses with different parameters and minimal friction.**_
+
+Go to the Batch Demo folder: `cd <path>\Pose2Sim\Demo_Batch`. \
+Type `ipython`, and try the following code:
+``` python
+from Pose2Sim import Pose2Sim
+Pose2Sim.runAll()
+```
+
+The batch processing structure requires a `Config.toml` file in each of the trial directories. Global parameters are given in the `Config.toml` file of the `BatchSession` folder. They can be altered for specific `Trials` by uncommenting keys and their values in their respective `Config.toml` files.
+
+Run Pose2Sim from the `BatchSession` folder if you want to batch process the whole session, or from a `Trial` folder if you want to process only a specific trial.
+
+| SingleTrial | BatchSession |
+|-----------------|--------------------|
+| <pre><b>SingleTrial</b> <br>├── <b>calibration</b><br>├── <b>videos</b><br>└── <i><b>Config.toml</i></b></pre> | <pre><b>BatchSession</b> <br>├── <b>calibration</b> <br>├── Trial_1 <br>│   ├── <b>videos</b> <br>│   └── <i><b>Config.toml</i></b><br>├── Trial_2 <br>│   ├── <b>videos</b> <br>│   └── <i><b>Config.toml</i></b><br>└── <i><b>Config.toml</i></b></pre> |
+
+For example, try uncommenting `[project]` and set `frame_range = [10,99]`, or uncomment `[pose]` and set `mode = 'lightweight'` in the `Config.toml` file of `Trial_1`.

 </br></br>

 # Use on your own data

-> _**Deeper explanations and instructions are given below.**_ \
-> N.B.: If a step is not relevant for your use case (synchronization, person association, marker augmentation...), you can skip it.
+> **N.B.: If a step is not relevant for your use case (synchronization, person association, marker augmentation...), you can skip it.**

-</br>
+## Setting up your project

-## Setting your project up
 > _**Get ready for automatic batch processing.**_

-### Retrieve the folder structure
 1. Open a terminal, enter `pip show pose2sim`, report package location. \
 Copy this path and do `cd <path>\pose2sim`.
-2. Copy the *Demo_SinglePerson* or *Demo_MultiPerson* folder wherever you like, and rename it as you wish.
+2. Copy-paste the *Demo_SinglePerson*, *Demo_MultiPerson*, or *Demo_Batch* folder wherever you like, and rename it as you wish.
 3. The rest of the tutorial will explain to you how to populate the `Calibration` and `videos` folders, edit the [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) files, and run each Pose2Sim step.

 </br>

-### Single Trial vs. Batch processing
-
-> _**Copy and edit either the [Demo_SinglePerson](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson) folder or the [S00_Demo_BatchSession](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/S00_Demo_BatchSession) one.**_
-> - Single trial is more straightforward to set up for isolated experiments
-> - Batch processing allows you to run numerous analyses with different parameters and minimal friction
-
-#### Single trial
-The single trial folder should contain a `Config.toml` file, a `calibration` folder, and a `pose` folder, the latter including one subfolder for each camera.
-<pre>
-SingleTrial \
-├── calibration \
-├── pose \
-└── <i><b>Config.toml</i></b>
-</pre>
-
-#### Batch processing
-For batch processing, each session directory should follow a `Session -> Participant -> Trial` structure, with a `Config.toml` file in each of the directory levels.
-<pre>
-Session_s1 \ <i><b>Config.toml</i></b>
-├── Calibration \
-└── Participant_p1 \ <i><b>Config.toml</i></b>
-    └── Trial_t1 \ <i><b>Config.toml</i></b>
-        └── pose \
-</pre>
-
-Run Pose2Sim from the `Session` folder if you want to batch process the whole session, from the `Participant` folder if you want to batch process all the trials of a participant, or from the `Trial` folder if you want to process a single trial. There should be one `Calibration` folder per session.
-
-Global parameters are given in the `Config.toml` file of the `Session` folder, and can be altered for specific `Participants` or `Trials` by uncommenting keys and their values in their respective Config.toml files.\
-Try uncommenting `[project]` and set `frame_range = [10,300]` for a Participant for example, or uncomment `[filtering.butterworth]` and set `cut_off_frequency = 10` for a Trial.
-
-</br>
-
 ## 2D pose estimation
 > _**Estimate 2D pose from images with RTMPose or another pose estimation solution.**_
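Returning to the batch demo added in the hunk above: instead of uncommenting keys in a trial's `Config.toml`, the same override can be done programmatically. A hedged sketch (folder names follow the demo layout; `frame_range` and `mode` are the keys the demo text suggests changing):

``` python
# Sketch: process one trial of Demo_Batch with trial-specific overrides.
import os
import toml
from Pose2Sim import Pose2Sim

trial_dir = os.path.join('Demo_Batch', 'Trial_1')
config_dict = toml.load(os.path.join('Demo_Batch', 'Config.toml'))  # global parameters
config_dict.get('project').update({'project_dir': trial_dir,
                                   'frame_range': [10, 99]})        # per-trial override
config_dict.get('pose').update({'mode': 'lightweight'})
Pose2Sim.runAll(config_dict)
```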
@@ -318,6 +303,10 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.poseEstimation()
 ```

+<img src="Content/P2S_poseestimation.png" width="760">
+
+</br>
+
 *N.B.:* The `GPU` will be used with ONNX backend if a valid CUDA installation is found (or MPS with MacOS), otherwise the `CPU` will be used with OpenVINO backend.\
 *N.B.:* Pose estimation can be run in `lightweight`, `balanced`, or `performance` mode.\
 *N.B.:* Pose estimation can be dramatically sped up by increasing the value of `det_frequency`. In that case, the detection is only done every `det_frequency` frames, and bounding boxes are tracked in between (keypoint detection is still performed on all frames).\
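As an illustration of the `det_frequency` speed-up just described, a hedged sketch (dict-based overrides as in the demos; `50` is an arbitrary example value):

``` python
# Sketch: detect persons only every 50th frame; boxes are tracked in between,
# while keypoints are still estimated on every frame.
import toml
from Pose2Sim import Pose2Sim

config_dict = toml.load('Config.toml')                   # run from a trial folder
config_dict.get('project').update({'project_dir': '.'})
config_dict.get('pose').update({'mode': 'lightweight',   # or 'balanced', 'performance'
                                'det_frequency': 50})
Pose2Sim.poseEstimation(config_dict)
```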
@@ -407,8 +396,12 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.calibration()
 ```

-Output:\
-<img src="Content/Calib2D.png" width="760">
+<img src="Content/P2S_calibration.png" width="760">
+
+</br>
+
+Output file:

 <img src="Content/CalibFile.png" width="760">
@@ -508,10 +501,19 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.synchronization()
 ```

+<img src="Content/P2S_synchronization.png" width="760">
+
+</br>
+
 For each camera, this computes mean vertical speed for the chosen keypoints, and finds the time offset for which their correlation is highest.\
 All keypoints can be taken into account, or a subset of them. The user can also specify a time for each camera when only one participant is in the scene, preferably performing a clear vertical motion.

-*N.B.:* Works best when only one participant is in the scene, at a roughly equal distance from all cameras and when the capture is at least 5-10 seconds long.
+<img src="Content/synchro.jpg" width="760">
+
+*N.B.:* Works best when:
+- only one participant is in the scene (set `approx_time_maxspeed` and `time_range_around_maxspeed` accordingly)
+- the participant is at a roughly equal distance from all cameras
+- the capture is at least 5 seconds long

 *N.B.:* Alternatively, use a flashlight, a clap, or a clear event to synchronize cameras. GoPro cameras can also be synchronized with a timecode, by GPS (outdoors) or with their app (slightly less reliable).
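A hedged sketch of driving these synchronization options from a config dict (the keys appear in the demo Config.toml hunk at the top of this commit; the per-camera times are made-up examples):

``` python
# Sketch: synchronize around a sharp right-wrist movement, with plots shown.
import toml
from Pose2Sim import Pose2Sim

config_dict = toml.load('Config.toml')                   # run from a trial folder
config_dict.get('project').update({'project_dir': '.'})
config_dict.get('synchronization').update({
    'keypoints_to_consider': ['RWrist'],  # or 'all'
    'approx_time_maxspeed': 'auto',       # or one approximate time (s) per camera, e.g. [1.0, 1.1, 0.9, 1.0]
    'display_sync_plots': True,
})
Pose2Sim.synchronization(config_dict)
```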
@@ -521,8 +523,9 @@ All keypoints can be taken into account, or a subset of them. The user can also
 ### Associate persons across cameras
 > _**If `multi_person` is set to `false`, the algorithm chooses the person for whom the reprojection error is smallest.\
-If `multi_person` is set to `true`, it associates across views the people for whom the distances between epipolar lines are the smallest. People are then associated across frames according to their displacement speed.**_ \
-***N.B.:** Skip this step if only one person is in the field of view.*
+If `multi_person` is set to `true`, it associates across views the people for whom the distances between epipolar lines are the smallest. People are then associated across frames according to their displacement speed.**_
+
+> ***N.B.:** Skip this step if only one person is in the field of view.*

 Open an Anaconda prompt or a terminal in a `Session`, `Participant`, or `Trial` folder.\
 Type `ipython`.
@@ -531,10 +534,11 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.personAssociation()
 ```

-Check printed output. If results are not satisfying, try and release the constraints in the [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/S00_Demo_Session/Config.toml) file.
+<img src="Content/P2S_personassociation.png" width="760">

-Output:\
-<img src="Content/Track2D.png" width="760">
+</br>
+
+Check printed output. If results are not satisfying, try and release the constraints in the [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/S00_Demo_Session/Config.toml) file.

 </br>
@@ -551,12 +555,13 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.triangulation()
 ```

+<img src="Content/P2S_triangulation.png" width="760">
+
+</br>
+
 Check printed output, and visualize your trc in OpenSim: `File -> Preview experimental data`.\
 If your triangulation is not satisfying, try and release the constraints in the `Config.toml` file.

-Output:\
-<img src="Content/Triangulate3D.png" width="760">

 </br>
### Filtering 3D coordinates ### Filtering 3D coordinates
@@ -571,13 +576,15 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.filtering()
 ```

+<img src="Content/P2S_filtering.png" width="760">
+
+</br>
+
 Check your filtration with the displayed figures, and visualize your .trc file in OpenSim. If your filtering is not satisfying, try and change the parameters in the `Config.toml` file.

 Output:\
 <img src="Content/FilterPlot.png" width="760">
-<img src="Content/Filter3D.png" width="760">

 </br>

 ### Marker Augmentation
@@ -604,6 +611,8 @@ from Pose2Sim import Pose2Sim
 Pose2Sim.markerAugmentation()
 ```

+<img src="Content/P2S_markeraugmentation.png" width="760">
+
 </br>

 ## OpenSim kinematics