multi-person framework works with single person

davidpagnon 2024-02-26 18:13:39 +01:00
parent 6220c1baa1
commit 05ffe7f36f
7 changed files with 189 additions and 179 deletions

View File

@@ -421,7 +421,7 @@ def filtering(config=None):
logging.info("\n\n---------------------------------------------------------------------")
logging.info(f"Filtering 3D coordinates for {seq_name}, for {frames}.")
logging.info("---------------------------------------------------------------------")
logging.info(f"\nProject directory: {project_dir}")
logging.info(f"\nProject directory: {project_dir}\n")
filter_all(config_dict)
@@ -458,12 +458,12 @@ def markerAugmentation(config=None):
logging.info("\n\n---------------------------------------------------------------------")
logging.info(f"Augmentation process for {seq_name}, for {frames}.")
logging.info("---------------------------------------------------------------------")
logging.info(f"\nProject directory: {project_dir}")
logging.info(f"\nProject directory: {project_dir}\n")
augmentTRC(config_dict)
end = time.time()
logging.info(f'Augmentation took {end - start:.2f} s.')
logging.info(f'\nAugmentation took {end - start:.2f} s.')
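These wrappers in Pose2Sim.py only gain trailing newlines in their log messages; the pattern they follow is a log banner, the step call, then the elapsed time. A hedged sketch of that pattern (the wrapper name run_step is illustrative, not part of the Pose2Sim API):

import time
import logging

def run_step(step_fun, config_dict, step_label):
    # Illustrative only: banner, run the Pose2Sim step, report elapsed time
    start = time.time()
    logging.info('\n\n---------------------------------------------------------------------')
    logging.info(step_label)
    logging.info('---------------------------------------------------------------------')
    step_fun(config_dict)
    end = time.time()
    logging.info(f'\n{step_label} took {end - start:.2f} s.')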

View File

@@ -287,7 +287,7 @@ def reproj_from_trc_calib_func(**args):
# Create camera folders
reproj_dir = os.path.realpath(output_file_root)
cam_dirs = [os.path.join(reproj_dir, f'cam_{cam+1:02d}_json') for cam in range(len(P_all))]
cam_dirs = [os.path.join(reproj_dir, f'cam{cam+1:02d}_json') for cam in range(len(P_all))]
if not os.path.exists(reproj_dir): os.mkdir(reproj_dir)
try:
[os.mkdir(cam_dir) for cam_dir in cam_dirs]
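The change above only renames the per-camera output folders from cam_01_json to cam01_json. A simplified sketch of how those folders are derived from the number of projection matrices (assuming, as in the surrounding code, that P_all is the list of camera projection matrices and output_file_root the output root; os.makedirs with exist_ok replaces the original try/except):

import os

def make_cam_json_dirs(output_file_root, P_all):
    # One '<reproj_dir>/camNN_json' folder per camera, numbered from 01
    reproj_dir = os.path.realpath(output_file_root)
    cam_dirs = [os.path.join(reproj_dir, f'cam{cam+1:02d}_json') for cam in range(len(P_all))]
    os.makedirs(reproj_dir, exist_ok=True)
    for cam_dir in cam_dirs:
        os.makedirs(cam_dir, exist_ok=True)
    return cam_dirs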

View File

@@ -24,6 +24,7 @@
## INIT
import os
import glob
import fnmatch
import numpy as np
import pandas as pd
@@ -426,7 +427,7 @@ def recap_filter3d(config, trc_path):
'median': f'--> Filter type: Median. Kernel size: {median_filter_kernel_size}'
}
logging.info(filter_mapping_recap[filter_type])
logging.info(f'Filtered 3D coordinates are stored at {trc_path}.')
logging.info(f'Filtered 3D coordinates are stored at {trc_path}.\n')
def filter_all(config):
@@ -444,6 +445,7 @@ def filter_all(config):
# Read config
project_dir = config.get('project').get('project_dir')
single_person = config.get('project').get('single_person')
try:
pose_tracked_dir = os.path.join(project_dir, 'pose-associated')
os.listdir(pose_tracked_dir)
@@ -451,11 +453,11 @@ def filter_all(config):
except:
pose_dir = os.path.join(project_dir, 'pose')
frame_range = config.get('project').get('frame_range')
seq_name = os.path.basename(os.path.realpath(project_dir))
pose3d_dir = os.path.realpath(os.path.join(project_dir, 'pose-3d'))
display_figures = config.get('filtering').get('display_figures')
filter_type = config.get('filtering').get('type')
seq_name = os.path.basename(os.path.realpath(project_dir))
# Frames range
pose_listdirs_names = next(os.walk(pose_dir))[1]
json_dirs_names = [k for k in pose_listdirs_names if 'json' in k]
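For context, filter_all first looks for person-associated 2D poses in pose-associated and falls back to pose if that folder does not exist. A hedged, standalone sketch of that lookup with a simplified helper name:

import os

def find_pose_dir(project_dir):
    # Prefer person-associated 2D poses; fall back to the raw pose estimations
    tracked_dir = os.path.join(project_dir, 'pose-associated')
    return tracked_dir if os.path.isdir(tracked_dir) else os.path.join(project_dir, 'pose')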
@@ -463,35 +465,35 @@ def filter_all(config):
f_range = [[0,min([len(j) for j in json_files_names])] if frame_range==[] else frame_range][0]
# Trc paths
trc_f_in = f'{seq_name}_{f_range[0]}-{f_range[1]}.trc'
trc_f_out = f'{seq_name}_filt_{filter_type}_{f_range[0]}-{f_range[1]}.trc'
trc_path_in = os.path.join(pose3d_dir, trc_f_in)
trc_path_out = os.path.join(pose3d_dir, trc_f_out)
trc_path_in = [file for file in glob.glob(os.path.join(pose3d_dir, '*.trc')) if 'filt' not in file]
trc_f_out = [f'{os.path.basename(t).split(".")[0]}_filt_{filter_type}.trc' for t in trc_path_in]
trc_path_out = [os.path.join(pose3d_dir, t) for t in trc_f_out]
# Read trc header
with open(trc_path_in, 'r') as trc_file:
header = [next(trc_file) for line in range(5)]
for t_in, t_out in zip(trc_path_in, trc_path_out):
# Read trc header
with open(t_in, 'r') as trc_file:
header = [next(trc_file) for line in range(5)]
# Read trc coordinates values
trc_df = pd.read_csv(trc_path_in, sep="\t", skiprows=4)
frames_col, time_col = trc_df.iloc[:,0], trc_df.iloc[:,1]
Q_coord = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
# Read trc coordinates values
trc_df = pd.read_csv(t_in, sep="\t", skiprows=4)
frames_col, time_col = trc_df.iloc[:,0], trc_df.iloc[:,1]
Q_coord = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
# Filter coordinates
Q_filt = Q_coord.apply(filter1d, axis=0, args = [config, filter_type])
# Filter coordinates
Q_filt = Q_coord.apply(filter1d, axis=0, args = [config, filter_type])
# Display figures
if display_figures:
# Retrieve keypoints
keypoints_names = pd.read_csv(trc_path_in, sep="\t", skiprows=3, nrows=0).columns[2::3].to_numpy()
display_figures_fun(Q_coord, Q_filt, time_col, keypoints_names)
# Display figures
if display_figures:
# Retrieve keypoints
keypoints_names = pd.read_csv(t_in, sep="\t", skiprows=3, nrows=0).columns[2::3].to_numpy()
display_figures_fun(Q_coord, Q_filt, time_col, keypoints_names)
# Reconstruct trc file with filtered coordinates
with open(trc_path_out, 'w') as trc_o:
[trc_o.write(line) for line in header]
Q_filt.insert(0, 'Frame#', frames_col)
Q_filt.insert(1, 'Time', time_col)
Q_filt.to_csv(trc_o, sep='\t', index=False, header=None, lineterminator='\n')
# Reconstruct trc file with filtered coordinates
with open(t_out, 'w') as trc_o:
[trc_o.write(line) for line in header]
Q_filt.insert(0, 'Frame#', frames_col)
Q_filt.insert(1, 'Time', time_col)
Q_filt.to_csv(trc_o, sep='\t', index=False, header=None, lineterminator='\n')
# Recap
recap_filter3d(config, trc_path_out)
# Recap
recap_filter3d(config, t_out)
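This is the key change for multi-person support: instead of building a single TRC file name from the sequence name and frame range, filter_all now globs every unfiltered .trc file in pose-3d (one per participant) and filters each in turn, appending _filt_<type> to the output name. A condensed sketch of that per-file loop, assuming the module's per-column helper filter1d and the config layout used above (the wrapper name filter_all_trc is illustrative):

import os
import glob
import pandas as pd

def filter_all_trc(pose3d_dir, config, filter_type, filter1d):
    # One input/output pair per participant; files already filtered are skipped
    trc_in = [f for f in glob.glob(os.path.join(pose3d_dir, '*.trc')) if 'filt' not in f]
    trc_out = [os.path.join(pose3d_dir, f'{os.path.splitext(os.path.basename(t))[0]}_filt_{filter_type}.trc')
               for t in trc_in]
    for t_in, t_out in zip(trc_in, trc_out):
        with open(t_in, 'r') as f:
            header = [next(f) for _ in range(5)]               # TRC files start with a 5-line header
        trc_df = pd.read_csv(t_in, sep='\t', skiprows=4)
        frames_col, time_col = trc_df.iloc[:, 0], trc_df.iloc[:, 1]
        Q_coord = trc_df.drop(trc_df.columns[[0, 1]], axis=1)
        Q_filt = Q_coord.apply(filter1d, axis=0, args=[config, filter_type])
        with open(t_out, 'w') as f:
            f.writelines(header)
            Q_filt.insert(0, 'Frame#', frames_col)
            Q_filt.insert(1, 'Time', time_col)
            Q_filt.to_csv(f, sep='\t', index=False, header=None, lineterminator='\n')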

View File

@@ -70,9 +70,12 @@ def augmentTRC(config_dict):
pathOutputTRCFile = os.path.realpath(os.path.join(project_dir, 'pose-3d'))
pose_model = config_dict.get('pose').get('pose_model')
subject_height = config_dict.get('markerAugmentation').get('participant_height')
if subject_height is None or subject_height == 0:
if subject_height is None or subject_height == 0 or subject_height==0:
raise ValueError("Subject height is not set or invalid in the config file.")
subject_mass = config_dict.get('markerAugmentation').get('participant_mass')
if not type(subject_height) == list:
subject_height = [subject_height]
subject_mass = [subject_mass]
augmenterDir = os.path.join(session_dir, '..', '..', 'MarkerAugmenter')
augmenterModelName = 'LSTM'
augmenter_model = 'v0.3'
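Participant height and mass can now be given either as a scalar (single person) or as a list (one entry per person); scalars are wrapped into one-element lists so the rest of the code can always index them by participant number. A small sketch of that normalization (the helper name is illustrative):

def as_person_list(value):
    # Accept a scalar (single person) or a list (one entry per person)
    return value if isinstance(value, list) else [value]

subject_height = as_person_list(1.72)           # -> [1.72]
subject_mass = as_person_list([68.0, 74.5])     # -> [68.0, 74.5]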
@@ -82,151 +85,154 @@ def augmentTRC(config_dict):
raise ValueError('Marker augmentation is only supported with OpenPose BODY_25 and BODY_25B models.')
# Apply all trc files
trc_files = [f for f in glob.glob(os.path.join(pathInputTRCFile, '*.trc')) if '_LSTM' not in f]
for pathInputTRCFile in trc_files:
trc_files = [f for f in glob.glob(os.path.join(pathInputTRCFile, '*.trc')) if 'filt' in f and '_LSTM' not in f]
for p, pathInputTRCFile in enumerate(trc_files):
pathOutputTRCFile = os.path.splitext(pathInputTRCFile)[0] + '_LSTM.trc'
# This is by default - might need to be adjusted in the future.
featureHeight = True
featureWeight = True
# Augmenter types
if augmenter_model == 'v0.3':
# Lower body
augmenterModelType_lower = '{}_lower'.format(augmenter_model)
from Pose2Sim.MarkerAugmenter.utils import getOpenPoseMarkers_lowerExtremity2
feature_markers_lower, response_markers_lower = getOpenPoseMarkers_lowerExtremity2()
# Upper body
augmenterModelType_upper = '{}_upper'.format(augmenter_model)
from Pose2Sim.MarkerAugmenter.utils import getMarkers_upperExtremity_noPelvis2
feature_markers_upper, response_markers_upper = getMarkers_upperExtremity_noPelvis2()
augmenterModelType_all = [augmenterModelType_lower, augmenterModelType_upper]
feature_markers_all = [feature_markers_lower, feature_markers_upper]
response_markers_all = [response_markers_lower, response_markers_upper]
else:
raise ValueError('Augmenter models other than 0.3 are not supported.')
logging.info('Using augmenter model: {}'.format(augmenter_model))
# %% Process data.
# Import TRC file
trc_file = utilsDataman.TRCFile(pathInputTRCFile)
# Loop over augmenter types to handle separate augmenters for lower and
# upper bodies.
outputs_all = {}
n_response_markers_all = 0
for idx_augm, augmenterModelType in enumerate(augmenterModelType_all):
outputs_all[idx_augm] = {}
feature_markers = feature_markers_all[idx_augm]
response_markers = response_markers_all[idx_augm]
# This is by default - might need to be adjusted in the future.
featureHeight = True
featureWeight = True
augmenterModelDir = os.path.join(augmenterDir, augmenterModelName,
augmenterModelType)
# Augmenter types
if augmenter_model == 'v0.3':
# Lower body
augmenterModelType_lower = '{}_lower'.format(augmenter_model)
from Pose2Sim.MarkerAugmenter.utils import getOpenPoseMarkers_lowerExtremity2
feature_markers_lower, response_markers_lower = getOpenPoseMarkers_lowerExtremity2()
# Upper body
augmenterModelType_upper = '{}_upper'.format(augmenter_model)
from Pose2Sim.MarkerAugmenter.utils import getMarkers_upperExtremity_noPelvis2
feature_markers_upper, response_markers_upper = getMarkers_upperExtremity_noPelvis2()
augmenterModelType_all = [augmenterModelType_lower, augmenterModelType_upper]
feature_markers_all = [feature_markers_lower, feature_markers_upper]
response_markers_all = [response_markers_lower, response_markers_upper]
else:
raise ValueError('Augmenter models other than 0.3 are not supported.')
logging.info('Using Stanford augmenter model: {}'.format(augmenter_model))
# %% Pre-process inputs.
# Step 1: import .trc file with OpenPose marker trajectories.
trc_data = TRC2numpy(pathInputTRCFile, feature_markers)
# %% Process data.
# Import TRC file
try:
trc_file = utilsDataman.TRCFile(pathInputTRCFile)
except:
raise ValueError('Cannot read TRC file. You may need to enable interpolation in Config.toml while triangulating.')
# Loop over augmenter types to handle separate augmenters for lower and
# upper bodies.
outputs_all = {}
n_response_markers_all = 0
for idx_augm, augmenterModelType in enumerate(augmenterModelType_all):
outputs_all[idx_augm] = {}
feature_markers = feature_markers_all[idx_augm]
response_markers = response_markers_all[idx_augm]
augmenterModelDir = os.path.join(augmenterDir, augmenterModelName,
augmenterModelType)
# %% Pre-process inputs.
# Step 1: import .trc file with OpenPose marker trajectories.
trc_data = TRC2numpy(pathInputTRCFile, feature_markers)
# Calculate the midHip marker as the average of RHip and LHip
midhip_data = get_midhip_data(trc_file)
# Calculate the midHip marker as the average of RHip and LHip
midhip_data = get_midhip_data(trc_file)
trc_data_data = trc_data[:,1:]
trc_data_data = trc_data[:,1:]
# Step 2: Normalize with reference marker position.
with open(os.path.join(augmenterModelDir, "metadata.json"), 'r') as f:
metadata = json.load(f)
referenceMarker_data = midhip_data # instead of trc_file.marker(referenceMarker) # change by HunMin
norm_trc_data_data = np.zeros((trc_data_data.shape[0],
trc_data_data.shape[1]))
for i in range(0,trc_data_data.shape[1],3):
norm_trc_data_data[:,i:i+3] = (trc_data_data[:,i:i+3] -
referenceMarker_data)
# Step 2: Normalize with reference marker position.
with open(os.path.join(augmenterModelDir, "metadata.json"), 'r') as f:
metadata = json.load(f)
referenceMarker_data = midhip_data # instead of trc_file.marker(referenceMarker) # change by HunMin
norm_trc_data_data = np.zeros((trc_data_data.shape[0],
trc_data_data.shape[1]))
for i in range(0,trc_data_data.shape[1],3):
norm_trc_data_data[:,i:i+3] = (trc_data_data[:,i:i+3] -
referenceMarker_data)
# Step 3: Normalize with subject's height.
norm2_trc_data_data = copy.deepcopy(norm_trc_data_data)
norm2_trc_data_data = norm2_trc_data_data / subject_height[p]
# Step 4: Add remaining features.
inputs = copy.deepcopy(norm2_trc_data_data)
if featureHeight:
inputs = np.concatenate(
(inputs, subject_height[p]*np.ones((inputs.shape[0],1))), axis=1)
if featureWeight:
inputs = np.concatenate(
(inputs, subject_mass[p]*np.ones((inputs.shape[0],1))), axis=1)
# Step 5: Pre-process data
pathMean = os.path.join(augmenterModelDir, "mean.npy")
pathSTD = os.path.join(augmenterModelDir, "std.npy")
if os.path.isfile(pathMean):
trainFeatures_mean = np.load(pathMean, allow_pickle=True)
inputs -= trainFeatures_mean
if os.path.isfile(pathSTD):
trainFeatures_std = np.load(pathSTD, allow_pickle=True)
inputs /= trainFeatures_std
# Step 6: Reshape inputs if necessary (eg, LSTM)
if augmenterModelName == "LSTM":
inputs = np.reshape(inputs, (1, inputs.shape[0], inputs.shape[1]))
# %% Load model and weights, and predict outputs.
json_file = open(os.path.join(augmenterModelDir, "model.json"), 'r')
pretrainedModel_json = json_file.read()
json_file.close()
model = tf.keras.models.model_from_json(pretrainedModel_json)
model.load_weights(os.path.join(augmenterModelDir, "weights.h5"))
outputs = model.predict(inputs)
# Step 3: Normalize with subject's height.
norm2_trc_data_data = copy.deepcopy(norm_trc_data_data)
norm2_trc_data_data = norm2_trc_data_data / subject_height
# Step 4: Add remaining features.
inputs = copy.deepcopy(norm2_trc_data_data)
if featureHeight:
inputs = np.concatenate(
(inputs, subject_height*np.ones((inputs.shape[0],1))), axis=1)
if featureWeight:
inputs = np.concatenate(
(inputs, subject_mass*np.ones((inputs.shape[0],1))), axis=1)
# %% Post-process outputs.
# Step 1: Reshape if necessary (eg, LSTM)
if augmenterModelName == "LSTM":
outputs = np.reshape(outputs, (outputs.shape[1], outputs.shape[2]))
# Step 2: Un-normalize with subject's height.
unnorm_outputs = outputs * subject_height[p]
# Step 5: Pre-process data
pathMean = os.path.join(augmenterModelDir, "mean.npy")
pathSTD = os.path.join(augmenterModelDir, "std.npy")
if os.path.isfile(pathMean):
trainFeatures_mean = np.load(pathMean, allow_pickle=True)
inputs -= trainFeatures_mean
if os.path.isfile(pathSTD):
trainFeatures_std = np.load(pathSTD, allow_pickle=True)
inputs /= trainFeatures_std
# Step 2: Un-normalize with reference marker position.
unnorm2_outputs = np.zeros((unnorm_outputs.shape[0],
unnorm_outputs.shape[1]))
for i in range(0,unnorm_outputs.shape[1],3):
unnorm2_outputs[:,i:i+3] = (unnorm_outputs[:,i:i+3] +
referenceMarker_data)
# %% Add markers to .trc file.
for c, marker in enumerate(response_markers):
x = unnorm2_outputs[:,c*3]
y = unnorm2_outputs[:,c*3+1]
z = unnorm2_outputs[:,c*3+2]
trc_file.add_marker(marker, x, y, z)
# %% Gather data for computing minimum y-position.
outputs_all[idx_augm]['response_markers'] = response_markers
outputs_all[idx_augm]['response_data'] = unnorm2_outputs
n_response_markers_all += len(response_markers)
# Step 6: Reshape inputs if necessary (eg, LSTM)
if augmenterModelName == "LSTM":
inputs = np.reshape(inputs, (1, inputs.shape[0], inputs.shape[1]))
# %% Extract minimum y-position across response markers. This is used
# to align feet and floor when visualizing.
responses_all_conc = np.zeros((unnorm2_outputs.shape[0],
n_response_markers_all*3))
idx_acc_res = 0
for idx_augm in outputs_all:
idx_acc_res_end = (idx_acc_res +
(len(outputs_all[idx_augm]['response_markers']))*3)
responses_all_conc[:,idx_acc_res:idx_acc_res_end] = (
outputs_all[idx_augm]['response_data'])
idx_acc_res = idx_acc_res_end
# Minimum y-position across response markers.
min_y_pos = np.min(responses_all_conc[:,1::3])
# %% Load model and weights, and predict outputs.
json_file = open(os.path.join(augmenterModelDir, "model.json"), 'r')
pretrainedModel_json = json_file.read()
json_file.close()
model = tf.keras.models.model_from_json(pretrainedModel_json)
model.load_weights(os.path.join(augmenterModelDir, "weights.h5"))
outputs = model.predict(inputs)
# %% Post-process outputs.
# Step 1: Reshape if necessary (eg, LSTM)
if augmenterModelName == "LSTM":
outputs = np.reshape(outputs, (outputs.shape[1], outputs.shape[2]))
# %% If offset
if offset:
trc_file.offset('y', -(min_y_pos-0.01))
# Step 2: Un-normalize with subject's height.
unnorm_outputs = outputs * subject_height
# Step 2: Un-normalize with reference marker position.
unnorm2_outputs = np.zeros((unnorm_outputs.shape[0],
unnorm_outputs.shape[1]))
for i in range(0,unnorm_outputs.shape[1],3):
unnorm2_outputs[:,i:i+3] = (unnorm_outputs[:,i:i+3] +
referenceMarker_data)
# %% Add markers to .trc file.
for c, marker in enumerate(response_markers):
x = unnorm2_outputs[:,c*3]
y = unnorm2_outputs[:,c*3+1]
z = unnorm2_outputs[:,c*3+2]
trc_file.add_marker(marker, x, y, z)
# %% Gather data for computing minimum y-position.
outputs_all[idx_augm]['response_markers'] = response_markers
outputs_all[idx_augm]['response_data'] = unnorm2_outputs
n_response_markers_all += len(response_markers)
# %% Extract minimum y-position across response markers. This is used
# to align feet and floor when visualizing.
responses_all_conc = np.zeros((unnorm2_outputs.shape[0],
n_response_markers_all*3))
idx_acc_res = 0
for idx_augm in outputs_all:
idx_acc_res_end = (idx_acc_res +
(len(outputs_all[idx_augm]['response_markers']))*3)
responses_all_conc[:,idx_acc_res:idx_acc_res_end] = (
outputs_all[idx_augm]['response_data'])
idx_acc_res = idx_acc_res_end
# Minimum y-position across response markers.
min_y_pos = np.min(responses_all_conc[:,1::3])
# %% If offset
if offset:
trc_file.offset('y', -(min_y_pos-0.01))
# %% Return augmented .trc file
trc_file.write(pathOutputTRCFile)
# %% Return augmented .trc file
trc_file.write(pathOutputTRCFile)
logging.info(f'Augmented marker coordinates are stored at {pathOutputTRCFile}.')
logging.info(f'Augmented marker coordinates are stored at {pathOutputTRCFile}.\n')
return min_y_pos
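In short, the augmentation now loops over every filtered TRC file and indexes height and mass with the participant counter p, so each person is normalized with their own anthropometry before the LSTM predicts the extra markers. A condensed, hedged sketch of that outer loop (augment_one stands in for the per-file LSTM pipeline shown above and is not a real Pose2Sim function):

import os
import glob

def augment_all(pose3d_dir, subject_height, subject_mass, augment_one):
    # augment_one is a placeholder for the per-file LSTM pipeline shown above
    trc_files = [f for f in glob.glob(os.path.join(pose3d_dir, '*.trc'))
                 if 'filt' in f and '_LSTM' not in f]
    for p, trc_in in enumerate(trc_files):
        trc_out = os.path.splitext(trc_in)[0] + '_LSTM.trc'
        augment_one(trc_in, trc_out, height=subject_height[p], mass=subject_mass[p])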

View File

@@ -205,10 +205,10 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_c
comb_errors_below_thresh += [combinations_with_cams_off[error_comb.index(e)] for e in error_comb if e<error_threshold_tracking]
Q_kpt += [Q_comb[error_comb.index(e)] for e in error_comb if e<error_threshold_tracking]
print('\n', personsIDs_combinations)
print(errors_below_thresh)
print(comb_errors_below_thresh)
print(Q_kpt)
# print('\n', personsIDs_combinations)
# print(errors_below_thresh)
# print(comb_errors_below_thresh)
# print(Q_kpt)
if not single_person:
# Remove indices already used for a person
personsIDs_combinations = np.array([personsIDs_combinations[i] for i in range(len(personsIDs_combinations))
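The hunk above mainly comments out debug prints; in the multi-person branch that follows, detections already assigned to one person are removed before associating the next. A hedged NumPy sketch of that idea, with illustrative names (NaN entries stand for cameras excluded from a combination):

import numpy as np

def remove_used_ids(personsIDs_combinations, used_ids):
    # Keep only the combinations that reuse none of the detections already assigned
    kept = [comb for comb in personsIDs_combinations
            if not any((not np.isnan(idx)) and int(idx) in used_ids for idx in comb)]
    return np.array(kept)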
@@ -340,7 +340,7 @@ def track_2d_all(config):
and {n_cams} cameras based on the number of pose folders.')
for f in tqdm(range(*f_range)):
print(f'\nFrame {f}:')
# print(f'\nFrame {f}:')
json_files_f = [json_files[c][f] for c in range(n_cams)]
json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
@@ -355,7 +355,7 @@ def track_2d_all(config):
error_min_tot.append(np.mean(errors_below_thresh))
cameras_off_count = np.count_nonzero([np.isnan(comb) for comb in comb_errors_below_thresh]) / len(comb_errors_below_thresh)
print(cameras_off_count)
# print(cameras_off_count)
cameras_off_tot.append(cameras_off_count)
# rewrite json files with a single or multiple persons of interest
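cameras_off_count, whose debug print is removed above, is roughly the average number of cameras excluded (NaN entries) per retained combination in the current frame. A tiny example with made-up values:

import numpy as np

# Illustrative values: one retained combination per person, NaN = camera excluded
comb_errors_below_thresh = [np.array([0, 1, np.nan, 2]), np.array([1, np.nan, np.nan, 0])]
cameras_off_count = np.count_nonzero([np.isnan(comb) for comb in comb_errors_below_thresh]) \
                    / len(comb_errors_below_thresh)
print(cameras_off_count)  # 1.5 excluded cameras per retained combination, on average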

View File

@@ -126,10 +126,11 @@ def make_trc(config, Q, keypoints_names, f_range, id_person=-1):
# Read config
project_dir = config.get('project').get('project_dir')
frame_rate = config.get('project').get('frame_rate')
if id_person == -1:
single_person = config.get('project').get('single_person')
if single_person:
seq_name = f'{os.path.basename(os.path.realpath(project_dir))}'
else:
seq_name = f'{os.path.basename(os.path.realpath(project_dir))}_Participant{id_person+1}'
seq_name = f'{os.path.basename(os.path.realpath(project_dir))}_P{id_person+1}'
pose3d_dir = os.path.join(project_dir, 'pose-3d')
trc_f = f'{seq_name}_{f_range[0]}-{f_range[1]}.trc'
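TRC file naming now depends on the single_person flag rather than on a sentinel id: a single participant keeps the plain sequence name, while each participant in multi-person mode gets a _PN suffix. A hedged sketch of that naming rule (the helper name is illustrative):

import os

def trc_name(project_dir, f_range, single_person, id_person=0):
    # e.g. 'Demo_0-100.trc' for a single person, 'Demo_P2_0-100.trc' for participant 2
    base = os.path.basename(os.path.realpath(project_dir))
    seq_name = base if single_person else f'{base}_P{id_person+1}'
    return f'{seq_name}_{f_range[0]}-{f_range[1]}.trc'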
@@ -716,7 +717,7 @@ def triangulate_all(config):
# Interpolate missing values
if interpolation_kind != 'none':
for n in range(nb_persons_to_detect):
Q_tot[n].apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, interpolation_kind])
Q_tot[n] = Q_tot[n].apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, interpolation_kind])
# Q_tot.replace(np.nan, 0, inplace=True)
# Create TRC file
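The interpolation fix above matters because pandas DataFrame.apply returns a new object rather than modifying the frame in place; without reassigning, the interpolated values were silently discarded. A minimal, self-contained illustration of the pitfall:

import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [1.0, np.nan, 3.0]})

df.apply(lambda col: col.interpolate(), axis=0)        # result discarded, df still contains NaN
df = df.apply(lambda col: col.interpolate(), axis=0)   # reassigned: the NaN becomes 2.0
print(df['x'].tolist())  # [1.0, 2.0, 3.0]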

View File

@@ -16,8 +16,9 @@
##### N.B.: Please set undistort_points and handle_LR_swap to false for now since it currently leads to inaccuracies. I'll try to fix it soon.
> **_News_: Version 0.6:**\
> **Batch processing, Marker augmentation, and Blender visualizer now released!**
> **_News_: Version 0.7:**\
> **Multi-person analysis is now supported!** Team or combat sports can now take advantage of Pose2Sim.\
> Other recent releases: Automatic batch processing, Marker augmentation, Blender visualizer.
<!-- Incidentally, right/left limb swapping is now handled, which is useful if few cameras are used;\
and lens distortions are better taken into account.\ -->
> To upgrade, type `pip install pose2sim --upgrade`.
@@ -45,9 +46,9 @@ If you can only use one single camera and don't mind losing some accuracy, pleas
- [x] v0.3: Supported other pose estimation algorithms
- [x] v0.4: New calibration tool based on scene measurements
- [x] v0.5: Automatic batch processing
- [x] v0.6: **Marker augmentation, Blender visualizer**
- [ ] v0.7: Calibration based on keypoint detection, Handling left/right swaps, Correcting lens distortions
- [ ] v0.8: Supports multi-person analysis
- [x] v0.6: Marker augmentation, Blender visualizer
- [ ] **v0.7: Multi-person analysis**
- [ ] v0.8: Calibration based on keypoint detection, Handling left/right swaps, Correcting lens distortions
- [ ] v0.9: New synchronization tool
- [ ] v0.10: Graphical User Interface
- [ ] v1.0: First accomplished release