small improvements on multi-person detection
parent 757673d01c · commit 66df6bbd7c

Judging by the function names in the hunk headers, the changes below touch a JSON display utility, the EasyMocap calibration conversion, the person-association step, and the triangulation step.
@@ -118,7 +118,7 @@ def json_display_without_img_func(**args):
         scat.set_offsets(np.c_[X[frame], image_height-Y[frame]])
         scat.set_array(CONF[frame])
         if save == True or save=='True' or save == '1':
-            output_name = os.path.join(output_img_folder, f'{os.path.basename(output_img_folder)}_{frame}.png')
+            output_name = os.path.join(output_img_folder, f'{os.path.basename(output_img_folder)}_{str(frame).zfill(5)}.png')
             plt.savefig(output_name)
         return scat,
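Side note (not part of the commit): zero-padding the frame number keeps the saved images in lexicographic order, which matters when the PNG sequence is later browsed or stitched into a video. A minimal sketch of the difference:

    frame = 7
    print(f'demo_{frame}.png')                # demo_7.png   -> sorts after demo_10.png
    print(f'demo_{str(frame).zfill(5)}.png')  # demo_00007.png -> sorts numerically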
@@ -319,14 +319,15 @@ def read_intrinsic_yml(intrinsic_path):
     N.B. : Size is calculated as twice the position of the optical center. Please correct in the .toml file if needed.
     '''
     intrinsic_yml = cv2.FileStorage(intrinsic_path, cv2.FILE_STORAGE_READ)
-    N = intrinsic_yml.getNode('names').size()
-    S, D, K = [], [], []
-    for i in range(N):
+    cam_number = intrinsic_yml.getNode('names').size()
+    N, S, D, K = [], [], [], []
+    for i in range(cam_number):
         name = intrinsic_yml.getNode('names').at(i).string()
+        N.append(name)
         K.append(intrinsic_yml.getNode(f'K_{name}').mat())
         D.append(intrinsic_yml.getNode(f'dist_{name}').mat().flatten()[:-1])
         S.append([K[i][0,2]*2, K[i][1,2]*2])
-    return S, K, D
+    return N, S, K, D
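Note that the signature change ripples to every caller, since read_intrinsic_yml now returns the camera names first. A sketch of the new call site (my example; the file path is a placeholder):

    N, S, K, D = read_intrinsic_yml('intri.yml')  # hypothetical path
    # N: camera names, S: image sizes, K: intrinsic matrices, D: distortion coefficients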
@@ -337,13 +338,14 @@ def read_extrinsic_yml(extrinsic_path):
     - T (extrinsic translation)
     '''
     extrinsic_yml = cv2.FileStorage(extrinsic_path, cv2.FILE_STORAGE_READ)
-    N = extrinsic_yml.getNode('names').size()
-    R, T = [], []
-    for i in range(N):
+    cam_number = extrinsic_yml.getNode('names').size()
+    N, R, T = [], [], []
+    for i in range(cam_number):
         name = extrinsic_yml.getNode('names').at(i).string()
+        N.append(name)
         R.append(extrinsic_yml.getNode(f'R_{name}').mat().flatten()) # R_1 for a Rodrigues vector, Rot_1 for a rotation matrix
         T.append(extrinsic_yml.getNode(f'T_{name}').mat().flatten())
-    return R, T
+    return N, R, T
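The comment on the R_* nodes says the rotation can be stored either as a Rodrigues vector (R_1) or as a rotation matrix (Rot_1). For reference, OpenCV's cv2.Rodrigues converts between the two forms; a small self-contained sketch (my example, not from the commit):

    import cv2
    import numpy as np

    r = np.array([0., 0., np.pi/2])   # made-up Rodrigues vector: 90 deg about z
    R_mat, _ = cv2.Rodrigues(r)       # 3x3 rotation matrix form
    r_back, _ = cv2.Rodrigues(R_mat)  # back to the 3-vector form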
@@ -365,9 +367,8 @@ def calib_easymocap_fun(files_to_convert_paths, binning_factor=1):
     '''
     
     extrinsic_path, intrinsic_path = files_to_convert_paths
-    S, K, D = read_intrinsic_yml(intrinsic_path)
-    R, T = read_extrinsic_yml(extrinsic_path)
-    C = np.array(range(len(S)))
+    C, S, K, D = read_intrinsic_yml(intrinsic_path)
+    _, R, T = read_extrinsic_yml(extrinsic_path)
     ret = [np.nan]*len(C)
     
     return ret, C, S, D, K, R, T
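Net effect of this hunk: the camera identifiers C used to be synthesized as plain indices; they are now the real names read from the intrinsic YAML. Illustratively (names made up):

    # before: C = np.array([0, 1, 2]); after:
    C = ['cam_01', 'cam_02', 'cam_03']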
@@ -59,6 +59,18 @@ __status__ = "Development"
 
 
 ## FUNCTIONS
+def common_items_in_list(list1, list2):
+    '''
+    Do two lists have any items in common at the same index?
+    Returns True or False
+    '''
+    
+    for i, j in enumerate(list1):
+        if j == list2[i]:
+            return True
+    return False
+
+
 def min_with_single_indices(L, T):
     '''
     Let L be a list (size s) with T associated tuple indices (size s).
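Usage note (my example, not from the commit): despite its name, the new helper compares position by position rather than taking a set intersection, and it assumes list2 is at least as long as list1:

    common_items_in_list([1, 2, 3], [4, 2, 6])   # True:  index 1 matches
    common_items_in_list([1, 2, 3], [3, 1, 2])   # False: no index agrees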
@@ -121,6 +133,8 @@ def sort_people(Q_kpt_old, Q_kpt, nb_persons_to_detect):
     '''
     
     # Generate possible person correspondences across frames
+    if len(Q_kpt_old) < len(Q_kpt):
+        Q_kpt_old = np.concatenate((Q_kpt_old, [[0., 0., 0., 1.]]*(len(Q_kpt)-len(Q_kpt_old))))
     personsIDs_comb = sorted(list(it.product(range(len(Q_kpt_old)),range(len(Q_kpt)))))
     # Compute distance between persons from one frame to another
     frame_by_frame_dist = []
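What the two added lines do: when a person enters the scene, the previous frame holds fewer tracked persons than the current one, so the index product below would be lopsided. Padding last frame's array with dummy homogeneous points [0., 0., 0., 1.] evens it out. A self-contained illustration (my example):

    import numpy as np

    Q_kpt_old = np.array([[0.2, 1.1, 0.9, 1.]])      # one person in the last frame
    Q_kpt = np.array([[0.2, 1.2, 0.9, 1.],
                      [1.5, 0.8, 1.0, 1.]])          # two persons in this frame
    Q_kpt_old = np.concatenate((Q_kpt_old, [[0., 0., 0., 1.]]*(len(Q_kpt)-len(Q_kpt_old))))
    # Q_kpt_old is now 2x4, with a dummy person at the origin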
@@ -287,6 +301,27 @@ def best_persons_and_cameras_combination(config, json_files_framef, personsIDs_c
     # print(comb_errors_below_thresh)
     # print(Q_kpt)
     if multi_person:
+        # sort combinations by error magnitude
+        errors_below_thresh_sorted = sorted(errors_below_thresh)
+        sorted_idx = np.array([errors_below_thresh.index(e) for e in errors_below_thresh_sorted])
+        comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_idx]
+        Q_kpt = np.array(Q_kpt)[sorted_idx]
+        # remove combinations with indices used several times for the same person
+        comb_errors_below_thresh = [c.tolist() for c in comb_errors_below_thresh]
+        comb = comb_errors_below_thresh.copy()
+        comb_ok = np.array([comb[0]])
+        for i, c1 in enumerate(comb):
+            idx_ok = np.array([not(common_items_in_list(c1, c2)) for c2 in comb[1:]])
+            try:
+                comb = np.array(comb[1:])[idx_ok]
+                comb_ok = np.concatenate((comb_ok, [comb[0]]))
+            except:
+                break
+        sorted_pruned_idx = [comb_errors_below_thresh.index(c.tolist()) for c in comb_ok]
+        errors_below_thresh = np.array(errors_below_thresh_sorted)[sorted_pruned_idx]
+        comb_errors_below_thresh = np.array(comb_errors_below_thresh)[sorted_pruned_idx]
+        Q_kpt = Q_kpt[sorted_pruned_idx]
+
     # Remove indices already used for a person
     personsIDs_combinations = np.array([personsIDs_combinations[i] for i in range(len(personsIDs_combinations))
     if not np.array(
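The multi_person block above boils down to a greedy assignment: sort candidate combinations by reprojection error, keep the best one, discard every remaining combination that reuses one of its indices at the same position, and repeat. A simplified restatement (my sketch, reusing the common_items_in_list helper added earlier):

    combs = [[0, 1], [1, 0], [0, 2], [2, 1]]   # hypothetical, already sorted by error
    kept = []
    while combs:
        best = combs[0]
        kept.append(best)
        combs = [c for c in combs[1:] if not common_items_in_list(best, c)]
    # kept == [[0, 1], [1, 0]]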
@@ -365,6 +400,7 @@ def track_2d_all(config):
     # Read config
     project_dir = config.get('project').get('project_dir')
     session_dir = os.path.realpath(os.path.join(project_dir, '..', '..'))
+    multi_person = config.get('project').get('multi_person')
     pose_model = config.get('pose').get('pose_model')
     tracked_keypoint = config.get('personAssociation').get('tracked_keypoint')
     frame_range = config.get('project').get('frame_range')
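The new setting is read from the 'project' section of the configuration, so (assuming the project's usual Config.toml layout) it would be toggled with:

    [project]
    multi_person = true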
@@ -378,6 +414,11 @@ def track_2d_all(config):
     pose_dir = os.path.join(project_dir, 'pose')
     poseTracked_dir = os.path.join(project_dir, 'pose-associated')
     
+    if multi_person:
+        logging.info('\nMulti-person analysis selected. Note that you can set this option to false for faster runtime if you only need the main person in the scene.')
+    else:
+        logging.info('\nSingle-person analysis selected.')
+    
     # projection matrix from toml calibration file
     P = computeP(calib_file, undistort=undistort_points)
     calib_params = retrieve_calib_params(calib_file)
@@ -669,7 +669,7 @@ def triangulate_all(config):
     
     # Triangulation
     Q_tot, error_tot, nb_cams_excluded_tot,id_excluded_cams_tot = [], [], [], []
-    for f in tqdm(range(*f_range)):
+    for f in tqdm(range(frames_nb)):
         # Get x,y,likelihood values from files
         json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
        # print(json_tracked_files_f)
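The motivation for range(*f_range) -> range(frames_nb) is presumably that f_range holds absolute frame numbers while json_tracked_files[c] is a plain 0-indexed list of the loaded frames, so iterating over absolute numbers breaks whenever the range does not start at 0. A sketch of the mismatch (frames_nb defined here by assumption as the frame count):

    f_range = [10, 100]                  # absolute first/last frame
    frames_nb = f_range[1] - f_range[0]  # 90 loaded files, indexed 0..89
    # range(*f_range) -> 10..99 would index past the end of a 90-item list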
@@ -718,10 +718,10 @@ def triangulate_all(config):
         id_excluded_cams = [[id_excluded_cams[n][k] for k in range(keypoints_nb)] for n in range(nb_persons_to_detect)]
         id_excluded_cams_tot.append(id_excluded_cams)
     
-    Q_tot = [pd.DataFrame([Q_tot[f][n] for f in range(*f_range)]) for n in range(nb_persons_to_detect)]
-    error_tot = [pd.DataFrame([error_tot[f][n] for f in range(*f_range)]) for n in range(nb_persons_to_detect)]
-    nb_cams_excluded_tot = [pd.DataFrame([nb_cams_excluded_tot[f][n] for f in range(*f_range)]) for n in range(nb_persons_to_detect)]
-    id_excluded_cams_tot = [pd.DataFrame([id_excluded_cams_tot[f][n] for f in range(*f_range)]) for n in range(nb_persons_to_detect)]
+    Q_tot = [pd.DataFrame([Q_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
+    error_tot = [pd.DataFrame([error_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
+    nb_cams_excluded_tot = [pd.DataFrame([nb_cams_excluded_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
+    id_excluded_cams_tot = [pd.DataFrame([id_excluded_cams_tot[f][n] for f in range(frames_nb)]) for n in range(nb_persons_to_detect)]
     
     for n in range(nb_persons_to_detect):
         error_tot[n]['mean'] = error_tot[n].mean(axis = 1)
@@ -769,7 +769,7 @@ def triangulate_all(config):
     trc_paths = [make_trc(config, Q_tot[n], keypoints_names, f_range, id_person=n) for n in range(len(Q_tot))]
     
     # Reorder TRC files
-    if multi_person and reorder_trc:
+    if multi_person and reorder_trc and len(trc_paths)>1:
         trc_id = retrieve_right_trc_order(trc_paths)
         [os.rename(t, t+'.old') for t in trc_paths]
         [os.rename(t+'.old', trc_paths[i]) for i, t in zip(trc_id,trc_paths)]
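Two remarks on this last hunk (mine, not the author's): the new len(trc_paths)>1 guard skips the reorder when only one person was triangulated, where swapping is meaningless; and the two-step rename via a temporary '.old' suffix lets the permutation happen in place without one file clobbering another:

    import os
    paths = ['P1.trc', 'P2.trc']              # hypothetical TRC files that must exist
    order = [1, 0]                            # P1's data actually belongs in P2.trc
    [os.rename(t, t + '.old') for t in paths]
    [os.rename(t + '.old', paths[i]) for i, t in zip(order, paths)]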