careless push had overwritten previous changes

This commit is contained in:
davidpagnon 2024-02-05 11:51:26 +01:00
parent a4d38da980
commit db145942de
15 changed files with 24 additions and 30 deletions

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]

View File

@@ -119,7 +119,7 @@
 # tracked_keypoint = 'Neck' # If the neck is not detected by the pose_model, check skeleton.py
 ## and choose a stable point for tracking the person of interest (e.g., 'right_shoulder' with BLAZEPOSE)
 # reproj_error_threshold_association = 20 # px
-# likelihood_error_threshold_association = 0.2
+# likelihood_threshold_association = 0.2
 # [triangulation]
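
The same one-line fix is applied to every demo `Config.toml`: the person-association key is renamed from `likelihood_error_threshold_association` to `likelihood_threshold_association`. As a rough illustration of what this threshold governs (a sketch, not Pose2Sim's actual association code), keypoints whose detection likelihood falls below it are ignored when tracking the person of interest:

```python
import numpy as np

# Hypothetical illustration of likelihood_threshold_association = 0.2:
# keypoints below the confidence threshold are ignored during association.
likelihood_threshold_association = 0.2

# One detected person: rows of (x, y, likelihood), values made up
keypoints = np.array([[640.2, 210.5, 0.91],
                      [655.0, 305.1, 0.12],   # too uncertain, dropped
                      [630.8, 400.9, 0.77]])

reliable = keypoints[keypoints[:, 2] >= likelihood_threshold_association]
print(reliable)   # only the two confident keypoints remain
```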

View File

@@ -31,7 +31,7 @@
 ## INIT
-from Pose2Sim.common import RT_qca2cv, rotate_cam, quat2mat, euclidean_distance, natural_sort, zup2yup
+from Pose2Sim.common import world_to_camera_persp, rotate_cam, quat2mat, euclidean_distance, natural_sort, zup2yup
 import os
 import logging
View File

@@ -99,7 +99,7 @@ def computeP(calib_file, undistort=False):
         T = np.array(calib[cam]['translation'])
         H = np.block([[R,T.reshape(3,1)], [np.zeros(3), 1 ]])
-        P.append(Kh.dot(H))
+        P.append(Kh @ H)
     return P
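
For reference, `Kh @ H` is the standard pinhole composition of a 3x4 projection matrix from homogeneous intrinsics and extrinsics. A minimal self-contained sketch with made-up calibration values:

```python
import numpy as np

# Sketch of the projection-matrix composition in computeP (values invented)
K = np.array([[1500.,    0., 960.],
              [   0., 1500., 540.],
              [   0.,    0.,   1.]])           # intrinsic matrix
Kh = np.block([K, np.zeros((3, 1))])           # homogeneous 3x4 intrinsics
R = np.eye(3)                                  # camera rotation
T = np.array([0.1, -0.2, 2.5])                 # camera translation
H = np.block([[R, T.reshape(3, 1)], [np.zeros(3), 1]])  # 4x4 extrinsics
P = Kh @ H                                     # 3x4 projection matrix
print(P.shape)                                 # (3, 4)
```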
@@ -149,8 +149,8 @@ def reprojection(P_all, Q):
     x_calc, y_calc = [], []
     for c in range(len(P_all)):
         P_cam = P_all[c]
-        x_calc.append(P_cam[0].dot(Q) / P_cam[2].dot(Q))
-        y_calc.append(P_cam[1].dot(Q) / P_cam[2].dot(Q))
+        x_calc.append(P_cam[0] @ Q / (P_cam[2] @ Q))
+        y_calc.append(P_cam[1] @ Q / (P_cam[2] @ Q))
     return x_calc, y_calc
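
Each line performs the perspective division: pixel coordinates are the first two rows of `P_cam @ Q` divided by the third. A worked example with an illustrative camera matrix:

```python
import numpy as np

# Reprojection of one homogeneous 3D point Q with an invented P matrix
P_cam = np.hstack([np.eye(3), np.zeros((3, 1))])   # identity rotation, zero translation
Q = np.array([0.5, 1.2, 3.0, 1.0])                 # homogeneous world point
u = P_cam[0] @ Q / (P_cam[2] @ Q)                  # 0.5 / 3.0
v = P_cam[1] @ Q / (P_cam[2] @ Q)                  # 1.2 / 3.0
print(u, v)
```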
@@ -177,10 +177,10 @@ def euclidean_distance(q1, q2):
     return euc_dist
-def RT_qca2cv(r, t):
+def world_to_camera_persp(r, t):
     '''
     Converts rotation R and translation T
-    from Qualisys object centered perspective
+    from Qualisys world centered perspective
     to OpenCV camera centered perspective
     and inversely.
@@ -188,7 +188,7 @@ def RT_qca2cv(r, t):
     '''
     r = r.T
-    t = - r.dot(t)
+    t = - r @ t
     return r, t
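
With an orthonormal rotation matrix this conversion is its own inverse: transposing `r` and negating `r @ t` twice returns the original pose. A quick check using the two lines from the diff above (rotation and translation values invented for the example):

```python
import numpy as np

def world_to_camera_persp(r, t):
    # Same two lines as in the hunk above
    r = r.T
    t = - r @ t
    return r, t

ang = np.radians(30)
r0 = np.array([[np.cos(ang), -np.sin(ang), 0.],
               [np.sin(ang),  np.cos(ang), 0.],
               [0., 0., 1.]])
t0 = np.array([0.3, -1.0, 2.0])
r2, t2 = world_to_camera_persp(*world_to_camera_persp(r0, t0))
assert np.allclose(r2, r0) and np.allclose(t2, t0)   # round trip recovers the pose
```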
@@ -208,10 +208,10 @@ def rotate_cam(r, t, ang_x=0, ang_y=0, ang_z=0):
     r_ax_x = np.array([1,0,0, 0,np.cos(ang_x),-np.sin(ang_x), 0,np.sin(ang_x),np.cos(ang_x)]).reshape(3,3)
     r_ax_y = np.array([np.cos(ang_y),0,np.sin(ang_y), 0,1,0, -np.sin(ang_y),0,np.cos(ang_y)]).reshape(3,3)
     r_ax_z = np.array([np.cos(ang_z),-np.sin(ang_z),0, np.sin(ang_z),np.cos(ang_z),0, 0,0,1]).reshape(3,3)
-    r_ax = r_ax_z.dot(r_ax_y).dot(r_ax_x)
+    r_ax = r_ax_z @ r_ax_y @ r_ax_x
     r_ax_h = np.block([[r_ax,np.zeros(3).reshape(3,1)], [np.zeros(3), 1]])
-    r_ax_h__rt_h = r_ax_h.dot(rt_h)
+    r_ax_h__rt_h = r_ax_h @ rt_h
     r = r_ax_h__rt_h[:3,:3]
     t = r_ax_h__rt_h[:3,3]
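
In the composition `r_ax_z @ r_ax_y @ r_ax_x`, matrices act right to left on a vector, so the X rotation is applied first, then Y, then Z. A small numeric check (angles invented):

```python
import numpy as np

ang_x, ang_z = np.radians(90), np.radians(90)
r_ax_x = np.array([1,0,0, 0,np.cos(ang_x),-np.sin(ang_x), 0,np.sin(ang_x),np.cos(ang_x)]).reshape(3,3)
r_ax_z = np.array([np.cos(ang_z),-np.sin(ang_z),0, np.sin(ang_z),np.cos(ang_z),0, 0,0,1]).reshape(3,3)
r_ax = r_ax_z @ np.eye(3) @ r_ax_x   # ang_y = 0, so r_ax_y is the identity
v = np.array([0., 1., 0.])           # Y axis
print(np.round(r_ax @ v))            # X rotation sends it to Z; Z rotation keeps Z fixed: [0, 0, 1]
```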
@@ -295,23 +295,21 @@ def natural_sort(list):
 def zup2yup(Q):
     '''
     Turns Z-up system coordinates into Y-up coordinates
     INPUT:
     - Q: pandas dataframe
     N 3D points as columns, ie 3*N columns in Z-up system coordinates
     and frame number as rows
     OUTPUT:
     - Q: pandas dataframe with N 3D points in Y-up system coordinates
     '''
     # X->Y, Y->Z, Z->X
     cols = list(Q.columns)
     cols = np.array([[cols[i*3+1],cols[i*3+2],cols[i*3]] for i in range(int(len(cols)/3))]).flatten()
     Q = Q[cols]
     return Q
 ## CLASSES
 class plotWindow():
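
For context, `zup2yup` only permutes column triplets: for each point, the former Y column becomes X, Z becomes Y, and X becomes Z. A usage sketch of the same reindexing with an invented one-point dataframe:

```python
import numpy as np
import pandas as pd

# Hypothetical single-frame, single-point dataframe in Z-up coordinates
Q = pd.DataFrame({'hip_X': [1.0], 'hip_Y': [2.0], 'hip_Z': [3.0]})

cols = list(Q.columns)
cols = np.array([[cols[i*3+1], cols[i*3+2], cols[i*3]]
                 for i in range(int(len(cols)/3))]).flatten()
print(Q[cols])   # column order becomes hip_Y, hip_Z, hip_X
```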

View File

@@ -384,7 +384,7 @@ def triangulation_from_best_cameras(config, coords_2D_kpt, coords_2D_kpt_swapped
     # Swap left and right sides if reprojection error still too high
     if handle_LR_swap and error_min > error_threshold_triangulation:
-        print('handle')
+        # print('handle')
         n_cams_swapped = 1
         error_off_swap_min = error_min
         while error_off_swap_min > error_threshold_triangulation and n_cams_swapped < (n_cams - nb_cams_off_tot) / 2: # more than half of the cameras switched: may triangulate twice the same side
@@ -468,8 +468,6 @@ def triangulation_from_best_cameras(config, coords_2D_kpt, coords_2D_kpt_swapped
         # print('id_cams_off_tot ', id_cams_off_tot)
         id_excluded_cams = id_cams_off_tot[best_cams]
         # print('id_excluded_cams ', id_excluded_cams)
-        id_excluded_cams = id_cams_off_tot[best_cams]
-        # print('id_excluded_cams ', id_excluded_cams)
     else:
         id_excluded_cams = list(range(n_cams))
         nb_cams_excluded = n_cams
@@ -610,8 +608,6 @@ def triangulate_all(config):
     Q_tot, error_tot, nb_cams_excluded_tot,id_excluded_cams_tot = [], [], [], []
     for f in tqdm(range(*f_range)):
         # Get x,y,likelihood values from files
         json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
         # print(json_tracked_files_f)
-        json_tracked_files_f = [json_tracked_files[c][f] for c in range(n_cams)]
-        # print(json_tracked_files_f)
         x_files, y_files, likelihood_files = extract_files_frame_f(json_tracked_files_f, keypoints_ids)
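
For reference, `extract_files_frame_f` pulls per-frame 2D coordinates out of the tracked OpenPose JSON files, whose `pose_keypoints_2d` field is a flat `x, y, confidence` list. A minimal sketch of that parsing for a single file (path and person index invented):

```python
import json
import numpy as np

# Read one OpenPose JSON file and split the flat keypoint list
with open('pose_cam01_json/trial_000000000000_keypoints.json') as f:
    data = json.load(f)

kpts = np.array(data['people'][0]['pose_keypoints_2d']).reshape(-1, 3)
x_file, y_file, likelihood_file = kpts[:, 0], kpts[:, 1], kpts[:, 2]
```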

View File

@@ -300,7 +300,7 @@ The accuracy and robustness of Pose2Sim have been thoroughly assessed only with
 * Open a command prompt in your **OpenPose** directory. \
 Launch OpenPose for each `videos` folder:
 ``` cmd
-bin\OpenPoseDemo.exe --model_pose BODY_25B --video <PATH_TO_TRIAL_DIR>\videos\vid_cam1.mp4 --write_json <PATH_TO_TRIAL_DIR>\pose\pose_cam1_json
+bin\OpenPoseDemo.exe --model_pose BODY_25B --video <PATH_TO_TRIAL_DIR>\videos\cam01.mp4 --write_json <PATH_TO_TRIAL_DIR>\pose\pose_cam01_json
 ```
 * The [BODY_25B model](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models) has more accurate results than the standard BODY_25 one and has been extensively tested for Pose2Sim. \
 You can also use the [BODY_135 model](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models), which allows for the evaluation of pronation/supination, wrist flexion, and wrist deviation.\