Moving cameras supported (to be added to the Pose2Sim main pipeline)
parent ac037e05ce
commit 7b58454712
Pose2Sim/Utilities/reproj_from_trc_calib.py

@@ -11,10 +11,12 @@
     toml calibration file.
 
     The output 2D points can be chosen to follow the DeepLabCut (default) or
-    the OpenPose format. If OpenPose is chosen, the BODY_25B model is used,
+    the OpenPose format. If OpenPose is chosen, the HALPE_26 model is used,
     with ear and eye at coordinates (0,0) since they are not used by Pose2Sim.
     You can change the MODEL tree to a different one if you need to reproject
-    in OpenPose format with a different model than BODY_25B.
+    in OpenPose format with a different model than HALPE_26.
 
+    New: Moving cameras and zooming cameras are now supported.
+
     Usage:
     from Pose2Sim.Utilities import reproj_from_trc_calib; reproj_from_trc_calib.reproj_from_trc_calib_func(r'<input_trc_file>', r'<input_calib_file>', '<output_format>', r'<output_file_root>')
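For readers who want to try the utility directly, the following is a minimal sketch of a Python call using the keyword-argument form documented later in the file; the paths are placeholders, and only the output flags you actually need have to be set to True.

    from Pose2Sim.Utilities import reproj_from_trc_calib

    # Placeholder paths: point these at your own triangulated .trc file and .toml calibration.
    reproj_from_trc_calib.reproj_from_trc_calib_func(
        input_trc_file=r'path/to/triangulated_coords.trc',
        input_calib_file=r'path/to/Calib.toml',
        deeplabcut_output=True,    # and/or openpose_output=True
        undistort_points=True,
        output_file_root=r'path/to/output_reproj')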
@@ -48,33 +50,32 @@ __status__ = "Development"
 
 
 ## SKELETON
-'''BODY_25B (full-body without hands, experimental, from OpenPose)
-https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/blob/master/experimental_models/README.md
-Adjust it if you want to reproject in OpenPose format with a different model'''
-nb_joints = 25
-MODEL = Node("CHip", id=None, children=[
+'''HALPE_26 (full-body without hands, from AlphaPose, MMPose, etc.)
+https://github.com/MVIG-SJTU/AlphaPose/blob/master/docs/MODEL_ZOO.md
+https://github.com/open-mmlab/mmpose/tree/main/projects/rtmpose'''
+MODEL = Node("Hip", id=19, children=[
     Node("RHip", id=12, children=[
         Node("RKnee", id=14, children=[
             Node("RAnkle", id=16, children=[
-                Node("RBigToe", id=22, children=[
+                Node("RBigToe", id=21, children=[
                     Node("RSmallToe", id=23),
                 ]),
-                Node("RHeel", id=24),
+                Node("RHeel", id=25),
             ]),
         ]),
     ]),
     Node("LHip", id=11, children=[
         Node("LKnee", id=13, children=[
             Node("LAnkle", id=15, children=[
-                Node("LBigToe", id=19, children=[
-                    Node("LSmallToe", id=20),
+                Node("LBigToe", id=20, children=[
+                    Node("LSmallToe", id=22),
                 ]),
-                Node("LHeel", id=21),
+                Node("LHeel", id=24),
             ]),
         ]),
     ]),
-    Node("Neck", id=17, children=[
-        Node("Head", id=18, children=[
+    Node("Neck", id=18, children=[
+        Node("Head", id=17, children=[
             Node("Nose", id=0),
         ]),
         Node("RShoulder", id=6, children=[
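The skeleton above is stored as an anytree hierarchy in which each Node carries a custom id attribute. As a quick orientation for readers unfamiliar with anytree, here is a small self-contained sketch of the same pattern; the two-joint tree and the joint_id helper are illustrative only and do not appear in the module, which instead walks the full MODEL tree with RenderTree further down.

    from anytree import Node, RenderTree

    # Tiny stand-in tree using the same Node(name, id=...) convention as MODEL.
    demo = Node("Hip", id=19, children=[
        Node("RHip", id=12, children=[Node("RKnee", id=14)]),
        Node("LHip", id=11),
    ])

    # Hypothetical helper: look up a joint id by name, as the module later does for bodyparts_ids.
    def joint_id(model, name):
        return [node.id for _, _, node in RenderTree(model) if node.name == name][0]

    print(joint_id(demo, "RKnee"))  # -> 14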
@@ -90,32 +91,12 @@ MODEL = Node("CHip", id=None, children=[
     ]),
 ])
 
-# nb_joints = 17
-# MODEL = Node("None", id=None, children=[
-# Node("Origin", id=0),
-# Node("Board1", id=1),
-# Node("Board2", id=2),
-# Node("Board3", id=3),
-# Node("Board4", id=4),
-# Node("Furniture5", id=5),
-# Node("Furniture6", id=6),
-# Node("Furniture7", id=7),
-# Node("Screen8", id=8),
-# Node("Screen9", id=9),
-# Node("Furniture10", id=10),
-# Node("Furniture11", id=11),
-# Node("Furniture12", id=12),
-# Node("Furniture13", id=13),
-# Node("Furniture14", id=14),
-# Node("Furniture15", id=15),
-# Node("Table16", id=16)])
-
-
 
 
 ## FUNCTIONS
 def computeP(calib_file, undistort=False):
     '''
     Compute projection matrices from toml calibration file.
+    Zooming or moving cameras are handled.
 
     INPUT:
     - calib_file: calibration .toml file.
@@ -133,24 +114,47 @@ def computeP(calib_file, undistort=False):
         if cam != 'metadata':
             S = np.array(calib[cam]['size'])
             K = np.array(calib[cam]['matrix'])
 
+            if len(K.shape) == 2: # static camera
                 if undistort:
                     dist = np.array(calib[cam]['distortions'])
                     optim_K = cv2.getOptimalNewCameraMatrix(K, dist, [int(s) for s in S], 1, [int(s) for s in S])[0]
                     Kh = np.block([optim_K, np.zeros(3).reshape(3,1)])
                 else:
                     Kh = np.block([K, np.zeros(3).reshape(3,1)])
-            R, _ = cv2.Rodrigues(np.array(calib[cam]['rotation']))
+            elif len(K.shape) == 3: # zooming camera
+                if undistort:
+                    dist = np.array(calib[cam]['distortions'])
+                    optim_K = [cv2.getOptimalNewCameraMatrix(K[f], dist, [int(s) for s in S], 1, [int(s) for s in S])[0] for f in range(len(K))]
+                    Kh = [np.block([optim_K[f], np.zeros(3).reshape(3,1)]) for f in range(len(K))]
+                else:
+                    Kh = [np.block([K[f], np.zeros(3).reshape(3,1)]) for f in range(len(K))]
+
+            R = np.array(calib[cam]['rotation'])
             T = np.array(calib[cam]['translation'])
-            H = np.block([[R,T.reshape(3,1)], [np.zeros(3), 1 ]])
+            if len(R.shape) == 1: # static camera
+                R_mat, _ = cv2.Rodrigues(np.array(calib[cam]['rotation']))
+                H = np.block([[R_mat,T.reshape(3,1)], [np.zeros(3), 1 ]])
+            elif len(R.shape) == 2: # moving camera
+                R_mat = [cv2.Rodrigues(R[f])[0] for f in range(len(R))]
+                H = [np.block([[R_mat[f],T[f].reshape(3,1)], [np.zeros(3), 1 ]]) for f in range(len(R))]
 
-            P.append(Kh @ H)
+            if len(K.shape) == 2 and len(R.shape)==1: # static camera
+                P.append([Kh @ H])
+            elif len(K.shape) == 3 and len(R.shape)==1: # zooming camera
+                P.append([Kh[f] @ H for f in range(len(K))])
+            elif len(K.shape) == 2 and len(R.shape)==2: # moving camera
+                P.append([Kh @ H[f] for f in range(len(R))])
+            elif len(K.shape) == 3 and len(R.shape)==2: # zooming and moving camera
+                P.append([Kh[f] @ H[f] for f in range(len(K))])
 
-    return P
+    return np.array(P)
 
 
 def retrieve_calib_params(calib_file):
     '''
     Compute projection matrices from toml calibration file.
+    Zooming or moving cameras are handled.
 
     INPUT:
     - calib_file: calibration .toml file.
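The new branches in computeP infer the camera type purely from array shapes: a static camera stores one intrinsic matrix and one Rodrigues rotation vector, while zooming and moving cameras store one per frame. The sketch below illustrates those shape conventions as they appear after np.array(); it is an assumption inferred from the shape tests above, not a documented calibration-file specification, and the frame count F is arbitrary.

    import numpy as np

    F = 100  # hypothetical number of frames

    # Static camera: one 3x3 intrinsic matrix and one length-3 Rodrigues rotation vector.
    K_static = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
    R_static = np.array([0.1, -0.2, 0.05])
    print(len(K_static.shape), len(R_static.shape))  # 2 and 1 -> static branches

    # Zooming camera: one intrinsic matrix per frame -> shape (F, 3, 3), len(shape) == 3.
    K_zoom = np.array([K_static for _ in range(F)])

    # Moving camera: one rotation vector per frame -> shape (F, 3), len(shape) == 2.
    R_move = np.array([R_static for _ in range(F)])
    print(K_zoom.shape, R_move.shape)  # (100, 3, 3) (100, 3)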
@@ -172,9 +176,15 @@ def retrieve_calib_params(calib_file):
             S.append(np.array(calib[cam]['size']))
             K.append(np.array(calib[cam]['matrix']))
             dist.append(np.array(calib[cam]['distortions']))
+
+            if len(K[c].shape) == 2: # static camera
                 optim_K.append(cv2.getOptimalNewCameraMatrix(K[c], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0])
+            elif len(K[c].shape) == 3: # zooming camera
+                optim_K.append([cv2.getOptimalNewCameraMatrix(K[c][f], dist[c], [int(s) for s in S[c]], 1, [int(s) for s in S[c]])[0] for f in range(len(K[c]))])
+
             R.append(np.array(calib[cam]['rotation']))
             T.append(np.array(calib[cam]['translation']))
 
     calib_params = {'S': S, 'K': K, 'dist': dist, 'optim_K': optim_K, 'R': R, 'T': T}
+
     return calib_params
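With this change, retrieve_calib_params returns a per-frame list of optimal camera matrices for zooming cameras and a single matrix otherwise. Below is a hedged sketch of inspecting the returned dictionary, assuming Pose2Sim is installed so the module can be imported, and with 'Calib.toml' as a placeholder path.

    from Pose2Sim.Utilities.reproj_from_trc_calib import retrieve_calib_params

    calib_params = retrieve_calib_params('Calib.toml')  # placeholder path

    for c, K in enumerate(calib_params['K']):
        if len(K.shape) == 2:
            print(f'cam {c}: static intrinsics, optim_K is a single matrix')
        elif len(K.shape) == 3:
            n_frames = len(calib_params['optim_K'][c])  # one optimal matrix per frame
            print(f'cam {c}: zooming camera with {n_frames} per-frame optim_K matrices')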
@@ -248,10 +258,12 @@ def reproj_from_trc_calib_func(**args):
     toml calibration file.
 
     The output 2D points can be chosen to follow the DeepLabCut (default) or
-    the OpenPose format. If OpenPose is chosen, the BODY_25B model is used,
+    the OpenPose format. If OpenPose is chosen, the HALPE_26 model is used,
     with ear and eye at coordinates (0,0) since they are not used by Pose2Sim.
     You can change the MODEL tree to a different one if you need to reproject
-    in OpenPose format with a different model than BODY_25B.
+    in OpenPose format with a different model than HALPE_26.
 
+    New: Moving cameras and zooming cameras are now supported.
+
     Usage:
     from Pose2Sim.Utilities import reproj_from_trc_calib; reproj_from_trc_calib.reproj_from_trc_calib_func(input_trc_file = r'<input_trc_file>', input_calib_file = r'<input_calib_file>', openpose_output=True, deeplabcut_output=True, undistort_points=True, output_file_root = r'<output_file_root>')
@@ -268,6 +280,8 @@ def reproj_from_trc_calib_func(**args):
     output_file_root = args.get('output_file_root')
     if output_file_root == None:
         output_file_root = input_trc_file.replace('.trc', '_reproj')
+    if not os.path.exists(output_file_root):
+        os.makedirs(output_file_root, exist_ok=True)
     if not openpose_output and not deeplabcut_output:
         raise ValueError('Output_format must be specified either "openpose_output" (-o) or "deeplabcut_output (-d)"')
 
@@ -297,27 +311,29 @@ def reproj_from_trc_calib_func(**args):
         pass
 
     # header preparation
+    num_frames = min(P_all.shape[1], len(data_trc))
     columns_iterables = [['DavidPagnon'], ['person0'], bodyparts, ['x','y']]
     columns_h5 = pd.MultiIndex.from_product(columns_iterables, names=['scorer', 'individuals', 'bodyparts', 'coords'])
-    rows_iterables = [['labeled_data'], [filename], [f'img_{i:03d}.png' for i in range(len(data_trc))]]
+    rows_iterables = [[os.path.join(os.path.splitext(input_trc_file)[0],f'img_{i:03d}.png') for i in range(num_frames)]]
     rows_h5 = pd.MultiIndex.from_product(rows_iterables)
     data_h5 = pd.DataFrame(np.nan, index=rows_h5, columns=columns_h5)
 
     # Reproject 3D points on all cameras
     data_proj = [deepcopy(data_h5) for cam in range(len(P_all))] # copy data_h5 as many times as there are cameras
     Q = data_trc_zup.iloc[:,2:]
-    for frame in range(len(Q)):
+    for frame in range(num_frames):
         coords = [[] for cam in range(len(P_all))]
+        P_all_frame = [P_all[cam][frame] for cam in range(len(P_all))]
         for keypoint in range(num_bodyparts):
             q = np.append(Q.iloc[frame,3*keypoint:3*keypoint+3], 1)
             if undistort_points:
                 coords_2D_all = [cv2.projectPoints(np.array(q[:-1]), calib_params_R_filt[i], calib_params_T_filt[i], calib_params_K_filt[i], calib_params_dist_filt[i])[0] for i in range(len(P_all))]
-                x_all = [coords_2D_all[i][0,0,0] for i in range(len(P_all))]
+                x_all = [coords_2D_all[i][0,0,0] for i in range(len(P_all_frame))]
-                y_all = [coords_2D_all[i][0,0,1] for i in range(len(P_all))]
+                y_all = [coords_2D_all[i][0,0,1] for i in range(len(P_all_frame))]
             else:
-                x_all, y_all = reprojection(P_all, q)
+                x_all, y_all = reprojection(P_all_frame, q)
-            [coords[cam].extend([x_all[cam], y_all[cam]]) for cam in range(len(P_all))]
+            [coords[cam].extend([x_all[cam], y_all[cam]]) for cam in range(len(P_all_frame))]
-        for cam in range(len(P_all)):
+        for cam in range(len(P_all_frame)):
             data_proj[cam].iloc[frame,:] = coords[cam]
 
     # Save as h5 and csv if DeepLabCut format
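P_all now holds one 3x4 projection matrix per camera and per frame, and P_all_frame picks out the matrices for the current frame before each keypoint is projected. The reprojection helper itself is not part of this diff, so the following is only a generic sketch of the homogeneous pinhole projection it is assumed to perform, not the module's actual implementation.

    import numpy as np

    def reprojection_sketch(P_all_frame, q):
        '''Project a homogeneous 3D point q = [X, Y, Z, 1] with each camera's 3x4
        projection matrix for the current frame, then divide by the homogeneous
        coordinate to obtain pixel coordinates.'''
        x_all, y_all = [], []
        for P in P_all_frame:
            x, y, w = np.asarray(P) @ np.asarray(q)
            x_all.append(x / w)
            y_all.append(y / w)
        return x_all, y_all

    # Example: two identity-intrinsics cameras and a point 2 m in front of them.
    P_demo = [np.hstack([np.eye(3), np.zeros((3, 1))]) for _ in range(2)]
    print(reprojection_sketch(P_demo, [0.2, 0.1, 2.0, 1.0]))  # ([0.1, 0.1], [0.05, 0.05])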
@@ -331,13 +347,14 @@ def reproj_from_trc_calib_func(**args):
         [data_proj[i].to_csv(csv_files[i], sep=',', index=True, lineterminator='\n') for i in range(len(P_all))]
 
     # Save as json if OpenPose format
-    if openpose_output:
+    elif openpose_output:
         # read model tree
         model = MODEL
         print('Keypoint hierarchy:')
         for pre, _, node in RenderTree(model):
             print(f'{pre}{node.name} id={node.id}')
         bodyparts_ids = [[node.id for _, _, node in RenderTree(model) if node.name==b][0] for b in bodyparts]
+        nb_joints = len(bodyparts_ids)
         #prepare json files
         json_dict = {'version':1.3, 'people':[]}
         json_dict['people'] = [{'person_id':[-1],
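The OpenPose branch is cut off mid-dictionary in this hunk. As a hedged illustration of the target file format rather than the exact dictionary the function builds: an OpenPose-style JSON stores, for each person, a flat pose_keypoints_2d list with x, y and confidence per keypoint, ordered by the model ids shown above; keypoints that are absent from the .trc file (such as ears and eyes) simply stay at zero, as the docstring notes. All numbers and the output file name below are placeholders.

    import json

    nb_joints = 26                                 # e.g. len(bodyparts_ids) for a full HALPE_26 tree
    pose_keypoints_2d = [0.0] * (3 * nb_joints)    # x, y, confidence per keypoint

    # Fill keypoint id 16 (RAnkle in the tree above) with a reprojected point.
    x, y = 512.3, 890.7
    pose_keypoints_2d[3*16:3*16 + 3] = [x, y, 1.0]

    json_dict = {'version': 1.3,
                 'people': [{'person_id': [-1],
                             'pose_keypoints_2d': pose_keypoints_2d}]}
    with open('cam01_frame_000.json', 'w') as f:   # placeholder file name
        json.dump(json_dict, f)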