[vis] update realtime visualization
parent 0ab05bdafc
commit 0c2d5f2d64

config/vis3d/o3d_scene_smpl.yml (new file, +11)
config/vis3d/o3d_scene_smpl.yml (new file)

@@ -0,0 +1,11 @@
+parent: "config/vis3d/o3d_scene.yml"
+
+body_model:
+  module: "easymocap.smplmodel.body_model.SMPLlayer"
+  args:
+    _no_merge_: True
+    model_path: "data/smplx/smpl"
+    model_type: "smpl"
+    gender: "neutral"
+    device: "cuda"
+    regressor_path: "data/smplx/J_regressor_body25.npy"
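For orientation, a minimal sketch of how this block is consumed: VisOpen3DSocket (see the easymocap/socket/o3d.py hunks below) builds the body model with load_object(cfg.body_model.module, cfg.body_model.args), and the _no_merge_ flag is only read by the config merger (see the _merge_a_into_b hunk below), so it is not passed to the constructor. The direct equivalent would look roughly like this; treat it as an assumption-laden sketch, not part of the diff:

# Sketch only: the equivalent of load_object(cfg.body_model.module, cfg.body_model.args)
# for the args above ('_no_merge_' is popped during config merging, not forwarded).
from easymocap.smplmodel.body_model import SMPLlayer

body_model = SMPLlayer(
    model_path="data/smplx/smpl",
    model_type="smpl",
    gender="neutral",
    device="cuda",
    regressor_path="data/smplx/J_regressor_body25.npy",
)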
easymocap/config/vis_socket.py

@@ -2,8 +2,8 @@
 @ Date: 2021-05-30 11:17:18
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-12 14:56:00
-@ FilePath: /EasyMocapRelease/easymocap/config/vis_socket.py
+@ LastEditTime: 2021-06-22 10:35:26
+@ FilePath: /EasyMocap/easymocap/config/vis_socket.py
 '''
 from .baseconfig import CN
 from .baseconfig import Config as BaseConfig

@@ -35,6 +35,7 @@ class Config(BaseConfig):
         # skel
         cfg.skel = CN()
         cfg.skel.joint_radius = 0.02
+        cfg.body_model_template = "none"
         # camera
         cfg.camera = CN()
         cfg.camera.phi = 0
@@ -382,6 +382,9 @@ def _merge_a_into_b(a, b, root, key_list):
         isinstance(b, CfgNode),
         "`b` (cur type {}) must be an instance of {}".format(type(b), CfgNode),
     )
+    if '_no_merge_' in a.keys() and a['_no_merge_']:
+        b.clear()
+        a.pop('_no_merge_')

     for k, v_ in a.items():
         full_key = ".".join(key_list + [k])
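The _no_merge_ flag lets a child config replace, rather than merge into, the parent's node, which is what o3d_scene_smpl.yml relies on above. A rough illustration of the intended behaviour (plain dicts are used here only for clarity; the real code operates on CfgNode objects):

# Sketch of the merge semantics added above, using plain dicts for brevity.
def merge(a, b):
    if a.get('_no_merge_'):
        b.clear()            # drop everything inherited from the parent config
        a = dict(a)
        a.pop('_no_merge_')
    b.update(a)
    return b

parent_args = {'model_type': 'skel', 'joint_radius': 0.02}
child_args = {'_no_merge_': True, 'model_type': 'smpl', 'gender': 'neutral'}
print(merge(child_args, parent_args))  # parent keys are discarded, the child wins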
easymocap/smplmodel/body_model.py

@@ -2,8 +2,8 @@
 @ Date: 2020-11-18 14:04:10
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-05-27 20:35:10
-@ FilePath: /EasyMocapRelease/easymocap/smplmodel/body_model.py
+@ LastEditTime: 2021-06-22 13:44:10
+@ FilePath: /EasyMocap/easymocap/smplmodel/body_model.py
 '''
 import torch
 import torch.nn as nn

@@ -39,54 +39,91 @@ def load_regressor(regressor_path):
         import ipdb; ipdb.set_trace()
     return X_regressor

+def load_bodydata(model_path, gender):
+    if osp.isdir(model_path):
+        model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')
+        smpl_path = osp.join(model_path, model_fn)
+    else:
+        smpl_path = model_path
+    assert osp.exists(smpl_path), 'Path {} does not exist!'.format(
+        smpl_path)
+
+    with open(smpl_path, 'rb') as smpl_file:
+        data = pickle.load(smpl_file, encoding='latin1')
+    return data
+
 NUM_POSES = {'smpl': 72, 'smplh': 78, 'smplx': 66 + 12 + 9, 'mano': 9}
 NUM_SHAPES = 10
 NUM_EXPR = 10
 class SMPLlayer(nn.Module):
     def __init__(self, model_path, model_type='smpl', gender='neutral', device=None,
-        regressor_path=None) -> None:
+        regressor_path=None,
+        use_pose_blending=True, use_shape_blending=True, use_joints=True,
+        with_color=False,
+        **kwargs) -> None:
         super(SMPLlayer, self).__init__()
         dtype = torch.float32
         self.dtype = dtype
+        self.use_pose_blending = use_pose_blending
+        self.use_shape_blending = use_shape_blending
+        self.use_joints = use_joints
+
+        if isinstance(device, str):
+            device = torch.device(device)
         self.device = device
         self.model_type = model_type
         # create the SMPL model
-        if osp.isdir(model_path):
-            model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')
-            smpl_path = osp.join(model_path, model_fn)
+        data = load_bodydata(model_path, gender)
+        if with_color:
+            self.color = data['vertex_colors']
         else:
-            smpl_path = model_path
-        assert osp.exists(smpl_path), 'Path {} does not exist!'.format(
-            smpl_path)
-
-        with open(smpl_path, 'rb') as smpl_file:
-            data = pickle.load(smpl_file, encoding='latin1')
+            self.color = None
         self.faces = data['f']
         self.register_buffer('faces_tensor',
                              to_tensor(to_np(self.faces, dtype=np.int64),
                                        dtype=torch.long))
-        # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
-        num_pose_basis = data['posedirs'].shape[-1]
-        # 207 x 20670
-        posedirs = data['posedirs']
-        data['posedirs'] = np.reshape(data['posedirs'], [-1, num_pose_basis]).T
-
-        for key in ['J_regressor', 'v_template', 'weights', 'posedirs', 'shapedirs']:
+        for key in ['J_regressor', 'v_template', 'weights']:
             val = to_tensor(to_np(data[key]), dtype=dtype)
             self.register_buffer(key, val)
+        # add poseblending
+        if use_pose_blending:
+            # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
+            num_pose_basis = data['posedirs'].shape[-1]
+            # 207 x 20670
+            posedirs = data['posedirs']
+            data['posedirs'] = np.reshape(data['posedirs'], [-1, num_pose_basis]).T
+            val = to_tensor(to_np(data['posedirs']), dtype=dtype)
+            self.register_buffer('posedirs', val)
+        else:
+            self.posedirs = None
+        # add shape blending
+        if use_shape_blending:
+            val = to_tensor(to_np(data['shapedirs']), dtype=dtype)
+            self.register_buffer('shapedirs', val)
+        else:
+            self.shapedirs = None
+        if use_shape_blending:
+            self.J_shaped = None
+        else:
+            val = to_tensor(to_np(data['J']), dtype=dtype)
+            self.register_buffer('J_shaped', val)
+
+        self.nVertices = self.v_template.shape[0]
         # indices of parents for each joints
         parents = to_tensor(to_np(data['kintree_table'][0])).long()
         parents[0] = -1
         self.register_buffer('parents', parents)
-        if self.model_type == 'smplx':
-            # shape
-            self.num_expression_coeffs = 10
-            self.num_shapes = 10
-            self.shapedirs = self.shapedirs[:, :, :self.num_shapes+self.num_expression_coeffs]
-        elif self.model_type in ['smpl', 'smplh']:
-            self.shapedirs = self.shapedirs[:, :, :NUM_SHAPES]
+        if self.use_shape_blending:
+            if self.model_type == 'smplx':
+                # shape
+                self.num_expression_coeffs = 10
+                self.num_shapes = 10
+                self.shapedirs = self.shapedirs[:, :, :self.num_shapes+self.num_expression_coeffs]
+            elif self.model_type in ['smpl', 'smplh']:
+                self.shapedirs = self.shapedirs[:, :, :NUM_SHAPES]
         # joints regressor
-        if regressor_path is not None:
+        if regressor_path is not None and use_joints:
            X_regressor = load_regressor(regressor_path)
            X_regressor = torch.cat((self.J_regressor, X_regressor), dim=0)
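The constructor now takes feature switches in addition to the model paths. A hedged usage sketch (the values are illustrative, not from the diff): use_joints controls whether the extra joint regressor is attached, and with_color=True only works if the loaded model pickle actually contains 'vertex_colors'.

# Sketch: constructing the layer with the new switches (illustrative values).
model = SMPLlayer(
    model_path="data/smplx/smpl",
    model_type="smpl",
    gender="neutral",
    device="cuda",                      # a plain string is now accepted and converted
    regressor_path="data/smplx/J_regressor_body25.npy",
    use_pose_blending=True,
    use_shape_blending=True,
    use_joints=True,
    with_color=False,                   # True requires 'vertex_colors' in the model pickle
)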
@@ -95,14 +132,20 @@ class SMPLlayer(nn.Module):
                 j_J_regressor[i, i] = 1
             j_v_template = X_regressor @ self.v_template
             #
-            j_shapedirs = torch.einsum('vij,kv->kij', [self.shapedirs, X_regressor])
             # (25, 24)
             j_weights = X_regressor @ self.weights
-            j_posedirs = torch.einsum('ab, bde->ade', [X_regressor, torch.Tensor(posedirs)]).numpy()
-            j_posedirs = np.reshape(j_posedirs, [-1, num_pose_basis]).T
-            j_posedirs = to_tensor(j_posedirs)
-            self.register_buffer('j_posedirs', j_posedirs)
-            self.register_buffer('j_shapedirs', j_shapedirs)
+            if self.use_pose_blending:
+                j_posedirs = torch.einsum('ab, bde->ade', [X_regressor, torch.Tensor(posedirs)]).numpy()
+                j_posedirs = np.reshape(j_posedirs, [-1, num_pose_basis]).T
+                j_posedirs = to_tensor(j_posedirs)
+                self.register_buffer('j_posedirs', j_posedirs)
+            else:
+                self.j_posedirs = None
+            if self.use_shape_blending:
+                j_shapedirs = torch.einsum('vij,kv->kij', [self.shapedirs, X_regressor])
+                self.register_buffer('j_shapedirs', j_shapedirs)
+            else:
+                self.j_shapedirs = None
             self.register_buffer('j_weights', j_weights)
             self.register_buffer('j_v_template', j_v_template)
             self.register_buffer('j_J_regressor', j_J_regressor)

@@ -122,13 +165,22 @@ class SMPLlayer(nn.Module):
             self.use_flat_mean = True
         elif self.model_type == 'mano':
             # TODO:write this into config file
+            # self.num_pca_comps = 12
+            # self.use_pca = True
+            # if self.use_pca:
+            # NUM_POSES['mano'] = self.num_pca_comps + 3
+            # else:
+            # NUM_POSES['mano'] = 45 + 3
+            # self.use_flat_mean = True
+
             self.num_pca_comps = 12
             self.use_pca = True
+            self.use_flat_mean = True
             if self.use_pca:
                 NUM_POSES['mano'] = self.num_pca_comps + 3
             else:
                 NUM_POSES['mano'] = 45 + 3
-            self.use_flat_mean = True
             val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
             self.register_buffer('mHandsMean', val)
             val = to_tensor(to_np(data['hands_components'][:self.num_pca_comps, :]), dtype=dtype)

@@ -144,6 +196,7 @@ class SMPLlayer(nn.Module):
             self.register_buffer('mHandsComponents'+key[0], val)
             self.use_pca = True
             self.use_flat_mean = True
+        self.to(self.device)

     @staticmethod
     def extend_hand(poses, use_pca, use_flat_mean, coeffs, mean):

@@ -245,7 +298,8 @@ class SMPLlayer(nn.Module):
         poses = self.extend_pose(poses)
         return poses.detach().cpu().numpy()

-    def forward(self, poses, shapes, Rh=None, Th=None, expression=None, return_verts=True, return_tensor=True, only_shape=False, **kwargs):
+    def forward(self, poses, shapes, Rh=None, Th=None, expression=None,
+        return_verts=True, return_tensor=True, return_smpl_joints=False, only_shape=False, **kwargs):
         """ Forward pass for SMPL model

         Args:

@@ -285,31 +339,41 @@ class SMPLlayer(nn.Module):
             shapes = torch.cat([shapes, expression], dim=1)
         # process poses
         poses = self.extend_pose(poses)
-        if return_verts:
+        if return_verts or not self.use_joints:
             vertices, joints = lbs(shapes, poses, self.v_template,
                                    self.shapedirs, self.posedirs,
                                    self.J_regressor, self.parents,
-                                   self.weights, pose2rot=True, dtype=self.dtype)
+                                   self.weights, pose2rot=True, dtype=self.dtype,
+                                   use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
+            if not self.use_joints and not return_verts:
+                vertices = joints
         else:
             vertices, joints = lbs(shapes, poses, self.j_v_template,
                                    self.j_shapedirs, self.j_posedirs,
                                    self.j_J_regressor, self.parents,
-                                   self.j_weights, pose2rot=True, dtype=self.dtype, only_shape=only_shape)
-            vertices = vertices[:, self.J_regressor.shape[0]:, :]
+                                   self.j_weights, pose2rot=True, dtype=self.dtype, only_shape=only_shape,
+                                   use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
+            if return_smpl_joints:
+                vertices = vertices[:, :self.J_regressor.shape[0], :]
+            else:
+                vertices = vertices[:, self.J_regressor.shape[0]:, :]
         vertices = torch.matmul(vertices, rot.transpose(1, 2)) + transl
         if not return_tensor:
             vertices = vertices.detach().cpu().numpy()
         return vertices

-    def init_params(self, nFrames):
+    def init_params(self, nFrames=1, nShapes=1, ret_tensor=False):
         params = {
             'poses': np.zeros((nFrames, NUM_POSES[self.model_type])),
-            'shapes': np.zeros((1, NUM_SHAPES)),
+            'shapes': np.zeros((nShapes, NUM_SHAPES)),
             'Rh': np.zeros((nFrames, 3)),
             'Th': np.zeros((nFrames, 3)),
         }
         if self.model_type == 'smplx':
             params['expression'] = np.zeros((nFrames, NUM_EXPR))
+        if ret_tensor:
+            for key in params.keys():
+                params[key] = to_tensor(params[key], self.dtype, self.device)
         return params

     def check_params(self, body_params):
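A hedged sketch of how the extended interface fits together; the parameter dict comes straight from init_params, and return_smpl_joints, nShapes, and ret_tensor are the new options introduced above (model is an SMPLlayer built as in the earlier sketch):

# Sketch: zero-pose parameters and forward passes with the new options.
params = model.init_params(nFrames=1, nShapes=1, ret_tensor=False)   # numpy dict: poses, shapes, Rh, Th
verts = model(return_verts=True, return_tensor=False, **params)      # (1, nVertices, 3)
joints = model(return_verts=False, return_tensor=False,
               return_smpl_joints=True, **params)                    # SMPL joints instead of the regressed set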
@@ -140,7 +140,8 @@ def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):


 def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
-        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False):
+        lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
+        use_shape_blending=True, use_pose_blending=True, J_shaped=None):
     ''' Performs Linear Blend Skinning with the given shape and pose parameters

     Parameters

@@ -183,32 +184,34 @@ def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
     device = betas.device

     # Add shape contribution
-    v_shaped = v_template + blend_shapes(betas, shapedirs)
+    if use_shape_blending:
+        v_shaped = v_template + blend_shapes(betas, shapedirs)
+        # Get the joints
+        # NxJx3 array
+        J = vertices2joints(J_regressor, v_shaped)
+    else:
+        v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
+        assert J_shaped is not None
+        J = J_shaped[None].expand(batch_size, -1, -1)

-    # Get the joints
-    # NxJx3 array
-    J = vertices2joints(J_regressor, v_shaped)
     if only_shape:
         return v_shaped, J
     # 3. Add pose blend shapes
     # N x J x 3 x 3
-    ident = torch.eye(3, dtype=dtype, device=device)
     if pose2rot:
         rot_mats = batch_rodrigues(
             pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
-
-        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
-        # (N x P) x (P, V * 3) -> N x V x 3
-        pose_offsets = torch.matmul(pose_feature, posedirs) \
-            .view(batch_size, -1, 3)
     else:
-        pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
         rot_mats = pose.view(batch_size, -1, 3, 3)

-        pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
-                                    posedirs).view(batch_size, -1, 3)
-
-    v_posed = pose_offsets + v_shaped
+    if use_pose_blending:
+        ident = torch.eye(3, dtype=dtype, device=device)
+        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
+        pose_offsets = torch.matmul(pose_feature, posedirs) \
+            .view(batch_size, -1, 3)
+        v_posed = pose_offsets + v_shaped
+    else:
+        v_posed = v_shaped
     # 4. Get the global joint location
     J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
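The two new switches let lbs skip the learned blend shapes: with use_shape_blending=False the caller must supply precomputed rest-pose joints via J_shaped, and with use_pose_blending=False the pose-corrective offsets are dropped. A small self-contained sketch of the shape-blending fallback (tensor sizes are the usual SMPL ones and are used here only for illustration):

import torch

# Sketch of the use_shape_blending=False path added above (sizes are illustrative).
batch_size, nVerts, nJoints = 2, 6890, 24
v_template = torch.zeros(nVerts, 3)
J_shaped = torch.zeros(nJoints, 3)

v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)   # no betas applied
assert J_shaped is not None
J = J_shaped[None].expand(batch_size, -1, -1)                   # precomputed rest joints
print(v_shaped.shape, J.shape)  # torch.Size([2, 6890, 3]) torch.Size([2, 24, 3])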
easymocap/socket/base_client.py

@@ -2,11 +2,11 @@
 @ Date: 2021-05-25 13:39:07
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-04 16:43:39
-@ FilePath: /EasyMocapRelease/easymocap/socket/base_client.py
+@ LastEditTime: 2021-06-16 14:42:35
+@ FilePath: /EasyMocap/easymocap/socket/base_client.py
 '''
 import socket
-from .utils import encode_detect
+from .utils import encode_detect, encode_smpl

 class BaseSocketClient:
     def __init__(self, host, port) -> None:

@@ -21,5 +21,10 @@ class BaseSocketClient:
         self.s.send(bytes('{}\n'.format(len(val)), 'ascii'))
         self.s.sendall(val)

+    def send_smpl(self, data):
+        val = encode_smpl(data)
+        self.s.send(bytes('{}\n'.format(len(val)), 'ascii'))
+        self.s.sendall(val)
+
     def close(self):
         self.s.close()
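A hedged usage sketch of the new client method; the host, port, and parameter values are illustrative, and the dict layout mirrors the keys encode_smpl serializes (poses, shapes, Rh, Th), plus an id field the visualizer uses for coloring:

import numpy as np
from easymocap.socket.base_client import BaseSocketClient

# Sketch only: send one person's SMPL parameters to the realtime viewer.
client = BaseSocketClient('127.0.0.1', 9999)   # host and port are assumptions
data = [{
    'id': 0,
    'poses': np.zeros((1, 72)).tolist(),       # 72 pose parameters for SMPL
    'shapes': np.zeros((1, 10)).tolist(),
    'Rh': np.zeros((1, 3)).tolist(),
    'Th': np.zeros((1, 3)).tolist(),
}]
client.send_smpl(data)
client.close()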
easymocap/socket/o3d.py

@@ -2,7 +2,7 @@
 @ Date: 2021-05-25 11:15:53
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-12 14:54:43
+@ LastEditTime: 2021-06-25 21:16:02
 @ FilePath: /EasyMocapRelease/easymocap/socket/o3d.py
 '''
 import open3d as o3d

@@ -16,17 +16,26 @@ import numpy as np
 from os.path import join
 import os
 from ..assignment.criterion import CritRange
+import copy
+
+rotate = False
+def o3d_callback_rotate(vis):
+    global rotate
+    rotate = not rotate
+    return False

 class VisOpen3DSocket(BaseSocket):
     def __init__(self, host, port, cfg) -> None:
         # output
         self.write = cfg.write
         self.out = cfg.out
+        self.cfg = cfg
         if self.write:
             print('[Info] capture the screen to {}'.format(self.out))
             os.makedirs(self.out, exist_ok=True)
         # scene
-        vis = o3d.visualization.Visualizer()
+        vis = o3d.visualization.VisualizerWithKeyCallback()
+        vis.register_key_callback(ord('A'), o3d_callback_rotate)
         vis.create_window(window_name='Visualizer', width=cfg.width, height=cfg.height)
         self.vis = vis
         # load the scene

@@ -41,10 +50,16 @@ class VisOpen3DSocket(BaseSocket):
         # create vis => update => super() init
         super().__init__(host, port, debug=cfg.debug)
         self.block = cfg.block
+        if os.path.exists(cfg.body_model_template):
+            body_template = o3d.io.read_triangle_mesh(cfg.body_model_template)
+            self.body_template = body_template
+        else:
+            self.body_template = None
         self.body_model = load_object(cfg.body_model.module, cfg.body_model.args)
         zero_params = self.body_model.init_params(1)
         self.max_human = cfg.max_human
         self.track = cfg.track
+        self.filter = cfg.filter
         self.camera_pose = cfg.camera.camera_pose
         self.init_camera(cfg.camera.camera_pose)
         self.zero_vertices = Vector3dVector(np.zeros((self.body_model.nVertices, 3)))

@@ -59,13 +74,35 @@ class VisOpen3DSocket(BaseSocket):
         self.new_frames = cfg.new_frames

     def add_human(self, zero_params):
-        vertices = self.body_model(zero_params)[0]
+        vertices = self.body_model(return_verts=True, return_tensor=False, **zero_params)[0]
         self.vertices.append(vertices)
-        mesh = create_mesh(vertices=vertices, faces=self.body_model.faces)
+        if self.body_template is None: # create template
+            mesh = create_mesh(vertices=vertices, faces=self.body_model.faces, colors=self.body_model.color)
+        else:
+            mesh = copy.deepcopy(self.body_template)
         self.meshes.append(mesh)
         self.vis.add_geometry(mesh)
         self.init_camera(self.camera_pose)

+    @staticmethod
+    def set_camera(cfg, camera_pose):
+        theta, phi = np.deg2rad(-(cfg.camera.theta + 90)), np.deg2rad(cfg.camera.phi)
+        theta = theta + np.pi
+        st, ct = np.sin(theta), np.cos(theta)
+        sp, cp = np.sin(phi), np.cos(phi)
+        rot_x = np.array([
+            [1., 0., 0.],
+            [0., ct, -st],
+            [0, st, ct]
+        ])
+        rot_z = np.array([
+            [cp, -sp, 0],
+            [sp, cp, 0.],
+            [0., 0., 1.]
+        ])
+        camera_pose[:3, :3] = rot_x @ rot_z
+        return camera_pose
+
     def init_camera(self, camera_pose):
         ctr = self.vis.get_view_control()
         init_param = ctr.convert_to_pinhole_camera_parameters()

@@ -73,6 +110,11 @@ class VisOpen3DSocket(BaseSocket):
         init_param.extrinsic = np.array(camera_pose)
         ctr.convert_from_pinhole_camera_parameters(init_param)

+    def get_camera(self):
+        ctr = self.vis.get_view_control()
+        init_param = ctr.convert_to_pinhole_camera_parameters()
+        return np.array(init_param.extrinsic)
+
     def filter_human(self, datas):
         datas_new = []
         for data in datas:

@@ -90,15 +132,33 @@ class VisOpen3DSocket(BaseSocket):

     def main(self, datas):
         if self.debug:log('[Info] Load data {}'.format(self.count))
-        datas = json.loads(datas)
-        datas = self.filter_human(datas)
+        if isinstance(datas, str):
+            datas = json.loads(datas)
+        for data in datas:
+            for key in data.keys():
+                if key == 'id':
+                    continue
+                data[key] = np.array(data[key])
+            if 'keypoints3d' not in data.keys() and self.filter:
+                data['keypoints3d'] = self.body_model(return_verts=False, return_tensor=False, **data)[0]
+        if self.filter:
+            datas = self.filter_human(datas)
         with Timer('forward'):
+            params = []
             for i, data in enumerate(datas):
                 if i >= len(self.meshes):
                     print('[Error] the number of human exceeds!')
-                    self.add_human(np.array(data['keypoints3d']))
-                vertices = self.body_model(data['keypoints3d'])
-                self.vertices[i] = Vector3dVector(vertices[0])
+                    self.add_human(data)
+                if 'vertices' in data.keys():
+                    vertices = data['vertices']
+                    self.vertices[i] = Vector3dVector(vertices)
+                else:
+                    params.append(data)
+            if len(params) > 0:
+                params = self.body_model.merge_params(params, share_shape=False)
+                vertices = self.body_model(return_verts=True, return_tensor=False, **params)
+                for i in range(vertices.shape[0]):
+                    self.vertices[i] = Vector3dVector(vertices[i])
             for i in range(len(datas), len(self.meshes)):
                 self.vertices[i] = self.zero_vertices
             # Open3D will lock the thread here

@@ -109,6 +169,12 @@ class VisOpen3DSocket(BaseSocket):
                 col = get_rgb_01(datas[i]['id'])
                 self.meshes[i].paint_uniform_color(col)

+    def o3dcallback(self):
+        if rotate:
+            self.cfg.camera.phi += np.pi/10
+            camera_pose = self.set_camera(self.cfg, self.get_camera())
+            self.init_camera(camera_pose)
+
     def update(self):
         if self.disconnect and not self.block:
             self.previous.clear()

@@ -123,6 +189,7 @@ class VisOpen3DSocket(BaseSocket):
             for mesh in self.meshes:
                 mesh.compute_triangle_normals()
                 self.vis.update_geometry(mesh)
+            self.o3dcallback()
             self.vis.poll_events()
             self.vis.update_renderer()
             if self.write:

@@ -132,5 +199,6 @@ class VisOpen3DSocket(BaseSocket):
                 self.count += 1
         else:
             with Timer('update renderer', True):
+                self.o3dcallback()
                 self.vis.poll_events()
                 self.vis.update_renderer()
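Taken together, main() now accepts per-person dicts in three forms: precomputed 'vertices', SMPL parameters (batched through merge_params and a single body_model forward call), or raw 'keypoints3d' when a skeleton body model is configured. Pressing 'A' in the viewer toggles the new camera auto-rotation. A hedged sketch of the accepted payloads (shapes are the usual SMPL and BODY25 sizes and are assumptions here):

# Sketch: the three per-person payloads the updated main() can handle.
person_smpl = {'id': 0, 'poses': [[0.0]*72], 'shapes': [[0.0]*10],
               'Rh': [[0.0]*3], 'Th': [[0.0]*3]}                    # routed through merge_params + forward
person_mesh = {'id': 1, 'vertices': [[0.0, 0.0, 0.0]]*6890}         # used directly as mesh vertices
person_skel = {'id': 2, 'keypoints3d': [[0.0, 0.0, 0.0, 1.0]]*25}   # for a skeleton body model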
easymocap/socket/utils.py

@@ -2,8 +2,8 @@
 @ Date: 2021-05-24 20:07:34
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-04 16:29:35
-@ FilePath: /EasyMocapRelease/media/qing/Project/mirror/EasyMocap/easymocap/socket/utils.py
+@ LastEditTime: 2021-06-16 14:42:23
+@ FilePath: /EasyMocap/easymocap/socket/utils.py
 '''
 import cv2
 import numpy as np

@@ -14,6 +14,11 @@ def encode_detect(data):
     res = res.replace('\r', '').replace('\n', '').replace(' ', '')
     return res.encode('ascii')

+def encode_smpl(data):
+    res = write_common_results(None, data, ['poses', 'shapes', 'Rh', 'Th'])
+    res = res.replace('\r', '').replace('\n', '').replace(' ', '')
+    return res.encode('ascii')
+
 def encode_image(image):
     fourcc = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
     #frame을 binary 형태로 변환 jpg로 decoding
easymocap/visualize/geometry.py

@@ -2,13 +2,65 @@
 @ Date: 2021-01-17 22:44:34
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-05-25 14:01:24
+@ LastEditTime: 2021-06-20 17:24:12
 @ FilePath: /EasyMocap/easymocap/visualize/geometry.py
 '''
 import numpy as np
 import cv2
 import numpy as np
 from tqdm import tqdm
+from os.path import join
+
+def load_sphere():
+    cur_dir = os.path.dirname(__file__)
+    faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=np.int)
+    vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))
+    return vertices, faces
+
+def load_cylinder():
+    cur_dir = os.path.dirname(__file__)
+    faces = np.loadtxt(join(cur_dir, 'cylinder_faces_20.txt'), dtype=np.int)
+    vertices = np.loadtxt(join(cur_dir, 'cylinder_vertices_20.txt'))
+    return vertices, faces
+
+def create_point(points, r=0.01):
+    """ create sphere
+
+    Args:
+        points (array): (N, 3)/(N, 4)
+        r (float, optional): radius. Defaults to 0.01.
+    """
+    nPoints = points.shape[0]
+    vert, face = load_sphere()
+    nVerts = vert.shape[0]
+    vert = vert[None, :, :].repeat(points.shape[0], 0)
+    vert = vert + points[:, None, :]
+    verts = np.vstack(vert)
+    face = face[None, :, :].repeat(points.shape[0], 0)
+    face = face + nVerts * np.arange(nPoints).reshape(nPoints, 1, 1)
+    faces = np.vstack(face)
+    return {'vertices': verts, 'faces': faces, 'name': 'points'}
+
+def calRot(axis, direc):
+    direc = direc/np.linalg.norm(direc)
+    axis = axis/np.linalg.norm(axis)
+    rotdir = np.cross(axis, direc)
+    rotdir = rotdir/np.linalg.norm(rotdir)
+    rotdir = rotdir * np.arccos(np.dot(direc, axis))
+    rotmat, _ = cv2.Rodrigues(rotdir)
+    return rotmat
+
+def create_line(start, end, r=0.01, col=None):
+    length = np.linalg.norm(end[:3] - start[:3])
+    vertices, faces = load_cylinder()
+    vertices[:, :2] *= r
+    vertices[:, 2] *= length/2
+    rotmat = calRot(np.array([0, 0, 1]), end - start)
+    vertices = vertices @ rotmat.T + (start + end)/2
+    ret = {'vertices': vertices, 'faces': faces, 'name': 'line'}
+    if col is not None:
+        ret['colors'] = col.reshape(-1, 3).repeat(vertices.shape[0], 0)
+    return ret
+
 def create_ground(
     center=[0, 0, 0], xdir=[1, 0, 0], ydir=[0, 1, 0], # 位置
easymocap/visualize/o3dwrapper.py

@@ -2,17 +2,25 @@
 @ Date: 2021-04-25 15:52:01
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-05-25 11:48:49
+@ LastEditTime: 2021-06-20 15:22:59
 @ FilePath: /EasyMocap/easymocap/visualize/o3dwrapper.py
 '''
 import open3d as o3d
 import numpy as np
+from .geometry import create_ground as create_ground_
+from .geometry import create_point as create_point_
+from .geometry import create_line as create_line_

 Vector3dVector = o3d.utility.Vector3dVector
 Vector3iVector = o3d.utility.Vector3iVector
 Vector2iVector = o3d.utility.Vector2iVector
 TriangleMesh = o3d.geometry.TriangleMesh
 load_mesh = o3d.io.read_triangle_mesh
+vis = o3d.visualization.draw_geometries
+
+def _create_cylinder():
+    # create_cylinder(radius=1.0, height=2.0, resolution=20, split=4, create_uv_map=False)
+    pass

 def create_mesh(vertices, faces, colors=None, **kwargs):
     mesh = TriangleMesh()

@@ -25,14 +33,20 @@ def create_mesh(vertices, faces, colors=None, **kwargs):
     mesh.compute_vertex_normals()
     return mesh

+def create_point(**kwargs):
+    return create_mesh(**create_point_(**kwargs))
+
+def create_line(**kwargs):
+    return create_mesh(**create_line_(**kwargs))
+
 def create_ground(**kwargs):
-    from .geometry import create_ground as create_ground_
     ground = create_ground_(**kwargs)
     return create_mesh(**ground)

-def create_coord(camera = [0,0,0], radius=1):
+def create_coord(camera = [0,0,0], radius=1, scale=1):
     camera_frame = TriangleMesh.create_coordinate_frame(
         size=radius, origin=camera)
+    camera_frame.scale(scale)
     return camera_frame

 def create_bbox(min_bound=(-3., -3., 0), max_bound=(3., 3., 2), flip=False):
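A hedged sketch of the new wrappers; the coordinates are made up. create_point_ and create_line_ (from geometry.py above) return plain vertices/faces dicts built from the bundled sphere and cylinder meshes, and create_mesh converts them into Open3D TriangleMesh objects:

import numpy as np
from easymocap.visualize.o3dwrapper import create_point, create_line, vis

# Sketch only: two joint spheres and the limb between them (made-up coordinates).
pts = np.array([[0.0, 0.0, 1.0], [0.3, 0.0, 1.4]])
spheres = create_point(points=pts, r=0.02)
limb = create_line(start=pts[0], end=pts[1], r=0.01)
vis([spheres, limb])  # vis is the new alias for o3d.visualization.draw_geometries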
easymocap/visualize/skelmodel.py

@@ -2,7 +2,7 @@
 @ Date: 2021-01-17 21:38:19
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-04 15:52:49
+@ LastEditTime: 2021-06-18 18:48:37
 @ FilePath: /EasyMocap/easymocap/visualize/skelmodel.py
 '''
 import numpy as np

@@ -24,9 +24,12 @@ def calTransformation(v_i, v_j, r, adaptr=False, ratio=10):
     length = np.linalg.norm(direc)
     direc = direc/length
     rotdir = np.cross(xaxis, direc)
-    rotdir = rotdir/np.linalg.norm(rotdir)
-    rotdir = rotdir * np.arccos(np.dot(direc, xaxis))
-    rotmat, _ = cv2.Rodrigues(rotdir)
+    if np.linalg.norm(rotdir) > 1e-3:
+        rotdir = rotdir/np.linalg.norm(rotdir)
+        rotdir = rotdir * np.arccos(np.dot(direc, xaxis))
+        rotmat, _ = cv2.Rodrigues(rotdir)
+    else:
+        rotmat = np.eye(3)
     # set the minimal radius for the finger and face
     shrink = min(max(length/ratio, 0.005), 0.05)
     eigval = np.array([[length/2/r, 0, 0], [0, shrink, 0], [0, 0, shrink]])

@@ -44,6 +47,7 @@ class SkelModel:
         config = CONFIG[body_type]
         self.nJoints = config['nJoints']
         self.kintree = config['kintree']
+        self.device = 'none'
         cur_dir = os.path.dirname(__file__)
         faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=np.int)
         self.vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))

@@ -54,34 +58,54 @@ class SkelModel:
         for nk in range(len(self.kintree)):
             faces_all.append(faces + self.nJoints*self.vertices.shape[0] + nk*self.vertices.shape[0])
         self.faces = np.vstack(faces_all)
+        self.color = None
         self.nVertices = self.vertices.shape[0] * self.nJoints + self.vertices.shape[0] * len(self.kintree)
         self.joint_radius = joint_radius

     def __call__(self, keypoints3d, id=0, return_verts=True, return_tensor=False, **kwargs):
-        vertices_all = []
+        if len(keypoints3d.shape) == 2:
+            keypoints3d = keypoints3d[None]
+        if not return_verts:
+            return keypoints3d
+        if keypoints3d.shape[-1] == 3: # add confidence
+            keypoints3d = np.hstack((keypoints3d, np.ones((keypoints3d.shape[0], 1))))
         r = self.joint_radius
         # joints
-        for nj in range(self.nJoints):
-            if keypoints3d[nj, -1] < 0.01:
-                vertices_all.append(self.vertices*0.001)
-                continue
-            vertices_all.append(self.vertices*r + keypoints3d[nj:nj+1, :3])
-        # limb
-        for nk, (i, j) in enumerate(self.kintree):
-            if keypoints3d[i][-1] < 0.1 or keypoints3d[j][-1] < 0.1:
-                vertices_all.append(self.vertices*0.001)
-                continue
-            T, _, length = calTransformation(keypoints3d[i, :3], keypoints3d[j, :3], r=1)
-            if length > 2: # 超过两米的
-                vertices_all.append(self.vertices*0.001)
-                continue
-            vertices = self.vertices @ T[:3, :3].T + T[:3, 3:].T
-            vertices_all.append(vertices)
-        vertices = np.vstack(vertices_all)
-        return vertices[None, :, :]
+        min_conf = 0.1
+        verts_final = []
+        for nper in range(keypoints3d.shape[0]):
+            vertices_all = []
+            kpts3d = keypoints3d[nper]
+            for nj in range(self.nJoints):
+                if kpts3d[nj, -1] < min_conf:
+                    vertices_all.append(self.vertices*0.001)
+                    continue
+                vertices_all.append(self.vertices*r + kpts3d[nj:nj+1, :3])
+            # limb
+            for nk, (i, j) in enumerate(self.kintree):
+                if kpts3d[i][-1] < min_conf or kpts3d[j][-1] < min_conf:
+                    vertices_all.append(self.vertices*0.001)
+                    continue
+                T, _, length = calTransformation(kpts3d[i, :3], kpts3d[j, :3], r=1)
+                if length > 2: # 超过两米的
+                    vertices_all.append(self.vertices*0.001)
+                    continue
+                vertices = self.vertices @ T[:3, :3].T + T[:3, 3:].T
+                vertices_all.append(vertices)
+            vertices = np.vstack(vertices_all)
+            verts_final.append(vertices)
+        verts_final = np.stack(verts_final)
+        return verts_final
+
+    def to(self, none):
+        pass
+
+    def merge_params(self, params, share_shape=False):
+        keypoints = np.stack([param['keypoints3d'] for param in params])
+        return {'keypoints3d': keypoints, 'id': [0]}

     def init_params(self, nFrames):
-        return np.zeros((self.nJoints, 4))
+        return {'keypoints3d': np.zeros((self.nJoints, 4))}

 class SMPLSKEL:
     def __init__(self, model_type, gender, body_type) -> None:
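With these additions SkelModel exposes the same surface the visualizer expects from SMPLlayer (to, merge_params, a dict-returning init_params, and a batched __call__). A hedged sketch of the batched call, assuming skel_model is an already-constructed SkelModel (its constructor arguments are not part of this diff) and using made-up keypoints:

import numpy as np

# Sketch only: two people, 25 joints each, xyz + confidence (values are made up).
kpts = np.random.rand(2, 25, 4)
kpts[..., 3] = 1.0

params = skel_model.merge_params([{'keypoints3d': kpts[0]}, {'keypoints3d': kpts[1]}])
verts = skel_model(return_verts=True, return_tensor=False, **params)
print(verts.shape)  # (2, nVertices, 3): one spheres-and-limbs mesh per person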