🚀 Update the support for MANO

This commit is contained in:
shuaiqing 2021-05-27 21:12:17 +08:00
parent 81ef081211
commit fa4bd6ddaa
14 changed files with 303 additions and 211 deletions

View File

@ -2,12 +2,22 @@
* @Date: 2021-01-24 22:30:40
* @Author: Qing Shuai
* @LastEditors: Qing Shuai
* @LastEditTime: 2021-01-24 22:32:53
* @LastEditTime: 2021-05-27 21:10:07
* @FilePath: /EasyMocapRelease/doc/log.md
-->
## 2020.01.24
## 2021.01.24
1. Support the SMPL+H and SMPL-X models.
2. Upgrade `body_model.py`.
3. Update the optimization functions.
4. Add limb-length checking.
5. Update the example figures.
## 2021.04.13
1. Add the mirrored-human code.
2. Add `calibration` and `annotator`.
3. Add `setup.py`.
## 2021.05.27
1. Remove the `code/` folder.
2. Add support for the MANO model.
3. Add the `--write_smpl_full` flag.

View File

@ -2,11 +2,10 @@
@ Date: 2021-01-13 16:53:55
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 15:59:35
@ FilePath: /EasyMocap/easymocap/dataset/base.py
@ LastEditTime: 2021-05-27 20:37:55
@ FilePath: /EasyMocapRelease/easymocap/dataset/base.py
'''
import os
import json
from os.path import join
from glob import glob
import cv2
@ -14,13 +13,13 @@ import os, sys
import numpy as np
from ..mytools.camera_utils import read_camera, get_fundamental_matrix, Undistort
from ..mytools import FileWriter, read_annot, getFileList
from ..mytools.reader import read_keypoints3d, read_json
from ..mytools import FileWriter, read_annot, getFileList, save_json
from ..mytools.reader import read_keypoints3d, read_json, read_smpl
# from ..mytools.writer import FileWriter
# from ..mytools.camera_utils import read_camera, undistort, write_camera, get_fundamental_matrix
# from ..mytools.vis_base import merge, plot_bbox, plot_keypoints
# from ..mytools.file_utils import read_json, save_json, read_annot, read_smpl, write_smpl, get_bbox_from_pose
# from ..mytools.file_utils import merge_params, select_nf, getFileList
from ..mytools.file_utils import merge_params, select_nf
def crop_image(img, annot, vis_2d=False, config={}, crop_square=True):
for det in annot:
@ -74,8 +73,6 @@ class ImageFolder:
self.imagelist.extend(images)
annots = sorted([join(sub, i) for i in os.listdir(join(self.annot_root, sub))])
self.annotlist.extend(annots)
# output
assert out is not None
self.out = out
self.writer = FileWriter(self.out, config=config)
self.gtK, self.gtRT = False, False
@ -132,6 +129,10 @@ class ImageFolder:
def write_keypoints3d(self, results, nf):
outname = join(self.out, 'keypoints3d', '{}.json'.format(self.basename(nf)))
self.writer.write_keypoints3d(results, outname)
def write_vertices(self, results, nf):
outname = join(self.out, 'vertices', '{}.json'.format(self.basename(nf)))
self.writer.write_vertices(results, outname)
def write_smpl(self, results, nf):
outname = join(self.out, 'smpl', '{}.json'.format(self.basename(nf)))
@ -144,20 +145,21 @@ class ImageFolder:
camera[key] = camera[key][None, :, :]
self.writer.vis_smpl(render_data, images, camera, outname, add_back=True)
class VideoFolder(ImageFolder):
"一段视频的图片的文件夹"
def __init__(self, root, name, out=None,
image_root='images', annot_root='annots',
kpts_type='body15', config={}, no_img=False) -> None:
self.root = root
self.image_root = join(root, image_root, name)
self.annot_root = join(root, annot_root, name)
self.name = name
self.kpts_type = kpts_type
self.no_img = no_img
self.imagelist = sorted(os.listdir(self.image_root))
self.annotlist = sorted(os.listdir(self.annot_root))
self.ret_crop = False
# class VideoFolder(ImageFolder):
# "一段视频的图片的文件夹"
# def __init__(self, root, name, out=None,
# image_root='images', annot_root='annots',
# kpts_type='body15', config={}, no_img=False) -> None:
# self.root = root
# self.image_root = join(root, image_root, name)
# self.annot_root = join(root, annot_root, name)
# self.name = name
# self.kpts_type = kpts_type
# self.no_img = no_img
# self.imagelist = sorted(os.listdir(self.image_root))
# self.annotlist = sorted(os.listdir(self.annot_root))
# self.ret_crop = False
# self.gtK, self.gtRT = False, False
def load_annot_all(self, path):
# This does not use personID; it simply lists all annotations
@ -363,7 +365,10 @@ def load_cameras(path):
print('\n\n!!!there is no camera parameters, maybe bug: \n', intri_name, extri_name, '\n')
cameras = None
return cameras
def numpy_to_list(array, precision=3):
return np.round(array, precision).tolist()
class MVBase:
""" Dataset for multiview data
"""
@ -498,24 +503,33 @@ class MVBase:
images = [images[i] for i in valid_idx]
lDetections = [lDetections[i] for i in valid_idx]
return self.writer.vis_keypoints2d_mv(images, lDetections, outname=outname, vis_id=False)
def vis_match(self, images, lDetections, nf, to_img=True, sub_vis=[]):
if len(sub_vis) != 0:
valid_idx = [self.cams.index(i) for i in sub_vis]
images = [images[i] for i in valid_idx]
lDetections = [lDetections[i] for i in valid_idx]
return self.writer.vis_detections(images, lDetections, nf,
key='match', to_img=to_img, vis_id=True)
def basename(self, nf):
return '{:06d}'.format(nf)
def write_keypoints2d(self, lDetections, nf):
for nv in range(len(lDetections)):
cam = self.cams[nv]
annname = join(self.annot_root, cam, self.annotlist[cam][nf])
outname = join(self.out, 'keypoints2d', cam, self.annotlist[cam][nf])
annot_origin = read_json(annname)
annots = lDetections[nv]
results = []
for annot in annots:
results.append({
'personID': annot['id'],
'bbox': numpy_to_list(annot['bbox'], 2),
'keypoints': numpy_to_list(annot['keypoints'], 2)
})
annot_origin['annots'] = results
save_json(outname, annot_origin)
def write_keypoints3d(self, results, nf):
outname = join(self.out, 'keypoints3d', self.basename(nf)+'.json')
self.writer.write_keypoints3d(results, outname)
def write_smpl(self, results, nf):
outname = join(self.out, 'smpl', self.basename(nf)+'.json')
def write_smpl(self, results, nf, mode='smpl'):
outname = join(self.out, mode, self.basename(nf)+'.json')
self.writer.write_smpl(results, outname)
def vis_smpl(self, peopleDict, faces, images, nf, sub_vis=[],
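The new `write_keypoints2d` path rounds the numpy detections with `numpy_to_list` before writing them back into the original annot JSON. A minimal sketch of that conversion with toy values (standalone copy of the helper; the real call site is above):

```python
import json
import numpy as np

def numpy_to_list(array, precision=3):
    # same idea as the helper above: round, then convert to plain Python lists
    return np.round(array, precision).tolist()

keypoints = np.array([[120.3456, 230.9876, 0.912],
                      [121.0012, 228.4567, 0.870]])
bbox = np.array([100.123, 200.456, 150.789, 260.012, 0.98])

result = {'personID': 0,
          'bbox': numpy_to_list(bbox, 2),
          'keypoints': numpy_to_list(keypoints, 2)}
print(json.dumps(result))
# {"personID": 0, "bbox": [100.12, 200.46, 150.79, 260.01, 0.98],
#  "keypoints": [[120.35, 230.99, 0.91], [121.0, 228.46, 0.87]]}
```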

View File

@ -2,7 +2,7 @@
* @ Date: 2020-09-26 16:52:55
* @ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-03 18:30:13
@ LastEditTime: 2021-05-27 14:33:03
@ FilePath: /EasyMocap/easymocap/dataset/config.py
'''
import numpy as np
@ -197,6 +197,9 @@ CONFIG['hand'] = {'kintree':
'y', 'y', 'y', 'y']
}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']
CONFIG['bodyhand'] = {'kintree':
[[ 1, 0],
[ 2, 1],
@ -673,6 +676,8 @@ CONFIG['total']['nJoints'] = 137
COCO17_IN_BODY25 = [0,16,15,18,17,5,2,6,3,7,4,12,9,13,10,14,11]
CONFIG['bodyhandface']['joint_names'] = CONFIG['body25']['joint_names']
def coco17tobody25(points2d):
dim = 3
if len(points2d.shape) == 2:
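`handl` and `handr` are plain aliases of the existing hand config, so all three keys reference one dict object; a tiny standalone illustration of what that sharing implies (toy dict, not the real `CONFIG`):

```python
# Toy version of the aliasing in easymocap/dataset/config.py.
CONFIG = {}
CONFIG['hand'] = {'kintree': [[1, 0], [2, 1], [3, 2], [4, 3]]}
CONFIG['handl'] = CONFIG['hand']
CONFIG['handr'] = CONFIG['hand']

assert CONFIG['handl'] is CONFIG['handr']   # same object, not copies
CONFIG['handl']['nJoints'] = 21             # hypothetical field: edits show up under every alias
assert CONFIG['hand']['nJoints'] == 21
```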

View File

@ -2,7 +2,7 @@
@ Date: 2021-01-12 17:12:50
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 10:59:22
@ LastEditTime: 2021-05-27 20:25:24
@ FilePath: /EasyMocap/easymocap/dataset/mv1pmf.py
'''
from ..mytools.file_utils import get_bbox_from_pose
@ -25,10 +25,10 @@ class MV1PMF(MVBase):
results = [{'id': self.pid, 'keypoints3d': keypoints3d}]
super().write_keypoints3d(results, nf)
def write_smpl(self, params, nf):
def write_smpl(self, params, nf, mode='smpl'):
result = {'id': 0}
result.update(params)
super().write_smpl([result], nf)
super().write_smpl([result], nf, mode)
def vis_smpl(self, vertices, faces, images, nf, sub_vis=[],
mode='smpl', extra_data=[], add_back=True):
@ -42,7 +42,7 @@ class MV1PMF(MVBase):
if len(sub_vis) == 0:
sub_vis = self.cams
for key in cameras.keys():
cameras[key] = [self.cameras[cam][key] for cam in sub_vis]
cameras[key] = np.stack([self.cameras[cam][key] for cam in sub_vis])
images = [images[self.cams.index(cam)] for cam in sub_vis]
self.writer.vis_smpl(render_data, images, cameras, outname, add_back=add_back)
@ -57,7 +57,7 @@ class MV1PMF(MVBase):
lDetections.append([det])
return super().vis_detections(images, lDetections, nf, sub_vis=sub_vis)
def vis_repro(self, images, kpts_repro, nf, to_img=True, sub_vis=[]):
def vis_repro(self, images, kpts_repro, nf, to_img=True, sub_vis=[], mode='repro'):
lDetections = []
for nv in range(len(images)):
det = {
@ -66,7 +66,7 @@ class MV1PMF(MVBase):
'bbox': get_bbox_from_pose(kpts_repro[nv], images[nv])
}
lDetections.append([det])
return super().vis_detections(images, lDetections, nf, mode='repro', sub_vis=sub_vis)
return super().vis_detections(images, lDetections, nf, mode=mode, sub_vis=sub_vis)
def __getitem__(self, index: int):
images, annots_all = super().__getitem__(index)
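The added `mode` argument only changes which sub-folder of `out` receives the JSON, so hand results can be written beside body results; a small sketch of the resulting paths (folder names are illustrative):

```python
from os.path import join

out, nf = 'output', 0
for mode in ['smpl', 'manol']:                 # mode is forwarded to MVBase.write_smpl
    print(join(out, mode, '{:06d}.json'.format(nf)))
# output/smpl/000000.json
# output/manol/000000.json
```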

View File

@ -1,6 +1,5 @@
import cv2
import numpy as np
from tqdm import tqdm
import os
class FileStorage(object):
@ -53,10 +52,6 @@ class FileStorage(object):
def close(self):
self.__del__(self)
def safe_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def read_intri(intri_name):
assert os.path.exists(intri_name), intri_name
intri = FileStorage(intri_name)
@ -145,6 +140,14 @@ def write_camera(camera, path):
extri.write('Rot_{}'.format(key), val['R'])
extri.write('T_{}'.format(key), val['T'])
def camera_from_img(img):
height, width = img.shape[0], img.shape[1]
# focal = 1.2*max(height, width) # as colmap
focal = 1.2*min(height, width) # as colmap
K = np.array([focal, 0., width/2, 0., focal, height/2, 0. ,0., 1.]).reshape(3, 3)
camera = {'K':K ,'R': np.eye(3), 'T': np.zeros((3, 1)), 'dist': np.zeros((1, 5))}
return camera
class Undistort:
@staticmethod
def image(frame, K, dist):
@ -169,84 +172,8 @@ class Undistort:
def undistort(camera, frame=None, keypoints=None, output=None, bbox=None):
# bbox: 1, 7
mtx = camera['K']
dist = camera['dist']
if frame is not None:
frame = cv2.undistort(frame, mtx, dist, None)
if output is not None:
output = cv2.undistort(output, mtx, dist, None)
if keypoints is not None:
for nP in range(keypoints.shape[0]):
kpts = keypoints[nP][:, None, :2]
kpts = np.ascontiguousarray(kpts)
kpts = cv2.undistortPoints(kpts, mtx, dist, P=mtx)
keypoints[nP, :, :2] = kpts[:, 0]
if bbox is not None:
kpts = np.zeros((2, 1, 2))
kpts[0, 0, 0] = bbox[0]
kpts[0, 0, 1] = bbox[1]
kpts[1, 0, 0] = bbox[2]
kpts[1, 0, 1] = bbox[3]
kpts = cv2.undistortPoints(kpts, mtx, dist, P=mtx)
bbox[0] = kpts[0, 0, 0]
bbox[1] = kpts[0, 0, 1]
bbox[2] = kpts[1, 0, 0]
bbox[3] = kpts[1, 0, 1]
return bbox
return frame, keypoints, output
def get_bbox(points_set, H, W, thres=0.1, scale=1.2):
bboxes = np.zeros((points_set.shape[0], 6))
for iv in range(points_set.shape[0]):
pose = points_set[iv, :, :]
use_idx = pose[:,2] > thres
if np.sum(use_idx) < 1:
continue
ll, rr = np.min(pose[use_idx, 0]), np.max(pose[use_idx, 0])
bb, tt = np.min(pose[use_idx, 1]), np.max(pose[use_idx, 1])
center = (int((ll + rr) / 2), int((bb + tt) / 2))
length = [int(scale*(rr-ll)/2), int(scale*(tt-bb)/2)]
l = max(0, center[0] - length[0])
r = min(W, center[0] + length[0]) # img.shape[1]
b = max(0, center[1] - length[1])
t = min(H, center[1] + length[1]) # img.shape[0]
conf = pose[:, 2].mean()
cls_conf = pose[use_idx, 2].mean()
bboxes[iv, 0] = l
bboxes[iv, 1] = r
bboxes[iv, 2] = b
bboxes[iv, 3] = t
bboxes[iv, 4] = conf
bboxes[iv, 5] = cls_conf
return bboxes
def filterKeypoints(keypoints, thres = 0.1, min_width=40, \
min_height=40, min_area= 50000, min_count=6):
add_list = []
# TODO: parallelize this loop
for ik in range(keypoints.shape[0]):
pose = keypoints[ik]
vis_count = np.sum(pose[:15, 2] > thres) #TODO:
if vis_count < min_count:
continue
ll, rr = np.min(pose[pose[:,2]>thres,0]), np.max(pose[pose[:,2]>thres,0])
bb, tt = np.min(pose[pose[:,2]>thres,1]), np.max(pose[pose[:,2]>thres,1])
center = (int((ll+rr)/2), int((bb+tt)/2))
length = [int(1.2*(rr-ll)/2), int(1.2*(tt-bb)/2)]
l = center[0] - length[0]
r = center[0] + length[0]
b = center[1] - length[1]
t = center[1] + length[1]
if (r - l) < min_width:
continue
if (t - b) < min_height:
continue
if (r - l)*(t - b) < min_area:
continue
add_list.append(ik)
keypoints = keypoints[add_list, :, :]
return keypoints, add_list
print('This function is deprecated')
raise NotImplementedError
def get_fundamental_matrix(cameras, basenames):
skew_op = lambda x: np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
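A usage sketch of the new `camera_from_img` default camera, which guesses a COLMAP-style focal length from the image size (the function is copied here so the snippet runs standalone):

```python
import numpy as np

def camera_from_img(img):
    # rough pinhole guess: focal = 1.2 * min(H, W) (this commit switches from max to min),
    # principal point at the image center, identity extrinsics, no distortion
    height, width = img.shape[0], img.shape[1]
    focal = 1.2 * min(height, width)
    K = np.array([focal, 0., width/2, 0., focal, height/2, 0., 0., 1.]).reshape(3, 3)
    return {'K': K, 'R': np.eye(3), 'T': np.zeros((3, 1)), 'dist': np.zeros((1, 5))}

img = np.zeros((1080, 1920, 3), dtype=np.uint8)   # stand-in for a loaded frame
camera = camera_from_img(img)
print(camera['K'])
# [[1296.    0.  960.]
#  [   0. 1296.  540.]
#  [   0.    0.    1.]]
```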

View File

@ -2,7 +2,7 @@
@ Date: 2021-01-15 12:09:27
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 19:45:18
@ LastEditTime: 2021-05-27 20:36:42
@ FilePath: /EasyMocapRelease/easymocap/mytools/cmd_loader.py
'''
import os
@ -12,9 +12,11 @@ def load_parser():
parser = argparse.ArgumentParser('EasyMocap command line tools')
parser.add_argument('path', type=str)
parser.add_argument('--out', type=str, default=None)
parser.add_argument('--camera', type=str, default=None)
parser.add_argument('--annot', type=str, default='annots', help="sub-directory name that stores the generated annotation files, defaults to 'annots'")
parser.add_argument('--sub', type=str, nargs='+', default=[],
help='the sub folder lists when in video mode')
parser.add_argument('--from_file', type=str, default=None)
parser.add_argument('--pid', type=int, nargs='+', default=[0],
help='the person IDs')
parser.add_argument('--max_person', type=int, default=-1,
@ -28,8 +30,8 @@ def load_parser():
#
# keypoints and body model
#
parser.add_argument('--body', type=str, default='body25', choices=['body15', 'body25', 'h36m', 'bodyhand', 'bodyhandface', 'total'])
parser.add_argument('--model', type=str, default='smpl', choices=['smpl', 'smplh', 'smplx', 'mano'])
parser.add_argument('--body', type=str, default='body25', choices=['body15', 'body25', 'h36m', 'bodyhand', 'bodyhandface', 'handl', 'handr', 'total'])
parser.add_argument('--model', type=str, default='smpl', choices=['smpl', 'smplh', 'smplx', 'manol', 'manor'])
parser.add_argument('--gender', type=str, default='neutral',
choices=['neutral', 'male', 'female'])
# Input control
@ -50,17 +52,22 @@ def load_parser():
#
# visualization part
#
parser.add_argument('--vis_det', action='store_true')
parser.add_argument('--vis_repro', action='store_true')
parser.add_argument('--vis_smpl', action='store_true')
parser.add_argument('--undis', action='store_true')
parser.add_argument('--sub_vis', type=str, nargs='+', default=[],
output = parser.add_argument_group('Output control')
output.add_argument('--vis_det', action='store_true')
output.add_argument('--vis_repro', action='store_true')
output.add_argument('--vis_smpl', action='store_true')
output.add_argument('--write_smpl_full', action='store_true')
output.add_argument('--vis_mask', action='store_true')
output.add_argument('--undis', action='store_true')
output.add_argument('--sub_vis', type=str, nargs='+', default=[],
help='the sub folder lists for visualization')
#
# debug
#
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--save_origin', action='store_true')
parser.add_argument('--restart', action='store_true')
parser.add_argument('--no_opt', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--opts',
help="Modify config options using the command-line",
@ -82,6 +89,21 @@ def parse_parser(parser):
print(' - [Warning] Please specify the output path `--out ${out}`')
print(' - [Warning] Default to {}/output'.format(args.path))
args.out = join(args.path, 'output')
if args.from_file is not None:
assert os.path.exists(args.from_file), args.from_file
with open(args.from_file) as f:
datas = f.readlines()
subs = [d for d in datas if not d.startswith('#')]
subs = [d.rstrip().replace('https://www.youtube.com/watch?v=', '') for d in subs]
newsubs = sorted(os.listdir(join(args.path, 'images')))
clips = []
for newsub in newsubs:
if newsub.split('+')[0] in subs:
clips.append(newsub)
for sub in subs:
if os.path.exists(join(args.path, 'images', sub)):
clips.append(sub)
args.sub = clips
if len(args.sub) == 0 and os.path.exists(join(args.path, 'images')):
args.sub = sorted(os.listdir(join(args.path, 'images')))
if args.sub[0].isdigit():
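A hedged sketch of how a demo script might consume the extended parser; the entry-point name and the downstream calls are illustrative, but the flags and the `load_parser`/`parse_parser`/`load_model` names are the ones shown in this commit:

```python
# Illustrative only, e.g. invoked as:
#   python <some_demo>.py /path/to/data --body handl --model manol --write_smpl_full
from easymocap.mytools.cmd_loader import load_parser, parse_parser
from easymocap.smplmodel import load_model

parser = load_parser()
args = parse_parser(parser)     # assumed to return the parsed args (fills args.out, args.sub, ...)

body_model = load_model(gender=args.gender, model_type=args.model,
                        skel_type=args.body)
print(args.model, args.body, args.write_smpl_full)
```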

View File

@ -2,8 +2,8 @@
@ Date: 2021-03-15 12:23:12
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-01 16:17:34
@ FilePath: /EasyMocap/easymocap/mytools/file_utils.py
@ LastEditTime: 2021-05-27 20:50:43
@ FilePath: /EasyMocapRelease/easymocap/mytools/file_utils.py
'''
import os
import json
@ -64,6 +64,13 @@ def read_annot(annotname, mode='body25'):
data[i]['keypoints'] = data[i]['keypoints']
elif mode == 'body15':
data[i]['keypoints'] = data[i]['keypoints'][:15, :]
elif mode in ['handl', 'handr']:
data[i]['keypoints'] = np.array(data[i][mode+'2d']).astype(np.float32)
key = 'bbox_'+mode+'2d'
if key not in data[i].keys():
data[i]['bbox'] = np.array(get_bbox_from_pose(data[i]['keypoints'])).astype(np.float32)
else:
data[i]['bbox'] = data[i]['bbox_'+mode+'2d'][:5]
elif mode == 'total':
data[i]['keypoints'] = np.vstack([data[i][key] for key in ['keypoints', 'handl2d', 'handr2d', 'face2d']])
elif mode == 'bodyhand':
@ -75,42 +82,70 @@ def read_annot(annotname, mode='body25'):
data.sort(key=lambda x:x['id'])
return data
def write_common_results(dumpname, results, keys, fmt='%.3f'):
mkout(dumpname)
def array2raw(array, separator=' ', fmt='%.3f'):
assert len(array.shape) == 2, 'Only support MxN matrix, {}'.format(array.shape)
res = []
for data in array:
res.append(separator.join([fmt%(d) for d in data]))
def myarray2string(array, separator=', ', fmt='%.3f'):
assert len(array.shape) == 2, 'Only support MxN matrix, {}'.format(array.shape)
res = ['[']
for i in range(array.shape[0]):
res.append(' [{}]'.format(separator.join([fmt%(d) for d in array[i]])))
if i != array.shape[0] -1:
res[-1] += ', '
res.append(' ]')
return '\r\n'.join(res)
def write_common_results(dumpname=None, results=[], keys=[], fmt='%2.3f'):
format_out = {'float_kind':lambda x: fmt % x}
with open(dumpname, 'w') as f:
f.write('[\n')
for idata, data in enumerate(results):
f.write(' {\n')
output = {}
output['id'] = data['id']
for key in keys:
if key not in data.keys():continue
output[key] = np.array2string(data[key], max_line_width=1000, separator=', ', formatter=format_out)
for key in output.keys():
f.write(' \"{}\": {}'.format(key, output[key]))
if key != keys[-1]:
f.write(',\n')
else:
f.write('\n')
f.write(' }')
if idata != len(results) - 1:
f.write(',\n')
out_text = []
out_text.append('[\n')
for idata, data in enumerate(results):
out_text.append(' {\n')
output = {}
output['id'] = data['id']
for key in keys:
if key not in data.keys():continue
# BUG: np.array2string fails when data[key] has too many rows, so use myarray2string instead
# output[key] = np.array2string(data[key], max_line_width=1000, separator=', ', formatter=format_out)
output[key] = myarray2string(data[key], separator=', ', fmt=fmt)
for key in output.keys():
out_text.append(' \"{}\": {}'.format(key, output[key]))
if key != keys[-1]:
out_text.append(',\n')
else:
f.write('\n')
f.write(']\n')
out_text.append('\n')
out_text.append(' }')
if idata != len(results) - 1:
out_text.append(',\n')
else:
out_text.append('\n')
out_text.append(']\n')
if dumpname is not None:
mkout(dumpname)
with open(dumpname, 'w') as f:
f.writelines(out_text)
else:
return ''.join(out_text)
def write_keypoints3d(dumpname, results):
# TODO:rewrite it
keys = ['keypoints3d']
write_common_results(dumpname, results, keys, fmt='%.3f')
write_common_results(dumpname, results, keys, fmt='%6.3f')
def write_vertices(dumpname, results):
keys = ['vertices']
write_common_results(dumpname, results, keys, fmt='%6.3f')
def write_smpl(dumpname, results):
keys = ['Rh', 'Th', 'poses', 'expression', 'shapes']
write_common_results(dumpname, results, keys)
def get_bbox_from_pose(pose_2d, img, rate = 0.1):
def get_bbox_from_pose(pose_2d, img=None, rate = 0.1):
# this function returns bounding box from the 2D pose
# here use pose_2d[:, -1] instead of pose_2d[:, 2]
# because when vis reprojection, the result will be (x, y, depth, conf)
@ -125,7 +160,8 @@ def get_bbox_from_pose(pose_2d, img, rate = 0.1):
dy = (y_max - y_min)*rate
# TODO: append the class label and related fields later
bbox = [x_min-dx, y_min-dy, x_max+dx, y_max+dy, 1]
correct_bbox(img, bbox)
if img is not None:
correct_bbox(img, bbox)
return bbox
def correct_bbox(img, bbox):
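`write_common_results` now formats arrays row by row via `myarray2string` instead of `np.array2string`, which breaks on tall matrices. A standalone copy of the formatter to show the text it emits:

```python
import numpy as np

def myarray2string(array, separator=', ', fmt='%.3f'):
    # row-by-row formatting; avoids the np.array2string failure noted above
    assert len(array.shape) == 2, 'Only support MxN matrix, {}'.format(array.shape)
    res = ['[']
    for i in range(array.shape[0]):
        res.append(' [{}]'.format(separator.join([fmt % d for d in array[i]])))
        if i != array.shape[0] - 1:
            res[-1] += ', '
    res.append(' ]')
    return '\r\n'.join(res)

Rh = np.array([[0.1234, -1.5, 0.0],
               [0.5678,  2.0, 0.1]])
print(myarray2string(Rh, fmt='%2.3f'))
# [
#  [0.123, -1.500, 0.000],
#  [0.568, 2.000, 0.100]
#  ]
```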

View File

@ -2,11 +2,10 @@
@ Date: 2021-04-13 20:43:16
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-14 13:38:34
@ FilePath: /EasyMocapRelease/easymocap/pipeline/basic.py
@ LastEditTime: 2021-05-27 15:35:22
@ FilePath: /EasyMocap/easymocap/pipeline/basic.py
'''
from ..pyfitting import optimizeShape, optimizePose2D, optimizePose3D
from ..smplmodel import init_params
from ..mytools import Timer
from ..dataset import CONFIG
from .weight import load_weight_pose, load_weight_shape
@ -37,10 +36,26 @@ def multi_stage_optimize(body_model, params, kp3ds, kp2ds=None, bboxes=None, Pal
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
return params
def multi_stage_optimize2d(body_model, params, kp2ds, bboxes, Pall, weight={}, args=None):
cfg = Config(args)
cfg.device = body_model.device
cfg.model = body_model.model_type
with Timer('Optimize global RT'):
cfg.OPT_R = True
cfg.OPT_T = True
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
with Timer('Optimize 2D Pose/{} frames'.format(kp2ds.shape[0])):
cfg.OPT_POSE = True
cfg.OPT_SHAPE = True
# bboxes => (nFrames, nViews, 5), keypoints2d => (nFrames, nViews, nJoints, 3)
params = optimizePose2D(body_model, params, bboxes, kp2ds, Pall, weight=weight, cfg=cfg)
return params
def smpl_from_keypoints3d2d(body_model, kp3ds, kp2ds, bboxes, Pall, config, args,
weight_shape=None, weight_pose=None):
model_type = body_model.model_type
params_init = init_params(nFrames=1, model_type=model_type)
params_init = body_model.init_params(nFrames=1)
if weight_shape is None:
weight_shape = load_weight_shape(args.opts)
if model_type in ['smpl', 'smplh', 'smplx']:
@ -54,7 +69,7 @@ def smpl_from_keypoints3d2d(body_model, kp3ds, kp2ds, bboxes, Pall, config, args
# optimize 3D pose
cfg = Config(args)
cfg.device = body_model.device
params = init_params(nFrames=kp3ds.shape[0], model_type=model_type)
params = body_model.init_params(nFrames=kp3ds.shape[0])
params['shapes'] = params_shape['shapes'].copy()
if weight_pose is None:
weight_pose = load_weight_pose(model_type, args.opts)
@ -65,7 +80,7 @@ def smpl_from_keypoints3d2d(body_model, kp3ds, kp2ds, bboxes, Pall, config, args
def smpl_from_keypoints3d(body_model, kp3ds, config, args,
weight_shape=None, weight_pose=None):
model_type = body_model.model_type
params_init = init_params(nFrames=1, model_type=model_type)
params_init = body_model.init_params(nFrames=1)
if weight_shape is None:
weight_shape = load_weight_shape(args.opts)
if model_type in ['smpl', 'smplh', 'smplx']:
@ -80,7 +95,7 @@ def smpl_from_keypoints3d(body_model, kp3ds, config, args,
cfg = Config(args)
cfg.device = body_model.device
cfg.model_type = model_type
params = init_params(nFrames=kp3ds.shape[0], model_type=model_type)
params = body_model.init_params(nFrames=kp3ds.shape[0])
params['shapes'] = params_shape['shapes'].copy()
if weight_pose is None:
weight_pose = load_weight_pose(model_type, args.opts)
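A hedged sketch of how the 3D+2D fitting entry point is driven after this change; the wrapper, variable names, and the exact meaning of `config` are illustrative, while the `bboxes`/`kp2ds` shapes follow the comment in `multi_stage_optimize2d` above:

```python
from easymocap.dataset import CONFIG
from easymocap.pipeline.basic import smpl_from_keypoints3d2d

def fit_from_triangulation(body_model, kp3ds, kp2ds, bboxes, Pall, args):
    # kp2ds : (nFrames, nViews, nJoints, 3), bboxes: (nFrames, nViews, 5)
    # kp3ds : triangulated keypoints, one frame per row
    # Initialization now goes through body_model.init_params(nFrames=...) internally,
    # so MANO gets a (nFrames, 15) pose vector instead of an SMPL-sized one.
    return smpl_from_keypoints3d2d(body_model, kp3ds, kp2ds, bboxes, Pall,
                                   config=CONFIG[args.body], args=args)
```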

View File

@ -2,8 +2,8 @@
@ Date: 2021-04-13 20:12:58
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 22:51:39
@ FilePath: /EasyMocapRelease/easymocap/pipeline/weight.py
@ LastEditTime: 2021-05-27 17:04:47
@ FilePath: /EasyMocap/easymocap/pipeline/weight.py
'''
def load_weight_shape(opts):
weight = {'s3d': 1., 'reg_shapes': 5e-3}
@ -35,9 +35,33 @@ def load_weight_pose(model, opts):
'reg_hand': 1e-4, 'reg_expr': 1e-2, 'reg_head': 1e-2,
'k2d': 1e-4
}
elif model == 'mano':
weight = {
'k3d': 1e2, 'k2d': 1e-3,
'reg_poses': 1e-3, 'smooth_body': 1e2
}
else:
print(model)
raise NotImplementedError
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
def load_weight_pose2d(model, opts):
if model == 'smpl':
weight = {
'k2d': 2e-4,
'init_poses': 1e-3, 'init_shapes': 1e-2,
'smooth_body': 5e-1, 'smooth_poses': 1e-1,
}
elif model == 'smplh':
raise NotImplementedError
elif model == 'smplx':
raise NotImplementedError
else:
weight = {}
for key in opts.keys():
if key in weight.keys():
weight[key] = opts[key]
return weight
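Both loaders end with the same `opts` override loop: only keys that already exist in the chosen weight table can be overridden from the command line. A quick sketch with made-up override values:

```python
from easymocap.pipeline.weight import load_weight_pose

# --opts ends up as a {key: value} dict; keys absent from the weight table are ignored
opts = {'k2d': 5e-3, 'not_a_weight': 1.0}
weight = load_weight_pose('mano', opts)
print(weight)
# {'k3d': 100.0, 'k2d': 0.005, 'reg_poses': 0.001, 'smooth_body': 100.0}
```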

View File

@ -2,8 +2,8 @@
@ Date: 2020-11-19 10:49:26
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 22:52:28
@ FilePath: /EasyMocapRelease/easymocap/pyfitting/optimize_simple.py
@ LastEditTime: 2021-05-25 19:51:12
@ FilePath: /EasyMocap/easymocap/pyfitting/optimize_simple.py
'''
import numpy as np
import torch
@ -279,8 +279,9 @@ def optimizePose3D(body_model, params, keypoints3d, weight, cfg):
'smooth_poses': LossSmoothPoses(1, nFrames, cfg).poses,
'reg_poses': LossRegPoses(cfg).reg_body,
'init_poses': LossInit(params, cfg).init_poses,
'reg_poses_zero': LossRegPosesZero(keypoints3d, cfg).__call__,
}
if body_model.model_type != 'mano':
loss_funcs['reg_poses_zero'] = LossRegPosesZero(keypoints3d, cfg).__call__
if cfg.OPT_HAND:
loss_funcs['k3d_hand'] = LossKeypoints3D(keypoints3d, cfg, norm='l1').hand
loss_funcs['reg_hand'] = LossRegPoses(cfg).reg_hand
@ -327,9 +328,12 @@ def optimizePose2D(body_model, params, bboxes, keypoints2d, Pall, weight, cfg):
'smooth_body': LossSmoothBodyMean(cfg).body,
'init_poses': LossInit(params, cfg).init_poses,
'smooth_poses': LossSmoothPoses(nViews, nFrames, cfg).poses,
# 'reg_poses': LossRegPoses(cfg).reg_body,
'reg_poses_zero': LossRegPosesZero(keypoints2d, cfg).__call__,
'reg_poses': LossRegPoses(cfg).reg_body,
}
if body_model.model_type != 'mano':
loss_funcs['reg_poses_zero'] = LossRegPosesZero(keypoints2d, cfg).__call__
if cfg.OPT_SHAPE:
loss_funcs['init_shapes'] = LossInit(params, cfg).init_shapes
if cfg.OPT_HAND:
loss_funcs['reg_hand'] = LossRegPoses(cfg).reg_hand
# loss_funcs['smooth_hand'] = LossSmoothPoses(1, nFrames, cfg).hands

View File

@ -2,9 +2,9 @@
@ Date: 2020-11-18 14:33:20
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-01-20 16:33:02
@ FilePath: /EasyMocap/code/smplmodel/__init__.py
@ LastEditTime: 2021-05-25 19:20:52
@ FilePath: /EasyMocap/easymocap/smplmodel/__init__.py
'''
from .body_model import SMPLlayer
from .body_param import load_model
from .body_param import merge_params, select_nf, init_params, check_params, check_keypoints
from .body_param import merge_params, select_nf, check_keypoints

View File

@ -2,8 +2,8 @@
@ Date: 2020-11-18 14:04:10
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-05-11 15:09:44
@ FilePath: /EasyMocap/easymocap/smplmodel/body_model.py
@ LastEditTime: 2021-05-27 20:35:10
@ FilePath: /EasyMocapRelease/easymocap/smplmodel/body_model.py
'''
import torch
import torch.nn as nn
@ -39,7 +39,7 @@ def load_regressor(regressor_path):
import ipdb; ipdb.set_trace()
return X_regressor
NUM_POSES = {'smpl': 72, 'smplh': 78, 'smplx': 66 + 12 + 9}
NUM_POSES = {'smpl': 72, 'smplh': 78, 'smplx': 66 + 12 + 9, 'mano': 9}
NUM_SHAPES = 10
NUM_EXPR = 10
class SMPLlayer(nn.Module):
@ -120,6 +120,19 @@ class SMPLlayer(nn.Module):
self.register_buffer('mHandsComponents'+key[0], val)
self.use_pca = True
self.use_flat_mean = True
elif self.model_type == 'mano':
# TODO:write this into config file
self.num_pca_comps = 12
self.use_pca = True
if self.use_pca:
NUM_POSES['mano'] = self.num_pca_comps + 3
else:
NUM_POSES['mano'] = 45 + 3
self.use_flat_mean = True
val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
self.register_buffer('mHandsMean', val)
val = to_tensor(to_np(data['hands_components'][:self.num_pca_comps, :]), dtype=dtype)
self.register_buffer('mHandsComponents', val)
elif self.model_type == 'smplx':
# hand pose
self.num_pca_comps = 6
@ -131,15 +144,29 @@ class SMPLlayer(nn.Module):
self.register_buffer('mHandsComponents'+key[0], val)
self.use_pca = True
self.use_flat_mean = True
@staticmethod
def extend_hand(poses, use_pca, use_flat_mean, coeffs, mean):
if use_pca:
poses = poses @ coeffs
if use_flat_mean:
poses = poses + mean
return poses
def extend_pose(self, poses):
if self.model_type not in ['smplh', 'smplx']:
if self.model_type not in ['smplh', 'smplx', 'mano']:
return poses
elif self.model_type == 'smplh' and poses.shape[-1] == 156:
return poses
elif self.model_type == 'smplx' and poses.shape[-1] == 165:
return poses
elif self.model_type == 'mano' and poses.shape[-1] == 48:
return poses
if self.model_type == 'mano':
poses_hand = self.extend_hand(poses[..., 3:], self.use_pca, self.use_flat_mean,
self.mHandsComponents, self.mHandsMean)
poses = torch.cat([poses[..., :3], poses_hand], dim=-1)
return poses
NUM_BODYJOINTS = 22 * 3
if self.use_pca:
NUM_HANDJOINTS = self.num_pca_comps
@ -210,6 +237,13 @@ class SMPLlayer(nn.Module):
Th=Tnew.detach().cpu().numpy()
)
return res
def full_poses(self, poses):
if 'torch' not in str(type(poses)):
dtype, device = self.dtype, self.device
poses = to_tensor(poses, dtype, device)
poses = self.extend_pose(poses)
return poses.detach().cpu().numpy()
def forward(self, poses, shapes, Rh=None, Th=None, expression=None, return_verts=True, return_tensor=True, only_shape=False, **kwargs):
""" Forward pass for SMPL model
@ -250,8 +284,7 @@ class SMPLlayer(nn.Module):
if expression is not None and self.model_type == 'smplx':
shapes = torch.cat([shapes, expression], dim=1)
# process poses
if self.model_type == 'smplh' or self.model_type == 'smplx':
poses = self.extend_pose(poses)
poses = self.extend_pose(poses)
if return_verts:
vertices, joints = lbs(shapes, poses, self.v_template,
self.shapedirs, self.posedirs,
@ -268,6 +301,17 @@ class SMPLlayer(nn.Module):
vertices = vertices.detach().cpu().numpy()
return vertices
def init_params(self, nFrames):
params = {
'poses': np.zeros((nFrames, NUM_POSES[self.model_type])),
'shapes': np.zeros((1, NUM_SHAPES)),
'Rh': np.zeros((nFrames, 3)),
'Th': np.zeros((nFrames, 3)),
}
if self.model_type == 'smplx':
params['expression'] = np.zeros((nFrames, NUM_EXPR))
return params
def check_params(self, body_params):
model_type = self.model_type
nFrames = body_params['poses'].shape[0]
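For MANO the optimized pose vector is 3 global (wrist) rotations plus `num_pca_comps` PCA coefficients, and `extend_pose`/`full_poses` expand it to the full 48-dim axis-angle vector (presumably what `--write_smpl_full` dumps). A numerical sketch of that expansion, with random stand-ins for the PCA basis and mean stored in the MANO pickle:

```python
import numpy as np

num_pca_comps = 12                                      # hard-coded above (TODO: config)
hands_components = np.random.randn(num_pca_comps, 45)   # stand-in for data['hands_components']
hands_mean = np.random.randn(1, 45)                     # stand-in for data['hands_mean']

# what body_model.init_params(nFrames=1) allocates for model_type='mano'
poses = np.zeros((1, 3 + num_pca_comps))
# same arithmetic as extend_hand/extend_pose: PCA coefficients -> 45 finger angles (+ mean)
finger_pose = poses[:, 3:] @ hands_components + hands_mean
full_pose = np.concatenate([poses[:, :3], finger_pose], axis=1)
print(poses.shape, '->', full_pose.shape)               # (1, 15) -> (1, 48)
```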

View File

@ -2,8 +2,8 @@
@ Date: 2020-11-20 13:34:54
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-04-13 20:31:49
@ FilePath: /EasyMocapRelease/easymocap/smplmodel/body_param.py
@ LastEditTime: 2021-05-25 19:21:12
@ FilePath: /EasyMocap/easymocap/smplmodel/body_param.py
'''
import numpy as np
from os.path import join
@ -29,28 +29,6 @@ def select_nf(params_all, nf):
output['shapes'] = params_all['shapes'][nf:nf+1, :]
return output
NUM_POSES = {'smpl': 72, 'smplh': 78, 'smplx': 66 + 12 + 9}
NUM_EXPR = 10
def init_params(nFrames=1, model_type='smpl'):
params = {
'poses': np.zeros((nFrames, NUM_POSES[model_type])),
'shapes': np.zeros((1, 10)),
'Rh': np.zeros((nFrames, 3)),
'Th': np.zeros((nFrames, 3)),
}
if model_type == 'smplx':
params['expression'] = np.zeros((nFrames, NUM_EXPR))
return params
def check_params(body_params, model_type):
nFrames = body_params['poses'].shape[0]
if body_params['poses'].shape[1] != NUM_POSES[model_type]:
body_params['poses'] = np.hstack((body_params['poses'], np.zeros((nFrames, NUM_POSES[model_type] - body_params['poses'].shape[1]))))
if model_type == 'smplx' and 'expression' not in body_params.keys():
body_params['expression'] = np.zeros((nFrames, NUM_EXPR))
return body_params
def load_model(gender='neutral', use_cuda=True, model_type='smpl', skel_type='body25', device=None, model_path='data/smplx'):
# prepare SMPL model
# print('[Load model {}/{}]'.format(model_type, gender))
@ -76,6 +54,10 @@ def load_model(gender='neutral', use_cuda=True, model_type='smpl', skel_type='bo
elif model_type == 'smplx':
body_model = SMPLlayer(join(model_path, 'smplx/SMPLX_{}.pkl'.format(gender.upper())), model_type='smplx', gender=gender, device=device,
regressor_path=join(model_path, 'J_regressor_body25_smplx.txt'))
elif model_type == 'manol' or model_type == 'manor':
lr = {'manol': 'LEFT', 'manor': 'RIGHT'}
body_model = SMPLlayer(join(model_path, 'smplh/MANO_{}.pkl'.format(lr[model_type])), model_type='mano', gender=gender, device=device,
regressor_path=join(model_path, 'J_regressor_mano_{}.txt'.format(lr[model_type])))
else:
body_model = None
body_model.to(device)
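A hedged usage sketch of the new MANO branch of `load_model`; it assumes the MANO pickles sit under `data/smplx/smplh/` with the hand regressors next to them, as in the paths above, and that those model files are available locally:

```python
from easymocap.smplmodel import load_model

# 'manol' / 'manor' load smplh/MANO_{LEFT,RIGHT}.pkl with model_type='mano'
# and the matching J_regressor_mano_{LEFT,RIGHT}.txt keypoint regressor.
body_model = load_model(gender='neutral', model_type='manor',
                        skel_type='handr', use_cuda=False)
params = body_model.init_params(nFrames=1)     # poses: (1, 12 PCA coeffs + 3)
vertices = body_model(return_verts=True, return_tensor=False, **params)
print(vertices.shape)                          # (1, 778, 3): MANO's 778 vertices
```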

View File

@ -1 +1,10 @@
from .renderer import Renderer
'''
@ Date: 2021-04-25 22:07:06
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-05-27 21:09:08
@ FilePath: /EasyMocapRelease/easymocap/visualize/__init__.py
'''
from .renderer import Renderer
from .geometry import create_cameras
from .geometry import create_mesh_pyrender