update tools

parent 0b48fa8577
commit abd9b905b6
@@ -10,7 +10,6 @@ from .basic_dataset import ImageFolder, MVBase
from .basic_visualize import vis_point, vis_line, vis_bbox
from .basic_visualize import plot_bbox_body, plot_skeleton, plot_text, vis_active_bbox, plot_bbox_factory
from .basic_annotator import AnnotBase, AnnotMV
from .chessboard import findChessboardCorners
# bbox callbacks
# create, delete, copy
from .bbox_callback import create_bbox, delete_bbox, delete_all_bbox, copy_previous_bbox, copy_previous_missing
@@ -1,5 +1,6 @@
import shutil
import cv2
import os
from tqdm import tqdm
from .basic_keyboard import print_help, register_keys
from .basic_visualize import plot_text, resize_to_screen, merge
@@ -203,6 +204,7 @@ class AnnotBase:
param['frame'] = nf
param['annots'] = annots
if not no_img:
assert os.path.exists(imgname), imgname
img0 = cv2.imread(imgname)
param['img0'] = img0
# param['pid'] = len(annot['annots'])
@@ -239,6 +241,7 @@ class AnnotBase:
img = func(img, **self.param)
if not self.no_window:
cv2.imshow(self.name, img)
return img

class AnnotMV:
def __init__(self, datasets, key_funcs={}, key_funcs_view={}, callbacks=[], vis_funcs=[], vis_funcs_all=[],
@@ -602,8 +605,11 @@ def parse_parser(parser):
data = read_json(args.from_file)
args.sub = sorted([v['vid'] for v in data])
elif len(args.sub) == 0:
if not os.path.exists(join(args.path, args.image)):
print('{} not exists, Please run extract_image first'.format(join(args.path, args.image)))
raise FileNotFoundError
subs = sorted(os.listdir(join(args.path, args.image)))
subs = [s for s in subs if os.path.isdir(join(args.path, args.image, s))]
subs = [s for s in subs if os.path.isdir(join(args.path, args.image, s)) and not s.startswith('._')]
if len(subs) > 0 and subs[0].isdigit():
subs = sorted(subs, key=lambda x:int(x))
args.sub = subs
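For reference, a standalone sketch of the sub-folder selection added to parse_parser here; the extra filter drops macOS '._*' resource-fork entries. The helper name and its use outside the parser are illustrative, not part of this diff:

import os
from os.path import join, isdir

def list_camera_subs(image_root):
    # keep only real sub-directories and skip macOS '._*' resource-fork artifacts
    subs = sorted(os.listdir(image_root))
    subs = [s for s in subs if isdir(join(image_root, s)) and not s.startswith('._')]
    # numeric sort when folders are named 0, 1, 2, ... so that '10' follows '9'
    if len(subs) > 0 and subs[0].isdigit():
        subs = sorted(subs, key=lambda x: int(x))
    return subs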
@@ -2,8 +2,8 @@
@ Date: 2021-04-15 16:57:53
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-10-15 16:43:12
@ FilePath: /EasyMocap/easymocap/annotator/basic_dataset.py
@ LastEditTime: 2022-09-15 21:58:57
@ FilePath: /EasyMocapPublic/easymocap/annotator/basic_dataset.py
'''
from os.path import join
import os
@@ -25,13 +25,20 @@ class ImageFolder:
self.annot_root_tmp = join(path, self.annot + '_tmp')
if os.path.exists(self.annot_root_tmp) and remove_tmp:
shutil.rmtree(self.annot_root_tmp)
print('- Load data from {}'.format(path))
if sub is None:
print('- Try to find image names...')
self.imgnames = getFileList(self.image_root, ext=ext, max=max_per_folder)
print(' -> find {} images'.format(len(self.imgnames)))
if not no_annot:
print('- Try to find annot names...')
self.annnames = getFileList(self.annot_root, ext='.json')
print(' -> find {} annots'.format(len(self.annnames)))
else:
print('- Try to find image names of camera {}...'.format(sub))
self.imgnames = getFileList(join(self.image_root, sub), ext=ext)
self.imgnames = [join(sub, name) for name in self.imgnames]
print(' -> find {} images'.format(len(self.imgnames)))
if not no_annot:
self.annnames = getFileList(join(self.annot_root, sub), ext='.json')
self.annnames = [join(sub, name) for name in self.annnames]
@@ -46,6 +53,9 @@ class ImageFolder:
self.no_annot = no_annot

def __getitem__(self, index):
if index > len(self.imgnames):
print('!!! You are try to read {} image from {} images'.format(index, len(self.imgnames)))
print('!!! Please check image path: {}'.format(self.image_root))
imgname = join(self.image_root, self.imgnames[index])
if self.no_annot:
annname = None
@@ -2,8 +2,8 @@
@ Date: 2021-04-13 16:14:36
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-08-17 16:49:40
@ FilePath: /EasyMocapPublic/easymocap/annotator/chessboard.py
@ LastEditTime: 2022-10-25 20:56:26
@ FilePath: /EasyMocapRelease/easymocap/annotator/chessboard.py
'''
import numpy as np
import cv2
@@ -101,14 +101,14 @@ def create_chessboard(path, keypoints3d, out='annots'):
annots['keypoints3d'] = template['keypoints3d']
save_json(annname, annots)

ARUCO_DICT = {
    "4X4_50": cv2.aruco.DICT_4X4_50,
    "4X4_100": cv2.aruco.DICT_4X4_100,
    "5X5_100": cv2.aruco.DICT_5X5_100,
    "5X5_250": cv2.aruco.DICT_5X5_250,
}

def detect_charuco(image, aruco_type, long, short, squareLength, aruco_len):
ARUCO_DICT = {
    "4X4_50": cv2.aruco.DICT_4X4_50,
    "4X4_100": cv2.aruco.DICT_4X4_100,
    "5X5_100": cv2.aruco.DICT_5X5_100,
    "5X5_250": cv2.aruco.DICT_5X5_250,
}
# create the ChArUco calibration board
dictionary = cv2.aruco.getPredefinedDictionary(dict=ARUCO_DICT[aruco_type])
board = cv2.aruco.CharucoBoard_create(
@@ -139,3 +139,95 @@ def detect_charuco(image, aruco_type, long, short, squareLength, aruco_len):
else:
retval = False
return retval, keypoints2d, corners3d

class CharucoBoard:
def __init__(self, long, short, squareLength, aruco_len, aruco_type) -> None:
'''
short, long: the number of squares along the short and long edges.
squareLength, aruco_len: the side length of a chessboard square and of an aruco marker.
aruco_type: the Aruco dictionary type; 4X4 means each marker carries a 4x4 bit grid, and _50 is the number of markers in the dictionary.
'''
# the supported Aruco dictionary types
self.ARUCO_DICT = {
    "4X4_50": cv2.aruco.DICT_4X4_50,
    "4X4_100": cv2.aruco.DICT_4X4_100,
    "5X5_100": cv2.aruco.DICT_5X5_100,
    "5X5_250": cv2.aruco.DICT_5X5_250,
}
# create the ChArUco calibration board
dictionary = cv2.aruco.getPredefinedDictionary(dict=self.ARUCO_DICT[aruco_type])
board = cv2.aruco.CharucoBoard_create(
    squaresY=long,
    squaresX=short,
    squareLength=squareLength,
    markerLength=aruco_len,
    dictionary=dictionary,
)
corners = board.chessboardCorners
# ATTN: exchange the XY
corners = corners[:, [1, 0, 2]]
self.template = {
    'keypoints3d': corners,
    'keypoints2d': np.zeros_like(corners),
    'pattern': (long-1, short-1),
    'grid_size': squareLength,
    'visted': False
}
print(corners.shape)
self.dictionary = dictionary
self.board = board

def detect(self, img_color, annots):
# find the corner points of the aruco marker blocks
corners, ids, _ = cv2.aruco.detectMarkers(
    image=img_color, dictionary=self.dictionary, parameters=None
)
# interpolate the inner corners of the black/white chessboard squares
if ids is not None:
retval, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(
    markerCorners=corners, markerIds=ids, image=img_color, board=self.board
)
else:
retval = False
if retval:
# draw the detected inner chessboard corners
cv2.aruco.drawDetectedCornersCharuco(
    img_color, charucoCorners, charucoIds, [0, 0, 255]
)
if False:
cv2.aruco.drawDetectedMarkers(
    image=img_color, corners=corners, ids=ids, borderColor=None
)

ids = charucoIds[:, 0]
pts = charucoCorners[:, 0]
annots['keypoints2d'][ids, :2] = pts
annots['keypoints2d'][ids, 2] = 1.
# if args.show:
# img_color = cv2.resize(img_color, None, fx=0.5, fy=0.5)
# cv2.imshow('vis', img_color)
# cv2.waitKey(0)
# visname = imgname.replace(images, output)
# os.makedirs(os.path.dirname(visname), exist_ok=True)
# cv2.imwrite(visname, img_color)
else:
# mywarn('Cannot find in {}'.format(imgname))
pass

def __call__(self, imgname, images='images', output='output'):
import os
from .file_utils import read_json, save_json
import copy
img_color = cv2.imread(imgname)
annotname = imgname.replace('images', 'chessboard').replace('.jpg', '.json')
if os.path.exists(annotname):
annots = read_json(annotname)
if annots['visited']:
return
else:
annots = copy.deepcopy(self.template)
annots['visited'] = True
self.detect(img_color, annots)
annots['keypoints2d'] = annots['keypoints2d'].tolist()
annots['keypoints3d'] = annots['keypoints3d'].tolist()
save_json(annotname, annots)
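For reference, a minimal usage sketch of the CharucoBoard class added above, assuming the legacy contrib aruco API that this code targets (opencv-contrib-python before 4.7, which still exposes cv2.aruco.CharucoBoard_create and cv2.aruco.interpolateCornersCharuco). The board geometry and the image path are made-up examples:

from easymocap.annotator.chessboard import CharucoBoard

# hypothetical 6x9 board with 10 cm squares, 7.5 cm markers, 4X4_50 dictionary
board = CharucoBoard(long=9, short=6, squareLength=0.10, aruco_len=0.075, aruco_type="4X4_50")
# reads <root>/images/0/000000.jpg and writes <root>/chessboard/0/000000.json
board('/path/to/dataset/images/0/000000.jpg')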
@@ -229,8 +229,13 @@ class Base(BaseData):
self.image_names = get_allname(self.root, self.subs, self.ranges, **reader.image)
self.reader = reader
self.writer = writer
if camera != 'none' and os.path.exists(camera):
cameras = read_cameras(camera)
if camera != 'none':
if not os.path.isabs(camera):
camera = join(self.root, camera)
if os.path.exists(camera):
cameras = read_cameras(camera)
else:
cameras = None
else:
cameras = None
self.cameras = cameras
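A small standalone sketch of the camera-path handling introduced by this hunk: a relative path is now resolved against the dataset root before the existence check, and a missing or 'none' camera falls back to None. The function name is illustrative:

import os
from os.path import join, isabs

def resolve_camera_path(root, camera):
    # 'none' disables camera loading; relative paths are resolved against the dataset root
    if camera == 'none':
        return None
    if not isabs(camera):
        camera = join(root, camera)
    return camera if os.path.exists(camera) else None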
@@ -417,7 +422,7 @@ class ImageFolder(Base):
if self.loadmp:
for i, data_ in enumerate(data['annots']['annots']):
if keyname not in data_.keys():
if key not in self.cache_shape.keys():
if key not in self.cache_shape.keys() and self.read_flag[keyname]:
cache_shape = {
    'handl2d': np.zeros((21, 3)),
    'handr2d': np.zeros((21, 3)),
@@ -473,6 +478,23 @@ class ImageFolder(Base):
data[key+'_distort'] = np.stack([d[key+'_distort'] for d in data['annots']])
else:
data.pop('annots')
if not self.loadmp:
if 'depth' in self.reader.keys():
depthname = join(self.root, self.reader['depth']['root'], self.get_view(index)[1], '{}.png'.format(os.path.basename(data['annname']).replace('.json', '')))
depthmap = cv2.imread(depthname, cv2.IMREAD_UNCHANGED)
depthmap = depthmap.astype(np.float32)/1000.
depths = np.zeros_like(data['keypoints2d'][:, :2])
for i, (x, y, c) in enumerate(data['keypoints2d']):
if c < 0.3:continue
if i >= 15:continue
x, y = int(x+0.5), int(y+0.5)
if x > depthmap.shape[0] or y > depthmap.shape[1] or x < 0 or y < 0:
continue
d_value = depthmap[y, x]
if d_value < 0.1:continue
depths[i, 0] = d_value
depths[i, 1] = c
data['depth'] = depths
return data

def vis_data(self, data, img=None):
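For context, a self-contained sketch of the per-keypoint depth lookup added above: the depth PNG stores millimeters, and each confident joint gets a (depth, confidence) pair. The bounds check below is written explicitly against (height, width), which is an assumption about the intended behaviour rather than a literal copy of the hunk:

import cv2
import numpy as np

def sample_depth(depthname, keypoints2d, conf_thres=0.3, max_joint=15):
    # depth PNGs store millimeters; convert to meters
    depthmap = cv2.imread(depthname, cv2.IMREAD_UNCHANGED).astype(np.float32) / 1000.
    H, W = depthmap.shape[:2]
    depths = np.zeros((keypoints2d.shape[0], 2), dtype=np.float32)
    for i, (x, y, c) in enumerate(keypoints2d):
        if c < conf_thres or i >= max_joint:
            continue
        xi, yi = int(x + 0.5), int(y + 0.5)
        if xi < 0 or yi < 0 or xi >= W or yi >= H:
            continue
        d = depthmap[yi, xi]
        if d < 0.1:  # treat near-zero readings as missing
            continue
        depths[i] = (d, c)
    return depths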
@@ -503,14 +525,27 @@ class MultiVideo(ImageFolder):
camera_for_each_image = False
if 'camera' in self.reader.keys():
camera_for_each_image = True
cameras = read_cameras(join(self.root, self.reader['camera'], sub))
if os.path.exists(join(self.root, self.reader['camera'], sub)):
cameras = read_cameras(join(self.root, self.reader['camera'], sub))
elif os.path.exists(join(self.root, self.reader['camera'])):
cameras = read_cameras(join(self.root, self.reader['camera']))
else:
myerror("You must give a valid camera path")
raise NotImplementedError
for info in tqdm(self.image_dict[sub], 'Loading {}'.format(sub)):
basename = os.path.basename(info['imgname']).split('.')[0]
if camera_for_each_image:
K, dist = cameras[basename]['K'], cameras[basename]['dist']
if basename in cameras.keys():
camera = cameras[basename]
elif sub+'/'+basename in cameras.keys():
camera = cameras[sub+'/'+basename]
else:
myerror("You must give a valid camera")
raise NotImplementedError
K, dist = camera['K'], camera['dist']
data = super().__getitem__(info['index'], K=K, dist=dist)
for oldkey, newkey in [('K', 'K'), ('dist', 'dist'), ('R', 'Rc'), ('T', 'Tc')]:
data[newkey] = cameras[basename][oldkey].astype(np.float32)
data[newkey] = camera[oldkey].astype(np.float32)
else:
data = super().__getitem__(info['index'])
data_all.append(data)
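A short sketch of the per-image camera lookup this hunk introduces: calibration entries may be keyed either by the frame basename or by '<sub>/<basename>', and anything else is reported as an error. The helper name is illustrative:

def lookup_camera(cameras, sub, basename):
    # cameras maps names to dicts holding 'K', 'dist', 'R', 'T'
    if basename in cameras:
        return cameras[basename]
    if sub + '/' + basename in cameras:
        return cameras[sub + '/' + basename]
    raise KeyError('no camera found for {}/{}'.format(sub, basename))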
@@ -518,6 +553,12 @@ class MultiVideo(ImageFolder):
ret = self.collect_data(data_all)
if self.loadmp and self.compose_mp: # for the mirror setting, multi-person data needs to be loaded
for key in ['keypoints2d', 'keypoints2d_distort', 'keypoints2d_unproj']:
if len(self.pids) > 0:
for i in range(len(ret[key])):
ret[key][i] = ret[key][i][:len(self.pids)]
shapes = set([v.shape for v in ret[key]])
if len(shapes) > 1:
myerror('The shape is not the same!')
ret[key] = np.stack(ret[key])
ret['pid'] = self.pids
return ret
@@ -250,6 +250,42 @@ class MultiStage:
infos[key] = val.detach().cpu()
return body_params

def fit_data(self, data, body_model):
infos = data.copy()
init_params = body_model.init_params(nFrames=infos['nFrames'], nPerson=infos.get('nPerson', 1))
# first initialize the model
for name, init_func in self.initialize.items():
if 'loss' in init_func.keys():
# fitting to initialize
init_params = self.fit_stage(body_model, init_params, infos, init_func, 0)
else:
# use initialize module
init_module = load_object(init_func.module, init_func.args)
init_params = init_module(body_model, init_params, infos)
# if there are multiple initialization params
# then fit each of them
if not isinstance(init_params, list):
init_params = [init_params]
results = []
for init_param in init_params:
# check the repeat params
body_params = init_param
for stage_name, stage in self.stages.items():
for irepeat in range(stage.get('repeat', 1)):
with Timer('optimize {}'.format(stage_name), not self.monitor.timer):
body_params = self.fit_stage(body_model, body_params, infos, stage, irepeat)
results.append(body_params)
# select the best results
if len(results) > 1:
# check the result
loss = load_object(self.check.module, self.check.args, **{key:infos[key] for key in self.check.infos})
metrics = [loss(body_model.keypoints(body_params, return_tensor=True).cpu()).item() for body_params in results]
best_idx = np.argmin(metrics)
else:
best_idx = 0
body_params = Params(**results[best_idx])
return body_params, infos

def fit(self, body_model, dataset):
batch_size = len(dataset) if self.batch_size == -1 else self.batch_size
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0, drop_last=False)
@@ -257,42 +293,10 @@ class MultiStage:
dataloader = tqdm(dataloader, desc='optimizing')
for data in dataloader:
data = dataset.reshape_data(data)
infos = data.copy()
init_params = body_model.init_params(nFrames=infos['nFrames'], nPerson=infos.get('nPerson', 1))
# first initialize the model
for name, init_func in self.initialize.items():
if 'loss' in init_func.keys():
# fitting to initialize
init_params = self.fit_stage(body_model, init_params, infos, init_func, 0)
else:
# use initialize module
init_module = load_object(init_func.module, init_func.args)
init_params = init_module(body_model, init_params, infos)
# if there are multiple initialization params
# then fit each of them
if not isinstance(init_params, list):
init_params = [init_params]
results = []
for init_param in init_params:
# check the repeat params
body_params = init_param
for stage_name, stage in self.stages.items():
for irepeat in range(stage.get('repeat', 1)):
with Timer('optimize {}'.format(stage_name), not self.monitor.timer):
body_params = self.fit_stage(body_model, body_params, infos, stage, irepeat)
results.append(body_params)
# select the best results
if len(results) > 1:
# check the result
loss = load_object(self.check.module, self.check.args, **{key:infos[key] for key in self.check.infos})
metrics = [loss(body_model.keypoints(body_params, return_tensor=True).cpu()).item() for body_params in results]
best_idx = np.argmin(metrics)
else:
best_idx = 0
body_params, infos = self.fit_data(data, body_model)
if 'sync_offset' in body_params.keys():
offset = body_params.pop('sync_offset')
dataset.write_offset(offset)
body_params = Params(**results[best_idx])
if data['nFrames'] != body_params['poses'].shape[0]:
for key in body_params.keys():
if body_params[key].shape[0] == 1:continue
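Taken together with the fit_data method added in the previous hunk, the driver loop now reduces to roughly the following; this is an illustrative paraphrase of the new code with made-up names, not an exact copy:

def run_fitting(multistage, body_model, dataset, dataloader):
    # illustrative driver mirroring the refactored MultiStage.fit loop
    for data in dataloader:
        data = dataset.reshape_data(data)
        body_params, infos = multistage.fit_data(data, body_model)
        if 'sync_offset' in body_params.keys():
            # the per-view time offset is written back to the dataset instead of staying in the params
            dataset.write_offset(body_params.pop('sync_offset'))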
@@ -3,7 +3,7 @@
@ Author: Qing Shuai
@ Mail: s_q@zju.edu.cn
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-07-11 22:20:44
@ LastEditTime: 2022-08-30 19:47:04
@ FilePath: /EasyMocapPublic/easymocap/multistage/init_cnn.py
'''
import os
@@ -82,7 +82,10 @@ class InitSpin:
print('[WARN] not enough joints: {} in first frame'.format(imgname))
else:
print('[WARN] not enough joints: {}'.format(imgname))
result = {'body_params': params_all[-1][pid]}
if self.multi_person:
result = {'body_params': params_all[-1][pid]}
else:
result = {'body_params': params_all[-1]}
params = result['body_params']
params['id'] = pid
params_current.append(params)
@@ -73,7 +73,9 @@ class InitRT:
self.torso = torso

def __call__(self, body_model, body_params, infos):
keypoints3d = infos['keypoints3d'].detach().cpu().numpy()
keypoints3d = infos['keypoints3d']
if torch.is_tensor(keypoints3d):
keypoints3d = keypoints3d.detach().cpu().numpy()
temp_joints = body_model.keypoints(body_params, return_tensor=False)

torso = keypoints3d[..., self.torso, :3].copy()
@@ -242,6 +242,8 @@ class AnySmooth(LossBase):
return name

def check_at_start(self, **kwargs):
if self.key not in kwargs.keys():
return 0
value = kwargs[self.key]
if value.shape[0] < len(self.weight):
return 0
@@ -253,6 +255,8 @@ class AnySmooth(LossBase):
return super().check_at_start(**kwargs)

def check_at_end(self, **kwargs):
if self.key not in kwargs.keys():
return 0
value = kwargs[self.key]
if value.shape[0] < len(self.weight):
return 0
@@ -587,3 +591,24 @@ class Keypoints2D(BaseKeypoints):
header.append('after(pix)')
contents.append(err_after.detach().cpu().numpy().tolist())
print_table(header, contents)

class DepthLoss(LossBase):
def __init__(self, K, Rc, Tc, depth, norm, norm_info, index_est=[]):
super().__init__()
P = torch.bmm(K, torch.cat([Rc, Tc], dim=-1))
self.register_buffer('P', P)
self.index_est = index_est
depth = BaseKeypoints.select(depth, self.index_est, [])
self.register_buffer('depth', depth)
self.einsum = 'fab,fnb->fna'
self.lossfunc = make_loss(norm, norm_info)

def forward(self, kpts_est, **kwargs):
kpts_est = BaseKeypoints.select(kpts_est, self.index_est, [])
kpts_homo = torch.ones_like(kpts_est[..., -1:])
kpts_homo = torch.cat([kpts_est, kpts_homo], dim=-1)
point_cam = torch.einsum(self.einsum, self.P, kpts_homo)
depth = point_cam[..., -1]
conf = self.depth[..., 1]
loss = self.lossfunc(depth[..., None], self.depth[..., :1], conf)
return loss
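The new DepthLoss compares the camera-space depth of the estimated joints against the measured depth. A minimal numpy sketch of the projection it relies on, P = K [R | T] applied to homogeneous points, with the third component being the depth; shapes follow the comments in the class and the frame/joint counts are made up:

import numpy as np

F, N = 10, 25                                 # frames, joints (illustrative)
K = np.eye(3)[None].repeat(F, 0)              # (F, 3, 3) intrinsics
R = np.eye(3)[None].repeat(F, 0)              # (F, 3, 3) rotations
T = np.zeros((F, 3, 1))                       # (F, 3, 1) translations
P = K @ np.concatenate([R, T], axis=-1)       # (F, 3, 4) projection matrices

kpts = np.random.rand(F, N, 3)                # estimated 3D joints
kpts_h = np.concatenate([kpts, np.ones((F, N, 1))], axis=-1)   # homogeneous coordinates
point_cam = np.einsum('fab,fnb->fna', P, kpts_h)               # (F, N, 3)
depth_est = point_cam[..., -1]                # camera-space depth per joint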
@@ -193,7 +193,7 @@ class Undistort:
kpts = keypoints[:, None, :2]
kpts = np.ascontiguousarray(kpts)
kpts = cv2.undistortPoints(kpts, K, dist, P=K)
keypoints[:, :2] = kpts[:, 0]
keypoints = np.hstack([kpts[:, 0], keypoints[:, 2:]])
return keypoints

@staticmethod
@@ -221,7 +221,7 @@ class UndistortFisheye:
kpts = keypoints[:, None, :2]
kpts = np.ascontiguousarray(kpts)
kpts = cv2.fisheye.undistortPoints(kpts, K, dist, P=Knew)
keypoints[:, :2] = kpts[:, 0]
keypoints = np.hstack([kpts[:, 0], keypoints[:, 2:]])
return keypoints

@staticmethod
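Both hunks replace the in-place write keypoints[:, :2] = kpts[:, 0] with an np.hstack that builds a fresh array, which keeps the confidence column while avoiding mutation of the caller's array; that reading of the intent is an inference, not something stated in the commit. A standalone sketch for the pinhole case:

import cv2
import numpy as np

def undistort_keypoints(keypoints, K, dist):
    # keypoints: (N, 3) array of (x, y, confidence)
    kpts = np.ascontiguousarray(keypoints[:, None, :2].astype(np.float64))
    kpts = cv2.undistortPoints(kpts, K, dist, P=K)   # map back to pixel coordinates via P=K
    return np.hstack([kpts[:, 0], keypoints[:, 2:]])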
easymocap/mytools/timer.py (new file, 80 lines)
@@ -0,0 +1,80 @@
'''
@ Date: 2021-01-15 11:12:00
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-09-27 15:50:50
@ FilePath: /EasyMocapPublic/easymocap/mytools/timer.py
'''
import time
import tabulate

class Timer:
    records = {}
    tmp = None
    indent = -1

    @classmethod
    def tic(cls):
        cls.tmp = time.time()
    @classmethod
    def toc(cls):
        res = (time.time() - cls.tmp) * 1000
        cls.tmp = None
        return res

    @classmethod
    def report(cls):
        header = ['', 'Time(ms)']
        contents = []
        for key, val in cls.records.items():
            contents.append(['{:20s}'.format(key), '{:.2f}'.format(sum(val)/len(val))])
        print(tabulate.tabulate(contents, header, tablefmt='fancy_grid'))

    def __init__(self, name, silent=False):
        self.name = name
        self.silent = silent
        if name not in Timer.records.keys():
            Timer.records[name] = []

    def __enter__(self):
        self.start = time.time()
        Timer.indent += 1

    def __exit__(self, exc_type, exc_value, exc_tb):
        end = time.time()
        Timer.records[self.name].append((end-self.start)*1000)
        indent = self.indent * ' '
        if not self.silent:
            t = (end - self.start)*1000
            if t > 1000:
                print('-> {}[{:20s}]: {:5.1f}s'.format(indent, self.name, t/1000))
            elif t > 1e3*60*60:
                print('-> {}[{:20s}]: {:5.1f}min'.format(indent, self.name, t/1e3/60))
            else:
                print('-> {}[{:20s}]: {:5.1f}ms'.format(indent, self.name, (end-self.start)*1000))
        Timer.indent -= 1

    @staticmethod
    def timer(name):
        from functools import wraps
        def decorator(func):
            @wraps(func)
            def wrapped_function(*args, **kwargs):
                with Timer(name):
                    ret = func(*args, **kwargs)
                return ret
            return wrapped_function
        return decorator

if __name__ == '__main__':
    @Timer.timer('testfunc')
    def dummyfunc():
        time.sleep(1)
    with Timer('level0'):
        with Timer('level1'):
            with Timer('level2'):
                time.sleep(1)
            time.sleep(1)
        time.sleep(1)
    dummyfunc()
    Timer.report()
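The __main__ block above already demonstrates the context-manager and decorator forms; for completeness, a small sketch of the tic/toc classmethods, which time an ad-hoc span in milliseconds:

from easymocap.mytools.timer import Timer
import time

Timer.tic()
time.sleep(0.2)           # some work to be timed
elapsed_ms = Timer.toc()  # roughly 200.0, in milliseconds
print('{:.1f}ms'.format(elapsed_ms))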
@@ -7,6 +7,16 @@ from easymocap.mytools.vis_base import merge, plot_keypoints_auto
from .debug_utils import log, mywarn, myerror

def batch_triangulate(keypoints_, Pall, min_view=2):
""" triangulate the keypoints of whole body

Args:
keypoints_ (nViews, nJoints, 3): 2D detections
Pall (nViews, 3, 4): projection matrix of each view
min_view (int, optional): min view for visible points. Defaults to 2.

Returns:
keypoints3d: (nJoints, 4)
"""
# keypoints: (nViews, nJoints, 3)
# Pall: (nViews, 3, 4)
# A: (nJoints, nViewsx2, 4), x: (nJoints, 4, 1); b: (nJoints, nViewsx2, 1)
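The docstring added above describes the linear (DLT) triangulation that batch_triangulate performs. A compact single-joint sketch of that construction, written independently of the EasyMocap implementation:

import numpy as np

def triangulate_point(uvc, Pall, min_view=2):
    # uvc: (nViews, 3) detections (u, v, conf); Pall: (nViews, 3, 4) projection matrices
    valid = uvc[:, 2] > 0
    if valid.sum() < min_view:
        return np.zeros(4)
    A = []
    for (u, v, c), P in zip(uvc[valid], Pall[valid]):
        # each visible view contributes two rows, weighted by its confidence
        A.append(c * (u * P[2] - P[0]))
        A.append(c * (v * P[2] - P[1]))
    A = np.stack(A)
    _, _, Vt = np.linalg.svd(A)
    X = Vt[-1]
    X = X / X[3]                      # dehomogenize
    conf = uvc[valid, 2].mean()
    return np.array([X[0], X[1], X[2], conf])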
@@ -589,6 +599,7 @@ class SimpleMatchAndTriangulator(SimpleTriangulator):
plt.vlines([i-0.5 for i in dimGroups[1:]], -0.5, M-0.5, 'w')
plt.ioff()
plt.show()
import ipdb;ipdb.set_trace()
return aff_svt

def _track_add(self, res):
@@ -2,7 +2,7 @@
@ Date: 2021-11-27 16:50:33
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-04-13 18:19:03
@ LastEditTime: 2022-10-19 21:37:49
@ FilePath: /EasyMocapPublic/easymocap/visualize/ffmpeg_wrapper.py
'''
import shutil
@@ -41,7 +41,8 @@ class VideoMaker:
self.reorder = True
if self.reorder:
tmpdir = '/tmp/ffmpeg-tmp'
shutil.rmtree(tmpdir)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir, exist_ok=True)
for nf, imgname in tqdm(enumerate(imgnames), desc='copy to /tmp'):
tmpname = join(tmpdir, '{:06d}{}'.format(nf, self.ext))
@@ -2,7 +2,7 @@
@ Date: 2021-05-13 14:20:13
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-06-10 22:47:42
@ LastEditTime: 2022-09-13 12:24:20
@ FilePath: /EasyMocapPublic/easymocap/visualize/pyrender_wrapper.py
'''
import pyrender
@@ -97,7 +97,7 @@ class Renderer:
# material = pyrender.material.SpecularGlossinessMaterial(
#   diffuseFactor=1.0, glossinessFactor=0.0
# )
mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=True)
mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=data.get('smooth', True))
else:
mesh = trimesh.Trimesh(vert, faces, vertex_colors=data['colors'], process=False)
mesh.apply_transform(rot)
@@ -149,17 +149,12 @@ def plot_meshes(img, meshes, K, R, T, mode='image'):

# These colors are in BGR order: the renderer itself works in RGB, but the result is composited with the (BGR) image, so it ends up BGR again.
colors = [
    # (0.5, 0.2, 0.2, 1.), # Defalut BGR
    (.5, .5, .7, 1.), # Pink BGR
    (.44, .50, .98, 1.), # Red
    (.7, .7, .6, 1.), # Neutral
    (.5, .5, .7, 1.), # Blue
    (.5, .55, .3, 1.), # capsule
    (.3, .5, .55, 1.), # Yellow
    # (.6, .6, .6, 1.), # gray
    (.9, 1., 1., 1.),
    (0.95, 0.74, 0.65, 1.),
    (.9, .7, .7, 1.)
    (94/255, 124/255, 226/255), # cyan
    (255/255, 200/255, 87/255), # yellow
    (74/255., 189/255., 172/255.), # green
    (8/255, 76/255, 97/255), # blue
    (219/255, 58/255, 52/255), # red
    (77/255, 40/255, 49/255), # brown
]

colors_table = {