update annotator, visualization

shuaiqing 2021-08-28 20:50:20 +08:00
parent bbc1432e68
commit f969c0e972
31 changed files with 3920 additions and 254 deletions

View File

@ -2,8 +2,8 @@
@ Date: 2021-07-16 20:13:57
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-07-17 19:25:00
@ FilePath: /EasyMocapRelease/apps/calibration/detect_chessboard.py
@ LastEditTime: 2021-07-21 19:56:38
@ FilePath: /EasyMocap/apps/calibration/detect_chessboard.py
'''
# detect the corners of the chessboard
from easymocap.annotator.file_utils import getFileList, read_json, save_json
@ -60,7 +60,7 @@ def detect_chessboard(path, out, pattern, gridSize, args):
cv2.imwrite(outname, show)
def detect_chessboard_sequence(path, out, pattern, gridSize, args):
# create_chessboard(path, pattern, gridSize, ext=args.ext)
create_chessboard(path, pattern, gridSize, ext=args.ext)
subs = sorted(os.listdir(join(path, 'images')))
for sub in subs:
dataset = ImageFolder(path, sub=sub, annot='chessboard', ext=args.ext)
@ -69,8 +69,8 @@ def detect_chessboard_sequence(path, out, pattern, gridSize, args):
found = np.zeros(nFrames, dtype=np.bool)
visited = np.zeros(nFrames, dtype=np.bool)
proposals = []
init_step = 50
min_step = 1
init_step = args.max_step
min_step = args.min_step
for nf in range(0, nFrames, init_step):
if nf + init_step < len(dataset):
proposals.append([nf, nf+init_step])
@ -98,6 +98,8 @@ def detect_chessboard_sequence(path, out, pattern, gridSize, args):
if not found[left] and not found[right]:
continue
mid = (left+right)//2
if mid == left or mid == right:
continue
if mid - left > min_step:
proposals.append((left, mid))
if right - mid > min_step:
@ -113,6 +115,9 @@ if __name__ == "__main__":
help='The pattern of the chessboard', default=(9, 6))
parser.add_argument('--grid', type=float, default=0.1,
help='The length of the grid size (unit: meter)')
parser.add_argument('--max_step', type=int, default=50)
parser.add_argument('--min_step', type=int, default=0)
parser.add_argument('--silent', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--seq', action='store_true')
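For reference, a standalone sketch (not code from this repo) of the coarse-to-fine visiting order that detect_chessboard_sequence implements with the new --max_step/--min_step arguments; it ignores the detection results for brevity, whereas the real loop only refines an interval when a chessboard was found at one of its endpoints.

def visit_order(nFrames, max_step=50, min_step=0):
    # first pass: every max_step-th frame
    order = list(range(0, nFrames, max_step))
    proposals = [[nf, nf + max_step] for nf in range(0, nFrames, max_step)
                 if nf + max_step < nFrames]
    # refinement: split each interval at its midpoint
    while proposals:
        left, right = proposals.pop(0)
        mid = (left + right) // 2
        if mid == left or mid == right:  # the new guard against degenerate splits
            continue
        order.append(mid)
        if mid - left > min_step:
            proposals.append([left, mid])
        if right - mid > min_step:
            proposals.append([mid, right])
    return order

# visit_order(10, max_step=4) == [0, 4, 8, 2, 6, 1, 3, 5, 7]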

apps/vis/vis_smpl.py (new file, 50 lines)
View File

@ -0,0 +1,50 @@
'''
@ Date: 2021-07-19 20:37:16
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-08-28 20:42:44
@ FilePath: /EasyMocapRelease/apps/vis/vis_smpl.py
'''
from easymocap.config import Config, load_object
import open3d as o3d
from easymocap.visualize.o3dwrapper import Vector3dVector, create_mesh, create_coord
import numpy as np
def update_vis(vis, mesh, body_model, params):
vertices = body_model(return_verts=True, return_tensor=False, **params)[0]
mesh.vertices = Vector3dVector(vertices)
vis.update_geometry(mesh)
vis.poll_events()
vis.update_renderer()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str,
default='config/model/smpl_neutral.yml')
parser.add_argument('--key', type=str,
default='poses')
parser.add_argument('--num', type=int, default=50)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
key = args.key
config = Config.load(args.cfg)
body_model = load_object(config.module, config.args)
params = body_model.init_params(1)
vertices = body_model(return_verts=True, return_tensor=False, **params)
joints = body_model(return_verts=False, return_smpl_joints=True, return_tensor=False, **params)
model = create_mesh(vertices=vertices[0], faces=body_model.faces)
vis = o3d.visualization.Visualizer()
vis.create_window(width=900, height=900)
vis.add_geometry(model)
params = body_model.init_params(1)
var_ranges = np.linspace(0, np.pi/2, args.num)
var_ranges = np.concatenate([-var_ranges, -var_ranges[::-1], var_ranges, var_ranges[::-1]])
for npose in range(54, params[key].shape[1]):
print('[Vis] {}: {}'.format(key, npose))
for i in range(var_ranges.shape[0]):
params[key][0, npose] = var_ranges[i]
update_vis(vis, model, body_model, params)
import ipdb; ipdb.set_trace()

View File

@ -2,7 +2,7 @@
* @Date: 2021-04-02 11:52:33
* @Author: Qing Shuai
* @LastEditors: Qing Shuai
* @LastEditTime: 2021-06-21 21:18:45
* @LastEditTime: 2021-07-22 20:58:33
* @FilePath: /EasyMocapRelease/doc/installation.md
-->
# EasyMocap - Installation
@ -74,7 +74,7 @@ data
- torch==1.4.0
- torchvision==0.5.0
- opencv-python
- [pyrender](https://pyrender.readthedocs.io/en/latest/install/index.html#python-installation): for visualization
- [pyrender](https://pyrender.readthedocs.io/en/latest/install/index.html#python-installation): for visualization, or [pyrender for a server without a screen](https://pyrender.readthedocs.io/en/latest/install/index.html#getting-pyrender-working-with-osmesa) (a minimal headless setup is sketched after this list).
- chumpy: for loading SMPL model
- OpenPose[4]: for 2D pose
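A minimal headless-rendering sketch (an assumption based on the pyrender documentation linked above, not code from this repo): the OSMesa platform has to be selected before pyrender is imported for the first time.

import os
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'  # must be set before importing pyrender
import pyrender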

View File

@ -2,11 +2,13 @@
* @Date: 2021-04-02 11:53:16
* @Author: Qing Shuai
* @LastEditors: Qing Shuai
* @LastEditTime: 2021-06-14 14:26:19
* @LastEditTime: 2021-07-22 20:57:16
* @FilePath: /EasyMocapRelease/doc/quickstart.md
-->
# Quick Start
First, install this project by following the [installation guide](./installation.md).
## Demo
We provide an example multiview dataset[[dropbox](https://www.dropbox.com/s/24mb7r921b1g9a7/zju-ls-feng.zip?dl=0)][[BaiduDisk](https://pan.baidu.com/s/1lvAopzYGCic3nauoQXjbPw)(vg1z)], which has 800 frames from 23 synchronized and calibrated cameras. After downloading the dataset, you can run the following example scripts.

View File

@ -2,8 +2,8 @@
@ Date: 2021-01-25 21:27:56
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-25 15:50:40
@ FilePath: /EasyMocapRelease/easymocap/affinity/plucker.py
@ LastEditTime: 2021-07-28 17:18:20
@ FilePath: /EasyMocap/easymocap/affinity/plucker.py
'''
import numpy as np
@ -14,9 +14,9 @@ def plucker_from_pl(point, line):
point {tensor} -- N, 3
line {tensor} -- N, 3
"""
norm = np.linalg.norm(line, axis=1, keepdims=True)
norm = np.linalg.norm(line, axis=-1, keepdims=True)
lunit = line/norm
moment = np.cross(point, lunit, axis=1)
moment = np.cross(point, lunit, axis=-1)
return lunit, moment
def plucker_from_pp(point1, point2):
@ -70,3 +70,16 @@ def computeRay(keypoints2d, invK, R, T):
res = np.hstack((l, m, conf))
# pad an extra dimension for compatibility with the cpp version
return res[None, :, :]
def computeRaynd(keypoints2d, invK, R, T):
# keypoints2d: (..., 3)
conf = keypoints2d[..., 2:]
# cam_center: (1, 3)
cam_center = - (R.T @ T).T
kp_pixel = np.concatenate([keypoints2d[..., :2], np.ones_like(conf)], axis=-1)
kp_all_3d = (kp_pixel @ invK.T - T.T) @ R
while len(cam_center.shape) < len(kp_all_3d.shape):
cam_center = cam_center[None]
l, m = plucker_from_pp(cam_center, kp_all_3d)
res = np.concatenate((l, m, conf), axis=-1)
return res
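A small usage sketch (not in the repo) for the Plücker representation returned by computeRaynd: with unit direction l and moment m = c × l, the distance from any 3D point p to the supporting line is ||p × l − m||.

import numpy as np

def point_to_line_distance(p, l, m):
    # p, l, m: (..., 3); l is assumed to be unit length, m = cam_center x l
    return np.linalg.norm(np.cross(p, l) - m, axis=-1)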

View File

@ -1,7 +1,19 @@
from .basic_dataset import ImageFolder
from .basic_visualize import vis_point, vis_line
from .basic_visualize import plot_bbox_body, plot_skeleton, plot_skeleton_simple, plot_text, vis_active_bbox
from .basic_annotator import AnnotBase
'''
@ Date: 2021-04-15 16:56:18
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-09 10:16:29
@ FilePath: /EasyMocap/easymocap/annotator/__init__.py
'''
from .basic_annotator import load_parser, parse_parser
from .basic_dataset import ImageFolder, MVBase
from .basic_visualize import vis_point, vis_line, vis_bbox
from .basic_visualize import plot_bbox_body, plot_skeleton, plot_text, vis_active_bbox, plot_bbox_factory
from .basic_annotator import AnnotBase, AnnotMV
from .chessboard import findChessboardCorners
# bbox callbacks
from .bbox_callback import callback_select_bbox_center, callback_select_bbox_corner, auto_pose_track
# create, delete, copy
from .bbox_callback import create_bbox, delete_bbox, delete_all_bbox, copy_previous_bbox, copy_previous_missing
# track
from .bbox_callback import get_auto_track
from .bbox_callback import callback_select_bbox_center, callback_select_bbox_corner

View File

@ -1,9 +1,10 @@
import shutil
import cv2
from tqdm import tqdm
from .basic_keyboard import register_keys
from .basic_visualize import resize_to_screen
from .basic_visualize import plot_text, resize_to_screen, merge
from .basic_callback import point_callback, CV_KEY, get_key
from .file_utils import load_annot_to_tmp, save_annot
from .file_utils import load_annot_to_tmp, read_json, save_annot
class ComposedCallback:
def __init__(self, callbacks=[point_callback], processes=[]) -> None:
@ -12,7 +13,7 @@ class ComposedCallback:
def call(self, event, x, y, flags, param):
scale = param['scale']
x, y = int(x/scale), int(y/scale)
x, y = int(round(x/scale)), int(round(y/scale))
for callback in self.callbacks:
callback(event, x, y, flags, param)
for key in ['click', 'start', 'end']:
@ -23,25 +24,50 @@ class ComposedCallback:
for process in self.processes:
process(**param)
def get_valid_yn():
while True:
key = input('Save these annotations? [y/n]')
if key in ['y', 'n']:
break
print('Please specify [y/n]')
return key
restore_key = {
'body25': ('bbox', 'keypoints'),
'handl': ('bbox_handl2d', 'handl2d'),
'handr': ('bbox_handr2d', 'handr2d'),
}
class AnnotBase:
def __init__(self, dataset, key_funcs={}, callbacks=[], vis_funcs=[],
name = 'main',
step=1) -> None:
name = 'main', body='body25',
start=0, end=100000, step=10, no_window=False) -> None:
self.name = name
self.dataset = dataset
self.nFrames = len(dataset)
self.step = step
self.register_keys = register_keys.copy()
self.register_keys.update(key_funcs)
self.no_img = False
if resize_to_screen not in vis_funcs:
vis_funcs += [resize_to_screen]
self.vis_funcs = vis_funcs
self.start = start
self.end = end
self.vis_funcs = vis_funcs + [resize_to_screen]
self.isOpen = True
self._frame = 0
self._frame = self.start
self.visited_frames = set([self._frame])
self.param = {'select': {'bbox': -1, 'corner': -1},
'start': None, 'end': None, 'click': None,
bbox_name, kpts_name = restore_key[body]
self.param = {
'frame': 0, 'nFrames': self.nFrames,
'kpts_name': kpts_name, 'bbox_name': bbox_name,
'select': {bbox_name: -1, 'corner': -1},
'click': None,
'name': name,
'capture_screen':False}
self.set_frame(0)
self.set_frame(self.start)
self.no_window = no_window
if not no_window:
cv2.namedWindow(self.name)
callback = ComposedCallback(processes=callbacks)
cv2.setMouseCallback(self.name, callback.call, self.param)
@ -57,32 +83,29 @@ class AnnotBase:
flag = True
return flag
def clear_working(self):
self.param['click'] = None
self.param['start'] = None
self.param['end'] = None
for key in self.param['select']:
self.param['select'][key] = -1
@staticmethod
def clear_working(param):
param['click'] = None
param['start'] = None
param['end'] = None
for key in param['select']:
param['select'][key] = -1
def save_and_quit(self):
def save_and_quit(self, key=None):
self.frame = self.frame
self.isOpen = False
cv2.destroyWindow(self.name)
# get the input
while True:
key = input('Saving this annotations? [y/n]')
if key in ['y', 'n']:
break
print('Please specify [y/n]')
if key is None:
key = get_valid_yn()
if key == 'n':
return 0
if key == 'n':
return 0
for frame in self.visited_frames:
for frame in tqdm(self.visited_frames, desc='writing'):
self.dataset.isTmp = True
_, annname = self.dataset[frame]
self.dataset.isTmp = False
_, annname_ = self.dataset[frame]
if annname is not None:
shutil.copy(annname, annname_)
@property
@ -97,24 +120,33 @@ class AnnotBase:
annots = load_annot_to_tmp(annname)
return annots
def set_frame(self, nf):
self.clear_working()
imgname, annname = self.dataset[nf]
img0 = cv2.imread(imgname)
@staticmethod
def set_param(param, imgname, annname, nf, no_img=False):
annots = load_annot_to_tmp(annname)
# clear the keyboard state
for key in ['click', 'start', 'end']:
self.param[key] = None
param[key] = None
# clear the selection
for key in self.param['select']:
self.param['select'][key] = -1
self.param['imgname'] = imgname
self.param['annname'] = annname
self.param['frame'] = nf
self.param['annots'] = annots
self.param['img0'] = img0
# self.param['pid'] = len(annot['annots'])
self.param['scale'] = min(CV_KEY.WINDOW_HEIGHT/img0.shape[0], CV_KEY.WINDOW_WIDTH/img0.shape[1])
for key in param['select']:
param['select'][key] = -1
param['imgname'] = imgname
param['annname'] = annname
param['frame'] = nf
param['annots'] = annots
if not no_img:
img0 = cv2.imread(imgname)
param['img0'] = img0
# param['pid'] = len(annot['annots'])
param['scale'] = min(CV_KEY.WINDOW_HEIGHT/img0.shape[0], CV_KEY.WINDOW_WIDTH/img0.shape[1])
# param['scale'] = 1
def set_frame(self, nf):
param = self.param
if 'annots' in param.keys():
save_annot(param['annname'], param['annots'])
self.clear_working(param)
imgname, annname = self.dataset[nf]
self.set_param(param, imgname, annname, nf, no_img=self.no_img)
@frame.setter
def frame(self, value):
@ -124,14 +156,232 @@ class AnnotBase:
save_annot(self.param['annname'], self.param['annots'])
self.set_frame(value)
def run(self, key=None):
def run(self, key=None, noshow=False):
if key is None:
key = chr(get_key())
if key in self.register_keys.keys():
self.register_keys[key](self, param=self.param)
if not self.isOpen:
return 0
if noshow:
return 0
img = self.param['img0'].copy()
for func in self.vis_funcs:
img = func(img, **self.param)
if not self.no_window:
cv2.imshow(self.name, img)
class AnnotMV:
def __init__(self, datasets, key_funcs={}, key_funcs_view={}, callbacks=[], vis_funcs=[], vis_funcs_all=[],
name='main', step=100, body='body25', start=0, end=100000) -> None:
self.subs = list(datasets.keys())
self.annotdict = {}
self.nFrames = end
for sub, dataset in datasets.items():
annot = AnnotBase(dataset, key_funcs={}, callbacks=callbacks, vis_funcs=vis_funcs,
name=sub, step=step, body=body, start=start, end=end)
self.annotdict[sub] = annot
self.nFrames = min(self.nFrames, annot.nFrames)
self.isOpen = True
# self.register_keys_view = {key:register_keys[key] for key in 'q'}
self.register_keys_view = {}
if 'w' not in key_funcs:
for key in 'wasd':
self.register_keys_view[key] = register_keys[key]
self.register_keys_view.update(key_funcs_view)
self.register_keys = {
'Q': register_keys['q'],
'h': register_keys['H'],
'A': register_keys['A']
}
self.register_keys.update(key_funcs)
self.vis_funcs_all = vis_funcs_all
self.name = name
self.param = {}
@property
def frame(self):
sub = list(self.annotdict.keys())[0]
return self.annotdict[sub].frame
@property
def working(self):
return False
def save_and_quit(self):
key = get_valid_yn()
for sub, annot in self.annotdict.items():
annot.save_and_quit(key)
self.isOpen = False
def run(self, key=None, noshow=False):
if key is None:
key = chr(get_key())
for sub, annot in self.annotdict.items():
if key in self.register_keys_view.keys():
self.register_keys_view[key](annot, param=annot.param)
else:
annot.run(key='')
if key in self.register_keys.keys():
self.register_keys[key](self, param=self.param)
if len(self.vis_funcs_all) > 0 or True:
imgs = []
for sub in self.subs:
img = self.annotdict[sub].param['img0'].copy()
for func in self.vis_funcs_all:
img = func(img, sub, param=self.annotdict[sub].param)
imgs.append(img)
for func in [merge, resize_to_screen]:
imgs = func(imgs, scale=0.1)
cv2.imshow(self.name, imgs)
import numpy as np
def callback_select_image(click, select, ranges, **kwargs):
if click is None:
return 0
ranges = np.array(ranges)
click = np.array(click).reshape(1, -1)
res = (click[:, 0]>ranges[:, 0])&(click[:, 0]<ranges[:, 2])&(click[:, 1]>ranges[:, 1])&(click[:, 1]<ranges[:, 3])
if res.any():
select['camera'] = int(np.where(res)[0])
class AnnotMVMain:
def __init__(self, datasets, key_funcs={}, key_funcs_view={}, callbacks=[], vis_funcs=[], vis_funcs_all=[],
name='main', step=100, body='body25', start=0, end=100000) -> None:
self.subs = list(datasets.keys())
self.annotdict = {}
self.nFrames = end
for sub, dataset in datasets.items():
annot = AnnotBase(dataset, key_funcs={}, callbacks=callbacks, vis_funcs=vis_funcs,
name=sub, step=step, body=body, start=start, end=end, no_window=True)
self.annotdict[sub] = annot
self.nFrames = min(self.nFrames, annot.nFrames)
self.isOpen = True
self.register_keys_view = {}
self.register_keys = {
'Q': register_keys['q'],
'h': register_keys['H'],
'A': register_keys['A']
}
self.register_keys.update(key_funcs)
self.vis_funcs_all = vis_funcs_all
self.name = name
imgs = self.load_images()
imgs, ranges = merge(imgs, ret_range=True)
self.param = {
'scale': 0.45, 'ranges': ranges,
'click': None, 'start': None, 'end': None,
'select': {'camera': -1}}
callbacks = [callback_select_image]
cv2.namedWindow(self.name)
callback = ComposedCallback(processes=callbacks)
cv2.setMouseCallback(self.name, callback.call, self.param)
@property
def frame(self):
sub = list(self.annotdict.keys())[0]
return self.annotdict[sub].frame
@property
def working(self):
return False
def save_and_quit(self, key=None):
if key is None:
key = get_valid_yn()
for sub, annot in self.annotdict.items():
annot.save_and_quit(key)
self.isOpen = False
def load_images(self):
imgs = []
for sub in self.subs:
img = self.annotdict[sub].param['img0'].copy()
imgs.append(img)
return imgs
def run(self, key=None, noshow=False):
if key is None:
key = chr(get_key())
active_v = self.param['select']['camera']
if active_v == -1:
# run the key for all cameras
if key in self.register_keys.keys():
self.register_keys[key](self, param=self.param)
else:
for sub in self.subs:
self.annotdict[sub].run(key)
else:
# run the key for the selected cameras
self.annotdict[self.subs[active_v]].run(key=key)
if len(self.vis_funcs_all) > 0:
imgs = []
for nv, sub in enumerate(self.subs):
img = self.annotdict[sub].param['img0'].copy()
for func in self.vis_funcs_all:
# img = func(img, sub, param=self.annotdict[sub].param)
img = func(img, **self.annotdict[sub].param)
if self.param['select']['camera'] == nv:
cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), img.shape[1]//100)
# img = plot_text(img, self.annotdict[sub].param['annots'], self.annotdict[sub].param['imgname'])
imgs.append(img)
for func in [merge, resize_to_screen]:
imgs = func(imgs, scale=0.45)
cv2.imshow(self.name, imgs)
def load_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('path', type=str)
parser.add_argument('--out', type=str)
parser.add_argument('--sub', type=str, nargs='+', default=[],
help='the sub folder lists when in video mode')
parser.add_argument('--from_file', type=str, default=None)
parser.add_argument('--image', type=str, default='images')
parser.add_argument('--annot', type=str, default='annots')
parser.add_argument('--body', type=str, default='handl')
parser.add_argument('--step', type=int, default=100)
parser.add_argument('--vis', action='store_true')
parser.add_argument('--debug', action='store_true')
# new arguments
parser.add_argument('--start', type=int, default=0, help='frame start')
parser.add_argument('--end', type=int, default=100000, help='frame end')
return parser
def parse_parser(parser):
import os
from os.path import join
args = parser.parse_args()
if args.from_file is not None and args.from_file.endswith('.txt'):
assert os.path.exists(args.from_file), args.from_file
with open(args.from_file) as f:
datas = f.readlines()
subs = [d for d in datas if not d.startswith('#')]
subs = [d.rstrip().replace('https://www.youtube.com/watch?v=', '') for d in subs]
newsubs = sorted(os.listdir(join(args.path, 'images')))
clips = []
for newsub in newsubs:
if newsub in subs:
continue
if newsub.split('+')[0] in subs:
clips.append(newsub)
for sub in subs:
if os.path.exists(join(args.path, 'images', sub)):
clips.append(sub)
args.sub = sorted(clips)
elif args.from_file is not None and args.from_file.endswith('.json'):
data = read_json(args.from_file)
args.sub = sorted([v['vid'] for v in data])
elif len(args.sub) == 0:
args.sub = sorted(os.listdir(join(args.path, 'images')))
if args.sub[0].isdigit():
args.sub = sorted(args.sub, key=lambda x:int(x))
helps = """
Demo code for annotation:
- Input : {}
- => {}
- => {}
""".format(args.path, ', '.join(args.sub), args.annot)
print(helps)
return args
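A hedged sketch (not part of this commit) of how the refactored pieces might be wired together in an annotation entry script; the ImageFolder arguments follow the call in detect_chessboard.py, and the chosen callbacks/vis functions are just examples of the exports listed above.

from easymocap.annotator import (load_parser, parse_parser, ImageFolder, AnnotBase,
                                 plot_skeleton, plot_text, callback_select_bbox_center)

parser = load_parser()
args = parse_parser(parser)
dataset = ImageFolder(args.path, sub=args.sub[0], annot=args.annot)
annotator = AnnotBase(dataset,
                      callbacks=[callback_select_bbox_center],
                      vis_funcs=[plot_skeleton, plot_text],
                      body='body25', start=args.start, end=args.end, step=args.step)
while annotator.isOpen:
    annotator.run()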

View File

@ -1,3 +1,10 @@
'''
@ Date: 2021-04-21 14:18:50
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-07-11 16:56:39
@ FilePath: /EasyMocap/easymocap/annotator/basic_callback.py
'''
import cv2
class CV_KEY:
@ -38,11 +45,13 @@ def point_callback(event, x, y, flags, param):
return 0
# the position of the selected point has been determined; write it directly
if event == cv2.EVENT_LBUTTONDOWN:
# if a bbox is selected, it must not be cleared on mouse down
param['click'] = None
param['start'] = (x, y)
param['end'] = (x, y)
# clear all selections
# clear all selections: is this step necessary?
for key in param['select'].keys():
if key != 'bbox':
param['select'][key] = -1
elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
param['end'] = (x, y)

View File

@ -1,3 +1,11 @@
'''
@ Date: 2021-04-15 17:39:34
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-07-24 17:01:18
@ FilePath: /EasyMocap/easymocap/annotator/basic_keyboard.py
'''
from glob import glob
from tqdm import tqdm
from .basic_callback import get_key
@ -6,16 +14,36 @@ def print_help(annotator, **kwargs):
print('Here is the help:')
print( '------------------')
for key, val in annotator.register_keys.items():
# print(' {}: {}'.format(key, ': ', str(val.__doc__)))
print(' {}: '.format(key), str(val.__doc__))
def close(annotator, param, **kwargs):
def print_help_mv(annotator, **kwargs):
print_help(annotator)
print( '------------------')
print('Here is the help for each view:')
print( '------------------')
for key, val in annotator.register_keys_view.items():
print(' {}: '.format(key), str(val.__doc__))
def close(annotator, **kwargs):
"""quit the annotation"""
if annotator.working:
annotator.clear_working()
annotator.set_frame(annotator.frame)
else:
annotator.save_and_quit()
# annotator.pbar.close()
def skip(annotator, **kwargs):
"""skip the annotation"""
annotator.save_and_quit(key='y')
def get_any_move(df):
get_frame = lambda x, f: f + df
clip_frame = lambda x, f: max(0, min(x.nFrames-1, f))
def move(annotator, **kwargs):
newframe = get_frame(annotator, annotator.frame)
newframe = clip_frame(annotator, newframe)
annotator.frame = newframe
move.__doc__ = '{} frames'.format(df)
return move
def get_move(wasd):
get_frame = {
@ -30,7 +58,7 @@ def get_move(wasd):
'w': 'Move to last step frame',
's': 'Move to next step frame'
}
clip_frame = lambda x, f: max(0, min(x.nFrames-1, f))
clip_frame = lambda x, f: max(x.start, min(x.nFrames-1, min(x.end-1, f)))
def move(annotator, **kwargs):
newframe = get_frame(annotator, annotator.frame)
newframe = clip_frame(annotator, newframe)
@ -41,7 +69,7 @@ def get_move(wasd):
def set_personID(i):
def func(self, param, **kwargs):
active = param['select']['bbox']
if active == -1:
if active == -1 or active >= len(param['annots']['annots']):
return 0
else:
param['annots']['annots'][active]['personID'] = i
@ -49,15 +77,14 @@ def set_personID(i):
func.__doc__ = "set the bbox ID to {}".format(i)
return func
def delete_bbox(self, param, **kwargs):
"delete the person"
active = param['select']['bbox']
if active == -1:
return 0
else:
param['annots']['annots'].pop(active)
param['select']['bbox'] = -1
def choose_personID(i):
def func(self, param, **kwargs):
for idata, data in enumerate(param['annots']['annots']):
if data['personID'] == i:
param['select']['bbox'] = idata
return 0
func.__doc__ = "choose the bbox of ID {}".format(i)
return func
def capture_screen(self, param):
"capture the screen"
@ -66,27 +93,97 @@ def capture_screen(self, param):
else:
param['capture_screen'] = True
def automatic(self, param):
"Automatic running"
remain = 0
keys_pre = []
def cont_automatic(self, param):
"continue automatic"
global remain, keys_pre
if remain > 0:
keys = keys_pre
repeats = remain
else:
print('Examples: ')
print(' - noshow r t: automatic removing and tracking')
print(' - noshow nostop r t r c: automatic removing and tracking, if missing, just copy')
keys = input('Enter the ordered keys (separated by spaces): ').split(' ')
repeats = int(input('Input the repeat times: (0->{})'.format(len(self.dataset)-self.frame)))
keys_pre = keys
try:
repeats = int(input('Input the repeat times(0->{}): '.format(len(self.dataset)-self.frame)))
except:
repeats = 0
if repeats == -1:
repeats = len(self.dataset)
repeats = min(repeats, len(self.dataset)-self.frame+1)
if len(keys) < 1:
return 0
noshow = 'noshow' in keys
if noshow:
self.no_img = True
nostop = 'nostop' in keys
param['stop'] = False
for nf in tqdm(range(repeats), desc='auto {}'.format('->'.join(keys))):
for key in keys:
self.run(key=key)
if chr(get_key()) == 'q':
self.run(key=key, noshow=noshow)
if chr(get_key()) == 'q' or (param['stop'] and not nostop):
remain = repeats - nf
break
self.run(key='d')
self.run(key='d', noshow=noshow)
else:
remain = 0
keys_pre = []
self.no_img = False
def automatic(self, param):
"Automatic running"
global remain, keys_pre
print('Examples: ')
print(' - noshow r t: automatic removing and tracking')
print(' - noshow nostop r t r c: automatic removing and tracking, if missing, just copy')
keys = input('Enter the ordered keys (separated by spaces): ').split(' ')
keys_pre = keys
try:
repeats = int(input('Input the repeat times(0->{}): '.format(self.nFrames-self.frame)))
except:
repeats = 0
repeats = min(repeats, self.nFrames-self.frame+1)
if len(keys) < 1:
return 0
noshow = 'noshow' in keys
if noshow:
self.no_img = True
nostop = 'nostop' in keys
param['stop'] = False
for nf in tqdm(range(repeats), desc='auto {}'.format('->'.join(keys))):
for key in keys:
self.run(key=key, noshow=noshow)
if chr(get_key()) == 'q' or (param['stop'] and not nostop):
remain = repeats - nf
break
self.run(key='d', noshow=noshow)
else:
remain = 0
keys_pre = []
self.no_img = False
def set_keyframe(self, param):
"set/unset the key-frame"
param['annots']['isKeyframe'] = not param['annots']['isKeyframe']
register_keys = {
'h': print_help,
'H': print_help_mv,
'q': close,
'x': delete_bbox,
' ': skip,
'p': capture_screen,
'A': automatic
'A': automatic,
'z': cont_automatic,
'k': set_keyframe
}
for key in 'wasd':
register_keys[key] = get_move(key)
for i in range(10):
for i in range(5):
register_keys[str(i)] = set_personID(i)
register_keys['s'+str(i)] = choose_personID(i)
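A small sketch (an assumption, not from the repo) of a custom key handler: entries of register_keys / key_funcs are called as func(annotator, param=annotator.param), and their __doc__ string is what print_help displays.

def print_current_frame(annotator, param, **kwargs):
    "print the current frame index and image name"
    print('frame {}: {}'.format(param['frame'], param['imgname']))

# registered through the key_funcs argument, e.g.
# AnnotBase(dataset, key_funcs={'i': print_current_frame}, ...)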

View File

@ -2,7 +2,8 @@ import numpy as np
import cv2
import os
from os.path import join
from ..mytools import plot_cross, plot_line, plot_bbox, plot_keypoints, get_rgb
from ..mytools import plot_cross, plot_line, plot_bbox, plot_keypoints, get_rgb, merge
from ..mytools.file_utils import get_bbox_from_pose
from ..dataset import CONFIG
# click and (start, end) is the output of the OpenCV callback
@ -13,11 +14,23 @@ def vis_point(img, click, **kwargs):
def vis_line(img, start, end, **kwargs):
if start is not None and end is not None:
lw = max(2, img.shape[0]//500)
cv2.line(img, (int(start[0]), int(start[1])),
(int(end[0]), int(end[1])), (0, 255, 0), 1)
(int(end[0]), int(end[1])), (0, 255, 0), lw)
return img
def resize_to_screen(img, scale=1, capture_screen=False, **kwargs):
def vis_bbox(img, start, end, **kwargs):
if start is not None and end is not None:
lw = max(2, img.shape[0]//500)
cv2.rectangle(img, (int(start[0]), int(start[1])),
(int(end[0]), int(end[1])), (0, 255, 0), lw)
return img
def resize_to_screen(img, scale=1, **kwargs):
img = cv2.resize(img, None, fx=scale, fy=scale)
return img
def capture_screen(img, capture_screen=False, **kwargs):
if capture_screen:
from datetime import datetime
time_now = datetime.now().strftime("%m-%d-%H:%M:%S")
@ -25,18 +38,33 @@ def resize_to_screen(img, scale=1, capture_screen=False, **kwargs):
os.makedirs('capture', exist_ok=True)
cv2.imwrite(outname, img)
print('Capture current screen to {}'.format(outname))
img = cv2.resize(img, None, fx=scale, fy=scale)
return img
def plot_text(img, annots, **kwargs):
def plot_text(img, annots, imgname, **kwargs):
if 'isKeyframe' in annots.keys():
if annots['isKeyframe']: # key frames are marked with a red border
cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), img.shape[1]//100)
else: # non-key frames are marked with a green border
cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), img.shape[1]//100)
imgname = '/'.join(imgname.split(os.sep)[-3:])
text_size = int(max(1, img.shape[0]//1500))
border = 20 * text_size
width = 2 * text_size
cv2.putText(img, '{}'.format(annots['filename']), (border, img.shape[0]-border), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 0, 255), width)
cv2.putText(img, '{}'.format(imgname), (border, img.shape[0]-border), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 0, 255), width)
# draw the annotation progress bar:
if 'frame' in kwargs.keys():
width = img.shape[1]
frame, nFrames = kwargs['frame'], kwargs['nFrames']
lw = 12
pos = lambda x: int(width*(x+1)/nFrames)
COL_ALL = (0, 255, 0)
COL_CUR = (255, 0, 0)
COL_PIN = (255, 128, 128)
plot_line(img, (0, lw/2), (width, lw/2), lw, COL_ALL)
plot_line(img, (0, lw/2), (pos(frame), lw/2), lw, COL_CUR)
top = pos(frame)
pts = np.array([[top, lw], [top-lw, lw*4], [top+lw, lw*4]])
cv2.fillPoly(img, [pts], COL_PIN)
return img
def plot_bbox_body(img, annots, **kwargs):
@ -52,8 +80,33 @@ def plot_bbox_body(img, annots, **kwargs):
plot_line(img, (x1, y2), (x2, y1), lw, color)
# border
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, lw+1)
ratio = (y2-y1)/(x2-x1)
w = 10*lw
cv2.rectangle(img,
(int((x1+x2)/2-w), int((y1+y2)/2-w*ratio)),
(int((x1+x2)/2+w), int((y1+y2)/2+w*ratio)),
color, -1)
cv2.putText(img, '{}'.format(pid), (int(x1), int(y1)+20), cv2.FONT_HERSHEY_SIMPLEX, 5, color, 2)
return img
def plot_bbox_sp(img, annots, bbox_type='bbox_handl2d', add_center=False):
assert bbox_type in ('bbox', 'bbox_handl2d', 'bbox_handr2d', 'bbox_face2d')
for data in annots['annots']:
if bbox_type not in data.keys():
continue
bbox = data[bbox_type]
# draw an X shape
x1, y1, x2, y2 = bbox[:4]
pid = data['personID']
color = get_rgb(pid)
lw = max(1, int((x2 - x1)//100))
plot_line(img, (x1, y1), (x2, y2), lw, color)
plot_line(img, (x1, y2), (x2, y1), lw, color)
# border
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, lw+1)
ratio = (y2-y1)/(x2-x1)/2
w = 10*lw
if add_center:
cv2.rectangle(img,
(int((x1+x2)/2-w), int((y1+y2)/2-w*ratio)),
(int((x1+x2)/2+w), int((y1+y2)/2+w*ratio)),
@ -61,51 +114,45 @@ def plot_bbox_body(img, annots, **kwargs):
cv2.putText(img, '{}'.format(pid), (int(x1), int(y1)+20), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
return img
def plot_skeleton(img, annots, **kwargs):
def plot_bbox_factory(bbox_type, add_center=False):
def ret_foo(img, annots, **kwargs):
return plot_bbox_sp(img, annots, bbox_type=bbox_type, add_center=add_center)
return ret_foo
def plot_skeleton(img, annots, body='body25', bbox_name='bbox', kpts_name='keypoints', **kwargs):
annots = annots['annots']
vis_conf = False
for data in annots:
bbox, keypoints = data['bbox'], data['keypoints']
if False:
pid = data.get('matchID', -1)
else:
pid = data.get('personID', -1)
if kpts_name in data.keys():
keypoints = data[kpts_name]
plot_keypoints(img, keypoints, pid, CONFIG[body], vis_conf=vis_conf, use_limb_color=True)
if bbox_name in data.keys():
bbox = data[bbox_name]
plot_bbox(img, bbox, pid)
elif kpts_name in data.keys():
bbox = get_bbox_from_pose(np.array(data[kpts_name]))
plot_bbox(img, bbox, pid)
if True:
plot_keypoints(img, keypoints, pid, CONFIG['body25'], vis_conf=vis_conf, use_limb_color=True)
if 'handl2d' in data.keys():
plot_keypoints(img, data['handl2d'], pid, CONFIG['hand'], vis_conf=vis_conf, lw=1, use_limb_color=False)
plot_keypoints(img, data['handr2d'], pid, CONFIG['hand'], vis_conf=vis_conf, lw=1, use_limb_color=False)
plot_keypoints(img, data['face2d'], pid, CONFIG['face'], vis_conf=vis_conf, lw=1, use_limb_color=False)
return img
def plot_keypoints_whole(img, points, kintree):
for ii, (i, j) in enumerate(kintree):
if i >= len(points) or j >= len(points):
continue
col = (255, 240, 160)
lw = 4
pt1, pt2 = points[i], points[j]
if pt1[-1] > 0.01 and pt2[-1] > 0.01:
image = cv2.line(
img, (int(pt1[0]+0.5), int(pt1[1]+0.5)), (int(pt2[0]+0.5), int(pt2[1]+0.5)),
col, lw)
def plot_skeleton_factory(body):
restore_key = {
'body25': ('bbox', 'keypoints'),
'handl': ('bbox_handl2d', 'handl2d'),
'handr': ('bbox_handr2d', 'handr2d'),
'face': ('bbox_face2d', 'face2d'),
}
bbox_name, kpts_name = restore_key[body]
def ret_foo(img, annots, **kwargs):
return plot_skeleton(img, annots, body, bbox_name, kpts_name)
return ret_foo
def plot_skeleton_simple(img, annots, **kwargs):
annots = annots['annots']
vis_conf = False
for data in annots:
bbox, keypoints = data['bbox'], data['keypoints']
pid = data.get('personID', -1)
plot_keypoints_whole(img, keypoints, CONFIG['body25']['kintree'])
return img
def vis_active_bbox(img, annots, select, **kwargs):
active = select['bbox']
if active == -1:
def vis_active_bbox(img, annots, select, bbox_name, **kwargs):
active = select[bbox_name]
if active == -1 or active >= len(annots['annots']):
return img
else:
bbox = annots['annots'][active]['bbox']
bbox = annots['annots'][active][bbox_name]
pid = annots['annots'][active]['personID']
mask = np.zeros_like(img, dtype=np.uint8)
cv2.rectangle(mask,

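For context, a hedged sketch (not from the repo) of a custom visualization callback: every entry of vis_funcs is called as func(img, **param) and must return the (possibly annotated) image.

import cv2

def vis_num_persons(img, annots, **kwargs):
    # annots is the per-frame annotation dict loaded by AnnotBase
    text = '{} persons'.format(len(annots['annots']))
    cv2.putText(img, text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return img

# used as: AnnotBase(dataset, vis_funcs=[vis_num_persons, plot_text], ...)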
View File

@ -1,8 +1,22 @@
import numpy as np
import cv2
MIN_PIXEL = 50
def callback_select_bbox_corner(start, end, annots, select, **kwargs):
def findNearestPoint(points, click):
# points: (N, 2)
# click : [x, y]
click = np.array(click)
if len(points.shape) == 2:
click = click[None, :]
elif len(points.shape) == 3:
click = click[None, None, :]
dist = np.linalg.norm(points - click, axis=-1)
if dist.min() < MIN_PIXEL:
idx = np.unravel_index(dist.argmin(), dist.shape)
return True, idx
else:
return False, (-1, -1)
def callback_select_bbox_corner(start, end, annots, select, bbox_name, **kwargs):
if start is None or end is None:
select['corner'] = -1
return 0
@ -10,63 +24,109 @@ def callback_select_bbox_corner(start, end, annots, select, **kwargs):
return 0
# determine which corner point was selected
annots = annots['annots']
start = np.array(start)[None, :]
if select['bbox'] == -1 and select['corner'] == -1:
# not select a bbox
if select[bbox_name] == -1 and select['corner'] == -1:
corners = []
for i in range(len(annots)):
l, t, r, b = annots[i]['bbox'][:4]
corners = np.array([(l, t), (l, b), (r, t), (r, b)])
dist = np.linalg.norm(corners - start, axis=1)
mindist = dist.min()
if mindist < MIN_PIXEL:
mincor = dist.argmin()
select['bbox'] = i
select['corner'] = mincor
break
l, t, r, b = annots[i][bbox_name][:4]
corner = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
corners.append(corner)
corners = np.stack(corners)
flag, minid = findNearestPoint(corners, start)
if flag:
select[bbox_name] = minid[0]
select['corner'] = minid[1]
else:
select['corner'] = -1
elif select['bbox'] != -1 and select['corner'] == -1:
i = select['bbox']
l, t, r, b = annots[i]['bbox'][:4]
corners = np.array([(l, t), (l, b), (r, t), (r, b)])
dist = np.linalg.norm(corners - start, axis=1)
mindist = dist.min()
if mindist < MIN_PIXEL:
mincor = dist.argmin()
select['corner'] = mincor
elif select['bbox'] != -1 and select['corner'] != -1:
# Move the corner
# have selected a bbox, not select a corner
elif select[bbox_name] != -1 and select['corner'] == -1:
i = select[bbox_name]
l, t, r, b = annots[i][bbox_name][:4]
corners = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
flag, minid = findNearestPoint(corners, start)
if flag:
select['corner'] = minid[0]
# have selected a bbox, and select a corner
elif select[bbox_name] != -1 and select['corner'] != -1:
x, y = end
# Move the corner
if select['corner'] < 4:
(i, j) = [(0, 1), (0, 3), (2, 1), (2, 3)][select['corner']]
data = annots[select['bbox']]
data['bbox'][i] = x
data['bbox'][j] = y
elif select['bbox'] == -1 and select['corner'] != -1:
data = annots[select[bbox_name]]
data[bbox_name][i] = x
data[bbox_name][j] = y
# Move the center
else:
bbox = annots[select[bbox_name]][bbox_name]
w = (bbox[2] - bbox[0])/2
h = (bbox[3] - bbox[1])/2
bbox[0] = x - w
bbox[1] = y - h
bbox[2] = x + w
bbox[3] = y + h
elif select[bbox_name] == -1 and select['corner'] != -1:
select['corner'] = -1
def callback_select_bbox_center(click, annots, select, **kwargs):
def callback_select_bbox_center(click, annots, select, bbox_name, **kwargs):
if click is None:
return 0
annots = annots['annots']
bboxes = np.array([d['bbox'] for d in annots])
bboxes = np.array([d[bbox_name] for d in annots])
center = (bboxes[:, [2, 3]] + bboxes[:, [0, 1]])/2
click = np.array(click)[None, :]
dist = np.linalg.norm(click - center, axis=1)
mindist, minid = dist.min(), dist.argmin()
if mindist < MIN_PIXEL:
select['bbox'] = minid
select[bbox_name] = minid
def auto_pose_track(self, param, **kwargs):
"auto tracking with poses"
def get_auto_track(mode='kpts'):
MAX_SPEED = 100
if mode == 'bbox':
MAX_SPEED = 0.2
def auto_track(self, param, **kwargs):
if self.frame == 0:
return 0
previous = self.previous()
annots = param['annots']['annots']
keypoints_pre = np.array([d['keypoints'] for d in previous['annots']])
keypoints_now = np.array([d['keypoints'] for d in annots])
bbox_name = param['bbox_name']
kpts_name = param['kpts_name']
if len(annots) == 0:
return 0
if len(previous['annots']) == 0:
return 0
if mode == 'kpts':
keypoints_pre = np.array([d[kpts_name] for d in previous['annots']])
keypoints_now = np.array([d[kpts_name] for d in annots])
conf = np.sqrt(keypoints_now[:, None, :, -1] * keypoints_pre[None, :, :, -1])
diff = np.linalg.norm(keypoints_now[:, None, :, :] - keypoints_pre[None, :, :, :], axis=-1)
diff = np.linalg.norm(keypoints_now[:, None, :, :2] - keypoints_pre[None, :, :, :2], axis=-1)
dist = np.sum(diff * conf, axis=-1)/np.sum(conf, axis=-1)
elif mode == bbox_name:
# compute IoU
bbox_pre = np.array([d[bbox_name] for d in previous['annots']])
bbox_now = np.array([d[bbox_name] for d in annots])
bbox_pre = bbox_pre[None]
bbox_now = bbox_now[:, None]
areas_pre = (bbox_pre[..., 2] - bbox_pre[..., 0]) * (bbox_pre[..., 3] - bbox_pre[..., 1])
areas_now = (bbox_now[..., 2] - bbox_now[..., 0]) * (bbox_now[..., 3] - bbox_now[..., 1])
# take the larger of the left/top boundaries
xx1 = np.maximum(bbox_pre[..., 0], bbox_now[..., 0])
yy1 = np.maximum(bbox_pre[..., 1], bbox_now[..., 1])
# take the smaller of the right/bottom boundaries
xx2 = np.minimum(bbox_pre[..., 2], bbox_now[..., 2])
yy2 = np.minimum(bbox_pre[..., 3], bbox_now[..., 3])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
over = inter / (areas_pre + areas_now - inter)
dist = 1 - over
# diff = np.linalg.norm(bbox_now[:, None, :4] - bbox_pre[None, :, :4], axis=-1)
# bbox_size = np.max(bbox_pre[:, [2, 3]] - bbox_pre[:, [0, 1]], axis=1)[None, :]
# diff = diff / bbox_size
# dist = diff
else:
raise NotImplementedError
nows, pres = np.where(dist < MAX_SPEED)
edges = []
for n, p in zip(nows, pres):
@ -80,4 +140,74 @@ def auto_pose_track(self, param, **kwargs):
used_n.append(n)
used_p.append(p)
# TODO:stop when missing
pre_ids = [d['personID'] for d in previous['annots']]
if len(used_p) != len(pre_ids):
param['stop'] = True
print('>>> Stop because missing key: {}'.format(
[i for i in pre_ids if i not in used_p]))
print(dist)
max_id = max(pre_ids) + 1
for i in range(len(annots)):
if i in used_n:
continue
annots[i]['personID'] = max_id
max_id += 1
auto_track.__doc__ = 'auto track the {}'.format(mode)
return auto_track
def copy_previous_missing(self, param, **kwargs):
"copy the missing person of previous frame"
if self.frame == 0:
return 0
previous = self.previous()
annots = param['annots']['annots']
pre_ids = [d['personID'] for d in previous['annots']]
now_ids = [d['personID'] for d in annots]
for i in range(len(pre_ids)):
if pre_ids[i] not in now_ids:
annots.append(previous['annots'][i])
def copy_previous_bbox(self, param, **kwargs):
"copy the annots of previous frame"
if self.frame == 0:
return 0
previous = self.previous()
annots = param['annots']['annots'] = previous['annots']
def create_bbox(self, param, **kwargs):
"add new boundbox"
start, end = param['start'], param['end']
if start is None or end is None:
return 0
annots = param['annots']['annots']
nowids = [d['personID'] for d in annots]
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
if len(nowids) == 0:
maxID = 0
else:
maxID = max(nowids) + 1
data = {
'personID': maxID,
bbox_name: [start[0], start[1], end[0], end[1], 1],
kpts_name: [[0., 0., 0.] for _ in range(25)]
}
annots.append(data)
param['start'], param['end'] = None, None
def delete_bbox(self, param, **kwargs):
"delete the person"
bbox_name = param['bbox_name']
active = param['select'][bbox_name]
if active == -1:
return 0
else:
param['annots']['annots'].pop(active)
param['select'][bbox_name] = -1
return 0
def delete_all_bbox(self, param, **kwargs):
"delete the person"
bbox_name = param['bbox_name']
param['annots']['annots'] = []
param['select'][bbox_name] = -1
return 0
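A sketch of how the new helpers could be registered with AnnotBase (the key letters are an assumption; only 't' for tracking is hinted at by the 'r t' example in the automatic prompt):

from easymocap.annotator import get_auto_track, copy_previous_missing

key_funcs = {
    't': get_auto_track('kpts'),  # greedy match by confidence-weighted keypoint distance
    'T': get_auto_track('bbox'),  # greedy match by bbox IoU (assumes bbox_name == 'bbox')
    'c': copy_previous_missing,   # copy persons that disappeared since the previous frame
}
# annotator = AnnotBase(dataset, key_funcs=key_funcs, ...)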

View File

@ -2,7 +2,7 @@
@ Date: 2021-04-13 16:14:36
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-05-26 15:42:02
@ LastEditTime: 2021-07-17 16:00:17
@ FilePath: /EasyMocap/easymocap/annotator/chessboard.py
'''
import numpy as np
@ -53,7 +53,10 @@ def _findChessboardCornersAdapt(img, pattern):
return _findChessboardCorners(img, pattern)
def findChessboardCorners(img, annots, pattern):
if annots['visited']:
conf = sum([v[2] for v in annots['keypoints2d']])
if annots['visited'] and conf > 0:
return True
elif annots['visited']:
return None
annots['visited'] = True
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

View File

@ -0,0 +1,59 @@
'''
@ Date: 2021-04-22 11:40:31
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-10 16:00:15
@ FilePath: /EasyMocap/easymocap/annotator/keypoints_callback.py
'''
import numpy as np
from .bbox_callback import findNearestPoint
def callback_select_joints(start, end, annots, select, bbox_name='bbox', kpts_name='keypoints', **kwargs):
if start is None or end is None:
select['joints'] = -1
return 0
if start[0] == end[0] and start[1] == end[1]:
select['joints'] = -1
return 0
if select['corner'] != -1:
return 0
# determine which point was selected
annots = annots['annots']
# not select a bbox
if select[bbox_name] == -1 and select['joints'] == -1:
corners = []
for annot in annots:
corners.append(np.array(annot[kpts_name]))
corners = np.stack(corners)
flag, minid = findNearestPoint(corners[..., :2], start)
if flag:
select[bbox_name] = minid[0]
select['joints'] = minid[1]
else:
select['joints'] = -1
# have selected a bbox, not select a corner
elif select[bbox_name] != -1 and select['joints'] == -1:
i = select[bbox_name]
corners = np.array(annots[i][kpts_name])[:, :2]
flag, minid = findNearestPoint(corners, start)
if flag:
select['joints'] = minid[0]
# have selected a bbox, and select a corner
elif select[bbox_name] != -1 and select['joints'] != -1:
x, y = end
# Move the corner
data = annots[select[bbox_name]]
nj = select['joints']
data[kpts_name][nj][0] = x
data[kpts_name][nj][1] = y
if kpts_name == 'keypoints': # for body
if nj in [1, 8]:
return 0
if nj in [2, 5]:
data[kpts_name][1][0] = (data[kpts_name][2][0] + data[kpts_name][5][0])/2
data[kpts_name][1][1] = (data[kpts_name][2][1] + data[kpts_name][5][1])/2
if nj in [9, 12]:
data[kpts_name][8][0] = (data[kpts_name][9][0] + data[kpts_name][12][0])/2
data[kpts_name][8][1] = (data[kpts_name][9][1] + data[kpts_name][12][1])/2
elif select[bbox_name] == -1 and select['joints'] != -1:
select['joints'] = -1

View File

@ -0,0 +1,89 @@
'''
@ Date: 2021-06-10 15:39:55
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-10 16:03:13
@ FilePath: /EasyMocap/easymocap/annotator/keypoints_keyboard.py
'''
import numpy as np
def set_unvisible(self, param, **kwargs):
"set the selected joints unvisible"
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
select = param['select']
if select[bbox_name] == -1:
return 0
if select['joints'] == -1:
return 0
param['annots']['annots'][select[bbox_name]][kpts_name][select['joints']][-1] = 0.
def set_unvisible_according_previous(self, param, **kwargs):
"set the selected joints unvisible if previous unvisible"
previous = self.previous()
select = param['select']
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
if select[bbox_name] == -1:
return 0
pid = param['annots']['annots'][select[bbox_name]]['personID']
kpts_now = param['annots']['annots'][select[bbox_name]][kpts_name]
for annots in previous['annots']:
if annots['personID'] == pid:
kpts_old = annots[kpts_name]
for nj in range(len(kpts_old)):
kpts_now[nj][2] = min(kpts_old[nj][2], kpts_now[nj][2])
def set_face_unvisible(self, param, **kwargs):
"set the face unvisible"
select = param['select']
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
if select[bbox_name] == -1:
return 0
for i in [15, 16, 17, 18]:
param['annots']['annots'][select[bbox_name]][kpts_name][i][-1] = 0.
def mirror_keypoints2d(self, param, **kwargs):
"mirror the keypoints2d"
select = param['select']
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
if select[bbox_name] == -1:
return 0
kpts = param['annots']['annots'][select[bbox_name]][kpts_name]
for pairs in [[(2, 5), (3, 6), (4, 7)], [(15, 16), (17, 18)], [(9, 12), (10, 13), (11, 14), (21, 24), (19, 22), (20, 23)]]:
for i, j in pairs:
kpts[i], kpts[j] = kpts[j], kpts[i]
def mirror_keypoints2d_leg(self, param, **kwargs):
"mirror the keypoints2d of legs and feet"
select = param['select']
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
if select[bbox_name] == -1:
return 0
kpts = param['annots']['annots'][select[bbox_name]][kpts_name]
for pairs in [[(9, 12), (10, 13), (11, 14), (21, 24), (19, 22), (20, 23)]]:
for i, j in pairs:
kpts[i], kpts[j] = kpts[j], kpts[i]
def check_track(self, param):
"check the tracking keypoints"
if self.frame == 0:
return 0
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
annots_pre = self.previous()['annots']
annots = param['annots']['annots']
if len(annots) == 0 or len(annots_pre) == 0 or len(annots) != len(annots_pre):
param['stop'] = True
return 0
for data in annots:
for data_pre in annots_pre:
if data_pre['personID'] != data['personID']:
continue
l, t, r, b, c = data_pre[bbox_name][:5]
bbox_size = max(r-l, b-t)
keypoints_now = np.array(data[kpts_name])
keypoints_pre = np.array(data_pre[kpts_name])
conf = np.sqrt(keypoints_now[:, -1] * keypoints_pre[:, -1])
diff = np.linalg.norm(keypoints_now[:, :2] - keypoints_pre[:, :2], axis=-1)
dist = np.sum(diff * conf, axis=-1)/np.sum(conf, axis=-1)/bbox_size
print('{}: {:.2f}'.format(data['personID'], dist))
if dist > 0.05:
param['stop'] = True

View File

@ -1,6 +1,7 @@
import numpy as np
from easymocap.dataset.mirror import flipPoint2D
CONF_VANISHING_ANNOT = 2.
def clear_vanish_points(self, param):
"remove all vanishing points"
annots = param['annots']
@ -67,12 +68,16 @@ def get_record_vanish_lines(index):
annots['vanish_point'] = [[], [], []]
start, end = param['start'], param['end']
if start is not None and end is not None:
annots['vanish_line'][index].append([[start[0], start[1], 2], [end[0], end[1], 2]])
annots['vanish_line'][index].append([[start[0], start[1], CONF_VANISHING_ANNOT], [end[0], end[1], CONF_VANISHING_ANNOT]])
# update the vanish point
if len(annots['vanish_line'][index]) > 1:
annots['vanish_point'][index] = update_vanish_points(annots['vanish_line'][index])
param['start'] = None
param['end'] = None
if len(annots['vanish_line'][index]) > 1:
for val in annots['vanish_line'][index]:
if len(val[0]) == 2:
val[0].append(CONF_VANISHING_ANNOT)
val[1].append(CONF_VANISHING_ANNOT)
annots['vanish_point'][index] = update_vanish_points(annots['vanish_line'][index])
func = record_vanish_lines
text = ['parallel to mirror edges', 'vertical to mirror', 'vertical to ground']
func.__doc__ = 'vanish line of ' + text[index]
@ -135,8 +140,10 @@ def get_calc_intrinsic(mode='xy'):
K = np.eye(3)
K[0, 2] = W/2
K[1, 2] = H/2
print(vanish_point)
vanish_point[:, 0] -= W/2
vanish_point[:, 1] -= H/2
print(vanish_point)
focal = np.sqrt(-(vanish_point[0][0]*vanish_point[1][0] + vanish_point[0][1]*vanish_point[1][1]))
K[0, 0] = focal
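For reference, the focal length recovered above follows the standard orthogonal-vanishing-point constraint: with the principal point fixed at the image center (cx, cy) = (W/2, H/2) and zero skew, two vanishing points (u1, v1) and (u2, v2) of orthogonal directions satisfy (u1-cx)(u2-cx) + (v1-cy)(v2-cy) + f^2 = 0, hence f = sqrt(-((u1-cx)(u2-cx) + (v1-cy)(v2-cy))), which is exactly the expression computed after shifting the points by W/2 and H/2.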

View File

@ -1,3 +1,10 @@
'''
@ Date: 2021-07-13 21:12:15
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-07-13 21:12:46
@ FilePath: /EasyMocap/easymocap/annotator/vanish_visualize.py
'''
import cv2
import numpy as np
from .basic_visualize import plot_cross
@ -17,7 +24,7 @@ def vis_vanish_lines(img, annots, **kwargs):
plot_cross(img, x, y, colors[i])
points = np.array(annots['vanish_line'][i]).reshape(-1, 3)
for (xx, yy, conf) in points:
plot_cross(img, xx, yy, colors[i])
plot_cross(img, xx, yy, col=colors[i])
cv2.line(img, (int(x), int(y)), (int(xx), int(yy)), colors[i], 2)
for i in range(3):

View File

@ -78,6 +78,16 @@ class CfgNode(dict):
if type(v) is dict:
# Convert dict to CfgNode
init_dict[k] = CfgNode(v, key_list=key_list + [k])
if '_parent_' in v.keys():
init_dict[k].merge_from_file(v['_parent_'])
init_dict[k].pop('_parent_')
if '_const_' in v.keys() and v['_const_']:
init_dict[k].__dict__[CfgNode.IMMUTABLE] = True
init_dict[k].pop('_const_')
elif type(v) is str and v.startswith('_file_/'):
filename = v.replace('_file_/', '')
init_dict[k] = CfgNode()
init_dict[k].merge_from_file(filename)
else:
# Check for valid leaf type or nested CfgNode
_assert_with_logging(
@ -385,7 +395,6 @@ def _merge_a_into_b(a, b, root, key_list):
if '_no_merge_' in a.keys() and a['_no_merge_']:
b.clear()
a.pop('_no_merge_')
for k, v_ in a.items():
full_key = ".".join(key_list + [k])
# a must specify keys that are in b

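A hypothetical illustration (the yml path reuses config/model/smpl_neutral.yml from this commit; everything else is a placeholder) of the three directives handled above when a CfgNode is built from a nested dict:

cfg_dict = {
    'body_model': {
        '_parent_': 'config/model/smpl_neutral.yml',  # merge_from_file is called on this sub-node, then the key is popped
        '_const_': True,                              # marks the sub-node immutable
    },
    'hand_model': '_file_/config/model/smpl_neutral.yml',  # the whole sub-node is loaded from the file
}
# cfg = CfgNode(cfg_dict)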
View File

@ -2,8 +2,8 @@
@ Date: 2021-04-21 15:19:21
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-28 11:55:27
@ FilePath: /EasyMocapRelease/easymocap/mytools/reader.py
@ LastEditTime: 2021-07-29 16:12:37
@ FilePath: /EasyMocap/easymocap/mytools/reader.py
'''
# function to read data
"""
@ -27,7 +27,7 @@ def read_keypoints3d(filename):
res_ = []
for d in data:
pid = d['id'] if 'id' in d.keys() else d['personID']
pose3d = np.array(d['keypoints3d'])
pose3d = np.array(d['keypoints3d'], dtype=np.float32)
if pose3d.shape[0] > 25:
# for results with hands, assign the hand root joints the corresponding body25 points
pose3d[25, :] = pose3d[7, :]
@ -40,13 +40,27 @@ def read_keypoints3d(filename):
})
return res_
def read_keypoints3d_dict(filename):
data = read_json(filename)
res_ = {}
for d in data:
pid = d['id'] if 'id' in d.keys() else d['personID']
pose3d = np.array(d['keypoints3d'], dtype=np.float32)
if pose3d.shape[1] == 3:
pose3d = np.hstack([pose3d, np.ones((pose3d.shape[0], 1))])
res_[pid] = {
'id': pid,
'keypoints3d': pose3d
}
return res_
def read_smpl(filename):
datas = read_json(filename)
outputs = []
for data in datas:
for key in ['Rh', 'Th', 'poses', 'shapes', 'expression']:
if key in data.keys():
data[key] = np.array(data[key])
data[key] = np.array(data[key], dtype=np.float32)
# for smplx results
outputs.append(data)
return outputs
@ -68,7 +82,7 @@ def read_keypoints3d_a4d(outname):
pose3d = np.fromstring(content, dtype=float, sep=' ').reshape((nJoints, 4))
# the joint order of association4d differs from the standard definition
pose3d = pose3d[[4, 1, 5, 9, 13, 6, 10, 14, 0, 2, 7, 11, 3, 8, 12], :]
res_.append({'id':trackId, 'keypoints3d':np.array(pose3d)})
res_.append({'id':trackId, 'keypoints3d':np.array(pose3d, dtype=np.float32)})
return res_
def read_keypoints3d_all(path, key='keypoints3d', pids=[]):

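A hedged usage sketch for the new reader (the path is a placeholder): read_keypoints3d_dict returns {pid: {'id': pid, 'keypoints3d': (N, 4) float32 array}}, padding a confidence column when the stored points are (N, 3).

from easymocap.mytools.reader import read_keypoints3d_dict

results = read_keypoints3d_dict('output/keypoints3d/000000.json')
for pid, data in results.items():
    print(pid, data['keypoints3d'].shape)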
View File

@ -2,7 +2,7 @@
@ Date: 2020-11-28 17:23:04
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-03 22:31:31
@ LastEditTime: 2021-08-22 16:11:25
@ FilePath: /EasyMocap/easymocap/mytools/vis_base.py
'''
import cv2
@ -75,7 +75,7 @@ def plot_line(img, pt1, pt2, lw, col):
def plot_cross(img, x, y, col, width=-1, lw=-1):
if lw == -1:
lw = int(round(img.shape[0]/1000))
lw = max(1, int(round(img.shape[0]/1000)))
width = lw * 5
cv2.line(img, (int(x-width), int(y)), (int(x+width), int(y)), col, lw)
cv2.line(img, (int(x), int(y-width)), (int(x), int(y+width)), col, lw)
@ -170,7 +170,7 @@ def merge(images, row=-1, col=-1, resize=False, ret_range=False, **kwargs):
ret_img[height * i: height * (i+1), width * j: width * (j+1)] = img
ranges.append((width*j, height*i, width*(j+1), height*(i+1)))
if resize:
min_height = 3000
min_height = 1000
if ret_img.shape[0] > min_height:
scale = min_height/ret_img.shape[0]
ret_img = cv2.resize(ret_img, None, fx=scale, fy=scale)

View File

@ -2,12 +2,13 @@
@ Date: 2020-11-18 14:04:10
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-28 11:55:00
@ FilePath: /EasyMocapRelease/easymocap/smplmodel/body_model.py
@ LastEditTime: 2021-08-28 16:37:55
@ FilePath: /EasyMocap/easymocap/smplmodel/body_model.py
'''
import torch
import torch.nn as nn
from .lbs import lbs, batch_rodrigues
from .lbs import batch_rodrigues
from .lbs import lbs, dqs
import os.path as osp
import pickle
import numpy as np
@ -59,7 +60,7 @@ class SMPLlayer(nn.Module):
def __init__(self, model_path, model_type='smpl', gender='neutral', device=None,
regressor_path=None,
use_pose_blending=True, use_shape_blending=True, use_joints=True,
with_color=False,
with_color=False, use_lbs=True,
**kwargs) -> None:
super(SMPLlayer, self).__init__()
dtype = torch.float32
@ -72,7 +73,12 @@ class SMPLlayer(nn.Module):
device = torch.device(device)
self.device = device
self.model_type = model_type
self.NUM_POSES = NUM_POSES[model_type]
# create the SMPL model
if use_lbs:
self.lbs = lbs
else:
self.lbs = dqs
data = load_bodydata(model_type, model_path, gender)
if with_color:
self.color = data['vertex_colors']
@ -151,35 +157,30 @@ class SMPLlayer(nn.Module):
self.register_buffer('j_J_regressor', j_J_regressor)
if self.model_type == 'smplh':
# load smplh data
self.num_pca_comps = 6
self.num_pca_comps = kwargs['num_pca_comps']
from os.path import join
for key in ['LEFT', 'RIGHT']:
left_file = join(os.path.dirname(smpl_path), 'MANO_{}.pkl'.format(key))
left_file = join(kwargs['mano_path'], 'MANO_{}.pkl'.format(key))
with open(left_file, 'rb') as f:
data = pickle.load(f, encoding='latin1')
val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
self.register_buffer('mHandsMean'+key[0], val)
val = to_tensor(to_np(data['hands_components'][:self.num_pca_comps, :]), dtype=dtype)
self.register_buffer('mHandsComponents'+key[0], val)
self.use_pca = True
self.use_flat_mean = True
elif self.model_type == 'mano':
# TODO:write this into config file
# self.num_pca_comps = 12
# self.use_pca = True
# if self.use_pca:
# NUM_POSES['mano'] = self.num_pca_comps + 3
# else:
# NUM_POSES['mano'] = 45 + 3
# self.use_flat_mean = True
self.num_pca_comps = 12
self.use_pca = True
self.use_flat_mean = True
self.use_pca = kwargs['use_pca']
self.use_flat_mean = kwargs['use_flat_mean']
if self.use_pca:
NUM_POSES['mano'] = self.num_pca_comps + 3
self.NUM_POSES = 66 + self.num_pca_comps * 2
else:
NUM_POSES['mano'] = 45 + 3
self.NUM_POSES = 66 + 15 * 3 * 2
elif self.model_type == 'mano':
self.num_pca_comps = kwargs['num_pca_comps']
self.use_pca = kwargs['use_pca']
self.use_flat_mean = kwargs['use_flat_mean']
if self.use_pca:
self.NUM_POSES = self.num_pca_comps + 3
else:
self.NUM_POSES = 45 + 3
val = to_tensor(to_np(data['hands_mean'].reshape(1, -1)), dtype=dtype)
self.register_buffer('mHandsMean', val)
@ -202,19 +203,21 @@ class SMPLlayer(nn.Module):
def extend_hand(poses, use_pca, use_flat_mean, coeffs, mean):
if use_pca:
poses = poses @ coeffs
if use_flat_mean:
if not use_flat_mean:
poses = poses + mean
return poses
def extend_pose(self, poses):
# skip SMPL or already extend
if self.model_type not in ['smplh', 'smplx', 'mano']:
return poses
elif self.model_type == 'smplh' and poses.shape[-1] == 156:
elif self.model_type == 'smplh' and poses.shape[-1] == 156 and self.use_flat_mean:
return poses
elif self.model_type == 'smplx' and poses.shape[-1] == 165:
elif self.model_type == 'smplx' and poses.shape[-1] == 165 and self.use_flat_mean:
return poses
elif self.model_type == 'mano' and poses.shape[-1] == 48:
elif self.model_type == 'mano' and poses.shape[-1] == 48 and self.use_flat_mean:
return poses
# skip mano
if self.model_type == 'mano':
poses_hand = self.extend_hand(poses[..., 3:], self.use_pca, self.use_flat_mean,
self.mHandsComponents, self.mHandsMean)
@ -231,7 +234,7 @@ class SMPLlayer(nn.Module):
if self.use_pca:
poses_lh = poses_lh @ self.mHandsComponentsL
poses_rh = poses_rh @ self.mHandsComponentsR
if self.use_flat_mean:
if not self.use_flat_mean:
poses_lh = poses_lh + self.mHandsMeanL
poses_rh = poses_rh + self.mHandsMeanR
if self.model_type == 'smplh':
@ -299,7 +302,9 @@ class SMPLlayer(nn.Module):
return poses.detach().cpu().numpy()
def forward(self, poses, shapes, Rh=None, Th=None, expression=None,
return_verts=True, return_tensor=True, return_smpl_joints=False, only_shape=False, **kwargs):
v_template=None,
return_verts=True, return_tensor=True, return_smpl_joints=False,
only_shape=False, pose2rot=True, **kwargs):
""" Forward pass for SMPL model
Args:
@ -338,20 +343,23 @@ class SMPLlayer(nn.Module):
if expression is not None and self.model_type == 'smplx':
shapes = torch.cat([shapes, expression], dim=1)
# process poses
if pose2rot: # if given rotation matrix, no need for this
poses = self.extend_pose(poses)
if return_verts or not self.use_joints:
vertices, joints = lbs(shapes, poses, self.v_template,
if v_template is None:
v_template = self.v_template
vertices, joints = self.lbs(shapes, poses, v_template,
self.shapedirs, self.posedirs,
self.J_regressor, self.parents,
self.weights, pose2rot=True, dtype=self.dtype,
self.weights, pose2rot=pose2rot, dtype=self.dtype,
use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
if not self.use_joints and not return_verts:
vertices = joints
else:
vertices, joints = lbs(shapes, poses, self.j_v_template,
vertices, joints = self.lbs(shapes, poses, self.j_v_template,
self.j_shapedirs, self.j_posedirs,
self.j_J_regressor, self.parents,
self.j_weights, pose2rot=True, dtype=self.dtype, only_shape=only_shape,
self.j_weights, pose2rot=pose2rot, dtype=self.dtype, only_shape=only_shape,
use_pose_blending=self.use_pose_blending, use_shape_blending=self.use_shape_blending, J_shaped=self.J_shaped)
if return_smpl_joints:
vertices = vertices[:, :self.J_regressor.shape[0], :]
@ -364,13 +372,13 @@ class SMPLlayer(nn.Module):
def init_params(self, nFrames=1, nShapes=1, ret_tensor=False):
params = {
'poses': np.zeros((nFrames, NUM_POSES[self.model_type])),
'poses': np.zeros((nFrames, self.NUM_POSES)),
'shapes': np.zeros((nShapes, NUM_SHAPES)),
'Rh': np.zeros((nFrames, 3)),
'Th': np.zeros((nFrames, 3)),
}
if self.model_type == 'smplx':
params['expression'] = np.zeros((nFrames, NUM_EXPR))
params['expression'] = np.zeros((nFrames, self.NUM_EXPR))
if ret_tensor:
for key in params.keys():
params[key] = to_tensor(params[key], self.dtype, self.device)
@ -379,10 +387,10 @@ class SMPLlayer(nn.Module):
def check_params(self, body_params):
model_type = self.model_type
nFrames = body_params['poses'].shape[0]
if body_params['poses'].shape[1] != NUM_POSES[model_type]:
body_params['poses'] = np.hstack((body_params['poses'], np.zeros((nFrames, NUM_POSES[model_type] - body_params['poses'].shape[1]))))
if body_params['poses'].shape[1] != self.NUM_POSES:
body_params['poses'] = np.hstack((body_params['poses'], np.zeros((nFrames, self.NUM_POSES - body_params['poses'].shape[1]))))
if model_type == 'smplx' and 'expression' not in body_params.keys():
body_params['expression'] = np.zeros((nFrames, NUM_EXPR))
body_params['expression'] = np.zeros((nFrames, self.NUM_EXPR))
return body_params
@staticmethod
@ -393,4 +401,22 @@ class SMPLlayer(nn.Module):
output[key] = np.vstack([v[key] for v in param_list])
if share_shape:
output['shapes'] = output['shapes'].mean(axis=0, keepdims=True)
# add other keys
for key in param_list[0].keys():
if key in output.keys():
continue
output[key] = np.stack([v[key] for v in param_list])
return output
@staticmethod
def select_nf(params_all, nf):
output = {}
for key in ['poses', 'Rh', 'Th']:
output[key] = params_all[key][nf:nf+1, :]
if 'expression' in params_all.keys():
output['expression'] = params_all['expression'][nf:nf+1, :]
if params_all['shapes'].shape[0] == 1:
output['shapes'] = params_all['shapes']
else:
output['shapes'] = params_all['shapes'][nf:nf+1, :]
return output
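A hedged usage sketch of the parameter helpers above; it assumes an already constructed SMPLlayer instance named body_model, and the keyword values are illustrative.

params = body_model.init_params(nFrames=10)               # zero poses/shapes/Rh/Th for 10 frames
single = body_model.select_nf(params, nf=0)               # slice out frame 0, keeping shared shapes
vertices = body_model(return_verts=True, return_tensor=False, **single)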

View File

@ -379,3 +379,115 @@ def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
return posed_joints, rel_transforms
def dqs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
lbs_weights, pose2rot=True, dtype=torch.float32, only_shape=False,
use_shape_blending=True, use_pose_blending=True, J_shaped=None):
''' Performs Dual Quaternion Skinning (DQS) with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
v_template torch.tensor BxVx3
The template mesh that will be deformed
shapedirs : torch.tensor 1xNB
The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
dtype: torch.dtype, optional
Returns
-------
verts: torch.tensor BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
joints: torch.tensor BxJx3
The joints of the model
'''
batch_size = max(betas.shape[0], pose.shape[0])
device = betas.device
# Add shape contribution
if use_shape_blending:
v_shaped = v_template + blend_shapes(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
else:
v_shaped = v_template.unsqueeze(0).expand(batch_size, -1, -1)
assert J_shaped is not None
J = J_shaped[None].expand(batch_size, -1, -1)
if only_shape:
return v_shaped, J
# 3. Add pose blend shapes
# N x J x 3 x 3
if pose2rot:
rot_mats = batch_rodrigues(
pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
else:
rot_mats = pose.view(batch_size, -1, 3, 3)
if use_pose_blending:
ident = torch.eye(3, dtype=dtype, device=device)
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
pose_offsets = torch.matmul(pose_feature, posedirs) \
.view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
else:
v_posed = v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
verts=batch_dqs_blending(A,W,v_posed)
return verts, J_transformed
# A: (B, J, 4, 4), W: (B, V, J), Vs: (B, V, 3)
def batch_dqs_blending(A,W,Vs):
Bnum,Jnum,_,_=A.shape
_,Vnum,_=W.shape
A = A.view(Bnum*Jnum,4,4)
Rs=A[:,:3,:3]
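    # convert each 3x3 rotation matrix to a unit quaternion (w, x, y, z)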
ws=torch.sqrt(torch.clamp(Rs[:,0,0]+Rs[:,1,1]+Rs[:,2,2]+1.,min=1.e-6))/2.
xs=(Rs[:,2,1]-Rs[:,1,2])/(4.*ws)
ys=(Rs[:,0,2]-Rs[:,2,0])/(4.*ws)
zs=(Rs[:,1,0]-Rs[:,0,1])/(4.*ws)
Ts=A[:,:3,3]
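    # dual part of each dual quaternion: 0.5 * (0, t) * q, which encodes the joint translation t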
vDw=-0.5*( Ts[:,0]*xs + Ts[:,1]*ys + Ts[:,2]*zs)
vDx=0.5*( Ts[:,0]*ws + Ts[:,1]*zs - Ts[:,2]*ys)
vDy=0.5*(-Ts[:,0]*zs + Ts[:,1]*ws + Ts[:,2]*xs)
vDz=0.5*( Ts[:,0]*ys - Ts[:,1]*xs + Ts[:,2]*ws)
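    # blend the real (b0) and dual (be) quaternion parts per vertex using the skinning weights W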
b0=W.unsqueeze(-2)@torch.cat([ws[:,None],xs[:,None],ys[:,None],zs[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,J,4
be=W.unsqueeze(-2)@torch.cat([vDw[:,None],vDx[:,None],vDy[:,None],vDz[:,None]],dim=-1).reshape(Bnum, 1, Jnum, 4) #B,V,J,4
b0 = b0.reshape(-1, 4)
be = be.reshape(-1, 4)
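    # normalize by the norm of the real part so the blended rotation is a unit quaternion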
ns=torch.norm(b0,dim=-1,keepdim=True)
be=be/ns
b0=b0/ns
Vs=Vs.view(Bnum*Vnum,3)
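    # rotate each vertex by the blended quaternion b0 and add the translation recovered from the dual part be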
Vs=Vs+2.*b0[:,1:].cross(b0[:,1:].cross(Vs)+b0[:,:1]*Vs)+2.*(b0[:,:1]*be[:,1:]-be[:,:1]*b0[:,1:]+b0[:,1:].cross(be[:,1:]))
return Vs.reshape(Bnum,Vnum,3)
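A minimal sanity check for batch_dqs_blending, assuming the function is importable from this module; shapes follow the comment above, A: (B, J, 4, 4), W: (B, V, J), Vs: (B, V, 3). With identity bone transforms the vertices must come back unchanged.

import torch

B, J, V = 1, 2, 5
A = torch.eye(4).expand(B, J, 4, 4).contiguous()  # identity transform for every joint
W = torch.rand(B, V, J)
W = W / W.sum(dim=-1, keepdim=True)               # skinning weights sum to 1 per vertex
Vs = torch.rand(B, V, 3)
out = batch_dqs_blending(A, W, Vs)
assert torch.allclose(out, Vs, atol=1e-5)         # identity skinning is a no-op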

View File

@ -0,0 +1,8 @@
0 2 3
1 3 2
0 3 4
1 4 3
0 4 5
1 5 4
0 5 2
1 2 5

File diff suppressed because it is too large

View File

@ -0,0 +1,48 @@
0 2 3
1 19 18
0 3 4
1 20 19
0 4 5
1 21 20
0 5 6
1 22 21
0 6 7
1 23 22
0 7 8
1 24 23
0 8 9
1 25 24
0 9 2
1 18 25
10 3 2
10 11 3
11 4 3
11 12 4
12 5 4
12 13 5
13 6 5
13 14 6
14 7 6
14 15 7
15 8 7
15 16 8
16 9 8
16 17 9
17 2 9
17 10 2
18 11 10
18 19 11
19 12 11
19 20 12
20 13 12
20 21 13
21 14 13
21 22 14
22 15 14
22 23 15
23 16 15
23 24 16
24 17 16
24 25 17
25 10 17
25 18 10

View File

@ -0,0 +1,224 @@
0 2 3
1 99 98
0 3 4
1 100 99
0 4 5
1 101 100
0 5 6
1 102 101
0 6 7
1 103 102
0 7 8
1 104 103
0 8 9
1 105 104
0 9 10
1 106 105
0 10 11
1 107 106
0 11 12
1 108 107
0 12 13
1 109 108
0 13 14
1 110 109
0 14 15
1 111 110
0 15 16
1 112 111
0 16 17
1 113 112
0 17 2
1 98 113
18 3 2
18 19 3
19 4 3
19 20 4
20 5 4
20 21 5
21 6 5
21 22 6
22 7 6
22 23 7
23 8 7
23 24 8
24 9 8
24 25 9
25 10 9
25 26 10
26 11 10
26 27 11
27 12 11
27 28 12
28 13 12
28 29 13
29 14 13
29 30 14
30 15 14
30 31 15
31 16 15
31 32 16
32 17 16
32 33 17
33 2 17
33 18 2
34 19 18
34 35 19
35 20 19
35 36 20
36 21 20
36 37 21
37 22 21
37 38 22
38 23 22
38 39 23
39 24 23
39 40 24
40 25 24
40 41 25
41 26 25
41 42 26
42 27 26
42 43 27
43 28 27
43 44 28
44 29 28
44 45 29
45 30 29
45 46 30
46 31 30
46 47 31
47 32 31
47 48 32
48 33 32
48 49 33
49 18 33
49 34 18
50 35 34
50 51 35
51 36 35
51 52 36
52 37 36
52 53 37
53 38 37
53 54 38
54 39 38
54 55 39
55 40 39
55 56 40
56 41 40
56 57 41
57 42 41
57 58 42
58 43 42
58 59 43
59 44 43
59 60 44
60 45 44
60 61 45
61 46 45
61 62 46
62 47 46
62 63 47
63 48 47
63 64 48
64 49 48
64 65 49
65 34 49
65 50 34
66 51 50
66 67 51
67 52 51
67 68 52
68 53 52
68 69 53
69 54 53
69 70 54
70 55 54
70 71 55
71 56 55
71 72 56
72 57 56
72 73 57
73 58 57
73 74 58
74 59 58
74 75 59
75 60 59
75 76 60
76 61 60
76 77 61
77 62 61
77 78 62
78 63 62
78 79 63
79 64 63
79 80 64
80 65 64
80 81 65
81 50 65
81 66 50
82 67 66
82 83 67
83 68 67
83 84 68
84 69 68
84 85 69
85 70 69
85 86 70
86 71 70
86 87 71
87 72 71
87 88 72
88 73 72
88 89 73
89 74 73
89 90 74
90 75 74
90 91 75
91 76 75
91 92 76
92 77 76
92 93 77
93 78 77
93 94 78
94 79 78
94 95 79
95 80 79
95 96 80
96 81 80
96 97 81
97 66 81
97 82 66
98 83 82
98 99 83
99 84 83
99 100 84
100 85 84
100 101 85
101 86 85
101 102 86
102 87 86
102 103 87
103 88 87
103 104 88
104 89 88
104 105 89
105 90 89
105 106 90
106 91 90
106 107 91
107 92 91
107 108 92
108 93 92
108 109 93
109 94 93
109 110 94
110 95 94
110 111 95
111 96 95
111 112 96
112 97 96
112 113 97
113 82 97
113 98 82

View File

@ -0,0 +1,6 @@
0.000 0.000 1.000
0.000 0.000 -1.000
1.000 0.000 0.000
0.000 1.000 0.000
-1.000 0.000 0.000
-0.000 -1.000 0.000

View File

@ -0,0 +1,762 @@
0.000 0.000 1.000
0.000 0.000 -1.000
0.156 0.000 0.988
0.155 0.024 0.988
0.149 0.048 0.988
0.139 0.071 0.988
0.127 0.092 0.988
0.111 0.111 0.988
0.092 0.127 0.988
0.071 0.139 0.988
0.048 0.149 0.988
0.024 0.155 0.988
0.000 0.156 0.988
-0.024 0.155 0.988
-0.048 0.149 0.988
-0.071 0.139 0.988
-0.092 0.127 0.988
-0.111 0.111 0.988
-0.127 0.092 0.988
-0.139 0.071 0.988
-0.149 0.048 0.988
-0.155 0.024 0.988
-0.156 0.000 0.988
-0.155 -0.024 0.988
-0.149 -0.048 0.988
-0.139 -0.071 0.988
-0.127 -0.092 0.988
-0.111 -0.111 0.988
-0.092 -0.127 0.988
-0.071 -0.139 0.988
-0.048 -0.149 0.988
-0.024 -0.155 0.988
-0.000 -0.156 0.988
0.024 -0.155 0.988
0.048 -0.149 0.988
0.071 -0.139 0.988
0.092 -0.127 0.988
0.111 -0.111 0.988
0.127 -0.092 0.988
0.139 -0.071 0.988
0.149 -0.048 0.988
0.155 -0.024 0.988
0.309 0.000 0.951
0.305 0.048 0.951
0.294 0.095 0.951
0.275 0.140 0.951
0.250 0.182 0.951
0.219 0.219 0.951
0.182 0.250 0.951
0.140 0.275 0.951
0.095 0.294 0.951
0.048 0.305 0.951
0.000 0.309 0.951
-0.048 0.305 0.951
-0.095 0.294 0.951
-0.140 0.275 0.951
-0.182 0.250 0.951
-0.219 0.219 0.951
-0.250 0.182 0.951
-0.275 0.140 0.951
-0.294 0.095 0.951
-0.305 0.048 0.951
-0.309 0.000 0.951
-0.305 -0.048 0.951
-0.294 -0.095 0.951
-0.275 -0.140 0.951
-0.250 -0.182 0.951
-0.219 -0.219 0.951
-0.182 -0.250 0.951
-0.140 -0.275 0.951
-0.095 -0.294 0.951
-0.048 -0.305 0.951
-0.000 -0.309 0.951
0.048 -0.305 0.951
0.095 -0.294 0.951
0.140 -0.275 0.951
0.182 -0.250 0.951
0.219 -0.219 0.951
0.250 -0.182 0.951
0.275 -0.140 0.951
0.294 -0.095 0.951
0.305 -0.048 0.951
0.454 0.000 0.891
0.448 0.071 0.891
0.432 0.140 0.891
0.405 0.206 0.891
0.367 0.267 0.891
0.321 0.321 0.891
0.267 0.367 0.891
0.206 0.405 0.891
0.140 0.432 0.891
0.071 0.448 0.891
0.000 0.454 0.891
-0.071 0.448 0.891
-0.140 0.432 0.891
-0.206 0.405 0.891
-0.267 0.367 0.891
-0.321 0.321 0.891
-0.367 0.267 0.891
-0.405 0.206 0.891
-0.432 0.140 0.891
-0.448 0.071 0.891
-0.454 0.000 0.891
-0.448 -0.071 0.891
-0.432 -0.140 0.891
-0.405 -0.206 0.891
-0.367 -0.267 0.891
-0.321 -0.321 0.891
-0.267 -0.367 0.891
-0.206 -0.405 0.891
-0.140 -0.432 0.891
-0.071 -0.448 0.891
-0.000 -0.454 0.891
0.071 -0.448 0.891
0.140 -0.432 0.891
0.206 -0.405 0.891
0.267 -0.367 0.891
0.321 -0.321 0.891
0.367 -0.267 0.891
0.405 -0.206 0.891
0.432 -0.140 0.891
0.448 -0.071 0.891
0.588 0.000 0.809
0.581 0.092 0.809
0.559 0.182 0.809
0.524 0.267 0.809
0.476 0.345 0.809
0.416 0.416 0.809
0.345 0.476 0.809
0.267 0.524 0.809
0.182 0.559 0.809
0.092 0.581 0.809
0.000 0.588 0.809
-0.092 0.581 0.809
-0.182 0.559 0.809
-0.267 0.524 0.809
-0.345 0.476 0.809
-0.416 0.416 0.809
-0.476 0.345 0.809
-0.524 0.267 0.809
-0.559 0.182 0.809
-0.581 0.092 0.809
-0.588 0.000 0.809
-0.581 -0.092 0.809
-0.559 -0.182 0.809
-0.524 -0.267 0.809
-0.476 -0.345 0.809
-0.416 -0.416 0.809
-0.345 -0.476 0.809
-0.267 -0.524 0.809
-0.182 -0.559 0.809
-0.092 -0.581 0.809
-0.000 -0.588 0.809
0.092 -0.581 0.809
0.182 -0.559 0.809
0.267 -0.524 0.809
0.345 -0.476 0.809
0.416 -0.416 0.809
0.476 -0.345 0.809
0.524 -0.267 0.809
0.559 -0.182 0.809
0.581 -0.092 0.809
0.707 0.000 0.707
0.698 0.111 0.707
0.672 0.219 0.707
0.630 0.321 0.707
0.572 0.416 0.707
0.500 0.500 0.707
0.416 0.572 0.707
0.321 0.630 0.707
0.219 0.672 0.707
0.111 0.698 0.707
0.000 0.707 0.707
-0.111 0.698 0.707
-0.219 0.672 0.707
-0.321 0.630 0.707
-0.416 0.572 0.707
-0.500 0.500 0.707
-0.572 0.416 0.707
-0.630 0.321 0.707
-0.672 0.219 0.707
-0.698 0.111 0.707
-0.707 0.000 0.707
-0.698 -0.111 0.707
-0.672 -0.219 0.707
-0.630 -0.321 0.707
-0.572 -0.416 0.707
-0.500 -0.500 0.707
-0.416 -0.572 0.707
-0.321 -0.630 0.707
-0.219 -0.672 0.707
-0.111 -0.698 0.707
-0.000 -0.707 0.707
0.111 -0.698 0.707
0.219 -0.672 0.707
0.321 -0.630 0.707
0.416 -0.572 0.707
0.500 -0.500 0.707
0.572 -0.416 0.707
0.630 -0.321 0.707
0.672 -0.219 0.707
0.698 -0.111 0.707
0.809 0.000 0.588
0.799 0.127 0.588
0.769 0.250 0.588
0.721 0.367 0.588
0.655 0.476 0.588
0.572 0.572 0.588
0.476 0.655 0.588
0.367 0.721 0.588
0.250 0.769 0.588
0.127 0.799 0.588
0.000 0.809 0.588
-0.127 0.799 0.588
-0.250 0.769 0.588
-0.367 0.721 0.588
-0.476 0.655 0.588
-0.572 0.572 0.588
-0.655 0.476 0.588
-0.721 0.367 0.588
-0.769 0.250 0.588
-0.799 0.127 0.588
-0.809 0.000 0.588
-0.799 -0.127 0.588
-0.769 -0.250 0.588
-0.721 -0.367 0.588
-0.655 -0.476 0.588
-0.572 -0.572 0.588
-0.476 -0.655 0.588
-0.367 -0.721 0.588
-0.250 -0.769 0.588
-0.127 -0.799 0.588
-0.000 -0.809 0.588
0.127 -0.799 0.588
0.250 -0.769 0.588
0.367 -0.721 0.588
0.476 -0.655 0.588
0.572 -0.572 0.588
0.655 -0.476 0.588
0.721 -0.367 0.588
0.769 -0.250 0.588
0.799 -0.127 0.588
0.891 0.000 0.454
0.880 0.139 0.454
0.847 0.275 0.454
0.794 0.405 0.454
0.721 0.524 0.454
0.630 0.630 0.454
0.524 0.721 0.454
0.405 0.794 0.454
0.275 0.847 0.454
0.139 0.880 0.454
0.000 0.891 0.454
-0.139 0.880 0.454
-0.275 0.847 0.454
-0.405 0.794 0.454
-0.524 0.721 0.454
-0.630 0.630 0.454
-0.721 0.524 0.454
-0.794 0.405 0.454
-0.847 0.275 0.454
-0.880 0.139 0.454
-0.891 0.000 0.454
-0.880 -0.139 0.454
-0.847 -0.275 0.454
-0.794 -0.405 0.454
-0.721 -0.524 0.454
-0.630 -0.630 0.454
-0.524 -0.721 0.454
-0.405 -0.794 0.454
-0.275 -0.847 0.454
-0.139 -0.880 0.454
-0.000 -0.891 0.454
0.139 -0.880 0.454
0.275 -0.847 0.454
0.405 -0.794 0.454
0.524 -0.721 0.454
0.630 -0.630 0.454
0.721 -0.524 0.454
0.794 -0.405 0.454
0.847 -0.275 0.454
0.880 -0.139 0.454
0.951 0.000 0.309
0.939 0.149 0.309
0.905 0.294 0.309
0.847 0.432 0.309
0.769 0.559 0.309
0.672 0.672 0.309
0.559 0.769 0.309
0.432 0.847 0.309
0.294 0.905 0.309
0.149 0.939 0.309
0.000 0.951 0.309
-0.149 0.939 0.309
-0.294 0.905 0.309
-0.432 0.847 0.309
-0.559 0.769 0.309
-0.672 0.672 0.309
-0.769 0.559 0.309
-0.847 0.432 0.309
-0.905 0.294 0.309
-0.939 0.149 0.309
-0.951 0.000 0.309
-0.939 -0.149 0.309
-0.905 -0.294 0.309
-0.847 -0.432 0.309
-0.769 -0.559 0.309
-0.672 -0.672 0.309
-0.559 -0.769 0.309
-0.432 -0.847 0.309
-0.294 -0.905 0.309
-0.149 -0.939 0.309
-0.000 -0.951 0.309
0.149 -0.939 0.309
0.294 -0.905 0.309
0.432 -0.847 0.309
0.559 -0.769 0.309
0.672 -0.672 0.309
0.769 -0.559 0.309
0.847 -0.432 0.309
0.905 -0.294 0.309
0.939 -0.149 0.309
0.988 0.000 0.156
0.976 0.155 0.156
0.939 0.305 0.156
0.880 0.448 0.156
0.799 0.581 0.156
0.698 0.698 0.156
0.581 0.799 0.156
0.448 0.880 0.156
0.305 0.939 0.156
0.155 0.976 0.156
0.000 0.988 0.156
-0.155 0.976 0.156
-0.305 0.939 0.156
-0.448 0.880 0.156
-0.581 0.799 0.156
-0.698 0.698 0.156
-0.799 0.581 0.156
-0.880 0.448 0.156
-0.939 0.305 0.156
-0.976 0.155 0.156
-0.988 0.000 0.156
-0.976 -0.155 0.156
-0.939 -0.305 0.156
-0.880 -0.448 0.156
-0.799 -0.581 0.156
-0.698 -0.698 0.156
-0.581 -0.799 0.156
-0.448 -0.880 0.156
-0.305 -0.939 0.156
-0.155 -0.976 0.156
-0.000 -0.988 0.156
0.155 -0.976 0.156
0.305 -0.939 0.156
0.448 -0.880 0.156
0.581 -0.799 0.156
0.698 -0.698 0.156
0.799 -0.581 0.156
0.880 -0.448 0.156
0.939 -0.305 0.156
0.976 -0.155 0.156
1.000 0.000 0.000
0.988 0.156 0.000
0.951 0.309 0.000
0.891 0.454 0.000
0.809 0.588 0.000
0.707 0.707 0.000
0.588 0.809 0.000
0.454 0.891 0.000
0.309 0.951 0.000
0.156 0.988 0.000
0.000 1.000 0.000
-0.156 0.988 0.000
-0.309 0.951 0.000
-0.454 0.891 0.000
-0.588 0.809 0.000
-0.707 0.707 0.000
-0.809 0.588 0.000
-0.891 0.454 0.000
-0.951 0.309 0.000
-0.988 0.156 0.000
-1.000 0.000 0.000
-0.988 -0.156 0.000
-0.951 -0.309 0.000
-0.891 -0.454 0.000
-0.809 -0.588 0.000
-0.707 -0.707 0.000
-0.588 -0.809 0.000
-0.454 -0.891 0.000
-0.309 -0.951 0.000
-0.156 -0.988 0.000
-0.000 -1.000 0.000
0.156 -0.988 0.000
0.309 -0.951 0.000
0.454 -0.891 0.000
0.588 -0.809 0.000
0.707 -0.707 0.000
0.809 -0.588 0.000
0.891 -0.454 0.000
0.951 -0.309 0.000
0.988 -0.156 0.000
0.988 0.000 -0.156
0.976 0.155 -0.156
0.939 0.305 -0.156
0.880 0.448 -0.156
0.799 0.581 -0.156
0.698 0.698 -0.156
0.581 0.799 -0.156
0.448 0.880 -0.156
0.305 0.939 -0.156
0.155 0.976 -0.156
0.000 0.988 -0.156
-0.155 0.976 -0.156
-0.305 0.939 -0.156
-0.448 0.880 -0.156
-0.581 0.799 -0.156
-0.698 0.698 -0.156
-0.799 0.581 -0.156
-0.880 0.448 -0.156
-0.939 0.305 -0.156
-0.976 0.155 -0.156
-0.988 0.000 -0.156
-0.976 -0.155 -0.156
-0.939 -0.305 -0.156
-0.880 -0.448 -0.156
-0.799 -0.581 -0.156
-0.698 -0.698 -0.156
-0.581 -0.799 -0.156
-0.448 -0.880 -0.156
-0.305 -0.939 -0.156
-0.155 -0.976 -0.156
-0.000 -0.988 -0.156
0.155 -0.976 -0.156
0.305 -0.939 -0.156
0.448 -0.880 -0.156
0.581 -0.799 -0.156
0.698 -0.698 -0.156
0.799 -0.581 -0.156
0.880 -0.448 -0.156
0.939 -0.305 -0.156
0.976 -0.155 -0.156
0.951 0.000 -0.309
0.939 0.149 -0.309
0.905 0.294 -0.309
0.847 0.432 -0.309
0.769 0.559 -0.309
0.672 0.672 -0.309
0.559 0.769 -0.309
0.432 0.847 -0.309
0.294 0.905 -0.309
0.149 0.939 -0.309
0.000 0.951 -0.309
-0.149 0.939 -0.309
-0.294 0.905 -0.309
-0.432 0.847 -0.309
-0.559 0.769 -0.309
-0.672 0.672 -0.309
-0.769 0.559 -0.309
-0.847 0.432 -0.309
-0.905 0.294 -0.309
-0.939 0.149 -0.309
-0.951 0.000 -0.309
-0.939 -0.149 -0.309
-0.905 -0.294 -0.309
-0.847 -0.432 -0.309
-0.769 -0.559 -0.309
-0.672 -0.672 -0.309
-0.559 -0.769 -0.309
-0.432 -0.847 -0.309
-0.294 -0.905 -0.309
-0.149 -0.939 -0.309
-0.000 -0.951 -0.309
0.149 -0.939 -0.309
0.294 -0.905 -0.309
0.432 -0.847 -0.309
0.559 -0.769 -0.309
0.672 -0.672 -0.309
0.769 -0.559 -0.309
0.847 -0.432 -0.309
0.905 -0.294 -0.309
0.939 -0.149 -0.309
0.891 0.000 -0.454
0.880 0.139 -0.454
0.847 0.275 -0.454
0.794 0.405 -0.454
0.721 0.524 -0.454
0.630 0.630 -0.454
0.524 0.721 -0.454
0.405 0.794 -0.454
0.275 0.847 -0.454
0.139 0.880 -0.454
0.000 0.891 -0.454
-0.139 0.880 -0.454
-0.275 0.847 -0.454
-0.405 0.794 -0.454
-0.524 0.721 -0.454
-0.630 0.630 -0.454
-0.721 0.524 -0.454
-0.794 0.405 -0.454
-0.847 0.275 -0.454
-0.880 0.139 -0.454
-0.891 0.000 -0.454
-0.880 -0.139 -0.454
-0.847 -0.275 -0.454
-0.794 -0.405 -0.454
-0.721 -0.524 -0.454
-0.630 -0.630 -0.454
-0.524 -0.721 -0.454
-0.405 -0.794 -0.454
-0.275 -0.847 -0.454
-0.139 -0.880 -0.454
-0.000 -0.891 -0.454
0.139 -0.880 -0.454
0.275 -0.847 -0.454
0.405 -0.794 -0.454
0.524 -0.721 -0.454
0.630 -0.630 -0.454
0.721 -0.524 -0.454
0.794 -0.405 -0.454
0.847 -0.275 -0.454
0.880 -0.139 -0.454
0.809 0.000 -0.588
0.799 0.127 -0.588
0.769 0.250 -0.588
0.721 0.367 -0.588
0.655 0.476 -0.588
0.572 0.572 -0.588
0.476 0.655 -0.588
0.367 0.721 -0.588
0.250 0.769 -0.588
0.127 0.799 -0.588
0.000 0.809 -0.588
-0.127 0.799 -0.588
-0.250 0.769 -0.588
-0.367 0.721 -0.588
-0.476 0.655 -0.588
-0.572 0.572 -0.588
-0.655 0.476 -0.588
-0.721 0.367 -0.588
-0.769 0.250 -0.588
-0.799 0.127 -0.588
-0.809 0.000 -0.588
-0.799 -0.127 -0.588
-0.769 -0.250 -0.588
-0.721 -0.367 -0.588
-0.655 -0.476 -0.588
-0.572 -0.572 -0.588
-0.476 -0.655 -0.588
-0.367 -0.721 -0.588
-0.250 -0.769 -0.588
-0.127 -0.799 -0.588
-0.000 -0.809 -0.588
0.127 -0.799 -0.588
0.250 -0.769 -0.588
0.367 -0.721 -0.588
0.476 -0.655 -0.588
0.572 -0.572 -0.588
0.655 -0.476 -0.588
0.721 -0.367 -0.588
0.769 -0.250 -0.588
0.799 -0.127 -0.588
0.707 0.000 -0.707
0.698 0.111 -0.707
0.672 0.219 -0.707
0.630 0.321 -0.707
0.572 0.416 -0.707
0.500 0.500 -0.707
0.416 0.572 -0.707
0.321 0.630 -0.707
0.219 0.672 -0.707
0.111 0.698 -0.707
0.000 0.707 -0.707
-0.111 0.698 -0.707
-0.219 0.672 -0.707
-0.321 0.630 -0.707
-0.416 0.572 -0.707
-0.500 0.500 -0.707
-0.572 0.416 -0.707
-0.630 0.321 -0.707
-0.672 0.219 -0.707
-0.698 0.111 -0.707
-0.707 0.000 -0.707
-0.698 -0.111 -0.707
-0.672 -0.219 -0.707
-0.630 -0.321 -0.707
-0.572 -0.416 -0.707
-0.500 -0.500 -0.707
-0.416 -0.572 -0.707
-0.321 -0.630 -0.707
-0.219 -0.672 -0.707
-0.111 -0.698 -0.707
-0.000 -0.707 -0.707
0.111 -0.698 -0.707
0.219 -0.672 -0.707
0.321 -0.630 -0.707
0.416 -0.572 -0.707
0.500 -0.500 -0.707
0.572 -0.416 -0.707
0.630 -0.321 -0.707
0.672 -0.219 -0.707
0.698 -0.111 -0.707
0.588 0.000 -0.809
0.581 0.092 -0.809
0.559 0.182 -0.809
0.524 0.267 -0.809
0.476 0.345 -0.809
0.416 0.416 -0.809
0.345 0.476 -0.809
0.267 0.524 -0.809
0.182 0.559 -0.809
0.092 0.581 -0.809
0.000 0.588 -0.809
-0.092 0.581 -0.809
-0.182 0.559 -0.809
-0.267 0.524 -0.809
-0.345 0.476 -0.809
-0.416 0.416 -0.809
-0.476 0.345 -0.809
-0.524 0.267 -0.809
-0.559 0.182 -0.809
-0.581 0.092 -0.809
-0.588 0.000 -0.809
-0.581 -0.092 -0.809
-0.559 -0.182 -0.809
-0.524 -0.267 -0.809
-0.476 -0.345 -0.809
-0.416 -0.416 -0.809
-0.345 -0.476 -0.809
-0.267 -0.524 -0.809
-0.182 -0.559 -0.809
-0.092 -0.581 -0.809
-0.000 -0.588 -0.809
0.092 -0.581 -0.809
0.182 -0.559 -0.809
0.267 -0.524 -0.809
0.345 -0.476 -0.809
0.416 -0.416 -0.809
0.476 -0.345 -0.809
0.524 -0.267 -0.809
0.559 -0.182 -0.809
0.581 -0.092 -0.809
0.454 0.000 -0.891
0.448 0.071 -0.891
0.432 0.140 -0.891
0.405 0.206 -0.891
0.367 0.267 -0.891
0.321 0.321 -0.891
0.267 0.367 -0.891
0.206 0.405 -0.891
0.140 0.432 -0.891
0.071 0.448 -0.891
0.000 0.454 -0.891
-0.071 0.448 -0.891
-0.140 0.432 -0.891
-0.206 0.405 -0.891
-0.267 0.367 -0.891
-0.321 0.321 -0.891
-0.367 0.267 -0.891
-0.405 0.206 -0.891
-0.432 0.140 -0.891
-0.448 0.071 -0.891
-0.454 0.000 -0.891
-0.448 -0.071 -0.891
-0.432 -0.140 -0.891
-0.405 -0.206 -0.891
-0.367 -0.267 -0.891
-0.321 -0.321 -0.891
-0.267 -0.367 -0.891
-0.206 -0.405 -0.891
-0.140 -0.432 -0.891
-0.071 -0.448 -0.891
-0.000 -0.454 -0.891
0.071 -0.448 -0.891
0.140 -0.432 -0.891
0.206 -0.405 -0.891
0.267 -0.367 -0.891
0.321 -0.321 -0.891
0.367 -0.267 -0.891
0.405 -0.206 -0.891
0.432 -0.140 -0.891
0.448 -0.071 -0.891
0.309 0.000 -0.951
0.305 0.048 -0.951
0.294 0.095 -0.951
0.275 0.140 -0.951
0.250 0.182 -0.951
0.219 0.219 -0.951
0.182 0.250 -0.951
0.140 0.275 -0.951
0.095 0.294 -0.951
0.048 0.305 -0.951
0.000 0.309 -0.951
-0.048 0.305 -0.951
-0.095 0.294 -0.951
-0.140 0.275 -0.951
-0.182 0.250 -0.951
-0.219 0.219 -0.951
-0.250 0.182 -0.951
-0.275 0.140 -0.951
-0.294 0.095 -0.951
-0.305 0.048 -0.951
-0.309 0.000 -0.951
-0.305 -0.048 -0.951
-0.294 -0.095 -0.951
-0.275 -0.140 -0.951
-0.250 -0.182 -0.951
-0.219 -0.219 -0.951
-0.182 -0.250 -0.951
-0.140 -0.275 -0.951
-0.095 -0.294 -0.951
-0.048 -0.305 -0.951
-0.000 -0.309 -0.951
0.048 -0.305 -0.951
0.095 -0.294 -0.951
0.140 -0.275 -0.951
0.182 -0.250 -0.951
0.219 -0.219 -0.951
0.250 -0.182 -0.951
0.275 -0.140 -0.951
0.294 -0.095 -0.951
0.305 -0.048 -0.951
0.156 0.000 -0.988
0.155 0.024 -0.988
0.149 0.048 -0.988
0.139 0.071 -0.988
0.127 0.092 -0.988
0.111 0.111 -0.988
0.092 0.127 -0.988
0.071 0.139 -0.988
0.048 0.149 -0.988
0.024 0.155 -0.988
0.000 0.156 -0.988
-0.024 0.155 -0.988
-0.048 0.149 -0.988
-0.071 0.139 -0.988
-0.092 0.127 -0.988
-0.111 0.111 -0.988
-0.127 0.092 -0.988
-0.139 0.071 -0.988
-0.149 0.048 -0.988
-0.155 0.024 -0.988
-0.156 0.000 -0.988
-0.155 -0.024 -0.988
-0.149 -0.048 -0.988
-0.139 -0.071 -0.988
-0.127 -0.092 -0.988
-0.111 -0.111 -0.988
-0.092 -0.127 -0.988
-0.071 -0.139 -0.988
-0.048 -0.149 -0.988
-0.024 -0.155 -0.988
-0.000 -0.156 -0.988
0.024 -0.155 -0.988
0.048 -0.149 -0.988
0.071 -0.139 -0.988
0.092 -0.127 -0.988
0.111 -0.111 -0.988
0.127 -0.092 -0.988
0.139 -0.071 -0.988
0.149 -0.048 -0.988
0.155 -0.024 -0.988

View File

@ -0,0 +1,26 @@
0.000 0.000 1.000
0.000 0.000 -1.000
0.707 0.000 0.707
0.500 0.500 0.707
0.000 0.707 0.707
-0.500 0.500 0.707
-0.707 0.000 0.707
-0.500 -0.500 0.707
-0.000 -0.707 0.707
0.500 -0.500 0.707
1.000 0.000 0.000
0.707 0.707 0.000
0.000 1.000 0.000
-0.707 0.707 0.000
-1.000 0.000 0.000
-0.707 -0.707 0.000
-0.000 -1.000 0.000
0.707 -0.707 0.000
0.707 0.000 -0.707
0.500 0.500 -0.707
0.000 0.707 -0.707
-0.500 0.500 -0.707
-0.707 0.000 -0.707
-0.500 -0.500 -0.707
-0.000 -0.707 -0.707
0.500 -0.500 -0.707

View File

@ -0,0 +1,114 @@
0.000 0.000 1.000
0.000 0.000 -1.000
0.383 0.000 0.924
0.354 0.146 0.924
0.271 0.271 0.924
0.146 0.354 0.924
0.000 0.383 0.924
-0.146 0.354 0.924
-0.271 0.271 0.924
-0.354 0.146 0.924
-0.383 0.000 0.924
-0.354 -0.146 0.924
-0.271 -0.271 0.924
-0.146 -0.354 0.924
-0.000 -0.383 0.924
0.146 -0.354 0.924
0.271 -0.271 0.924
0.354 -0.146 0.924
0.707 0.000 0.707
0.653 0.271 0.707
0.500 0.500 0.707
0.271 0.653 0.707
0.000 0.707 0.707
-0.271 0.653 0.707
-0.500 0.500 0.707
-0.653 0.271 0.707
-0.707 0.000 0.707
-0.653 -0.271 0.707
-0.500 -0.500 0.707
-0.271 -0.653 0.707
-0.000 -0.707 0.707
0.271 -0.653 0.707
0.500 -0.500 0.707
0.653 -0.271 0.707
0.924 0.000 0.383
0.854 0.354 0.383
0.653 0.653 0.383
0.354 0.854 0.383
0.000 0.924 0.383
-0.354 0.854 0.383
-0.653 0.653 0.383
-0.854 0.354 0.383
-0.924 0.000 0.383
-0.854 -0.354 0.383
-0.653 -0.653 0.383
-0.354 -0.854 0.383
-0.000 -0.924 0.383
0.354 -0.854 0.383
0.653 -0.653 0.383
0.854 -0.354 0.383
1.000 0.000 0.000
0.924 0.383 0.000
0.707 0.707 0.000
0.383 0.924 0.000
0.000 1.000 0.000
-0.383 0.924 0.000
-0.707 0.707 0.000
-0.924 0.383 0.000
-1.000 0.000 0.000
-0.924 -0.383 0.000
-0.707 -0.707 0.000
-0.383 -0.924 0.000
-0.000 -1.000 0.000
0.383 -0.924 0.000
0.707 -0.707 0.000
0.924 -0.383 0.000
0.924 0.000 -0.383
0.854 0.354 -0.383
0.653 0.653 -0.383
0.354 0.854 -0.383
0.000 0.924 -0.383
-0.354 0.854 -0.383
-0.653 0.653 -0.383
-0.854 0.354 -0.383
-0.924 0.000 -0.383
-0.854 -0.354 -0.383
-0.653 -0.653 -0.383
-0.354 -0.854 -0.383
-0.000 -0.924 -0.383
0.354 -0.854 -0.383
0.653 -0.653 -0.383
0.854 -0.354 -0.383
0.707 0.000 -0.707
0.653 0.271 -0.707
0.500 0.500 -0.707
0.271 0.653 -0.707
0.000 0.707 -0.707
-0.271 0.653 -0.707
-0.500 0.500 -0.707
-0.653 0.271 -0.707
-0.707 0.000 -0.707
-0.653 -0.271 -0.707
-0.500 -0.500 -0.707
-0.271 -0.653 -0.707
-0.000 -0.707 -0.707
0.271 -0.653 -0.707
0.500 -0.500 -0.707
0.653 -0.271 -0.707
0.383 0.000 -0.924
0.354 0.146 -0.924
0.271 0.271 -0.924
0.146 0.354 -0.924
0.000 0.383 -0.924
-0.146 0.354 -0.924
-0.271 0.271 -0.924
-0.354 0.146 -0.924
-0.383 0.000 -0.924
-0.354 -0.146 -0.924
-0.271 -0.271 -0.924
-0.146 -0.354 -0.924
-0.000 -0.383 -0.924
0.146 -0.354 -0.924
0.271 -0.271 -0.924
0.354 -0.146 -0.924

View File

@ -2,7 +2,7 @@
@ Date: 2021-01-17 22:44:34
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-20 17:24:12
@ LastEditTime: 2021-08-24 16:28:15
@ FilePath: /EasyMocap/easymocap/visualize/geometry.py
'''
import numpy as np
@ -30,11 +30,13 @@ def create_point(points, r=0.01):
points (array): (N, 3)/(N, 4)
r (float, optional): radius. Defaults to 0.01.
"""
points = np.array(points)
nPoints = points.shape[0]
vert, face = load_sphere()
vert = vert * r
nVerts = vert.shape[0]
vert = vert[None, :, :].repeat(points.shape[0], 0)
vert = vert + points[:, None, :]
vert = vert + points[:, None, :3]
verts = np.vstack(vert)
face = face[None, :, :].repeat(points.shape[0], 0)
face = face + nVerts * np.arange(nPoints).reshape(nPoints, 1, 1)
@ -148,6 +150,17 @@ def create_plane(normal, center, dx=1, dy=1, dz=0.005, color=[0.8, 0.8, 0.8]):
vertices += np.array(center).reshape(-1, 3)
return {'vertices': vertices, 'faces': PLANE_FACES.copy(), 'name': 'plane'}
def merge_meshes(meshes):
verts = []
faces = []
# TODO:add colors
nVerts = 0
for mesh in meshes:
verts.append(mesh['vertices'])
faces.append(mesh['faces'] + nVerts)
nVerts += mesh['vertices'].shape[0]
return {'vertices': np.vstack(verts), 'faces':np.vstack(faces), 'name': 'compose_{}'.format(meshes[0]['name'])}
def create_cameras(cameras):
vertex = np.array([[0.203982,0.061435,0.00717595],[-0.116019,0.061435,0.00717595],[-0.116019,-0.178565,0.00717595],[0.203982,-0.178565,0.00717595],[0.203982,0.061435,-0.092824],[-0.116019,0.061435,-0.092824],[-0.116019,-0.178565,-0.092824],[0.203982,-0.178565,-0.092824],[0.131154,-0.0361827,0.00717595],[0.131154,-0.0361827,0.092176],[0.122849,-0.015207,0.00717595],[0.122849,-0.015207,0.092176],[0.109589,0.00304419,0.00717595],[0.109589,0.00304419,0.092176],[0.092206,0.0174247,0.00717595],[0.092206,0.0174247,0.092176],[0.071793,0.0270302,0.00717595],[0.071793,0.0270302,0.092176],[0.0496327,0.0312577,0.00717595],[0.0496327,0.0312577,0.092176],[0.0271172,0.0298412,0.00717595],[0.0271172,0.0298412,0.092176],[0.00566135,0.0228697,0.00717595],[0.00566135,0.0228697,0.092176],[-0.0133865,0.0107812,0.00717595],[-0.0133865,0.0107812,0.092176],[-0.02883,-0.0056643,0.00717595],[-0.02883,-0.0056643,0.092176],[-0.0396985,-0.0254336,0.00717595],[-0.0396985,-0.0254336,0.092176],[-0.045309,-0.0472848,0.00717595],[-0.045309,-0.0472848,0.092176],[-0.045309,-0.069845,0.00717595],[-0.045309,-0.069845,0.092176],[-0.0396985,-0.091696,0.00717595],[-0.0396985,-0.091696,0.092176],[-0.02883,-0.111466,0.00717595],[-0.02883,-0.111466,0.092176],[-0.0133865,-0.127911,0.00717595],[-0.0133865,-0.127911,0.092176],[0.00566135,-0.14,0.00717595],[0.00566135,-0.14,0.092176],[0.0271172,-0.146971,0.00717595],[0.0271172,-0.146971,0.092176],[0.0496327,-0.148388,0.00717595],[0.0496327,-0.148388,0.092176],[0.071793,-0.14416,0.00717595],[0.071793,-0.14416,0.092176],[0.092206,-0.134554,0.00717595],[0.092206,-0.134554,0.092176],[0.109589,-0.120174,0.00717595],[0.109589,-0.120174,0.092176],[0.122849,-0.101923,0.00717595],[0.122849,-0.101923,0.092176],[0.131154,-0.080947,0.00717595],[0.131154,-0.080947,0.092176],[0.133982,-0.058565,0.00717595],[0.133982,-0.058565,0.092176],[-0.0074325,0.061435,-0.0372285],[-0.0074325,0.074435,-0.0372285],[-0.0115845,0.061435,-0.0319846],[-0.0115845,0.074435,-0.0319846],[-0.018215,0.061435,-0.0274218],[-0.018215,0.074435,-0.0274218],[-0.0269065,0.061435,-0.0238267],[-0.0269065,0.074435,-0.0238267],[-0.0371125,0.061435,-0.0214253],[-0.0371125,0.074435,-0.0214253],[-0.048193,0.061435,-0.0203685],[-0.048193,0.074435,-0.0203685],[-0.0594505,0.061435,-0.0207226],[-0.0594505,0.074435,-0.0207226],[-0.0701785,0.061435,-0.0224655],[-0.0701785,0.074435,-0.0224655],[-0.0797025,0.061435,-0.0254875],[-0.0797025,0.074435,-0.0254875],[-0.0874245,0.061435,-0.0295989],[-0.0874245,0.074435,-0.0295989],[-0.0928585,0.061435,-0.0345412],[-0.0928585,0.074435,-0.0345412],[-0.0956635,0.061435,-0.040004],[-0.0956635,0.074435,-0.040004],[-0.0956635,0.061435,-0.045644],[-0.0956635,0.074435,-0.045644],[-0.0928585,0.061435,-0.051107],[-0.0928585,0.074435,-0.051107],[-0.0874245,0.061435,-0.056049],[-0.0874245,0.074435,-0.056049],[-0.0797025,0.061435,-0.0601605],[-0.0797025,0.074435,-0.0601605],[-0.0701785,0.061435,-0.0631825],[-0.0701785,0.074435,-0.0631825],[-0.0594505,0.061435,-0.0649255],[-0.0594505,0.074435,-0.0649255],[-0.048193,0.061435,-0.0652795],[-0.048193,0.074435,-0.0652795],[-0.0371125,0.061435,-0.064223],[-0.0371125,0.074435,-0.064223],[-0.0269065,0.061435,-0.0618215],[-0.0269065,0.074435,-0.0618215],[-0.018215,0.061435,-0.0582265],[-0.018215,0.074435,-0.0582265],[-0.0115845,0.061435,-0.0536635],[-0.0115845,0.074435,-0.0536635],[-0.0074325,0.061435,-0.0484195],[-0.0074325,0.074435,-0.0484195],[-0.0060185,0.061435,-0.0428241],[-0.0060185,0.074435,-0.0428241]])*0.5
tri = [[4,3,2],[1,4,2],[6,1,2],[6,5,1],[8,4,1],[5,8,1],[3,7,2],[7,6,2],[4,7,3],[8,7,4],[6,7,5],[7,8,5],[43,42,44],[42,43,41],[43,46,45],[46,43,44],[58,9,57],[9,58,10],[55,58,57],[56,58,55],[53,54,55],[54,56,55],[12,11,9],[12,9,10],[21,20,22],[20,21,19],[34,33,32],[32,33,31],[35,36,37],[37,36,38],[33,36,35],[36,33,34],[29,30,31],[30,32,31],[40,39,37],[40,37,38],[39,40,41],[40,42,41],[47,48,49],[49,48,50],[48,47,45],[46,48,45],[49,52,51],[52,49,50],[52,53,51],[52,54,53],[14,15,13],[15,14,16],[11,14,13],[12,14,11],[18,17,15],[18,15,16],[17,18,19],[18,20,19],[27,35,37],[17,27,15],[27,53,55],[27,49,51],[11,27,9],[27,47,49],[27,33,35],[23,27,21],[27,39,41],[27,55,57],[9,27,57],[15,27,13],[39,27,37],[47,27,45],[53,27,51],[27,11,13],[43,27,41],[27,29,31],[27,43,45],[27,17,19],[21,27,19],[33,27,31],[27,23,25],[23,24,25],[25,24,26],[24,21,22],[24,23,21],[28,36,34],[42,28,44],[28,58,56],[54,28,56],[52,28,54],[28,34,32],[28,46,44],[18,28,20],[20,28,22],[30,28,32],[40,28,42],[58,28,10],[28,48,46],[28,12,10],[28,14,12],[36,28,38],[28,24,22],[28,40,38],[48,28,50],[28,52,50],[14,28,16],[28,18,16],[24,28,26],[28,27,25],[28,25,26],[28,30,29],[27,28,29],[108,59,60],[59,108,107],[62,59,61],[59,62,60],[103,102,101],[102,103,104],[64,61,63],[64,62,61],[70,67,69],[67,70,68],[70,71,72],[71,70,69],[83,84,82],[83,82,81],[86,85,87],[86,87,88],[86,83,85],[83,86,84],[77,78,75],[75,78,76],[105,106,103],[103,106,104],[108,106,107],[106,105,107],[97,96,95],[96,97,98],[96,93,95],[93,96,94],[93,92,91],[92,93,94],[79,105,103],[59,79,61],[79,93,91],[83,79,85],[85,79,87],[61,79,63],[79,103,101],[65,79,67],[79,99,97],[89,79,91],[79,77,75],[79,59,107],[67,79,69],[79,89,87],[79,73,71],[105,79,107],[79,97,95],[79,71,69],[79,83,81],[99,79,101],[93,79,95],[79,65,63],[73,79,75],[99,100,97],[97,100,98],[102,100,101],[100,99,101],[89,90,87],[87,90,88],[90,89,91],[92,90,91],[66,67,68],[66,65,67],[66,64,63],[65,66,63],[74,75,76],[74,73,75],[71,74,72],[73,74,71],[80,106,108],[74,80,72],[86,80,84],[84,80,82],[64,80,62],[80,108,60],[80,100,102],[62,80,60],[66,80,64],[80,70,72],[80,102,104],[96,80,94],[80,90,92],[70,80,68],[80,86,88],[78,80,76],[106,80,104],[80,96,98],[80,92,94],[100,80,98],[90,80,88],[80,66,68],[80,74,76],[82,80,81],[80,79,81],[80,78,77],[79,80,77]]
@ -159,6 +172,7 @@ def create_cameras(cameras):
meshes.append({
'vertices': vertices, 'faces': triangles, 'name': 'camera_{}'.format(nv), 'vid': nv
})
meshes = merge_meshes(meshes)
return meshes
import os
@ -203,3 +217,6 @@ def create_mesh_pyrender(vert, faces, col):
mesh,
material=material)
return mesh
if __name__ == "__main__":
pass
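A small illustration of the merge_meshes helper added in this file, using two toy triangles; the data is made up for the example.

import numpy as np

tri_a = {'vertices': np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]),
         'faces': np.array([[0, 1, 2]]), 'name': 'tri'}
tri_b = {'vertices': tri_a['vertices'] + np.array([0., 0., 1.]),
         'faces': np.array([[0, 1, 2]]), 'name': 'tri'}
merged = merge_meshes([tri_a, tri_b])
# faces of the second mesh are offset by the first mesh's vertex count:
# merged['faces'] -> [[0, 1, 2], [3, 4, 5]], merged['name'] -> 'compose_tri'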

View File

@ -2,8 +2,8 @@
@ Date: 2021-01-17 21:38:19
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-06-28 11:43:00
@ FilePath: /EasyMocapRelease/easymocap/visualize/skelmodel.py
@ LastEditTime: 2021-08-24 16:42:22
@ FilePath: /EasyMocap/easymocap/visualize/skelmodel.py
'''
import numpy as np
import cv2
@ -39,7 +39,7 @@ def calTransformation(v_i, v_j, r, adaptr=False, ratio=10):
return T, r, length
class SkelModel:
def __init__(self, nJoints=None, kintree=None, body_type=None, joint_radius=0.02, **kwargs) -> None:
def __init__(self, nJoints=None, kintree=None, body_type=None, joint_radius=0.02, res=20, **kwargs) -> None:
if nJoints is not None:
self.nJoints = nJoints
self.kintree = kintree
@ -50,8 +50,8 @@ class SkelModel:
self.body_type = body_type
self.device = 'none'
cur_dir = os.path.dirname(__file__)
faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=np.int)
self.vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))
faces = np.loadtxt(join(cur_dir, 'assets', 'sphere_faces_{}.txt'.format(res)), dtype=np.int)
self.vertices = np.loadtxt(join(cur_dir, 'assets', 'sphere_vertices_{}.txt'.format(res)))
# compose faces
faces_all = []
for nj in range(self.nJoints):
@ -69,7 +69,7 @@ class SkelModel:
if not return_verts:
return keypoints3d
if keypoints3d.shape[-1] == 3: # add confidence
keypoints3d = np.hstack((keypoints3d, np.ones((keypoints3d.shape[0], 1))))
keypoints3d = np.dstack((keypoints3d, np.ones((keypoints3d.shape[0], keypoints3d.shape[1], 1))))
r = self.joint_radius
# joints
min_conf = 0.1