add check calib

shuaiqing 2023-03-24 21:27:39 +08:00
parent 982873a515
commit 368ab6092e


@@ -2,9 +2,11 @@
@ Date: 2021-03-27 19:13:50
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-10-11 16:47:10
@ FilePath: /EasyMocapPublic/apps/calibration/check_calib.py
'''
from easymocap.mytools.debug_utils import myerror, mywarn
from easymocap.mytools.file_utils import myarray2string
import cv2
import numpy as np
import os
@@ -28,16 +30,29 @@ LINES_SQUARE = np.array([
[3, 0]
])
def load_cube(grid_size=1, **kwargs):
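# axis-aligned cube with side length grid_size, anchored at the origin on the ground plane (z=0); the commented-out ranges below are presets for specific capture spaces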
min_x, min_y, min_z = (0, 0, 0.)
max_x, max_y, max_z = (grid_size, grid_size, grid_size)
# min_x, min_y, min_z = (-0.75, -0.9, 0.)
# max_x, max_y, max_z = (0.75, 0.7, 0.9)
# # floodlit basketball court:
# min_x, min_y, min_z = (-7.5, -2.89, 0.)
# max_x, max_y, max_z = (7.5, 11.11, 2.)
# # 4d association:
# min_x, min_y, min_z = (-1.6, -1.6, 0.)
# max_x, max_y, max_z = (1.5, 1.6, 2.4)
# min_x, min_y, min_z = (-2.45, -4., 0.)
# max_x, max_y, max_z = (1.65, 2.45, 2.6)
points3d = np.array([
[min_x, min_y, min_z],
[max_x, min_y, min_z],
[max_x, max_y, min_z],
[min_x, max_y, min_z],
[min_x, min_y, max_z],
[max_x, min_y, max_z],
[max_x, max_y, max_z],
[min_x, max_y, max_z],
])
lines = np.array([
[0, 1],
@@ -52,7 +67,7 @@ def load_cube():
[1, 5],
[2, 6],
[3, 7]
], dtype=np.int64)
points3d = np.hstack((points3d, np.ones((points3d.shape[0], 1))))
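# the appended column of ones makes the corners homogeneous (x, y, z, 1) and is read back as a per-point confidence in BaseCheck.check (conf3d = points3d[:, -1])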
return points3d, lines
@@ -73,36 +88,211 @@ def merge_points_lines(points3d, lines):
mapid[mapid==newi] = i
return newpoints, mapid[lines]
def load_grid(xrange=28, yrange=15, step=1, two=False, **kwargs):
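# tile the z=0 plane with step-sized squares; with two=True the grid also extends over negative x and y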
start = np.array([0., 0., 0.])
xdir = np.array([1., 0., 0.])
ydir = np.array([0., 1., 0.])
stepx = step
stepy = step
points3d, lines = [], []
if two:
start_x = -xrange
start_y = -yrange
else:
start_x = 0
start_y = 0
for i in range(start_x, xrange):
for j in range(start_y, yrange):
base = start + xdir*i*stepx + ydir*j*stepy
points3d.append(POINTS_SQUARE+base)
lines.append(LINES_SQUARE+4*((i-start_x)*(yrange-start_y)+(j-start_y)))
points3d = np.vstack(points3d)
lines = np.vstack(lines)
return merge_points_lines(points3d, lines)
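# load_human: collect one person's 2D keypoints (body25, plus hands when --hand is set) from every camera's annotation file for frame nf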
def load_human(path, pid, nf=0, camnames=[], annot='annots'):
points = []
nvs = []
annot_ = annot
for nv, sub in enumerate(camnames):
annotname = join(path, annot_, sub, '{:06d}.json'.format(nf))
if not os.path.exists(annotname):
print('[Warn] Not exist ', annotname)
continue
annots = read_json(annotname)
if isinstance(annots, dict):
annots = annots['annots']
annot = [d for d in annots if d['personID'] == pid]
if len(annot) == 0:
continue
pts = np.array(annot[0]['keypoints'])
if args.hand:
handl = np.array(annot[0]['handl2d'])
handr = np.array(annot[0]['handr2d'])
pts = np.vstack([pts, handl, handr])
points.append(pts)
nvs.append(nv)
points = np.stack(points)
results = np.zeros((len(camnames), *points.shape[1:]))
results[nvs] = points
from easymocap.dataset.config import CONFIG
lines = CONFIG['body25']['kintree']
return results, lines
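# BaseCheck: load the calibrated cameras from <out>/intri.yml and extri.yml, project a set of 3D points into every view, optionally compare them against 2D detections, and report the per-view reprojection error in pixels.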
class BaseCheck:
def __init__(self, path, out, mode='cube', ext='.jpg', sub=[]) -> None:
cameras = read_camera(join(out, 'intri.yml'), join(out, 'extri.yml'))
cameras.pop('basenames')
self.outdir = join(out, mode)
self.cameras = cameras
if len(sub) == 0:
self.camnames = sorted(list(cameras.keys()))
else:
self.camnames = sub
if args.prefix is not None:
for c in self.camnames:
self.cameras[c.replace(args.prefix, '')] = self.cameras.pop(c)
self.camnames = [c.replace(args.prefix, '') for c in self.camnames]
print('[check] cameras: ', self.camnames)
zaxis = np.array([0., 0., 1.]).reshape(3, 1)
for cam in self.camnames:
camera = cameras[cam]
center = -camera['R'].T @ camera['T']
# look-at: the world-space point one unit in front of the camera along its optical axis
lookat = camera['R'].T @ (zaxis - camera['T'])
print(' - {}: center = {}, look at = {}'.format(cam, np.round(center.T, 3), np.round(lookat.T, 3)))
self.path = path
self.kpts2d = None
self.ext = ext
self.errors = []
def check(self, points3d, lines, nf = 0, show=False, write=True):
if write:
os.makedirs(self.outdir, exist_ok=True)
conf3d = points3d[:, -1]
p3d = np.ascontiguousarray(points3d[:, :3])
errors = []
for nv, cam in enumerate(self.camnames):
camera = self.cameras[cam]
if show or write:
imgname = join(self.path, 'images', cam, '{:06d}{}'.format(nf, self.ext))
if not os.path.exists(imgname):
imgname = join(self.path, 'images', cam, '{:08d}{}'.format(nf, self.ext))
if not os.path.exists(imgname):
print('[WARN] Not exist', imgname)
continue
assert os.path.exists(imgname), imgname
img = cv2.imread(imgname)
img = Undistort.image(img, camera['K'], camera['dist'])
if False:
points2d_repro, xxx = cv2.projectPoints(p3d, cv2.Rodrigues(camera['R'])[0], camera['T'], camera['K'], camera['dist'])
kpts_repro = points2d_repro.squeeze()
else:
kpts_repro = projectN3(p3d, [camera['P']])[0]
if self.kpts2d is not None:
k2d = self.kpts2d[nv]
k2d = Undistort.points(k2d, camera['K'], camera['dist'])
valid = (conf3d > 0.)&(k2d[:, 2] > 0.)
# print(kpts_repro)
# import ipdb; ipdb.set_trace()
if k2d[:, 2].sum() > 0.:
diff = np.linalg.norm(k2d[:, :2] - kpts_repro[:, :2], axis=1) * valid
print('[Check] {}: {} points, {:3.2f} pixels max is {}, {:3.2f} pixels'.format(cam, valid.sum(), diff.sum()/valid.sum(), diff.argmax(), diff.max()))
diff = diff.sum()/valid.sum()
errors.append(diff)
self.errors.append((diff, nv, nf))
if show or write:
plot_points2d(img, k2d, lines, col=(0, 255, 0), lw=1, putText=False)
else:
k2d = np.zeros((10, 3))
if show or write:
if points3d.shape[-1] == 4:
conf = points3d[..., -1:] > 0.01
elif points3d.shape[-1] == 3:
conf = np.ones_like(points3d[..., -1:])
kpts_vis = np.hstack((kpts_repro[:, :2], conf))
# for i in range(kpts_vis.shape[0]):
# print('{}: {}, {}, {}'.format(i, *kpts_vis[i]))
plot_points2d(img, kpts_vis, lines, col=(0, 0, 255), lw=1, putText=args.text, style='+')
for i in range(kpts_vis.shape[0]):
if k2d[i][-1] < 0.1:continue
cv2.line(img, (int(kpts_vis[i][0]), int(kpts_vis[i][1])), (int(k2d[i][0]), int(k2d[i][1])), (0,0,0), thickness=2)
not_skip_unvis = True
if show and (k2d[:, 2].sum()>0 or not_skip_unvis):
vis = img
if vis.shape[0] > 1000:
vis = cv2.resize(vis, None, fx=1000/vis.shape[0], fy=1000/vis.shape[0])
cv2.imshow('vis', vis)
cv2.waitKey(0)
if write:
outname = join(self.outdir, '{}_{:06d}.jpg'.format(cam, nf))
cv2.imwrite(outname, img)
if len(errors) > 0:
print('[Check] Mean error: {:3.2f} pixels'.format(sum(errors)/len(errors)))
def summary(self):
errors = self.errors
if len(errors) > 0:
errors.sort(key=lambda x:-x[0])
print('[Check] Total {} frames Mean error: {:3.2f} pixels, max: {:3.2f} in cam "{}" frame {}'.format(len(errors), sum([e[0] for e in errors])/len(errors), errors[0][0], self.camnames[errors[0][1]], self.errors[0][2]))
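# QuanCheck: additionally triangulates multi-view 2D points (keeping them for the 2D comparison in check) and, when ground-truth 3D points are given, reports the 3D error in millimetres.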
class QuanCheck(BaseCheck):
def __init__(self, path, out, mode, ext, sub=[]) -> None:
super().__init__(path, out, mode, ext, sub)
def triangulate(self, k2ds, gt=None):
# k2ds: (nViews, nPoints, 3)
self.kpts2d = k2ds
k2dus = []
for nv in range(k2ds.shape[0]):
camera = self.cameras[self.camnames[nv]]
k2d = k2ds[nv].copy()
k2du = Undistort.points(k2d, camera['K'], camera['dist'])
k2dus.append(k2du)
Pall = np.stack([self.cameras[cam]['P'] for cam in self.camnames])
k2dus = np.stack(k2dus)
k3d = batch_triangulate(k2dus, Pall)
if gt is not None:
if gt.shape[0] < k3d.shape[0]: # gt has fewer points than the triangulated set
gt = np.vstack([gt, np.zeros((k3d.shape[0]-gt.shape[0], 3))])
valid = np.where(k3d[:, -1] > 0.)[0]
err3d = np.linalg.norm(k3d[valid, :3] - gt[valid], axis=1)
print('[Check3D] mean error: {:.2f}mm'.format(err3d.mean()*1000))
return k3d
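# load2d_ground: read per-camera 2D/3D point annotations for one frame, pad all views to a common point count, and fail when fewer than two views contain valid points.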
def load2d_ground(path, nf=0, camnames=[]):
k2ds = []
k3d = None
MAX_POINTS = 0
for cam in sorted(camnames):
annname = join(path, cam, '{:06d}.json'.format(nf))
if not os.path.exists(annname):
mywarn(annname + ' not exists')
data = read_json(annname)
k2d = np.array(data['keypoints2d'], dtype=np.float32)
k3d = np.array(data['keypoints3d'], dtype=np.float32)
if k2d.shape[0] > MAX_POINTS:
MAX_POINTS = k2d.shape[0]
k2ds.append(k2d)
for i, k2d in enumerate(k2ds):
if k2d.shape[0] < MAX_POINTS:
k2ds[i] = np.vstack([k2d, np.zeros((MAX_POINTS-k2d.shape[0], 3))])
k2ds = np.stack(k2ds)
conf = k2ds[:, :, 2].sum(axis=1)
if (conf>0).sum() < 2:
return False, None, None
return True, k2ds, k3d
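# read_match2d_file: read manually matched 2D points ('points_global') and arrange them as an (nViews, nPoints, 3) array with confidence 1 for the views that annotate each point.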
def read_match2d_file(file, camnames):
points = read_json(file)['points_global']
match2d = np.zeros((len(camnames), len(points), 3))
for npo in range(match2d.shape[1]):
for key, (x, y) in points[npo].items():
if key not in camnames:
continue
match2d[camnames.index(key), npo] = [x, y, 1.]
return True, match2d, np.zeros((match2d.shape[1], 3))
def check_calib(path, out, vis=False, show=False, debug=False):
if vis:
@@ -116,7 +306,10 @@ def check_calib(path, out, vis=False, show=False, debug=False):
k2ds = []
for cam, camera in cameras.items():
if vis:
for ext in ['jpg', 'png']:
imgname = join(path, 'images', cam, '{:06d}.{}'.format(nf, ext))
if not os.path.exists(imgname):
continue
assert os.path.exists(imgname), imgname
img = cv2.imread(imgname)
img = Undistort.image(img, camera['K'], camera['dist'])
@@ -142,8 +335,11 @@ def check_calib(path, out, vis=False, show=False, debug=False):
if debug:
print('{:2d}-{:2d}: {:6.2f}/{:2d}'.format(nf, nv, dist.sum(), int(conf.sum())))
if vis:
kpts_repro_vis = np.hstack((kpts_repro[nv][:, :2], conf[:, None]))
plot_points2d(imgs[nv], kpts_repro_vis, [], col=(0, 0, 255), lw=1, putText=False)
plot_points2d(imgs[nv], k2ds[nv], [], lw=1, putText=False)
for i in range(kpts_repro_vis.shape[0]):
cv2.line(imgs[nv], (int(kpts_repro_vis[i][0]), int(kpts_repro_vis[i][1])), (int(k2ds[nv][i][0]), int(k2ds[nv][i][1])), (0,0,0), thickness=1)
if show:
cv2.imshow('vis', imgs[nv])
cv2.waitKey(0)
@@ -153,20 +349,6 @@ def check_calib(path, out, vis=False, show=False, debug=False):
cv2.imwrite(outname, imgout)
print('{:.2f}/{} = {:.2f} pixel'.format(total_sum, int(cnt), total_sum/cnt))
def check_scene(path, out, points3d, lines):
cameras = read_camera(join(out, 'intri.yml'), join(out, 'extri.yml'))
cameras.pop('basenames')
nf = 0
for cam, camera in cameras.items():
imgname = join(path, 'images', cam, '{:06d}.jpg'.format(nf))
assert os.path.exists(imgname), imgname
img = cv2.imread(imgname)
img = Undistort.image(img, camera['K'], camera['dist'])
kpts_repro = projectN3(points3d, camera['P'][None, :, :])[0]
plot_points2d(img, kpts_repro, lines, col=(0, 0, 255), lw=1, putText=True)
cv2.imshow('vis', img)
cv2.waitKey(0)
def check_match(path, out):
os.makedirs(out, exist_ok=True)
cameras = read_camera(join(path, 'intri.yml'), join(path, 'extri.yml'))
@@ -195,6 +377,8 @@ def check_match(path, out):
kpts_repro = projectN3(points3d, camera['P'][None, :, :])[0]
plot_points2d(img, kpts_repro, lines, col=(0, 0, 255), lw=1, putText=True)
plot_points2d(img, points2d[cams.index(cam)], lines, col=(0, 255, 0), lw=1, putText=True)
for i in range(kpts_repro.shape[0]):
cv2.line(img, (int(kpts_repro[i][0]), int(kpts_repro[i][1])), (int(points2d[cams.index(cam)][i][0]), int(points2d[cams.index(cam)][i][1])), (0,0,0), thickness=1)
outname = join(out, cam+'.jpg')
cv2.imwrite(outname, img)
@@ -203,26 +387,79 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('path', type=str,
help='the directory contains the extrinsic images')
parser.add_argument('--sub', type=str,
default=[], nargs='+')
parser.add_argument('--out', type=str,
help='with camera parameters')
parser.add_argument('--mode', type=str, default='cube',
help='check mode: cube, grid, gcp, match or human')
parser.add_argument('--ext', type=str, default='.jpg', choices=['.jpg', '.png'])
parser.add_argument('--prefix', type=str, default=None)
parser.add_argument('--grid_x', type=int, default=3)
parser.add_argument('--grid_y', type=int, default=3)
parser.add_argument('--grid_step', type=float, default=1.)
parser.add_argument('--grid_two', action='store_true')
parser.add_argument('--step', type=int, default=5)
parser.add_argument('--show', action='store_true') parser.add_argument('--show', action='store_true')
parser.add_argument('--write', action='store_true')
parser.add_argument('--debug', action='store_true') parser.add_argument('--debug', action='store_true')
parser.add_argument('--cube', action='store_true') parser.add_argument('--human', action='store_true')
parser.add_argument('--grid', action='store_true') parser.add_argument('--hand', action='store_true')
parser.add_argument('--axes', action='store_true') parser.add_argument('--pid', type=int, default=0)
parser.add_argument('--frame', type=int, default=0)
parser.add_argument('--annot', type=str, default='annots')
parser.add_argument('--calib', action='store_true') parser.add_argument('--calib', action='store_true')
parser.add_argument('--text', action='store_true')
parser.add_argument('--print3d', action='store_true')
parser.add_argument('--gt', action='store_true')
args = parser.parse_args()
if args.mode in ['cube', 'grid']:
points, lines = {'cube': load_cube, 'grid': load_grid}[args.mode](
xrange=args.grid_x, yrange=args.grid_y, step=args.grid_step, two=args.grid_two,
grid_size=args.grid_step
)
print('Check {} points'.format(points.shape))
checker = BaseCheck(args.path, args.out, args.mode, args.ext)
checker.check(points, lines, args.frame, show=args.show, write=args.write)
elif args.mode in ['gcp', 'match']:
checker = QuanCheck(args.path, args.out, args.mode, args.ext)
lines = []
if args.mode == 'match':
for nf in range(0, 10000, args.step):
# try:
flag, k2ds, gt3d = load2d_ground(join(args.path, args.annot), nf=nf, camnames=checker.camnames)
# except:
# myerror('{} not exist'.format(join(args.path, args.annot, '{:06d}.json'.format(nf))))
# break
if not flag:continue
points = checker.triangulate(k2ds, gt=gt3d)
if args.print3d:
valid = points[:, -1] > 0.01
points_ = points[valid]
np.savetxt(join(args.out, 'points3d.txt'), points_, fmt='%10.5f')
print(myarray2string(points_, indent=0))
norm = np.linalg.norm(points_, axis=1)
print('[calib] max norm={}, min norm={}'.format(norm.max(), norm.min()))
checker.check(gt3d if args.gt else points, lines, nf, show=args.show, write=args.write)
checker.summary()
elif args.mode == 'gcp':
flag, k2ds, gt3d = read_match2d_file(join(args.path, 'calib.json'), camnames=checker.camnames)
points = checker.triangulate(k2ds, gt=gt3d)
print(myarray2string(points, indent=4))
checker.check(gt3d if args.gt else points, lines, 0, show=args.show, write=args.write)
else:
flag, k2ds, gt3d = load2d_ground(join(args.path, 'chessboard'), camnames=checker.camnames)
points = checker.triangulate(k2ds, gt=gt3d)
checker.check(gt3d if args.gt else points, lines, 0, show=args.show, write=args.write)
elif args.mode == 'human':
checker = QuanCheck(args.path, args.out, args.mode, args.ext, sub=args.sub)
points, lines = load_human(args.path, pid=args.pid, nf=args.frame, camnames=checker.camnames, annot=args.annot)
points = checker.triangulate(points, gt=None)
print('[calib] check human')
print(myarray2string(points, indent=0))
print('[calib]limblength: {}'.format(np.linalg.norm(points[1, :3] - points[8, :3])))
checker.check(points, lines, args.frame, show=args.show, write=args.write)
elif args.calib:
check_match(args.path, args.out)
else:
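# Usage sketch (assumed invocations pieced together from the arguments above; <data> is a placeholder for the dataset root that also holds intri.yml/extri.yml):
#   python3 apps/calibration/check_calib.py <data> --out <data> --mode cube --write
#   python3 apps/calibration/check_calib.py <data> --out <data> --mode grid --grid_x 3 --grid_y 3 --write
#   python3 apps/calibration/check_calib.py <data> --out <data> --mode human --pid 0 --frame 0 --write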