🚧 update calibrate and annotate

parent ffc135cedc
commit ffd518e6ca

apps/annotation/annot_clip.py (new file, 217 lines)
@@ -0,0 +1,217 @@
'''
@ Date: 2021-06-09 09:57:23
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-07-14 21:37:34
@ FilePath: /EasyMocapPublic/apps/annotation/annot_clip.py
'''
# Features:
# 1. quickly preview the images
# 2. set the start frame of a clip
# 3. set the end frame of a clip
# Incompatible interface: no annotation files are required
from easymocap.mytools.debug_utils import myerror, mywarn, run_cmd
from easymocap.mytools.vis_base import plot_line
from easymocap.annotator.basic_annotator import AnnotBase, parse_parser
from easymocap.annotator import ImageFolder
from easymocap.annotator import plot_text
from easymocap.annotator.basic_visualize import capture_screen, resize_to_screen
from easymocap.mytools import read_json, save_json
from easymocap.annotator.basic_keyboard import get_any_move
from os.path import join
import os
import numpy as np
import cv2

class Clips:
    def __init__(self, path) -> None:
        self.temp = join(path, 'clips.json')
        if os.path.exists(self.temp):
            self.annots = read_json(self.temp)
        else:
            self.annots = {}
        self.start_ = None
        self.end_ = None
        self.clips = []
        self.sub_ = None

    @property
    def sub(self):
        return self.sub_

    @sub.setter
    def sub(self, value):
        self.sub_ = value
        if value in self.annots.keys():
            self.clips = self.annots[value]
        else:
            self.annots[value] = []
            self.clips = self.annots[value]
        self.print(0)

    def start(self, annotator, **kwargs):
        self.start_ = annotator.frame
        print('>>> Start clip from frame {:6d}'.format(annotator.frame))

    def end(self, annotator, **kwargs):
        self.end_ = annotator.frame
        print('>>> End clip at frame {:6d}'.format(annotator.frame))

    def add(self, annotator, **kwargs):
        if self.start_ is None:
            print('[clip] Please check the start!')
            return 0
        if self.end_ is None:
            print('[clip] Please check the end!')
            return 0
        print('[{}, {})'.format(self.start_, self.end_))
        self.clips.append([self.start_, self.end_])
        self.start_ = None
        self.end_ = None

    def delete(self, annotator, **kwargs):
        frame = annotator.frame
        ind = -1
        for i, (start, end) in enumerate(self.clips):
            if frame > start and frame < end:
                ind = i
                break
        else:
            print('[clip] current not in any clip')
            return 0
        self.clips.pop(ind)

    def print(self, annotator, **kwargs):
        print('{}: '.format(self.sub))
        for (start, end) in self.clips:
            print(' - [{}, {})'.format(start, end))

    def save(self):
        save_json(self.temp, self.annots)

    def vis_clips(self, img, frame, nFrames, **kwargs):
        COL_CLIP = (0, 0, 255)
        COL_NEW = (0, 0, 255)
        width = img.shape[1]
        pos = lambda x: int(width*(x+1)/nFrames)
        lw = 12
        # visualize the annotated clips
        for (start, end) in self.clips:
            plot_line(img, (pos(start), lw/2), (pos(end), lw/2), lw, COL_CLIP)
        # visualize the clip currently being annotated
        if self.start_ is not None:
            top = pos(self.start_)
            pts = np.array([[top, lw], [top-lw, lw*4], [top, lw*4]])
            cv2.fillPoly(img, [pts], COL_NEW)
        if self.end_ is not None:
            top = pos(self.end_)
            pts = np.array([[top, lw], [top, lw*4], [top+lw, lw*4]])
            cv2.fillPoly(img, [pts], COL_NEW)
        return img

def annot_example(path, sub, skip=False):
    # define datasets
    # define visualize
    if not os.path.exists(join(path, 'images', sub)):
        mywarn('[annot] No such sub: {}'.format(sub))
        return 0
    clip = Clips(path)
    vis_funcs = [resize_to_screen, plot_text, clip.vis_clips, capture_screen]
    clip.sub = sub
    if skip and len(clip.clips) > 0:
        return 0
    key_funcs = {
        'j': clip.start,
        'k': clip.end,
        'l': clip.add,
        'x': clip.delete,
        'v': clip.print,
        'w': get_any_move(-10),
        's': get_any_move(10),
        'f': get_any_move(100),
        'g': get_any_move(-100)
    }

    dataset = ImageFolder(path, sub=sub, no_annot=True)
    print('[Info] Totally {} frames'.format(len(dataset)))
    # construct annotations
    annotator = AnnotBase(
        dataset=dataset,
        key_funcs=key_funcs,
        vis_funcs=vis_funcs)
    while annotator.isOpen:
        annotator.run()
    clip.save()

def copy_clips(path, out):
    from tqdm import tqdm
    import shutil
    from easymocap.mytools.debug_utils import log, mywarn, mkdir
    temp = join(path, 'clips.json')
    assert os.path.exists(temp), temp
    annots = read_json(temp)
    for key, clips in tqdm(annots.items()):
        for start, end in clips:
            outname = '{}+{:06d}+{:06d}'.format(key, start, end)
            outdir = join(out, 'images', outname)
            if os.path.exists(outdir) and len(os.listdir(outdir)) == end - start:
                mywarn('[copy] Skip {}'.format(outname))
                continue
            # check the input images
            srcname0 = join(path, 'images', key, '{:06d}.jpg'.format(start))
            srcname1 = join(path, 'images', key, '{:06d}.jpg'.format(end))
            if not os.path.exists(srcname0) or not os.path.exists(srcname1):
                myerror('[copy] No such file: {}, {}'.format(srcname0, srcname1))
            log('[copy] {}'.format(outname))
            mkdir(outdir)
            # copy the images
            for nnf, nf in enumerate(tqdm(range(start, end), desc='copy {}'.format(outname))):
                srcname = join(path, 'images', key, '{:06d}.jpg'.format(nf))
                dstname = join(outdir, '{:06d}.jpg'.format(nnf))
                shutil.copyfile(srcname, dstname)

def copy_mv_clips(path, out):
    temp = join(path, 'clips.json')
    assert os.path.exists(temp), temp
    annots = read_json(temp)
    clips = list(annots.values())[0]
    for start, end in clips:
        if out is None:
            outdir = path + '+{:06d}+{:06d}'.format(start, end)
        else:
            outdir = out + '+{:06d}+{:06d}'.format(start, end)
        print(outdir)
        cmd = f'python3 scripts/preprocess/copy_dataset.py {path} {outdir} --start {start} --end {end}'
        if len(args.sub) > 0:
            cmd += ' --subs {}'.format(' '.join(args.sub))
        if args.strip is not None:
            cmd += ' --strip {}'.format(args.strip)
        run_cmd(cmd)

if __name__ == "__main__":
    from easymocap.annotator import load_parser, parse_parser
    parser = load_parser()
    parser.add_argument('--strip', type=str, default=None)
    parser.add_argument('--copy', action='store_true')
    parser.add_argument('--skip', action='store_true')
    parser.add_argument('--mv', action='store_true')
    parser.add_argument('--sub_ignore', type=str, nargs='+', default=[])
    args = parse_parser(parser)

    args.sub = [i for i in args.sub if i not in args.sub_ignore]

    if args.copy:
        print(args.path, args.out)
        if args.mv:
            copy_mv_clips(args.path, args.out)
        else:
            if args.out is None:
                myerror('[copy] No output path')
                exit(0)
            copy_clips(args.path, args.out)
    else:
        if args.mv:
            annot_example(args.path, sub=args.sub[0], skip=args.skip)
        else:
            for sub in args.sub:
                annot_example(args.path, sub=sub, skip=args.skip)
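For reference, a minimal sketch of the clips.json layout implied by Clips.__init__/Clips.save and copy_clips above; the per-clip folder naming is read off the outname format string, and the sub names ('01', '02') are just placeholders.

import json

# clips.json maps each sub folder (video) name to a list of [start, end) frame ranges
example = {
    '01': [[0, 120], [300, 450]],   # two clips annotated for video '01'
    '02': [[50, 200]],
}
with open('clips.json', 'w') as f:
    json.dump(example, f, indent=4)

# copy_clips() then writes folders such as images/01+000000+000120
for key, clips in example.items():
    for start, end in clips:
        print('{}+{:06d}+{:06d}'.format(key, start, end))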
@ -2,8 +2,8 @@
|
||||
@ Date: 2021-03-28 21:23:34
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-07-21 20:46:27
|
||||
@ FilePath: /EasyMocap/apps/annotation/annot_keypoints.py
|
||||
@ LastEditTime: 2022-05-24 14:27:46
|
||||
@ FilePath: /EasyMocapPublic/apps/annotation/annot_keypoints.py
|
||||
'''
|
||||
from easymocap.annotator.basic_visualize import capture_screen, plot_skeleton_factory, resize_to_screen
|
||||
import os
|
||||
@ -92,7 +92,7 @@ def annot_example(path, sub, image, annot, step, args):
|
||||
key_funcs = {
|
||||
'v': set_unvisible,
|
||||
'V': set_unvisible_according_previous,
|
||||
'f': set_face_unvisible,
|
||||
# 'f': set_face_unvisible,
|
||||
'c': check_track,
|
||||
'm': mirror_keypoints2d,
|
||||
'M': mirror_keypoints2d_leg,
|
||||
@ -103,7 +103,7 @@ def annot_example(path, sub, image, annot, step, args):
|
||||
key_funcs['r'] = estimator.detect_with_bbox90
|
||||
key_funcs['t'] = estimator.detect_with_bbox180
|
||||
key_funcs['y'] = estimator.detect_with_bbox270
|
||||
key_funcs['g'] = estimator.detect_with_previous_slow
|
||||
# key_funcs['g'] = estimator.detect_with_previous_slow
|
||||
key_funcs['j'] = estimator.detect_with_previous_mid
|
||||
# callback of bounding box
|
||||
callbacks = [callback_select_bbox_corner, callback_select_bbox_center, callback_select_joints]
|
||||
|
@ -2,45 +2,169 @@
|
||||
@ Date: 2021-03-02 16:13:03
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-03-27 22:08:18
|
||||
@ FilePath: /EasyMocap/scripts/calibration/calib_extri.py
|
||||
@ LastEditTime: 2022-08-03 17:35:16
|
||||
@ FilePath: /EasyMocapPublic/apps/calibration/calib_extri.py
|
||||
'''
|
||||
from easymocap.mytools.camera_utils import write_intri
|
||||
import os
|
||||
from glob import glob
|
||||
from os.path import join
|
||||
import numpy as np
|
||||
import cv2
|
||||
from easymocap.mytools import read_intri, write_extri, read_json
|
||||
from easymocap.mytools.debug_utils import mywarn
|
||||
|
||||
def calib_extri(path, intriname):
|
||||
assert os.path.exists(intriname), intriname
|
||||
intri = read_intri(intriname)
|
||||
camnames = list(intri.keys())
|
||||
extri = {}
|
||||
def init_intri(path, image):
|
||||
camnames = sorted(os.listdir(join(path, image)))
|
||||
cameras = {}
|
||||
for ic, cam in enumerate(camnames):
|
||||
imagenames = sorted(glob(join(path, 'images', cam, '*.jpg')))
|
||||
imagenames = sorted(glob(join(path, image, cam, '*.jpg')))
|
||||
assert len(imagenames) > 0
|
||||
imgname = imagenames[0]
|
||||
img = cv2.imread(imgname)
|
||||
height, width = img.shape[0], img.shape[1]
|
||||
focal = 1.2*max(height, width) # as colmap
|
||||
K = np.array([focal, 0., width/2, 0., focal, height/2, 0. ,0., 1.]).reshape(3, 3)
|
||||
dist = np.zeros((1, 5))
|
||||
cameras[cam] = {
|
||||
'K': K,
|
||||
'dist': dist
|
||||
}
|
||||
return cameras
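As a quick sanity check, this is what the default intrinsic guess in init_intri above evaluates to for a hypothetical 1920x1080 image (the resolution here is only an example):

import numpy as np

width, height = 1920, 1080
focal = 1.2 * max(height, width)            # = 2304.0, the COLMAP-style prior
K = np.array([focal, 0., width/2, 0., focal, height/2, 0., 0., 1.]).reshape(3, 3)
dist = np.zeros((1, 5))                     # start from zero distortion
print(K)
# [[2304.    0.  960.]
#  [   0. 2304.  540.]
#  [   0.    0.    1.]]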
|
||||
|
||||
def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
|
||||
k2d = np.ascontiguousarray(k2d[:, :2])
|
||||
# try different initial values:
|
||||
if tryextri:
|
||||
def closure(rvec, tvec):
|
||||
ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, rvec, tvec, True, flags=flag)
|
||||
points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
|
||||
kpts_repro = points2d_repro.squeeze()
|
||||
err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
|
||||
return err, rvec, tvec, kpts_repro
|
||||
# create a series of extrinsic parameters looking at the origin
|
||||
height_guess = 2.1
|
||||
radius_guess = 7.
|
||||
infos = []
|
||||
for theta in np.linspace(0, 2*np.pi, 180):
|
||||
st = np.sin(theta)
|
||||
ct = np.cos(theta)
|
||||
center = np.array([radius_guess*ct, radius_guess*st, height_guess]).reshape(3, 1)
|
||||
R = np.array([
|
||||
[-st, ct, 0],
|
||||
[0, 0, -1],
|
||||
[-ct, -st, 0]
|
||||
])
|
||||
tvec = - R @ center
|
||||
rvec = cv2.Rodrigues(R)[0]
|
||||
err, rvec, tvec, kpts_repro = closure(rvec, tvec)
|
||||
infos.append({
|
||||
'err': err,
|
||||
'repro': kpts_repro,
|
||||
'rvec': rvec,
|
||||
'tvec': tvec
|
||||
})
|
||||
infos.sort(key=lambda x:x['err'])
|
||||
err, rvec, tvec, kpts_repro = infos[0]['err'], infos[0]['rvec'], infos[0]['tvec'], infos[0]['repro']
|
||||
else:
|
||||
ret, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, flags=flag)
|
||||
points2d_repro, xxx = cv2.projectPoints(k3d, rvec, tvec, K, dist)
|
||||
kpts_repro = points2d_repro.squeeze()
|
||||
err = np.linalg.norm(points2d_repro.squeeze() - k2d, axis=1).mean()
|
||||
# print(err)
|
||||
return err, rvec, tvec, kpts_repro
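A small numeric sketch of the look-at construction used in the tryextri branch above; it only restates the rotation built there and checks that the world origin ends up directly in front of the camera (x = 0, positive depth).

import numpy as np

height_guess, radius_guess = 2.1, 7.
theta = np.pi / 3                            # any position on the circle
st, ct = np.sin(theta), np.cos(theta)
center = np.array([radius_guess*ct, radius_guess*st, height_guess]).reshape(3, 1)
R = np.array([
    [-st, ct, 0],
    [0, 0, -1],
    [-ct, -st, 0]
])
tvec = - R @ center
assert np.allclose(R @ R.T, np.eye(3))       # a proper rotation
origin_cam = R @ np.zeros((3, 1)) + tvec     # the world origin in camera coordinates
print(origin_cam.ravel())                    # -> [0.  2.1 7. ], centered and in front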
|
||||
|
||||
def calib_extri(path, image, intriname, image_id):
|
||||
camnames = sorted(os.listdir(join(path, image)))
|
||||
camnames = [c for c in camnames if os.path.isdir(join(path, image, c))]
|
||||
if intriname is None:
|
||||
# initialize intrinsic parameters
|
||||
intri = init_intri(path, image)
|
||||
else:
|
||||
assert os.path.exists(intriname), intriname
|
||||
intri = read_intri(intriname)
|
||||
if len(intri.keys()) == 1:
|
||||
key0 = list(intri.keys())[0]
|
||||
for cam in camnames:
|
||||
intri[cam] = intri[key0].copy()
|
||||
extri = {}
|
||||
# methods = [cv2.SOLVEPNP_ITERATIVE, cv2.SOLVEPNP_P3P, cv2.SOLVEPNP_AP3P, cv2.SOLVEPNP_EPNP, cv2.SOLVEPNP_DLS, cv2.SOLVEPNP_IPPE, cv2.SOLVEPNP_SQPNP]
|
||||
methods = [cv2.SOLVEPNP_ITERATIVE]
|
||||
for ic, cam in enumerate(camnames):
|
||||
imagenames = sorted(glob(join(path, image, cam, '*{}'.format(args.ext))))
|
||||
chessnames = sorted(glob(join(path, 'chessboard', cam, '*.json')))
|
||||
chessname = chessnames[0]
|
||||
# chessname = chessnames[0]
|
||||
assert len(chessnames) > 0, cam
|
||||
chessname = chessnames[image_id]
|
||||
|
||||
data = read_json(chessname)
|
||||
k3d = np.array(data['keypoints3d'], dtype=np.float32)
|
||||
k3d[:, 0] *= -1
|
||||
k2d = np.array(data['keypoints2d'], dtype=np.float32)
|
||||
k2d = np.ascontiguousarray(k2d[:, :-1])
|
||||
ret, rvec, tvec = cv2.solvePnP(k3d, k2d, intri[cam]['K'], intri[cam]['dist'])
|
||||
if k3d.shape[0] != k2d.shape[0]:
|
||||
mywarn('k3d {} does not match k2d {}'.format(k3d.shape, k2d.shape))
|
||||
length = min(k3d.shape[0], k2d.shape[0])
|
||||
k3d = k3d[:length]
|
||||
k2d = k2d[:length]
|
||||
#k3d[:, 0] *= -1
|
||||
valididx = k2d[:, 2] > 0
|
||||
if valididx.sum() < 4:
|
||||
extri[cam] = {}
|
||||
rvec = np.zeros((1, 3))
|
||||
tvec = np.zeros((3, 1))
|
||||
extri[cam]['Rvec'] = rvec
|
||||
extri[cam]['R'] = cv2.Rodrigues(rvec)[0]
|
||||
extri[cam]['T'] = tvec
|
||||
print('[ERROR] Failed to initialize the extrinsic parameters')
|
||||
extri.pop(cam)
|
||||
continue
|
||||
k3d = k3d[valididx]
|
||||
k2d = k2d[valididx]
|
||||
if args.tryfocal:
|
||||
infos = []
|
||||
for focal in range(500, 5000, 10):
|
||||
dist = intri[cam]['dist']
|
||||
K = intri[cam]['K']
|
||||
K[0, 0] = focal
|
||||
K[1, 1] = focal
|
||||
for flag in methods:
|
||||
err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag)
|
||||
infos.append({
|
||||
'focal': focal,
|
||||
'err': err,
|
||||
'repro': kpts_repro,
|
||||
'rvec': rvec,
|
||||
'tvec': tvec
|
||||
})
|
||||
infos.sort(key=lambda x:x['err'])
|
||||
err, rvec, tvec = infos[0]['err'], infos[0]['rvec'], infos[0]['tvec']
|
||||
kpts_repro = infos[0]['repro']
|
||||
focal = infos[0]['focal']
|
||||
intri[cam]['K'][0, 0] = focal
|
||||
intri[cam]['K'][1, 1] = focal
|
||||
else:
|
||||
K, dist = intri[cam]['K'], intri[cam]['dist']
|
||||
err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag=cv2.SOLVEPNP_ITERATIVE)
|
||||
extri[cam] = {}
|
||||
extri[cam]['Rvec'] = rvec
|
||||
extri[cam]['R'] = cv2.Rodrigues(rvec)[0]
|
||||
extri[cam]['T'] = tvec
|
||||
center = - extri[cam]['R'].T @ tvec
|
||||
print('{} center => {}'.format(cam, center.squeeze()))
|
||||
write_extri(join(os.path.dirname(intriname), 'extri.yml'), extri)
|
||||
print('{} center => {}, err = {:.3f}'.format(cam, center.squeeze(), err))
|
||||
write_intri(join(path, 'intri.yml'), intri)
|
||||
write_extri(join(path, 'extri.yml'), extri)
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('path', type=str)
|
||||
parser.add_argument('--intri', type=str)
|
||||
parser.add_argument('--image', type=str, default='images')
|
||||
parser.add_argument('--intri', type=str, default=None)
|
||||
parser.add_argument('--ext', type=str, default='.jpg')
|
||||
parser.add_argument('--step', type=int, default=1)
|
||||
parser.add_argument('--debug', action='store_true')
|
||||
parser.add_argument('--tryfocal', action='store_true')
|
||||
parser.add_argument('--tryextri', action='store_true')
|
||||
parser.add_argument('--image_id', type=int, default=0, help='Image id used for extrinsic calibration')
|
||||
|
||||
args = parser.parse_args()
|
||||
calib_extri(args.path, intriname=args.intri)
|
||||
calib_extri(args.path, args.image, intriname=args.intri, image_id=args.image_id)
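The --tryfocal branch above grid-searches the focal length and keeps the candidate with the lowest mean reprojection error. A standalone sketch of the same idea, with assumed array shapes (k3d: (N, 3) float32 board points, k2d: (N, 2) float32 pixels, N >= 6):

import numpy as np
import cv2

def search_focal(k3d, k2d, width, height, dist=np.zeros((1, 5))):
    best = None
    for focal in range(500, 5000, 10):
        K = np.array([[focal, 0., width/2],
                      [0., focal, height/2],
                      [0., 0., 1.]])
        ok, rvec, tvec = cv2.solvePnP(k3d, k2d, K, dist, flags=cv2.SOLVEPNP_ITERATIVE)
        if not ok:
            continue
        repro, _ = cv2.projectPoints(k3d, rvec, tvec, K, dist)
        err = np.linalg.norm(repro.squeeze() - k2d, axis=1).mean()
        if best is None or err < best[0]:
            best = (err, focal, rvec, tvec)
    return best   # (err, focal, rvec, tvec) with the smallest reprojection error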
|
@ -2,10 +2,13 @@
|
||||
@ Date: 2021-03-02 16:12:59
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-05-26 23:22:26
|
||||
@ FilePath: /EasyMocap/apps/calibration/calib_intri.py
|
||||
@ LastEditTime: 2022-08-10 16:27:17
|
||||
@ FilePath: /EasyMocapPublic/apps/calibration/calib_intri.py
|
||||
'''
|
||||
# This script calibrate each intrinsic parameters
|
||||
import shutil
|
||||
import random
|
||||
from easymocap.mytools.debug_utils import mywarn
|
||||
from easymocap.mytools.vis_base import plot_points2d
|
||||
from easymocap.mytools import write_intri, read_json, Timer
|
||||
import numpy as np
|
||||
@ -14,91 +17,113 @@ import os
|
||||
from os.path import join
|
||||
from glob import glob
|
||||
from easymocap.annotator.chessboard import get_lines_chessboard
|
||||
from tqdm import tqdm
|
||||
|
||||
def read_chess(chessname):
|
||||
data = read_json(chessname)
|
||||
k3d = np.array(data['keypoints3d'], dtype=np.float32)
|
||||
k2d = np.array(data['keypoints2d'], dtype=np.float32)
|
||||
if k2d[:, -1].sum() < 0.01:
|
||||
if (k2d[:, -1] > 0.).sum() < k2d.shape[0]//2:
|
||||
return False, k2d, k3d
|
||||
if k2d[:, -1].sum() < k2d.shape[0]:
|
||||
valid = k2d[:, -1] > 0.1
|
||||
k2d = k2d[valid]
|
||||
k3d = k3d[valid]
|
||||
# TODO: remove boards that face the camera head-on
|
||||
# TODO: remove noisy detections whose board lines are not parallel
|
||||
return True, k2d, k3d
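For context, an assumed layout of a chessboard/<cam>/<frame>.json file as read by read_chess above (and written by create_chessboard later in this commit); each keypoints2d row carries a detection confidence in its last column.

example = {
    "keypoints3d": [[0.0, 0.0, 0.0], [0.0, 0.1, 0.0]],          # board corners in the world frame
    "keypoints2d": [[512.3, 300.1, 1.0], [540.8, 301.0, 1.0]],  # (x, y, conf) in pixels
    "visited": True,
}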
|
||||
|
||||
def calib_intri_share(path, step):
|
||||
camnames = sorted(os.listdir(join(path, 'images')))
|
||||
imagenames = sorted(glob(join(path, 'images', '*', '*.jpg')))
|
||||
chessnames = sorted(glob(join(path, 'chessboard', '*', '*.json')))
|
||||
def pop(k2ds_, k3ds_, valid_idx, imgnames, max_num):
|
||||
k2ds = np.stack(k2ds_)
|
||||
dist = np.linalg.norm(k2ds[:, None] - k2ds[None, :], axis=-1).mean(axis=-1)
|
||||
size = np.linalg.norm(k2ds[:, -1] - k2ds[:, 0], axis=-1)
|
||||
dist = dist / size[:, None]
|
||||
row = np.arange(dist.shape[0])
|
||||
dist[row, row] = 9999.
|
||||
col = dist.argmin(axis=0)
|
||||
dist_min = dist[row, col]
|
||||
indices = dist_min.argsort()[:dist_min.shape[0] - max_num]
|
||||
if False:
|
||||
img0 = cv2.imread(imgnames[valid_idx[idx]])
|
||||
img1 = cv2.imread(imgnames[valid_idx[remove_id]])
|
||||
cv2.imshow('01', np.hstack([img0, img1]))
|
||||
cv2.waitKey(10)
|
||||
print('remove: ', imgnames[valid_idx[remove_id]], imgnames[valid_idx[idx]])
|
||||
indices = indices.tolist()
|
||||
indices.sort(reverse=True, key=lambda x:col[x])
|
||||
removed = set()
|
||||
for idx in indices:
|
||||
remove_id = col[idx]
|
||||
if remove_id in removed:
|
||||
continue
|
||||
removed.add(remove_id)
|
||||
valid_idx.pop(remove_id)
|
||||
k2ds_.pop(remove_id)
|
||||
k3ds_.pop(remove_id)
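The pop() helper above prunes near-duplicate chessboard detections: the pairwise corner distance, normalized by the board size in pixels, measures how redundant two detections are, and the closest pairs are dropped until roughly max_num remain. A simplified sketch of that scoring only (not the exact removal bookkeeping used above):

import numpy as np

def redundancy_order(k2ds):
    # k2ds: list of (nCorners, 2) arrays of detected corners
    k2ds = np.stack(k2ds)
    dist = np.linalg.norm(k2ds[:, None] - k2ds[None, :], axis=-1).mean(axis=-1)
    size = np.linalg.norm(k2ds[:, -1] - k2ds[:, 0], axis=-1)   # rough board extent
    dist = dist / size[:, None]                                # scale-normalized
    np.fill_diagonal(dist, np.inf)
    return dist.min(axis=0).argsort()   # most redundant detections first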
|
||||
|
||||
def load_chessboards(chessnames, imagenames, max_image, sample_image=-1, out='debug-calib'):
|
||||
os.makedirs(out, exist_ok=True)
|
||||
k3ds_, k2ds_, imgs = [], [], []
|
||||
valid_idx = []
|
||||
for i, chessname in enumerate(chessnames):
|
||||
for i, chessname in enumerate(tqdm(chessnames, desc='read')):
|
||||
flag, k2d, k3d = read_chess(chessname)
|
||||
k3ds_.append(k3d)
|
||||
k2ds_.append(k2d)
|
||||
if not flag:
|
||||
continue
|
||||
k3ds_.append(k3d)
|
||||
k2ds_.append(k2d)
|
||||
valid_idx.append(i)
|
||||
MAX_ERROR_PIXEL = 1.
|
||||
lines, line_cols = get_lines_chessboard()
|
||||
valid_idx = valid_idx[::step]
|
||||
len_valid = len(valid_idx)
|
||||
cameras = {}
|
||||
while True:
|
||||
# sample
|
||||
imgs = [imagenames[i] for i in valid_idx]
|
||||
k3ds = [k3ds_[i] for i in valid_idx]
|
||||
k2ds = [np.ascontiguousarray(k2ds_[i][:, :-1]) for i in valid_idx]
|
||||
gray = cv2.imread(imgs[0], 0)
|
||||
print('>> Detect {:3d} frames'.format(len(valid_idx)))
|
||||
with Timer('calibrate'):
|
||||
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
|
||||
k3ds, k2ds, gray.shape[::-1], None, None)
|
||||
with Timer('check'):
|
||||
removed = []
|
||||
for i in range(len(imgs)):
|
||||
img = cv2.imread(imgs[i])
|
||||
points2d_repro, _ = cv2.projectPoints(
|
||||
k3ds[i], rvecs[i], tvecs[i], K, dist)
|
||||
points2d_repro = points2d_repro.squeeze()
|
||||
points2d = k2ds_[valid_idx[i]]
|
||||
err = np.linalg.norm(points2d_repro - points2d[:, :2], axis=1).mean()
|
||||
plot_points2d(img, points2d_repro, lines, col=(0, 0, 255), lw=1, putText=False)
|
||||
plot_points2d(img, points2d, lines, lw=1, putText=False)
|
||||
print(imgs[i], err)
|
||||
# cv2.imshow('vis', img)
|
||||
# cv2.waitKey(0)
|
||||
if err > MAX_ERROR_PIXEL:
|
||||
removed.append(i)
|
||||
for i in removed[::-1]:
|
||||
valid_idx.pop(i)
|
||||
if len_valid == len(valid_idx) or not args.remove:
|
||||
print(K)
|
||||
print(dist)
|
||||
for cam in camnames:
|
||||
cameras[cam] = {
|
||||
'K': K,
|
||||
'dist': dist # dist: (1, 5)
|
||||
}
|
||||
break
|
||||
len_valid = len(valid_idx)
|
||||
write_intri(join(path, 'output', 'intri.yml'), cameras)
|
||||
if max_image > 0 and len(valid_idx) > max_image + int(max_image * 0.1):
|
||||
pop(k2ds_, k3ds_, valid_idx, imagenames, max_num=max_image)
|
||||
if sample_image > 0:
|
||||
mywarn('[calibration] Load {} images, sample {} images'.format(len(k3ds_), sample_image))
|
||||
index = [i for i in range(len(k2ds_))]
|
||||
index_sample = random.sample(index, min(sample_image, len(index)))
|
||||
valid_idx = [valid_idx[i] for i in index_sample]
|
||||
k2ds_ = [k2ds_[i] for i in index_sample]
|
||||
k3ds_ = [k3ds_[i] for i in index_sample]
|
||||
for ii, idx in enumerate(valid_idx):
|
||||
shutil.copyfile(imagenames[idx], join(out, '{:06d}.jpg'.format(ii)))
|
||||
return k3ds_, k2ds_
|
||||
|
||||
def calib_intri_share(path, image, ext):
|
||||
camnames = sorted(os.listdir(join(path, image)))
|
||||
camnames = [cam for cam in camnames if os.path.isdir(join(path, image, cam))]
|
||||
|
||||
def calib_intri(path, step):
|
||||
camnames = sorted(os.listdir(join(path, 'images')))
|
||||
imagenames = sorted(glob(join(path, image, '*', '*' + ext)))
|
||||
chessnames = sorted(glob(join(path, 'chessboard', '*', '*.json')))
|
||||
k3ds_, k2ds_ = load_chessboards(chessnames, imagenames, args.num, args.sample, out=join(args.path, 'output'))
|
||||
with Timer('calibrate'):
|
||||
print('[Info] start calibration with {} detections'.format(len(k2ds_)))
|
||||
gray = cv2.imread(imagenames[0], 0)
|
||||
k3ds = k3ds_
|
||||
k2ds = [np.ascontiguousarray(k2d[:, :-1]) for k2d in k2ds_]
|
||||
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
|
||||
k3ds, k2ds, gray.shape[::-1], None, None,
|
||||
flags=cv2.CALIB_FIX_K3)
|
||||
cameras = {}
|
||||
for cam in camnames:
|
||||
cameras[cam] = {
|
||||
'K': K,
|
||||
'dist': dist # dist: (1, 5)
|
||||
}
|
||||
write_intri(join(path, 'output', 'intri.yml'), cameras)
|
||||
|
||||
def calib_intri(path, image, ext):
|
||||
camnames = sorted(os.listdir(join(path, image)))
|
||||
camnames = [cam for cam in camnames if os.path.isdir(join(path, image, cam))]
|
||||
cameras = {}
|
||||
for ic, cam in enumerate(camnames):
|
||||
imagenames = sorted(glob(join(path, 'images', cam, '*.jpg')))
|
||||
imagenames = sorted(glob(join(path, image, cam, '*'+ext)))
|
||||
chessnames = sorted(glob(join(path, 'chessboard', cam, '*.json')))
|
||||
k3ds, k2ds = [], []
|
||||
for chessname in chessnames[::step]:
|
||||
flag, k2d, k3d = read_chess(chessname)
|
||||
if not flag:continue
|
||||
k3ds.append(k3d)
|
||||
k2ds.append(np.ascontiguousarray(k2d[:, :-1]))
|
||||
k3ds_, k2ds_ = load_chessboards(chessnames, imagenames, args.num, out=join(args.path, 'output', cam+'_used'))
|
||||
k3ds = k3ds_
|
||||
k2ds = [np.ascontiguousarray(k2d[:, :-1]) for k2d in k2ds_]
|
||||
gray = cv2.imread(imagenames[0], 0)
|
||||
print('>> Detect {}/{:3d} frames'.format(cam, len(k2ds)))
|
||||
print('>> Camera {}: {:3d} frames'.format(cam, len(k2ds)))
|
||||
with Timer('calibrate'):
|
||||
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
|
||||
k3ds, k2ds, gray.shape[::-1], None, None)
|
||||
k3ds, k2ds, gray.shape[::-1], None, None,
|
||||
flags=cv2.CALIB_FIX_K3)
|
||||
cameras[cam] = {
|
||||
'K': K,
|
||||
'dist': dist # dist: (1, 5)
|
||||
@ -110,12 +135,14 @@ if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('path', type=str, default='/home/')
|
||||
parser.add_argument('--step', type=int, default=1)
|
||||
parser.add_argument('--image', type=str, default='images')
|
||||
parser.add_argument('--ext', type=str, default='.jpg', choices=['.jpg', '.png'])
|
||||
parser.add_argument('--num', type=int, default=-1)
|
||||
parser.add_argument('--sample', type=int, default=-1)
|
||||
parser.add_argument('--share_intri', action='store_true')
|
||||
parser.add_argument('--debug', action='store_true')
|
||||
parser.add_argument('--remove', action='store_true')
|
||||
args = parser.parse_args()
|
||||
if args.share_intri:
|
||||
calib_intri_share(args.path, step=args.step)
|
||||
calib_intri_share(args.path, args.image, ext=args.ext)
|
||||
else:
|
||||
calib_intri(args.path, step=args.step)
|
||||
calib_intri(args.path, args.image, ext=args.ext)
|
||||
|
@ -1,11 +1,12 @@
|
||||
import shutil
|
||||
import cv2
|
||||
from tqdm import tqdm
|
||||
from .basic_keyboard import register_keys
|
||||
from .basic_keyboard import print_help, register_keys
|
||||
from .basic_visualize import plot_text, resize_to_screen, merge
|
||||
from .basic_callback import point_callback, CV_KEY, get_key
|
||||
from .bbox_callback import callback_select_image
|
||||
from .file_utils import load_annot_to_tmp, read_json, save_annot
|
||||
|
||||
import copy
|
||||
class ComposedCallback:
|
||||
def __init__(self, callbacks=[point_callback], processes=[]) -> None:
|
||||
self.callbacks = callbacks
|
||||
@ -22,7 +23,11 @@ class ComposedCallback:
|
||||
else:
|
||||
return 0
|
||||
for process in self.processes:
|
||||
process(**param)
|
||||
ret = process(**param)
|
||||
if ret: # the operation succeeded, stop here
|
||||
param['click'] = None
|
||||
param['start'] = None
|
||||
param['end'] = None
|
||||
|
||||
def get_valid_yn():
|
||||
while True:
|
||||
@ -37,6 +42,69 @@ restore_key = {
|
||||
'handl': ('bbox_handl2d', 'handl2d'),
|
||||
'handr': ('bbox_handr2d', 'handr2d'),
|
||||
}
|
||||
|
||||
class BaseWindow:
|
||||
# 2021.10.11: consider a new level of abstraction
|
||||
# this base class only covers: open a window -> annotate -> close the window
|
||||
def __init__(self, name, param, register_keys, vis_funcs, callbacks) -> None:
|
||||
self.name = name
|
||||
register_keys['h'] = print_help
|
||||
register_keys['q'] = self.quit
|
||||
register_keys['x'] = self.clear
|
||||
register_keys['Q'] = self.quit_without_save
|
||||
self.register_keys = register_keys
|
||||
self.vis_funcs = vis_funcs
|
||||
self.isOpen = True
|
||||
param['click'] = None
|
||||
param['start'] = None
|
||||
param['end'] = None
|
||||
self.param0 = copy.deepcopy(param)
|
||||
self.param = param
|
||||
cv2.namedWindow(self.name)
|
||||
callback = ComposedCallback(processes=callbacks)
|
||||
cv2.setMouseCallback(self.name, callback.call, self.param)
|
||||
|
||||
def quit_without_save(self, annotator, param):
|
||||
self.quit(annotator, param, save=False)
|
||||
|
||||
def clear(self, annotator, param):
|
||||
select = param['select']
|
||||
for key in select.keys():
|
||||
select[key] = -1
|
||||
for key in ['click', 'start', 'end']:
|
||||
self.param[key] = None
|
||||
|
||||
def quit(self, annotator, param, save=True):
|
||||
for key in ['click', 'start', 'end']:
|
||||
if self.param[key] is not None:
|
||||
self.param[key] = None
|
||||
break
|
||||
else:
|
||||
if not save:
|
||||
for key in self.param.keys():
|
||||
self.param[key] = self.param0[key]
|
||||
else:
|
||||
self.save_and_quit()
|
||||
self.isOpen = False
|
||||
cv2.destroyWindow(self.name)
|
||||
|
||||
def save_and_quit(self, key=None):
|
||||
pass
|
||||
|
||||
def run(self, key=None, noshow=False):
|
||||
if key is None:
|
||||
key = chr(get_key())
|
||||
if key in self.register_keys.keys():
|
||||
self.register_keys[key](self, param=self.param)
|
||||
if not self.isOpen:
|
||||
return 0
|
||||
if noshow:
|
||||
return 0
|
||||
img = self.param['img0'].copy()
|
||||
for func in self.vis_funcs:
|
||||
img = func(img, **self.param)
|
||||
cv2.imshow(self.name, img)
|
||||
|
||||
class AnnotBase:
|
||||
def __init__(self, dataset, key_funcs={}, callbacks=[], vis_funcs=[],
|
||||
name = 'main', body='body25',
|
||||
@ -64,6 +132,7 @@ class AnnotBase:
|
||||
'select': {bbox_name: -1, 'corner': -1},
|
||||
'click': None,
|
||||
'name': name,
|
||||
'body': body,
|
||||
'capture_screen':False}
|
||||
self.set_frame(self.start)
|
||||
self.no_window = no_window
|
||||
@ -106,7 +175,7 @@ class AnnotBase:
|
||||
self.dataset.isTmp = False
|
||||
_, annname_ = self.dataset[frame]
|
||||
if annname is not None:
|
||||
shutil.copy(annname, annname_)
|
||||
shutil.copyfile(annname, annname_)
|
||||
|
||||
@property
|
||||
def frame(self):
|
||||
@ -231,23 +300,173 @@ class AnnotMV:
|
||||
for func in self.vis_funcs_all:
|
||||
img = func(img, sub, param=self.annotdict[sub].param)
|
||||
imgs.append(img)
|
||||
for func in [merge, resize_to_screen]:
|
||||
imgs = func(imgs, scale=0.1)
|
||||
imgs = merge(imgs, square=True)
|
||||
imgs = resize_to_screen(imgs, scale=CV_KEY.WINDOW_HEIGHT/imgs.shape[0])
|
||||
cv2.imshow(self.name, imgs)
|
||||
|
||||
import numpy as np
|
||||
def callback_select_image(click, select, ranges, **kwargs):
|
||||
if click is None:
|
||||
return 0
|
||||
ranges = np.array(ranges)
|
||||
click = np.array(click).reshape(1, -1)
|
||||
res = (click[:, 0]>ranges[:, 0])&(click[:, 0]<ranges[:, 2])&(click[:, 1]>ranges[:, 1])&(click[:, 1]<ranges[:, 3])
|
||||
if res.any():
|
||||
select['camera'] = int(np.where(res)[0])
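A sketch of the hit test in callback_select_image above: ranges holds one (x0, y0, x1, y1) box per camera view in the merged image, and a click selects the view whose box contains it (the numbers below are made up for a side-by-side two-view layout):

import numpy as np

ranges = np.array([[0, 0, 960, 540], [960, 0, 1920, 540]])
click = np.array([1200, 300]).reshape(1, -1)
res = (click[:, 0]>ranges[:, 0])&(click[:, 0]<ranges[:, 2])&(click[:, 1]>ranges[:, 1])&(click[:, 1]<ranges[:, 3])
print(int(np.where(res)[0]))   # -> 1, the right-hand view was clicked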
|
||||
|
||||
class AnnotMVMerge(BaseWindow):
|
||||
# The design philosophy of this class:
|
||||
# it only handles merging and visualizing all views, not the concrete editing operations
|
||||
restore_key = {
|
||||
'body25': ('bbox', 'keypoints'),
|
||||
'handl': ('bbox_handl2d', 'handl2d'),
|
||||
'handr': ('bbox_handr2d', 'handr2d'),
|
||||
'face': ('bbox_face2d', 'face2d'),
|
||||
}
|
||||
def __init__(self, datasets, register_keys, vis_funcs, vis_funcs_view, callbacks, body) -> None:
|
||||
self.subs = list(datasets.keys())
|
||||
self.isOpen = True
|
||||
for key in 'wasd':
|
||||
register_keys[key] = self.get_move(key)
|
||||
register_keys['q'] = self.quit
|
||||
register_keys['h'] = print_help
|
||||
register_keys['x'] = self.clear
|
||||
self.register_keys = register_keys
|
||||
self.vis_funcs = vis_funcs
|
||||
self.vis_funcs_view = vis_funcs_view
|
||||
self.callbacks = callbacks
|
||||
self.datasets = datasets
|
||||
self.name = 'main'
|
||||
frames = {sub:0 for sub in self.subs}
|
||||
self.body = body
|
||||
bbox_name, kpts_name = self.restore_key[body]
|
||||
|
||||
self.params_view = {sub:{
|
||||
'body': body, 'bbox_name': bbox_name, 'kpts_name': kpts_name,
|
||||
'select': {bbox_name:-1}} for sub in self.subs}
|
||||
imgs, annots = self.load_images_annots(self.datasets, frames)
|
||||
img0, ranges = merge(imgs, ret_range=True)
|
||||
scale = 10000./img0.shape[0]
|
||||
self.nFrames = len(self.datasets[self.subs[0]])
|
||||
self.start = 0
|
||||
self.end = self.nFrames
|
||||
self.step = 50
|
||||
self.visited_frames = {sub: set([self.start]) for sub in self.subs}
|
||||
|
||||
self.param = {
|
||||
'scale': scale, 'ranges': ranges,
|
||||
'click': None, 'start': None, 'end': None,
|
||||
'frames': frames,
|
||||
'body': body, 'bbox_name': bbox_name, 'kpts_name': kpts_name,
|
||||
'select': {'camera': -1, bbox_name:-1, 'corner': -1}}
|
||||
self.param['imgs'] = imgs
|
||||
self.param['annots'] = annots
|
||||
|
||||
self.no_window = False
|
||||
cv2.namedWindow(self.name)
|
||||
callback = ComposedCallback(processes=callbacks)
|
||||
cv2.setMouseCallback(self.name, callback.call, self.param)
|
||||
|
||||
def save_and_quit(self, key=None):
|
||||
self.isOpen = False
|
||||
self.update_param()
|
||||
cv2.destroyWindow(self.name)
|
||||
# get the input
|
||||
if key is None:
|
||||
key = get_valid_yn()
|
||||
if key == 'n':
|
||||
return 0
|
||||
for nv, sub in enumerate(self.subs):
|
||||
dataset = self.datasets[sub]
|
||||
for frame in tqdm(self.visited_frames[sub], desc='writing'):
|
||||
dataset.isTmp = True
|
||||
_, annname = dataset[frame]
|
||||
dataset.isTmp = False
|
||||
_, annname_ = dataset[frame]
|
||||
if annname is not None:
|
||||
print(annname, annname_)
|
||||
shutil.copyfile(annname, annname_)
|
||||
|
||||
@property
|
||||
def frame(self):
|
||||
return list(self.param['frames'].values())[0]
|
||||
|
||||
def update_param(self):
|
||||
# save the current annotations first
|
||||
for nv, sub in enumerate(self.subs):
|
||||
self.visited_frames[sub].add(self.param['frames'][sub])
|
||||
save_annot(self.params_view[sub]['annname'], self.param['annots'][nv])
|
||||
imgs, annots = self.load_images_annots(self.datasets, self.param['frames'])
|
||||
self.param['imgs'] = imgs
|
||||
self.param['annots'] = annots
|
||||
|
||||
def move(self, delta):
|
||||
for sub in self.subs:
|
||||
self.param['frames'][sub] += delta
|
||||
self.update_param()
|
||||
|
||||
@staticmethod
|
||||
def get_move(wasd):
|
||||
get_frame = {
|
||||
'a': lambda x, f: f - 1,
|
||||
'd': lambda x, f: f + 1,
|
||||
'w': lambda x, f: f - x.step,
|
||||
's': lambda x, f: f + x.step
|
||||
}[wasd]
|
||||
text = {
|
||||
'a': 'Move to last frame',
|
||||
'd': 'Move to next frame',
|
||||
'w': 'Move to last step frame',
|
||||
's': 'Move to next step frame'
|
||||
}
|
||||
clip_frame = lambda x, f: max(x.start, min(x.nFrames-1, min(x.end-1, f)))
|
||||
def move(annotator, **kwargs):
|
||||
newframe = get_frame(annotator, annotator.frame)
|
||||
newframe = clip_frame(annotator, newframe)
|
||||
annotator.move(newframe - annotator.frame)
|
||||
move.__doc__ = text[wasd]
|
||||
return move
|
||||
|
||||
def load_images_annots(self, datasets, frames):
|
||||
imgs, annots = [], []
|
||||
for sub, dataset in datasets.items():
|
||||
imgname, annname = dataset[frames[sub]]
|
||||
img = cv2.imread(imgname)
|
||||
annot = load_annot_to_tmp(annname)
|
||||
imgs.append(img)
|
||||
annots.append(annot)
|
||||
self.params_view[sub]['imgname'] = imgname
|
||||
self.params_view[sub]['annname'] = annname
|
||||
return imgs, annots
|
||||
|
||||
def run(self, key=None, noshow=False):
|
||||
# update the selection
|
||||
if key is None:
|
||||
key = chr(get_key())
|
||||
actv = self.param['select']['camera']
|
||||
for sub in self.subs:
|
||||
for sel in self.params_view[sub]['select']:
|
||||
self.params_view[sub]['select'][sel] = -1
|
||||
if actv != -1:
|
||||
self.params_view[self.subs[actv]]['select'].update(self.param['select'])
|
||||
if key in self.register_keys.keys():
|
||||
func = self.register_keys[key]
|
||||
if isinstance(func, list):
|
||||
[f(self, param=self.param) for f in func]
|
||||
else:
|
||||
func(self, param=self.param)
|
||||
if not self.isOpen:
|
||||
return 0
|
||||
if noshow:
|
||||
return 0
|
||||
imgs = self.param['imgs']
|
||||
imgs = [img.copy() for img in imgs]
|
||||
for nv, img in enumerate(imgs):
|
||||
for func in self.vis_funcs_view:
|
||||
img = func(img, self.param['annots'][nv], name=self.subs[nv], **self.params_view[self.subs[nv]])
|
||||
if nv == actv:
|
||||
cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), img.shape[1]//100)
|
||||
imgs[nv] = img
|
||||
img = merge(imgs)
|
||||
for func in self.vis_funcs:
|
||||
img = func(img, **self.param)
|
||||
cv2.imshow(self.name, img)
|
||||
|
||||
class AnnotMVMain:
|
||||
def __init__(self, datasets, key_funcs={}, key_funcs_view={}, callbacks=[], vis_funcs=[], vis_funcs_all=[],
|
||||
name='main', step=100, body='body25', start=0, end=100000) -> None:
|
||||
name='main', step=100, body='body25', start=0, end=100000,
|
||||
scale=0.5) -> None:
|
||||
self.subs = list(datasets.keys())
|
||||
self.annotdict = {}
|
||||
self.nFrames = end
|
||||
@ -264,12 +483,15 @@ class AnnotMVMain:
|
||||
'A': register_keys['A']
|
||||
}
|
||||
self.register_keys.update(key_funcs)
|
||||
for key, val in self.register_keys.items():
|
||||
print(key, val.__doc__)
|
||||
self.vis_funcs_all = vis_funcs_all
|
||||
self.name = name
|
||||
imgs = self.load_images()
|
||||
imgs, ranges = merge(imgs, ret_range=True)
|
||||
imgs, ranges = merge(imgs, ret_range=True, square=True)
|
||||
self.scale = scale
|
||||
self.param = {
|
||||
'scale': 0.45, 'ranges': ranges,
|
||||
'scale': scale, 'ranges': ranges,
|
||||
'click': None, 'start': None, 'end': None,
|
||||
'select': {'camera': -1}}
|
||||
callbacks = [callback_select_image]
|
||||
@ -308,9 +530,12 @@ class AnnotMVMain:
|
||||
# run the key for all cameras
|
||||
if key in self.register_keys.keys():
|
||||
self.register_keys[key](self, param=self.param)
|
||||
else:
|
||||
elif ord(key) != 255:
|
||||
for sub in self.subs:
|
||||
self.annotdict[sub].run(key)
|
||||
elif key == 'x':
|
||||
self.param['select']['camera'] = -1
|
||||
self.param['click'] = None
|
||||
else:
|
||||
# run the key for the selected cameras
|
||||
self.annotdict[self.subs[active_v]].run(key=key)
|
||||
@ -325,24 +550,27 @@ class AnnotMVMain:
|
||||
cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), img.shape[1]//100)
|
||||
# img = plot_text(img, self.annotdict[sub].param['annots'], self.annotdict[sub].param['imgname'])
|
||||
imgs.append(img)
|
||||
for func in [merge, resize_to_screen]:
|
||||
imgs = func(imgs, scale=0.45)
|
||||
imgs = merge(imgs, square=True)
|
||||
for func in [resize_to_screen]:
|
||||
# scale here
|
||||
imgs = func(imgs, scale=self.scale)
|
||||
cv2.imshow(self.name, imgs)
|
||||
|
||||
def load_parser():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('path', type=str)
|
||||
parser.add_argument('--out', type=str)
|
||||
parser.add_argument('--out', type=str, default=None)
|
||||
parser.add_argument('--sub', type=str, nargs='+', default=[],
|
||||
help='the sub folder lists when in video mode')
|
||||
parser.add_argument('--from_file', type=str, default=None)
|
||||
parser.add_argument('--image', type=str, default='images')
|
||||
parser.add_argument('--annot', type=str, default='annots')
|
||||
parser.add_argument('--body', type=str, default='handl')
|
||||
parser.add_argument('--body', type=str, default='body25')
|
||||
parser.add_argument('--step', type=int, default=100)
|
||||
parser.add_argument('--vis', action='store_true')
|
||||
parser.add_argument('--debug', action='store_true')
|
||||
parser.add_argument('--ext', type=str, default='.jpg', choices=['.jpg', '.png'])
|
||||
|
||||
# new arguments
|
||||
parser.add_argument('--start', type=int, default=0, help='frame start')
|
||||
@ -359,7 +587,7 @@ def parse_parser(parser):
|
||||
datas = f.readlines()
|
||||
subs = [d for d in datas if not d.startswith('#')]
|
||||
subs = [d.rstrip().replace('https://www.youtube.com/watch?v=', '') for d in subs]
|
||||
newsubs = sorted(os.listdir(join(args.path, 'images')))
|
||||
newsubs = sorted(os.listdir(join(args.path, args.image)))
|
||||
clips = []
|
||||
for newsub in newsubs:
|
||||
if newsub in subs:
|
||||
@ -367,21 +595,23 @@ def parse_parser(parser):
|
||||
if newsub.split('+')[0] in subs:
|
||||
clips.append(newsub)
|
||||
for sub in subs:
|
||||
if os.path.exists(join(args.path, 'images', sub)):
|
||||
if os.path.exists(join(args.path, args.image, sub)):
|
||||
clips.append(sub)
|
||||
args.sub = sorted(clips)
|
||||
elif args.from_file is not None and args.from_file.endswith('.json'):
|
||||
data = read_json(args.from_file)
|
||||
args.sub = sorted([v['vid'] for v in data])
|
||||
elif len(args.sub) == 0:
|
||||
args.sub = sorted(os.listdir(join(args.path, 'images')))
|
||||
if args.sub[0].isdigit():
|
||||
args.sub = sorted(args.sub, key=lambda x:int(x))
|
||||
subs = sorted(os.listdir(join(args.path, args.image)))
|
||||
subs = [s for s in subs if os.path.isdir(join(args.path, args.image, s))]
|
||||
if len(subs) > 0 and subs[0].isdigit():
|
||||
subs = sorted(subs, key=lambda x:int(x))
|
||||
args.sub = subs
|
||||
helps = """
|
||||
Demo code for annotation:
|
||||
- Input : {}
|
||||
- => "{}"
|
||||
- => {}
|
||||
- => {}
|
||||
""".format(args.path, ', '.join(args.sub), args.annot)
|
||||
""".format(args.path, '", "'.join(args.sub), args.annot)
|
||||
print(helps)
|
||||
return args
|
@ -2,8 +2,8 @@
|
||||
@ Date: 2021-04-21 14:18:50
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-07-11 16:56:39
|
||||
@ FilePath: /EasyMocap/easymocap/annotator/basic_callback.py
|
||||
@ LastEditTime: 2022-08-09 19:56:43
|
||||
@ FilePath: /EasyMocapPublic/easymocap/annotator/basic_callback.py
|
||||
'''
|
||||
import cv2
|
||||
|
||||
@ -16,8 +16,8 @@ class CV_KEY:
|
||||
q = 113
|
||||
ESC = 27
|
||||
BACKSPACE = 8
|
||||
WINDOW_WIDTH = int(1920*0.9)
|
||||
WINDOW_HEIGHT = int(1080*0.9)
|
||||
WINDOW_WIDTH = int(1920*0.8)
|
||||
WINDOW_HEIGHT = int(1080*0.8)
|
||||
LEFT = ord('a')
|
||||
RIGHT = ord('d')
|
||||
UP = ord('w')
|
||||
@ -40,19 +40,17 @@ def point_callback(event, x, y, flags, param):
|
||||
A simple callback used with OpenCV; it implements the following basic features:
|
||||
1. while dragging with the button held, record the start point and the end (current) point
|
||||
2. for a single click, record the selected point
|
||||
3. record whether the mouse button is currently held down
|
||||
"""
|
||||
if event not in [cv2.EVENT_LBUTTONDOWN, cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONUP]:
|
||||
return 0
|
||||
param['button_down'] = flags == cv2.EVENT_FLAG_LBUTTON
|
||||
# the position of the selected point is known here, so write it directly
|
||||
if event == cv2.EVENT_LBUTTONDOWN:
|
||||
# if a bbox is selected, it must not be cleared on button-down
|
||||
param['click'] = None
|
||||
param['start'] = (x, y)
|
||||
param['end'] = (x, y)
|
||||
# clear all selections: is this step necessary?
|
||||
for key in param['select'].keys():
|
||||
if key != 'bbox':
|
||||
param['select'][key] = -1
|
||||
elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
|
||||
param['end'] = (x, y)
|
||||
elif event == cv2.EVENT_LBUTTONUP:
|
||||
@ -62,4 +60,4 @@ def point_callback(event, x, y, flags, param):
|
||||
param['end'] = None
|
||||
else:
|
||||
param['click'] = None
|
||||
return 1
|
||||
return 1
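The mouse state shared by these callbacks is a plain dict that point_callback and ComposedCallback mutate in place; a sketch of the keys involved, where the select sub-keys depend on the annotation mode:

param = {
    'start': None,          # (x, y) where the left button went down
    'end': None,            # current (x, y) while dragging, or where the button went up
    'click': None,          # (x, y) of a click without dragging
    'button_down': False,   # whether the left button is currently held
    'select': {'bbox': -1, 'corner': -1},
}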
|
||||
|
@ -2,7 +2,7 @@
|
||||
@ Date: 2021-04-15 16:57:53
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-07-14 22:15:26
|
||||
@ LastEditTime: 2021-10-15 16:43:12
|
||||
@ FilePath: /EasyMocap/easymocap/annotator/basic_dataset.py
|
||||
'''
|
||||
from os.path import join
|
||||
@ -11,19 +11,22 @@ import shutil
|
||||
from .file_utils import getFileList
|
||||
|
||||
class ImageFolder:
|
||||
def __init__(self, path, sub=None, image='images', annot='annots', no_annot=False, ext='.jpg', remove_tmp=True) -> None:
|
||||
def __init__(self, path, sub=None, image='images', annot='annots',
|
||||
no_annot=False, share_annot=False, ext='.jpg', remove_tmp=True,
|
||||
max_per_folder=-1) -> None:
|
||||
self.root = path
|
||||
self.image = image
|
||||
self.annot = annot
|
||||
self.image_root = join(path, self.image)
|
||||
self.annot_root = join(path, self.annot)
|
||||
if not os.path.exists(self.annot_root):
|
||||
if not os.path.exists(self.annot_root) or no_annot:
|
||||
no_annot = True
|
||||
self.share_annot = share_annot
|
||||
self.annot_root_tmp = join(path, self.annot + '_tmp')
|
||||
if os.path.exists(self.annot_root_tmp) and remove_tmp:
|
||||
shutil.rmtree(self.annot_root_tmp)
|
||||
if sub is None:
|
||||
self.imgnames = getFileList(self.image_root, ext=ext)
|
||||
self.imgnames = getFileList(self.image_root, ext=ext, max=max_per_folder)
|
||||
if not no_annot:
|
||||
self.annnames = getFileList(self.annot_root, ext='.json')
|
||||
else:
|
||||
@ -32,10 +35,13 @@ class ImageFolder:
|
||||
if not no_annot:
|
||||
self.annnames = getFileList(join(self.annot_root, sub), ext='.json')
|
||||
self.annnames = [join(sub, name) for name in self.annnames]
|
||||
length = min(len(self.imgnames), len(self.annnames))
|
||||
self.imgnames = self.imgnames[:length]
|
||||
self.annnames = self.annnames[:length]
|
||||
# assert len(self.imgnames) == len(self.annnames)
|
||||
if len(self.annnames) != len(self.imgnames) and share_annot:
|
||||
if len(self.annnames) == 1:
|
||||
self.annnames = [self.annnames[0] for _ in range(len(self.imgnames))]
|
||||
else:
|
||||
length = min(len(self.imgnames), len(self.annnames))
|
||||
self.imgnames = self.imgnames[:length]
|
||||
self.annnames = self.annnames[:length]
|
||||
self.isTmp = True
|
||||
self.no_annot = no_annot
|
||||
|
||||
|
@ -2,8 +2,8 @@
|
||||
@ Date: 2021-04-15 17:39:34
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-07-24 17:01:18
|
||||
@ FilePath: /EasyMocap/easymocap/annotator/basic_keyboard.py
|
||||
@ LastEditTime: 2022-05-23 15:06:00
|
||||
@ FilePath: /EasyMocapPublic/easymocap/annotator/basic_keyboard.py
|
||||
'''
|
||||
from glob import glob
|
||||
from tqdm import tqdm
|
||||
@ -14,7 +14,12 @@ def print_help(annotator, **kwargs):
|
||||
print('Here is the help:')
|
||||
print( '------------------')
|
||||
for key, val in annotator.register_keys.items():
|
||||
print(' {}: '.format(key, ': '), str(val.__doc__))
|
||||
if isinstance(val, list):
|
||||
print(' {}: '.format(key, ': '), str(val[0].__doc__))
|
||||
for v in val[1:]:
|
||||
print(' ', str(v.__doc__))
|
||||
else:
|
||||
print(' {}: '.format(key, ': '), str(val.__doc__))
|
||||
|
||||
def print_help_mv(annotator, **kwargs):
|
||||
print_help(annotator)
|
||||
@ -31,6 +36,10 @@ def close(annotator, **kwargs):
|
||||
else:
|
||||
annotator.save_and_quit()
|
||||
# annotator.pbar.close()
|
||||
def close_wo_save(annotator, **kwargs):
|
||||
"""quit the annotation without saving"""
|
||||
annotator.save_and_quit(key='n')
|
||||
|
||||
def skip(annotator, **kwargs):
|
||||
"""skip the annotation"""
|
||||
annotator.save_and_quit(key='y')
|
||||
@ -49,14 +58,18 @@ def get_move(wasd):
|
||||
get_frame = {
|
||||
'a': lambda x, f: f - 1,
|
||||
'd': lambda x, f: f + 1,
|
||||
'w': lambda x, f: f - x.step,
|
||||
's': lambda x, f: f + x.step
|
||||
'w': lambda x, f: f - 10,
|
||||
's': lambda x, f: f + 10,
|
||||
'f': lambda x, f: f + 100,
|
||||
'g': lambda x, f: f - 100,
|
||||
}[wasd]
|
||||
text = {
|
||||
'a': 'Move to last frame',
|
||||
'd': 'Move to next frame',
|
||||
'w': 'Move to last step frame',
|
||||
's': 'Move to next step frame'
|
||||
's': 'Move to next step frame',
|
||||
'f': 'Move forward 100 frames',
|
||||
'g': 'Move back 100 frames'
|
||||
}
|
||||
clip_frame = lambda x, f: max(x.start, min(x.nFrames-1, min(x.end-1, f)))
|
||||
def move(annotator, **kwargs):
|
||||
@ -174,6 +187,7 @@ register_keys = {
|
||||
'h': print_help,
|
||||
'H': print_help_mv,
|
||||
'q': close,
|
||||
'Q': close_wo_save,
|
||||
' ': skip,
|
||||
'p': capture_screen,
|
||||
'A': automatic,
|
||||
@ -181,7 +195,7 @@ register_keys = {
|
||||
'k': set_keyframe
|
||||
}
|
||||
|
||||
for key in 'wasd':
|
||||
for key in 'wasdfg':
|
||||
register_keys[key] = get_move(key)
|
||||
|
||||
for i in range(5):
|
||||
|
@ -95,6 +95,7 @@ def plot_bbox_sp(img, annots, bbox_type='handl_bbox', add_center=False):
|
||||
if bbox_type not in data.keys():
|
||||
continue
|
||||
bbox = data[bbox_type]
|
||||
if bbox[-1] < 0.001: continue
|
||||
# draw an X shape
|
||||
x1, y1, x2, y2 = bbox[:4]
|
||||
pid = data['personID']
|
||||
|
@ -1,4 +1,5 @@
|
||||
import numpy as np
|
||||
from ..dataset.config import CONFIG
|
||||
|
||||
MIN_PIXEL = 50
|
||||
def findNearestPoint(points, click):
|
||||
@ -24,6 +25,8 @@ def callback_select_bbox_corner(start, end, annots, select, bbox_name, **kwargs)
|
||||
return 0
|
||||
# determine which corner is selected
|
||||
annots = annots['annots']
|
||||
if len(annots) == 0:
|
||||
return 0
|
||||
# not select a bbox
|
||||
if select[bbox_name] == -1 and select['corner'] == -1:
|
||||
corners = []
|
||||
@ -68,16 +71,20 @@ def callback_select_bbox_corner(start, end, annots, select, bbox_name, **kwargs)
|
||||
elif select[bbox_name] == -1 and select['corner'] != -1:
|
||||
select['corner'] = -1
|
||||
|
||||
def callback_select_bbox_center(click, annots, select, bbox_name, **kwargs):
|
||||
def callback_select_bbox_center(click, annots, select, bbox_name, min_pixel=-1, **kwargs):
|
||||
if click is None:
|
||||
return 0
|
||||
if min_pixel == -1:
|
||||
min_pixel = MIN_PIXEL
|
||||
annots = annots['annots']
|
||||
if len(annots) == 0:
|
||||
return 0
|
||||
bboxes = np.array([d[bbox_name] for d in annots])
|
||||
center = (bboxes[:, [2, 3]] + bboxes[:, [0, 1]])/2
|
||||
click = np.array(click)[None, :]
|
||||
dist = np.linalg.norm(click - center, axis=1)
|
||||
mindist, minid = dist.min(), dist.argmin()
|
||||
if mindist < MIN_PIXEL:
|
||||
if mindist < min_pixel:
|
||||
select[bbox_name] = minid
|
||||
|
||||
def get_auto_track(mode='kpts'):
|
||||
@ -161,8 +168,8 @@ def copy_previous_missing(self, param, **kwargs):
|
||||
return 0
|
||||
previous = self.previous()
|
||||
annots = param['annots']['annots']
|
||||
pre_ids = [d['personID'] for d in previous['annots']]
|
||||
now_ids = [d['personID'] for d in annots]
|
||||
pre_ids = [d.get('personID', d.get('id')) for d in previous['annots']]
|
||||
now_ids = [d.get('personID', d.get('id')) for d in annots]
|
||||
for i in range(len(pre_ids)):
|
||||
if pre_ids[i] not in now_ids:
|
||||
annots.append(previous['annots'][i])
|
||||
@ -194,6 +201,34 @@ def create_bbox(self, param, **kwargs):
|
||||
annots.append(data)
|
||||
param['start'], param['end'] = None, None
|
||||
|
||||
def create_bbox_mv(self, param, **kwargs):
|
||||
"add new boundbox"
|
||||
start, end = param['start'], param['end']
|
||||
if start is None or end is None:
|
||||
return 0
|
||||
nv = param['select']['camera']
|
||||
if nv == -1:
|
||||
return 0
|
||||
ranges = param['ranges']
|
||||
start = (start[0]-ranges[nv][0], start[1]-ranges[nv][1])
|
||||
end = (end[0]-ranges[nv][0], end[1]-ranges[nv][1])
|
||||
annots = param['annots'][nv]['annots']
|
||||
|
||||
nowids = [d['personID'] for d in annots]
|
||||
body = param['body']
|
||||
bbox_name, kpts_name = param['bbox_name'], param['kpts_name']
|
||||
if len(nowids) == 0:
|
||||
maxID = 0
|
||||
else:
|
||||
maxID = max(nowids) + 1
|
||||
data = {
|
||||
'personID': maxID,
|
||||
bbox_name: [start[0], start[1], end[0], end[1], 1],
|
||||
kpts_name: [[0., 0., 0.] for _ in range(CONFIG[body]['nJoints'])]
|
||||
}
|
||||
annots.append(data)
|
||||
param['start'], param['end'] = None, None
|
||||
|
||||
def delete_bbox(self, param, **kwargs):
|
||||
"delete the person"
|
||||
bbox_name = param['bbox_name']
|
||||
@ -210,4 +245,70 @@ def delete_all_bbox(self, param, **kwargs):
|
||||
bbox_name = param['bbox_name']
|
||||
param['annots']['annots'] = []
|
||||
param['select'][bbox_name] = -1
|
||||
return 0
|
||||
return 0
|
||||
|
||||
def callback_select_image(click, select, ranges, **kwargs):
|
||||
if click is None:
|
||||
return 0
|
||||
ranges = np.array(ranges)
|
||||
click = np.array(click).reshape(1, -1)
|
||||
res = (click[:, 0]>ranges[:, 0])&(click[:, 0]<ranges[:, 2])&(click[:, 1]>ranges[:, 1])&(click[:, 1]<ranges[:, 3])
|
||||
if res.any():
|
||||
select['camera'] = int(np.where(res)[0])
|
||||
|
||||
def callback_select_image_bbox(click, start, end, select, ranges, annots, bbox_name='bbox', **kwargs):
|
||||
if click is None:
|
||||
return 0
|
||||
ranges = np.array(ranges)
|
||||
click = np.array(click).reshape(1, -1)
|
||||
res = (click[:, 0]>ranges[:, 0])&(click[:, 0]<ranges[:, 2])&(click[:, 1]>ranges[:, 1])&(click[:, 1]<ranges[:, 3])
|
||||
if res.any():
|
||||
select['camera'] = int(np.where(res)[0])
|
||||
# check whether the click falls inside a person bbox
|
||||
nv = select['camera']
|
||||
if nv == -1:
|
||||
return 0
|
||||
click_view = click[0] - ranges[nv][:2]
|
||||
callback_select_bbox_center(click_view, annots[nv], select, bbox_name, min_pixel=MIN_PIXEL*2)
|
||||
|
||||
def callback_move_bbox(start, end, click, select, annots, ranges, bbox_name='bbox', **kwargs):
|
||||
if start is None or end is None:
|
||||
return 0
|
||||
nv, nb = select['camera'], select[bbox_name]
|
||||
if nv == -1 or nb == -1:
|
||||
return 0
|
||||
start = (start[0]-ranges[nv][0], start[1]-ranges[nv][1])
|
||||
end = (end[0]-ranges[nv][0], end[1]-ranges[nv][1])
|
||||
annots = annots[nv]['annots']
|
||||
# check whether start is near one of the bbox corners
|
||||
i = select[bbox_name]
|
||||
if select['corner'] == -1:
|
||||
l, t, r, b = annots[i][bbox_name][:4]
|
||||
corners = np.array([(l, t), (l, b), (r, t), (r, b), ((l+r)/2, (t+b)/2)])
|
||||
flag, minid = findNearestPoint(corners, start)
|
||||
if flag:
|
||||
select['corner'] = minid[0]
|
||||
else:
|
||||
flag, minid = findNearestPoint(corners, end)
|
||||
if flag:
|
||||
select['corner'] = minid[0]
|
||||
else:
|
||||
select['corner'] = -1
|
||||
if select['corner'] == -1:
|
||||
return 0
|
||||
x, y = end
|
||||
# Move the corner
|
||||
if select['corner'] < 4:
|
||||
(i, j) = [(0, 1), (0, 3), (2, 1), (2, 3)][select['corner']]
|
||||
data = annots[select[bbox_name]]
|
||||
data[bbox_name][i] = x
|
||||
data[bbox_name][j] = y
|
||||
# Move the center
|
||||
else:
|
||||
bbox = annots[select[bbox_name]][bbox_name]
|
||||
w = (bbox[2] - bbox[0])/2
|
||||
h = (bbox[3] - bbox[1])/2
|
||||
bbox[0] = x - w
|
||||
bbox[1] = y - h
|
||||
bbox[2] = x + w
|
||||
bbox[3] = y + h
|
@ -2,18 +2,21 @@
|
||||
@ Date: 2021-04-13 16:14:36
|
||||
@ Author: Qing Shuai
|
||||
@ LastEditors: Qing Shuai
|
||||
@ LastEditTime: 2021-07-17 16:00:17
|
||||
@ FilePath: /EasyMocap/easymocap/annotator/chessboard.py
|
||||
@ LastEditTime: 2022-08-17 16:49:40
|
||||
@ FilePath: /EasyMocapPublic/easymocap/annotator/chessboard.py
|
||||
'''
|
||||
import numpy as np
|
||||
import cv2
|
||||
from func_timeout import func_set_timeout
|
||||
|
||||
def getChessboard3d(pattern, gridSize):
|
||||
def getChessboard3d(pattern, gridSize, axis='xy'):
|
||||
object_points = np.zeros((pattern[1]*pattern[0], 3), np.float32)
|
||||
# Note: so that the z-axis of the board points up, the short edge is x and the long edge is y
|
||||
object_points[:,:2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1,2)
|
||||
object_points[:, [0, 1]] = object_points[:, [1, 0]]
|
||||
object_points = object_points * gridSize
|
||||
if axis == 'zx':
|
||||
object_points = object_points[:, [1, 2, 0]]
|
||||
return object_points
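A quick check of what getChessboard3d produces for a small (3, 2) pattern with 0.1 m squares; after the column swap the short edge becomes x and the long edge y, so the board z-axis points up.

import numpy as np

pattern, gridSize = (3, 2), 0.1
pts = np.zeros((pattern[1]*pattern[0], 3), np.float32)
pts[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)
pts[:, [0, 1]] = pts[:, [1, 0]]
print(pts * gridSize)
# [[0.  0.  0. ]
#  [0.  0.1 0. ]
#  [0.  0.2 0. ]
#  [0.1 0.  0. ]
#  [0.1 0.1 0. ]
#  [0.1 0.2 0. ]]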
|
||||
|
||||
colors_chessboard_bar = [
|
||||
@ -32,10 +35,10 @@ def get_lines_chessboard(pattern=(9, 6)):
|
||||
lines_cols = []
|
||||
for i in range(w*h-1):
|
||||
lines.append([i, i+1])
|
||||
lines_cols.append(colors_chessboard_bar[i//w])
|
||||
lines_cols.append(colors_chessboard_bar[(i//w)%len(colors_chessboard_bar)])
|
||||
return lines, lines_cols
|
||||
|
||||
def _findChessboardCorners(img, pattern):
|
||||
def _findChessboardCorners(img, pattern, debug):
|
||||
"basic function"
|
||||
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
|
||||
retval, corners = cv2.findChessboardCorners(img, pattern,
|
||||
@ -46,13 +49,14 @@ def _findChessboardCorners(img, pattern):
|
||||
corners = corners.squeeze()
|
||||
return True, corners
|
||||
|
||||
def _findChessboardCornersAdapt(img, pattern):
|
||||
def _findChessboardCornersAdapt(img, pattern, debug):
|
||||
"Adapt mode"
|
||||
img = cv2.adaptiveThreshold(img, 255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
|
||||
cv2.THRESH_BINARY,21, 2)
|
||||
return _findChessboardCorners(img, pattern)
|
||||
cv2.THRESH_BINARY, 21, 2)
|
||||
return _findChessboardCorners(img, pattern, debug)
|
||||
|
||||
def findChessboardCorners(img, annots, pattern):
|
||||
@func_set_timeout(5)
|
||||
def findChessboardCorners(img, annots, pattern, debug=False):
|
||||
conf = sum([v[2] for v in annots['keypoints2d']])
|
||||
if annots['visited'] and conf > 0:
|
||||
return True
|
||||
@ -61,8 +65,8 @@ def findChessboardCorners(img, annots, pattern):
|
||||
annots['visited'] = True
|
||||
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
|
||||
# Find the chess board corners
|
||||
for func in [_findChessboardCornersAdapt, _findChessboardCorners]:
|
||||
ret, corners = func(gray, pattern)
|
||||
for func in [_findChessboardCorners, _findChessboardCornersAdapt]:
|
||||
ret, corners = func(gray, pattern, debug)
|
||||
if ret:break
|
||||
else:
|
||||
return None
|
||||
@ -72,4 +76,66 @@ def findChessboardCorners(img, annots, pattern):
|
||||
assert corners.shape[0] == len(annots['keypoints2d'])
|
||||
corners = np.hstack((corners, np.ones((corners.shape[0], 1))))
|
||||
annots['keypoints2d'] = corners.tolist()
|
||||
return show
|
||||
return show
|
||||
|
||||
def create_chessboard(path, keypoints3d, out='annots'):
|
||||
from tqdm import tqdm
|
||||
from os.path import join
|
||||
from .file_utils import getFileList, save_json, read_json
|
||||
import os
|
||||
keypoints2d = np.zeros((keypoints3d.shape[0], 3))
|
||||
imgnames = getFileList(join(path, 'images'), ext='.jpg', max=1)
|
||||
imgnames = [join('images', i) for i in imgnames]
|
||||
template = {
|
||||
'keypoints3d': keypoints3d.tolist(),
|
||||
'keypoints2d': keypoints2d.tolist(),
|
||||
'visited': False
|
||||
}
|
||||
for imgname in tqdm(imgnames, desc='create template chessboard'):
|
||||
annname = imgname.replace('images', out).replace('.jpg', '.json')
|
||||
annname = join(path, annname)
|
||||
if not os.path.exists(annname):
|
||||
save_json(annname, template)
|
||||
elif True:
|
||||
annots = read_json(annname)
|
||||
annots['keypoints3d'] = template['keypoints3d']
|
||||
save_json(annname, annots)
|
||||
|
||||
ARUCO_DICT = {
|
||||
"4X4_50": cv2.aruco.DICT_4X4_50,
|
||||
"4X4_100": cv2.aruco.DICT_4X4_100,
|
||||
"5X5_100": cv2.aruco.DICT_5X5_100,
|
||||
"5X5_250": cv2.aruco.DICT_5X5_250,
|
||||
}
|
||||
|
||||
def detect_charuco(image, aruco_type, long, short, squareLength, aruco_len):
|
||||
# create the ChArUco calibration board
|
||||
dictionary = cv2.aruco.getPredefinedDictionary(dict=ARUCO_DICT[aruco_type])
|
||||
board = cv2.aruco.CharucoBoard_create(
|
||||
squaresY=long,
|
||||
squaresX=short,
|
||||
squareLength=squareLength,
|
||||
markerLength=aruco_len,
|
||||
dictionary=dictionary,
|
||||
)
|
||||
corners = board.chessboardCorners
|
||||
# ATTN: exchange the XY
|
||||
corners3d = corners[:, [1, 0, 2]]
|
||||
keypoints2d = np.zeros_like(corners3d)
|
||||
# detect the ArUco markers (the corner points of the marker blocks)
|
||||
corners, ids, _ = cv2.aruco.detectMarkers(
|
||||
image=image, dictionary=dictionary, parameters=None
|
||||
)
|
||||
# interpolate the inner corners between the black and white chessboard squares
|
||||
if ids is not None:
|
||||
retval, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(
|
||||
markerCorners=corners, markerIds=ids, image=image, board=board
|
||||
)
|
||||
if retval:
|
||||
ids = charucoIds[:, 0]
|
||||
pts = charucoCorners[:, 0]
|
||||
keypoints2d[ids, :2] = pts
|
||||
keypoints2d[ids, 2] = 1.
|
||||
else:
|
||||
retval = False
|
||||
return retval, keypoints2d, corners3d
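A hypothetical call to detect_charuco above; the dictionary name comes from ARUCO_DICT, while the board dimensions, square/marker sizes and the image path are placeholder values.

import cv2

image = cv2.imread('images/01/000000.jpg')
ok, keypoints2d, corners3d = detect_charuco(
    image, aruco_type='4X4_50', long=8, short=5,
    squareLength=0.06, aruco_len=0.045)
if ok:
    print(keypoints2d.shape, corners3d.shape)   # (nCorners, 3) each; conf in column 2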
|
BIN logo.png (binary file not shown; 46 KiB before, 144 KiB after)
@@ -1,9 +1,10 @@
torch==1.4.0
torchvision==0.5.0
ipdb
joblib
tqdm
opencv-python
pyrender
yacs
tabulate
termcolor
chumpy
mediapipe
func_timeout