[calib] add read colmap
This commit is contained in:
parent 530dde403c
commit 2e83642c7c
232 apps/calibration/align_colmap_ground.py Normal file
@@ -0,0 +1,232 @@
# This script post-processes COLMAP camera calibration results to find the ground plane and the scene center.
# Methods:
# 1. use a chessboard
# 2. estimate the ground plane from the point cloud
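#
# Example invocation (paths are hypothetical; see the argparse options in __main__ below):
#   python3 apps/calibration/align_colmap_ground.py <colmap_sparse_dir> <output_dir> \
#       --plane_by_chessboard <dataset_root_containing_images_and_chessboard>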

import os
from os.path import join
from easymocap.annotator.file_utils import save_json
from easymocap.mytools.debug_utils import myerror, run_cmd, mywarn, log
from easymocap.mytools.camera_utils import read_cameras, write_camera
from easymocap.mytools import read_json
from easymocap.mytools import batch_triangulate, projectN3, Undistort
import numpy as np
import cv2
from apps.calibration.calib_extri import solvePnP


def guess_ground(pcdname):
    # TODO: estimate the ground plane from the point cloud (currently a stub)
    import open3d as o3d
    pcd = o3d.io.read_point_cloud(pcdname)


def compute_rel(R_src, T_src, R_tgt, T_tgt):
    R_rel = R_src.T @ R_tgt
    T_rel = R_src.T @ (T_tgt - T_src)
    return R_rel, T_rel
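

# Note on compute_rel: given one camera's extrinsics (R_src, T_src) in a source world
# frame and (R_tgt, T_tgt) in a target world frame, the returned (R_rel, T_rel) maps
# target-world points into the source world: X_src = R_rel @ X_tgt + T_rel.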


def triangulate(cameras, areas):
    Ps, k2ds = [], []
    for cam, _, k2d, k3d in areas:
        k2d = Undistort.points(k2d, cameras[cam]['K'], cameras[cam]['dist'])
        P = cameras[cam]['K'] @ np.hstack([cameras[cam]['R'], cameras[cam]['T']])
        Ps.append(P)
        k2ds.append(k2d)
    Ps = np.stack(Ps)
    k2ds = np.stack(k2ds)
    k3d = batch_triangulate(k2ds, Ps)
    return k3d


def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
        A: Nxm numpy array of corresponding points
        B: Nxm numpy array of corresponding points
    Returns:
        R: mxm rotation matrix
        t: m-dimensional translation vector
    '''

    assert A.shape == B.shape

    # get number of dimensions
    m = A.shape[1]

    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B

    # rotation matrix
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)

    # special reflection case
    if np.linalg.det(R) < 0:
        Vt[m-1, :] *= -1
        R = np.dot(Vt.T, U.T)

    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)

    return R, t
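

# best_fit_transform is the standard SVD (Kabsch-style) fit: the returned R, t satisfy
# B ≈ A @ R.T + t in the least-squares sense.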


def align_by_chessboard(cameras, path):
    camnames = sorted(os.listdir(join(path, 'chessboard')))
    areas = []
    for ic, cam in enumerate(camnames):
        imagename = join(path, 'images', cam, '000000.jpg')
        chessname = join(path, 'chessboard', cam, '000000.json')
        data = read_json(chessname)
        k3d = np.array(data['keypoints3d'], dtype=np.float32)
        k2d = np.array(data['keypoints2d'], dtype=np.float32)
        # TODO
        # pattern = (11, 8)
        if 'pattern' in data.keys():
            pattern = data['pattern']
        else:
            pattern = None
        img = cv2.imread(imagename)
        if args.scale2d is not None:
            k2d[:, :2] *= args.scale2d
            img = cv2.resize(img, None, fx=args.scale2d, fy=args.scale2d)
        if args.origin is not None:
            cameras[args.prefix+cam] = cameras.pop(args.origin+cam.replace('VID_', '0000'))
        cam = args.prefix + cam
        if cam not in cameras.keys():
            myerror('camera {} not found in {}'.format(cam, cameras.keys()))
            continue
        cameras[cam]['shape'] = img.shape[:2]
        if k2d[:, -1].sum() < 1:
            continue
        # calculate the area of the chessboard
        mask = np.zeros_like(img[:, :, 0])
        k2d_int = np.round(k2d[:, :2]).astype(int)
        if pattern is not None:
            cv2.fillPoly(mask, [k2d_int[[0, pattern[0]-1, -1, -pattern[0]]]], 1)
        else:
            cv2.fillPoly(mask, [k2d_int[[0, 1, 2, 3, 0]]], 1)
        area = mask.sum()
        print(cam, area)
        areas.append([cam, area, k2d, k3d])
    areas.sort(key=lambda x: -x[1])
    best_cam, area, k2d, k3d = areas[0]
    # first, resolve the scale
    ref_point_id = np.linalg.norm(k3d - k3d[:1], axis=-1).argmax()
    k3d_pre = triangulate(cameras, areas)
    length_gt = np.linalg.norm(k3d[0, :3] - k3d[ref_point_id, :3])
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[ref_point_id, :3])
    log('gt diag={:.3f}, est diag={:.3f}, scale={:.3f}'.format(length_gt, length, length_gt/length))
    scale_colmap = length_gt / length
    for cam, camera in cameras.items():
        camera['T'] *= scale_colmap
    k3d_pre = triangulate(cameras, areas)
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[-1, :3])
    log('gt diag={:.3f}, est diag={:.3f}, scale={:.3f}'.format(length_gt, length, length_gt/length))
    # compute the camera R, T relative to the chessboard
    if False:
        for cam, _, k2d, k3d in areas:
            K, dist = cameras[cam]['K'], cameras[cam]['dist']
            R, T = cameras[cam]['R'], cameras[cam]['T']
            err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag=cv2.SOLVEPNP_ITERATIVE)
            # the relative transform computed from different views should be consistent
            R_tgt = cv2.Rodrigues(rvec)[0]
            T_tgt = tvec.reshape(3, 1)
            R_rel, T_rel = compute_rel(R, T, R_tgt, T_tgt)
            break
    else:
        # use the estimated chessboard keypoints and the ground-truth chessboard keypoints
        X = k3d_pre[:, :3]
        X_gt = k3d[:, :3]
        R_rel, T_rel = best_fit_transform(X_gt, X)
        # (R_rel, T_rel) maps from the chessboard coordinate system to the COLMAP coordinate system
        T_rel = T_rel.reshape(3, 1)
    centers = []
    for cam, camera in cameras.items():
        camera.pop('Rvec')
        R_old, T_old = camera['R'], camera['T']
        R_new = R_old @ R_rel
        T_new = T_old + R_old @ T_rel
        camera['R'] = R_new
        camera['T'] = T_new
        center = - camera['R'].T @ camera['T']
        centers.append(center)
        print('{}: ({:6.3f}, {:.3f}, {:.3f})'.format(cam, *np.round(center.T[0], 3)))
    # check the scale again using the chessboard
    k3d_pre = triangulate(cameras, areas)
    length = np.linalg.norm(k3d_pre[0, :3] - k3d_pre[ref_point_id, :3])
    log('{} {} {}'.format(length_gt, length, length_gt/length))
    log(k3d_pre)
    transform = np.eye(4)
    transform[:3, :3] = R_rel
    transform[:3, 3:] = T_rel
    return cameras, scale_colmap, np.linalg.inv(transform)
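

# Note: align_by_chessboard returns the updated cameras, the scale factor scale_colmap,
# and the inverse of the homogeneous [R_rel | T_rel] transform, which maps COLMAP-world
# coordinates into the chessboard-aligned (ground) frame; __main__ applies it to the
# point cloud together with scale_colmap.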

# for 3D points X,
# in origin world: \Pi(RX + T) = x
# in new world: \Pi(R'Y + T') = x
# , where X = R_p Y + T_p
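# Substituting X = R_p Y + T_p into \Pi(RX + T) = x gives \Pi(R R_p Y + (R T_p + T)) = x,
# so R' = R @ R_p and T' = R @ T_p + T, which is exactly the update applied above
# (R_new = R_old @ R_rel, T_new = T_old + R_old @ T_rel).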

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=str)
    parser.add_argument('out', type=str)
    parser.add_argument('--plane_by_chessboard', type=str, default=None)
    parser.add_argument('--plane_by_point', type=str, default=None)
    parser.add_argument('--num', type=int, default=1)
    parser.add_argument('--scale2d', type=float, default=None)
    parser.add_argument('--prefix', type=str, default='')
    parser.add_argument('--origin', type=str, default=None)
    parser.add_argument('--guess_plane', action='store_true')
    parser.add_argument('--noshow', action='store_true')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()

    if not os.path.exists(join(args.path, 'intri.yml')):
        assert os.path.exists(join(args.path, 'cameras.bin')), os.listdir(args.path)
        cmd = f'python3 apps/calibration/read_colmap.py {args.path} .bin'
        run_cmd(cmd)

    cameras = read_cameras(args.path)
    if args.plane_by_point is not None:
        # load the point cloud
        import ipdb; ipdb.set_trace()
    if args.plane_by_chessboard is not None:
        cameras, scale, transform = align_by_chessboard(cameras, args.plane_by_chessboard)
    if not args.noshow:
        import open3d as o3d
        sparse_name = join(args.path, 'sparse.ply')
        dense_name = join(args.path, '..', '..', 'dense', 'fused.ply')
        if os.path.exists(dense_name):
            pcd = o3d.io.read_point_cloud(dense_name)
        else:
            pcd = o3d.io.read_point_cloud(sparse_name)
        save_json(join(args.out, 'transform.json'), {'scale': scale, 'transform': transform.tolist()})
        points = np.asarray(pcd.points)
        # TODO: read correspondence of points3D and points2D
        points_new = (scale*points) @ transform[:3, :3].T + transform[:3, 3:].T
        pcd.points = o3d.utility.Vector3dVector(points_new)
        o3d.io.write_point_cloud(join(args.out, 'sparse_aligned.ply'), pcd)
        grids = []
        center = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=1, origin=[0, 0, 0])
        grids.append(center)
        for cam, camera in cameras.items():
            center = - camera['R'].T @ camera['T']
            center = o3d.geometry.TriangleMesh.create_coordinate_frame(
                size=0.5, origin=[center[0, 0], center[1, 0], center[2, 0]])
            if cam.startswith(args.prefix):
                center.paint_uniform_color([1, 0, 1])
            center.rotate(camera['R'].T)
            grids.append(center)
        o3d.visualization.draw_geometries([pcd] + grids)
    write_camera(cameras, args.out)
    if args.prefix is not None:
        cameras_ = {}
        for key, camera in cameras.items():
            if args.prefix not in key:
                continue
            cameras_[key.replace(args.prefix, '')] = camera
        os.makedirs(join(args.out, args.prefix), exist_ok=True)
        write_camera(cameras_, join(args.out, args.prefix))
532 apps/calibration/read_colmap.py Normal file
@@ -0,0 +1,532 @@
# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#     * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
#       its contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)

import os
import sys
import collections
import numpy as np
import struct
import cv2

CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])


class Image(BaseImage):
    def qvec2rotmat(self):
        return qvec2rotmat(self.qvec)


CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
                           for camera_model in CAMERA_MODELS])


def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.
    :param fid:
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    data = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, data)


def read_cameras_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    cameras = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                camera_id = int(elems[0])
                model = elems[1]
                width = int(elems[2])
                height = int(elems[3])
                params = np.array(tuple(map(float, elems[4:])))
                cameras[camera_id] = Camera(id=camera_id, model=model,
                                            width=width, height=height,
                                            params=params)
    return cameras


def read_cameras_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for camera_line_index in range(num_cameras):
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        assert len(cameras) == num_cameras
    return cameras


def read_images_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images


def read_images_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for image_index in range(num_reg_images):
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images


def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    points3D = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                point3D_id = int(elems[0])
                xyz = np.array(tuple(map(float, elems[1:4])))
                rgb = np.array(tuple(map(int, elems[4:7])))
                error = float(elems[7])
                image_ids = np.array(tuple(map(int, elems[8::2])))
                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                               error=error, image_ids=image_ids,
                                               point2D_idxs=point2D_idxs)
    return points3D


def read_points3d_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for point_line_index in range(num_points):
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    return points3D


def read_model(path, ext):
    if ext == ".txt":
        cameras = read_cameras_text(os.path.join(path, "cameras" + ext))
        images = read_images_text(os.path.join(path, "images" + ext))
        points3D = read_points3D_text(os.path.join(path, "points3D") + ext)
    else:
        cameras = read_cameras_binary(os.path.join(path, "cameras" + ext))
        images = read_images_binary(os.path.join(path, "images" + ext))
        points3D = read_points3d_binary(os.path.join(path, "points3D") + ext)
    return cameras, images, points3D


def qvec2rotmat(qvec):
    return np.array([
        [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
         2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
         2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
        [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
         1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
         2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
        [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
         2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
         1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
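

# Note: qvec follows the COLMAP convention (QW, QX, QY, QZ), i.e. qvec[0] is the scalar
# part and the quaternion is assumed to be normalized.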


def rotmat2qvec(R):
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    K = np.array([
        [Rxx - Ryy - Rzz, 0, 0, 0],
        [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
        [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
        [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
    eigvals, eigvecs = np.linalg.eigh(K)
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    if qvec[0] < 0:
        qvec *= -1
    return qvec


def write_cameras_text(cameras, path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    HEADER = '# Camera list with one line of data per camera:\n' + \
             '# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n' + \
             '# Number of cameras: {}\n'.format(len(cameras))
    with open(path, "w") as fid:
        fid.write(HEADER)
        for _, cam in cameras.items():
            to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params]
            line = " ".join([str(elem) for elem in to_write])
            fid.write(line + "\n")


def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """pack and write to a binary file.
    :param fid:
    :param data: data to send, if multiple elements are sent at the same time,
    they should be encapsulated either in a list or a tuple
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    should be the same length as the data list or tuple
    :param endian_character: Any of {@, =, <, >, !}
    """
    if isinstance(data, (list, tuple)):
        bytes = struct.pack(endian_character + format_char_sequence, *data)
    else:
        bytes = struct.pack(endian_character + format_char_sequence, data)
    fid.write(bytes)


def write_cameras_binary(cameras, path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(cameras), "Q")
        for _, cam in cameras.items():
            model_id = CAMERA_MODEL_NAMES[cam.model].model_id
            camera_properties = [cam.id,
                                 model_id,
                                 cam.width,
                                 cam.height]
            write_next_bytes(fid, camera_properties, "iiQQ")
            for p in cam.params:
                write_next_bytes(fid, float(p), "d")
    return cameras


def write_images_binary(images, path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(images), "Q")
        for _, img in images.items():
            write_next_bytes(fid, img.id, "i")
            write_next_bytes(fid, img.qvec.tolist(), "dddd")
            write_next_bytes(fid, img.tvec.tolist(), "ddd")
            write_next_bytes(fid, img.camera_id, "i")
            for char in img.name:
                write_next_bytes(fid, char.encode("utf-8"), "c")
            write_next_bytes(fid, b"\x00", "c")
            write_next_bytes(fid, len(img.point3D_ids), "Q")
            for xy, p3d_id in zip(img.xys, img.point3D_ids):
                write_next_bytes(fid, [*xy, p3d_id], "ddq")


def write_images_text(images, path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    if len(images) == 0:
        mean_observations = 0
    else:
        mean_observations = sum((len(img.point3D_ids) for _, img in images.items()))/len(images)
    HEADER = '# Image list with two lines of data per image:\n' + \
             '# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n' + \
             '# POINTS2D[] as (X, Y, POINT3D_ID)\n' + \
             '# Number of images: {}, mean observations per image: {}\n'.format(len(images), mean_observations)

    with open(path, "w") as fid:
        fid.write(HEADER)
        for _, img in images.items():
            image_header = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
            first_line = " ".join(map(str, image_header))
            fid.write(first_line + "\n")

            points_strings = []
            for xy, point3D_id in zip(img.xys, img.point3D_ids):
                points_strings.append(" ".join(map(str, [*xy, point3D_id])))
            fid.write(" ".join(points_strings) + "\n")


def write_points3D_text(points3D, path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    if len(points3D) == 0:
        mean_track_length = 0
    else:
        mean_track_length = sum((len(pt.image_ids) for _, pt in points3D.items()))/len(points3D)
    HEADER = '# 3D point list with one line of data per point:\n' + \
             '# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n' + \
             '# Number of points: {}, mean track length: {}\n'.format(len(points3D), mean_track_length)

    with open(path, "w") as fid:
        fid.write(HEADER)
        for _, pt in points3D.items():
            point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error]
            fid.write(" ".join(map(str, point_header)) + " ")
            track_strings = []
            for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs):
                track_strings.append(" ".join(map(str, [image_id, point2D])))
            fid.write(" ".join(track_strings) + "\n")


def write_points3d_binary(points3D, path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(points3D), "Q")
        for _, pt in points3D.items():
            write_next_bytes(fid, pt.id, "Q")
            write_next_bytes(fid, pt.xyz.tolist(), "ddd")
            write_next_bytes(fid, pt.rgb.tolist(), "BBB")
            write_next_bytes(fid, pt.error, "d")
            track_length = pt.image_ids.shape[0]
            write_next_bytes(fid, track_length, "Q")
            for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs):
                write_next_bytes(fid, [image_id, point2D_id], "ii")


class FileStorage(object):
    def __init__(self, filename, isWrite=False):
        version = cv2.__version__
        self.version = int(version.split('.')[0])
        if isWrite:
            self.fs = cv2.FileStorage(filename, cv2.FILE_STORAGE_WRITE)
        else:
            self.fs = cv2.FileStorage(filename, cv2.FILE_STORAGE_READ)

    def __del__(self):
        cv2.FileStorage.release(self.fs)

    def write(self, key, value, dt='mat'):
        if dt == 'mat':
            cv2.FileStorage.write(self.fs, key, value)
        elif dt == 'list':
            if self.version == 4:  # 4.x
                # self.fs.write(key, '[')
                # for elem in value:
                #     self.fs.write('none', elem)
                # self.fs.write('none', ']')
                # import ipdb; ipdb.set_trace()
                self.fs.startWriteStruct(key, cv2.FileNode_SEQ)
                for elem in value:
                    self.fs.write('', elem)
                self.fs.endWriteStruct()
            else:  # 3.x
                self.fs.write(key, '[')
                for elem in value:
                    self.fs.write('none', elem)
                self.fs.write('none', ']')


def main():
    if len(sys.argv) != 3:
        print("Usage: python read_colmap.py path/to/model/folder [.txt,.bin]")
        return

    cameras, images, points3D = read_model(path=sys.argv[1], ext=sys.argv[2])
    import cv2
    cameras_out = {}
    for key in cameras.keys():
        p = cameras[key].params
        if cameras[key].model == 'SIMPLE_RADIAL':
            # params: f, cx, cy, k
            f, cx, cy, k = p
            K = np.array([f, 0, cx, 0, f, cy, 0, 0, 1]).reshape(3, 3)
            dist = np.array([[k, 0, 0, 0, 0]])
        elif cameras[key].model == 'PINHOLE':
            # params: fx, fy, cx, cy
            fx, fy, cx, cy = p
            K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
            dist = np.array([[0, 0, 0, 0, 0]])
        else:
            # e.g. OPENCV: fx, fy, cx, cy, k1, k2, p1, p2
            K = np.array([p[0], 0, p[2], 0, p[1], p[3], 0, 0, 1]).reshape(3, 3)
            dist = np.array([[p[4], p[5], p[6], p[7], 0.]])
        cameras_out[key] = {'K': K, 'dist': dist}
    mapkey = {}
    cameras_new = {}
    for key, val in images.items():
        cam = cameras_out[val.camera_id].copy()
        t = val.tvec.reshape(3, 1)
        R = qvec2rotmat(val.qvec)
        cam['R'] = R
        cam['Rvec'] = cv2.Rodrigues(R)[0]
        cam['T'] = t
        # mapkey[val.name.split('.')[0]] = val.camera_id

        cameras_new[val.name.split('.')[0]] = cam
        # cameras_new[val.name.split('.')[0].split('/')[0]] = cam
    keys = sorted(list(cameras_new.keys()))
    cameras_new = {key: cameras_new[key] for key in keys}
    print("num_cameras: {}/{}".format(len(cameras), len(cameras_new)))
    print("num_images:", len(images))
    print("num_points3D:", len(points3D))
    if len(points3D) > 0:
        keys = list(points3D.keys())
        xyz = np.stack([points3D[k].xyz for k in keys])
        rgb = np.stack([points3D[k].rgb for k in keys])
        import open3d as o3d
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(xyz)
        pcd.colors = o3d.utility.Vector3dVector(rgb/255.)
        from os.path import join
        pcdname = join(sys.argv[1], 'sparse.ply')
        o3d.io.write_point_cloud(pcdname, pcd)
        if False:
            o3d.visualization.draw_geometries([pcd])
    from easymocap.mytools.camera_utils import write_camera
    write_camera(cameras_new, sys.argv[1])


if __name__ == "__main__":
    main()