Complete the first version

commit ad8480bd02 (parent 17e965b379)

.gitignore (vendored): 3 lines changed
@@ -4,4 +4,5 @@ __pycache__/
*.pyc
*.pyo
test/
data/
+data2/
calib_tools.py:
@@ -1,10 +1,7 @@
-import os
-import numpy as np
-import cv2 as cv
import glob
import os.path as osp
import json
-from tqdm import tqdm
+import os


def write_json(data, output_path):
    with open(output_path, "w") as f:

@@ -28,4 +25,45 @@ def create_output_folder(baseFolder, outputFolder):
    folder = osp.join(baseFolder, outputFolder)
    if not osp.exists(folder):
        os.makedirs(folder)
    return folder

+
+class DataPaths:
+    def __init__(self, base_dir):
+        self.base_path = base_dir
+        # intrinsic paths
+        self.intri_base = osp.join(self.base_path, "intri")
+        # intrinsic calibration images, organised into cam0, cam1, cam2, ... subfolders
+        self.intri_chessboard_data = osp.join(self.intri_base, "chessboard_data")
+        # visualisation of the chessboard corner detection during intrinsic calibration, organised into cam0, cam1, ... subfolders
+        self.intri_chessboard_vis = osp.join(self.intri_base, "chessboard_vis")
+        # images to undistort; the folder contains only images named cam0.jpg, cam1.jpg, cam2.jpg, ...
+        self.intri_undistort_data = osp.join(self.intri_base, "undistort_data")
+        self.intri_undistort_result = osp.join(self.intri_base, "undistort_result")
+
+        # extrinsic paths
+        self.extri_base = osp.join(self.base_path, "extri")
+        # chessboard images used for extrinsic calibration, one image per camera: cam0.jpg, cam1.jpg, cam2.jpg, ...
+        self.extri_chessboard_data = osp.join(self.extri_base, "extri_data")
+        # visualisation of the detected chessboard corners (origin and last point), organised into cam0, cam1, ... subfolders
+        self.extri_chessboard_vis = osp.join(self.extri_base, "extri_vis")
+
+        # paths needed by the check step
+        self.check_base = osp.join(self.base_path, "check")
+        # chessboard images used for checking: cam0.jpg, cam1.jpg, cam2.jpg, ...
+        # one image per camera, mainly fed to the corner detection function
+        self.check_data = osp.join(self.check_base, "check_data")
+        # visualisation of the detected chessboard corners (origin and last point), organised into cam0, cam1, ... subfolders
+        self.check_vis = osp.join(self.check_base, "extri_vis")
+
+        # JSON output paths
+        self.json_out_base = osp.join(self.base_path, 'json_out')
+        self.intri_json_path = osp.join(self.json_out_base, 'intri.json')
+        self.extri_json_path = osp.join(self.json_out_base, 'extri.json')
+
+    def print_paths(self):
+        print("to be implemented")
+
+
+base_dir = "data"
+DataPath = DataPaths(base_dir)
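For reference, the joined paths in DataPaths above imply a working directory roughly laid out as follows. This is only an inferred sketch of the expected layout (the data/ folder itself is not part of the commit):

    data/
        intri/
            chessboard_data/cam0/, cam1/, ...    intrinsic calibration images, one subfolder per camera
            chessboard_vis/                      corner-detection visualisations
            undistort_data/cam0.jpg, ...         images to undistort
            undistort_result/
        extri/
            extri_data/cam0.jpg, cam1.jpg, ...   one chessboard image per camera
            extri_vis/
        check/
            check_data/cam0.jpg, ...
            extri_vis/                           check visualisations (the code reuses the folder name "extri_vis")
        json_out/
            intri.json
            extri.json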
calibrate_extri.py:
@@ -1,19 +1,120 @@
-import os
-from glob import glob
-from os.path import join
+import os.path as osp
import numpy as np
import cv2 as cv
-import json
+import argparse
+from tqdm import tqdm

-def read_json(input):
-    with open(input, "r") as f:
-        data = json.load(f)
+from calib_tools import read_json, write_json
+from calib_tools import read_img_paths
+from calib_tools import DataPath
+
+
+# //////////////////////////////////////////////////////////////////////////////////////////////////////////////
+# detect_chessboard
+# Helper for the extrinsic step: detect the chessboard corners in the extrinsic images and build the matching 2D and 3D points
+def _findChessboardCorners(img, pattern):
+    "basic function"
+    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+    retval, corners = cv.findChessboardCorners(img, pattern,
+        flags=cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_FILTER_QUADS)
+    if not retval:
+        return False, None
+    corners = cv.cornerSubPix(img, corners, (11, 11), (-1, -1), criteria)
+    corners = corners.squeeze()
+    return True, corners
+
+
+def _findChessboardCornersAdapt(img, pattern):
+    "Adapt mode"
+    img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
+        cv.THRESH_BINARY, 21, 2)
+    return _findChessboardCorners(img, pattern)
+
+
+# Detect the chessboard corners and visualise them
+def findChessboardCorners(img_path, pattern, show):
+    img = cv.imread(img_path)
+    if img is None:
+        raise FileNotFoundError(f"Image not found at {img_path}")
+    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+
+    # Find the chess board corners
+    for func in [_findChessboardCorners, _findChessboardCornersAdapt]:
+        ret, corners = func(gray, pattern)
+        if ret: break
+    else:
+        return None
+    # Append a confidence of 1.0 and return
+    kpts2d = np.hstack([corners, np.ones((corners.shape[0], 1))])
+
+    # Mark the chessboard origin and the last corner
+    if show:
+        # Draw and display the corners
+        img_with_corners = cv.drawChessboardCorners(img, pattern, corners, ret)
+        # Mark the chessboard origin
+        origin = tuple(corners[0].astype(int))  # pixel coordinates of the origin
+        cv.circle(img_with_corners, origin, 10, (0, 0, 255), -1)  # draw the origin
+        cv.putText(img_with_corners, "Origin", (origin[0] + 10, origin[1] - 10),
+                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
+
+        # Mark the last corner
+        last_point = tuple(corners[-1].astype(int))  # last point of the corner array
+        cv.circle(img_with_corners, last_point, 10, (0, 255, 0), -1)  # green dot
+        cv.putText(img_with_corners, "Last", (last_point[0] + 15, last_point[1] - 15),
+                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)  # add the label "Last"
+
+        # Save the visualisation
+        cv.imwrite(osp.join(DataPath.extri_chessboard_vis, osp.basename(img_path)), img_with_corners)
+    return kpts2d
+
+
+# Generate the 3D chessboard coordinates; the board origin is its top-left corner (which is also the global origin)
+# The board z-axis points up; 'yx' means the board lies in the yx plane
+# easymocap
+# Note: with an 11x8 board the long side (11) is the y-axis and the short side (8) is the x-axis; this can be checked with OpenCV
+def getChessboard3d(pattern, gridSize, axis='yx'):
+    # Note: the short side is x and the long side is y so that the board z-axis points up
+    template = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)  # chessboard grid coordinates
+    object_points = np.zeros((pattern[1] * pattern[0], 3), np.float32)  # 3D coordinates; the up axis defaults to 0
+    # long side is x, short side is z
+    if axis == 'xz':
+        object_points[:, 0] = template[:, 0]
+        object_points[:, 2] = template[:, 1]
+    elif axis == 'yx':
+        object_points[:, 0] = template[:, 1]
+        object_points[:, 1] = template[:, 0]
+    else:
+        raise NotImplementedError
+    object_points = object_points * gridSize
+    return object_points
+
+
+# Detect the chessboard in every image in the folder and build the 3D and 2D points
+# Images must be named cam0.jpg, cam1.jpg, cam2.jpg, ... to match the intrinsic folders
+def detect_chessboard(extri_img_path, pattern, gridSize, show):
+    imgPaths = read_img_paths(extri_img_path)
+    if len(imgPaths) == 0:
+        print("No images found!")
+        return
+
+    data = {}
+    for imgPath in tqdm(imgPaths):
+        camname = osp.basename(imgPath).split(".")[0]
+        keypoints2d = findChessboardCorners(imgPath, pattern, show)
+        if keypoints2d is not None:
+            keypoints3d = getChessboard3d(pattern, gridSize)
+            data[camname] = {
+                "keypoints2d": keypoints2d.tolist(),
+                "keypoints3d": keypoints3d.tolist()
+            }
    return data


+# //////////////////////////////////////////////////////////////////////////////////////////////////////////////
def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
    k2d = np.ascontiguousarray(k2d[:, :2])  # keep only the first two columns
    # try different initial values:
    if tryextri:  # try several initial extrinsic guesses
        def closure(rvec, tvec):
            ret, rvec, tvec = cv.solvePnP(k3d, k2d, K, dist, rvec, tvec, True, flags=flag)
            points2d_repro, xxx = cv.projectPoints(k3d, rvec, tvec, K, dist)

@@ -22,8 +123,8 @@ def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
            return err, rvec, tvec, kpts_repro

        # create a series of extrinsic parameters looking at the origin
        height_guess = 2.7  # initial guess of the camera height
        radius_guess = 4.  # initial guess of the horizontal distance (circle radius); adjust it to your own setup
        infos = []
        for theta in np.linspace(0, 2 * np.pi, 180):
            st = np.sin(theta)

@@ -54,8 +155,9 @@ def solvePnP(k3d, k2d, K, dist, flag, tryextri=False):
    # print(err)
    return err, rvec, tvec, kpts_repro


# Calibrate the extrinsics of a single camera
-def _calibrate_extri(k3d, k2d, K, dist, flag, tryfocal=False):
+def _calibrate_extri(k3d, k2d, K, dist, tryfocal=False, tryextri=False):
    extri = {}
    methods = [cv.SOLVEPNP_ITERATIVE]
    # check that the numbers of 2D and 3D keypoints match

@@ -64,9 +166,9 @@ def _calibrate_extri(k3d, k2d, K, dist, flag, tryfocal=False):
    length = min(k3d.shape[0], k2d.shape[0])
    k3d = k3d[:length]
    k2d = k2d[:length]
    valididx = k2d[:, 2] > 0  # the third column of k2d is the confidence; check that it is positive
    if valididx.sum() < 4:  # keep the valid 2D and 3D keypoints; at least 4 are required
        rvec = np.zeros((1, 3))  # initialise the rotation and translation to zero and mark the result as failed
        tvec = np.zeros((3, 1))
        extri['Rvec'] = rvec
        extri['R'] = cv.Rodrigues(rvec)[0]

@@ -85,7 +187,7 @@ def _calibrate_extri(k3d, k2d, K, dist, flag, tryfocal=False):
            K[1, 1] = focal  # update fy of K
            for method in methods:
                # call solvePnP
-                err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, method)
+                err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, method, tryextri)
                # store the result
                infos.append({
                    'focal': focal,

@@ -105,20 +207,21 @@ def _calibrate_extri(k3d, k2d, K, dist, flag, tryfocal=False):
        print(f'[INFO] Optimal focal length found: {focal}, reprojection error: {err:.3f}')
    else:
        # if the focal length is not optimised, call solvePnP directly
-        err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, flag)
+        err, rvec, tvec, kpts_repro = solvePnP(k3d, k2d, K, dist, cv.SOLVEPNP_ITERATIVE, tryextri)

    # store the extrinsic result
-    extri['Rvec'] = rvec
+    extri['Rvec'] = rvec.tolist()
-    extri['R'] = cv.Rodrigues(rvec)[0]
+    extri['R'] = cv.Rodrigues(rvec)[0].tolist()
-    extri['T'] = tvec
+    extri['T'] = tvec.tolist()
-    center = - extri['R'].T @ tvec
+    center = - cv.Rodrigues(rvec)[0].T @ tvec
    print(f'[INFO] Camera center: {center.squeeze()}, reprojection error: {err:.3f}')
    return extri


-def calibrate_extri(kpts_path, intri_path, flag, tryfocal=False, tryextri=False):
+def calibrate_extri(pattern, gridSize, show=True, tryfocal=True, tryextri=True):
    extri = {}
-    intri_data = read_json(intri_path)
+    intri_data = read_json(DataPath.intri_json_path)
-    kpts_data = read_json(kpts_path)
+    kpts_data = detect_chessboard(DataPath.extri_chessboard_data, pattern, gridSize, show)  # chessboard pattern and grid size
    # fetch the intrinsics
    camnames = list(intri_data.keys())
    for cam in camnames:

@@ -128,10 +231,22 @@ def calibrate_extri(kpts_path, intri_path, flag, tryfocal=False, tryextri=False)
        k3d = np.array(kpts_data[cam]['keypoints3d'])
        k2d = np.array(kpts_data[cam]['keypoints2d'])

-        extri[cam] = _calibrate_extri(k3d, k2d, K, dist, flag, tryfocal=tryfocal)
+        extri[cam] = _calibrate_extri(k3d, k2d, K, dist, tryfocal, tryextri)

-    return extri
+    write_json(extri, osp.join(DataPath.extri_json_path, 'extri.json'))


if __name__ == "__main__":
-    pass
+    parser = argparse.ArgumentParser(description="Camera extrinsic calibration")
+    parser.add_argument("--pattern", type=str, default="11,8",
+                        help="number of chessboard corners (columns, rows), e.g. '11,8'")
+    parser.add_argument("--gridSize", type=float, default=60.0,
+                        help="real side length of a chessboard square (same unit as the data, e.g. mm or m)")
+    parser.add_argument("--show", dest="show", action="store_true", default=False, help="enable visualisation of the calibration results")
+    parser.add_argument("--tryfocal", dest="tryfocal", action="store_true", default=False, help="try to optimise the focal length")
+    parser.add_argument("--tryextri", dest="tryextri", action="store_true", default=False, help="try different initial extrinsics")
+    args = parser.parse_args()
+
+    # pass the parsed arguments to calibrate_extri
+    pattern = tuple(map(int, args.pattern.split(',')))  # convert the pattern string to a tuple
+    calibrate_extri(pattern, args.gridSize, show=args.show, tryfocal=args.tryfocal, tryextri=args.tryextri)
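Assuming the file above is calibrate_extri.py (the name used by the import in check_calibrate.py further down), a typical invocation implied by its argparse definition would look like this; it presumes data/json_out/intri.json already exists from the intrinsic step:

    # 11x8 inner corners, 60 mm squares, with visualisation and both refinement options enabled
    python calibrate_extri.py --pattern 11,8 --gridSize 60 --show --tryfocal --tryextri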
Intrinsic calibration script:
@@ -1,16 +1,16 @@
-import os
import os.path as osp
import glob
import cv2 as cv
import numpy as np
-import json
import datetime
import argparse

from tqdm import tqdm
+from calib_tools import write_json, read_json
+from calib_tools import read_img_paths, create_output_folder
+from calib_tools import DataPath


-def format_json_data(mtx, dist, image_shape, error):
+def format_intri_json_data(mtx, dist, image_shape, error):
    data = {
        "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "K": mtx.tolist(),

@@ -20,43 +20,10 @@ def format_json_data(mtx, dist, image_shape, error):
    }
    return data


-def write_json(data, output_path):
-    with open(output_path, "w") as f:
-        json.dump(data, f, indent=4)
-
-
-def read_json(input):
-    with open(input, "r") as f:
-        data = json.load(f)
-    return data
-
-
-def read_img_paths(imgFolder):
-    imgPaths = []
-    for extension in ["jpg", "png", "jpeg", "bmp"]:
-        imgPaths += glob.glob(osp.join(imgFolder, "*.{}".format(extension)))
-    return imgPaths
-
-
-def create_output_folder(baseFolder, outputFolder):
-    folder = osp.join(baseFolder, outputFolder)
-    if not osp.exists(folder):
-        os.makedirs(folder)
-    return folder
-
-
-base_path = "data"
-intri_img_path = osp.join(base_path, "chessboard", "intri")
-intri_vis_path = osp.join(base_path, "vis", "intri")
-json_output_path = osp.join(base_path, 'output_json')
-distortion_images_path = osp.join(base_path, "distortion_images")
-
-
def calibrate_camera(camera, chessboardSize, squareSize, visualization):
    # set up the output folder
    if visualization:
-        outputFolder = create_output_folder(intri_vis_path, osp.basename(camera))
+        outputFolder = create_output_folder(DataPath.intri_chessboard_vis, osp.basename(camera))

    # image paths
    imgPaths = read_img_paths(camera)

@@ -129,9 +96,8 @@ def calibrate_camera(camera, chessboardSize, squareSize, visualization):


# in calibrate_cameras, the photos are grouped by camera id
-# baseFolder: the folder holding the images and output data, ./data by default, can be set with the --folder argument
def calibrate_cameras(chessboardSize, squareSize, visualization):
-    cameras_path = glob.glob(osp.join(intri_img_path, "cam[0-7]"))
+    cameras_path = glob.glob(osp.join(DataPath.intri_chessboard_data, "cam[0-7]"))
    if len(cameras_path) == 0:
        print("No camera folders found!")
        return

@@ -141,13 +107,13 @@ def calibrate_cameras(chessboardSize, squareSize, visualization):
        cameraId = osp.basename(camera_path)
        print("\nCalibrating camera {}... ".format(cameraId))
        mtx, dist, image_shape, error = calibrate_camera(camera_path, chessboardSize, squareSize, visualization)
-        data[cameraId] = format_json_data(mtx, dist, image_shape, error)
+        data[cameraId] = format_intri_json_data(mtx, dist, image_shape, error)
-    write_json(data, osp.join(json_output_path, "intri.json"))
+    write_json(data, osp.join(DataPath.intri_json_path, "intri.json"))
-    print("Calibration data saved to: ", osp.join(json_output_path, "intri.json"))
+    print("Calibration data saved to: ", osp.join(DataPath.intri_json_path, "intri.json"))


# undistort an image
-def remove_image_distortion(img, mtx, dist):
+def unistort_img(img, mtx, dist):
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv.undistort(img, mtx, dist, None, newcameramtx)

@@ -156,26 +122,24 @@ def remove_image_distortion(img, mtx, dist):
    return dst


-# undistort every image in a folder and save the results to its distortion_corrected_images subfolder
+# undistort every image in a folder
-def remove_images_distortion(mtx, dist):
+def unistort_imgs(mtx, dist):
-    imgPaths = read_img_paths(distortion_images_path)
+    imgPaths = read_img_paths(DataPath.intri_undistort_data)
    if len(imgPaths) == 0:
        print("No images found!")
        return

-    outputFolder = create_output_folder(distortion_images_path, "output_images")

    for imgPath in imgPaths:
        img = cv.imread(imgPath)
-        dst = remove_image_distortion(img, mtx, dist)
+        dst = unistort_img(img, mtx, dist)
-        cv.imwrite(osp.join(outputFolder, osp.basename(imgPath)), dst)
+        cv.imwrite(osp.join(DataPath.intri_undistort_result, osp.basename(imgPath)), dst)

-    print("Distortion corrected images saved to: ", outputFolder)
+    print("Distortion corrected images saved to: ", DataPath.intri_undistort_result)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Camera intrinsic calibration and image undistortion")
-    parser.add_argument("--action", type=str, required=True, choices=["cameras", "distortion"],
+    parser.add_argument("--action", type=str, required=True, choices=["cameras", "undistort"],
                        help=" --action cameras: calibrate multiple cameras"
                             " --action distortion: undistort images")
    parser.add_argument("--chessboardSize", type=str, default="11,8",

@@ -188,12 +152,12 @@ if __name__ == "__main__":
    chessboardSize = tuple(map(int, args.chessboardSize.split(",")))
    if args.action == "cameras":
        calibrate_cameras(chessboardSize, args.squareSize, args.vis)
-    elif args.action == "distortion":
+    elif args.action == "undistort":
        print("Removing image distortion, require input folder")
-        data = read_json(osp.join(json_output_path, "intri.json"))
+        data = read_json(osp.join(DataPath.intri_json_path, "intri.json"))
        mtx = np.array(data["K"])
        dist = np.array(data["dist"])
-        remove_images_distortion(mtx, dist)
+        unistort_imgs(mtx, dist)
    else:
        print("Invalid action!")
        parser.print_help()
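A sketch of how this intrinsic script would be driven. The filename calibrate_intri.py is an assumption (this view does not show the file's name), and --squareSize/--vis are inferred from the args.squareSize and args.vis uses visible above rather than from their add_argument lines:

    # calibrate every camera folder found under data/intri/chessboard_data/cam0..cam7
    python calibrate_intri.py --action cameras --chessboardSize 11,8 --squareSize 60 --vis
    # undistort the images in data/intri/undistort_data using the saved intrinsics
    python calibrate_intri.py --action undistort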
check_calibrate.py (new file, 231 lines)
@@ -0,0 +1,231 @@
import cv2 as cv
import numpy as np
import os.path as osp
import argparse

from calib_tools import read_json, DataPath
from calib_tools import read_img_paths

from calibrate_extri import findChessboardCorners


# ////////////////////////////////////////////////////////////////////////////////////////////////////////////
# detect_chessboard
# Detect all 2D chessboard corners and store them in a dict keyed by camera name; the value is the list of 2D corners
def detect_chessboard(check_img_path, pattern):
    imgPaths = read_img_paths(check_img_path)
    if not imgPaths:
        print("No images found!")
        return {}

    data = {}
    for imgPath in imgPaths:
        camname = osp.splitext(osp.basename(imgPath))[0]
        keypoints2d = findChessboardCorners(imgPath, pattern, False)
        if keypoints2d is not None:
            data[camname] = keypoints2d.tolist()
        else:
            print(f"Failed to find chessboard corners in image: {imgPath}")
    return data


# //////////////////////////////////////////////////////////////////////////////////////////////////////////////

def read_cameras(intri_path, extri_path):
    cameras = {}
    intri = read_json(intri_path)
    extri = read_json(extri_path)
    for key in intri:
        cameras[key] = {
            'intri': intri[key],
            'extri': extri[key]
        }
    return cameras


def plot_line(img, pt1, pt2, lw, col):
    cv.line(img, (int(pt1[0] + 0.5), int(pt1[1] + 0.5)), (int(pt2[0] + 0.5), int(pt2[1] + 0.5)),
            col, lw)


def plot_cross(img, x, y, col, width=-1, lw=-1):
    if lw == -1:
        lw = max(1, int(round(img.shape[0] / 1000)))
        width = lw * 5
    cv.line(img, (int(x - width), int(y)), (int(x + width), int(y)), col, lw)
    cv.line(img, (int(x), int(y - width)), (int(x), int(y + width)), col, lw)


def plot_points2d(img, points2d, lines, lw=-1, col=(0, 255, 0), putText=True, style='+'):
    # Draw 2D points on the image
    if points2d.shape[1] == 2:
        points2d = np.hstack([points2d, np.ones((points2d.shape[0], 1))])
    if lw == -1:
        lw = img.shape[0] // 200
    for i, (x, y, v) in enumerate(points2d):
        if v < 0.01:
            continue
        c = col
        if '+' in style:
            plot_cross(img, x, y, width=10, col=c, lw=lw * 2)
        if 'o' in style:
            cv.circle(img, (int(x), int(y)), 10, c, lw * 2)
            cv.circle(img, (int(x), int(y)), lw, c, lw)
        if putText:
            c = col[::-1]
            font_scale = img.shape[0] / 1000
            cv.putText(img, '{}'.format(i), (int(x), int(y)), cv.FONT_HERSHEY_SIMPLEX, font_scale, c, 2)
    for i, j in lines:
        if points2d[i][2] < 0.01 or points2d[j][2] < 0.01:
            continue
        plot_line(img, points2d[i], points2d[j], max(1, lw // 2), col)


# Triangulate a batch of keypoints
def batch_triangulate(keypoints_, Pall, min_view=2):
    """ triangulate the keypoints of whole body

    Args:
        keypoints_ (nViews, nJoints, 3): 2D detections
        Pall (nViews, 3, 4) | (nViews, nJoints, 3, 4): projection matrix of each view
        min_view (int, optional): min view for visible points. Defaults to 2.

    Returns:
        keypoints3d: (nJoints, 4)
    """
    # keypoints: (nViews, nJoints, 3)
    # Pall: (nViews, 3, 4)
    # A: (nJoints, nViewsx2, 4), x: (nJoints, 4, 1); b: (nJoints, nViewsx2, 1)
    # compute keypoint visibility and keep the valid keypoints
    v = (keypoints_[:, :, -1] > 0).sum(axis=0)  # number of views in which each keypoint is detected
    valid_joint = np.where(v >= min_view)[0]  # indices of points seen by at least min_view views
    keypoints = keypoints_[:, valid_joint]  # keep the valid keypoints
    conf3d = keypoints[:, :, -1].sum(axis=0) / v[valid_joint]
    # P2: the last row of the P matrices: (1, nViews, 1, 4)
    if len(Pall.shape) == 3:
        P0 = Pall[None, :, 0, :]
        P1 = Pall[None, :, 1, :]
        P2 = Pall[None, :, 2, :]
    else:
        P0 = Pall[:, :, 0, :].swapaxes(0, 1)
        P1 = Pall[:, :, 1, :].swapaxes(0, 1)
        P2 = Pall[:, :, 2, :].swapaxes(0, 1)
    # uP2: the x coordinate times P2: (nJoints, nViews, 1, 4)
    uP2 = keypoints[:, :, 0].T[:, :, None] * P2
    vP2 = keypoints[:, :, 1].T[:, :, None] * P2
    conf = keypoints[:, :, 2].T[:, :, None]
    Au = conf * (uP2 - P0)
    Av = conf * (vP2 - P1)
    A = np.hstack([Au, Av])
    u, s, v = np.linalg.svd(A)
    X = v[:, -1, :]
    X = X / X[:, 3:]
    # out: (nJoints, 4)
    result = np.zeros((keypoints_.shape[1], 4))
    result[valid_joint, :3] = X[:, :3]
    result[valid_joint, 3] = conf3d  # * (conf[..., 0].sum(axis=-1)>min_view)
    return result


def reprojectN3(kpts3d, Pall):
    # kpts3d: (N, 3) or (N, 4)
    # Pall: (nViews, 3, 4), projection matrices [R|t]
    nViews = len(Pall)
    # append a 1 after the xyz coordinates to obtain homogeneous coordinates
    kp3d = np.hstack((kpts3d[:, :3], np.ones((kpts3d.shape[0], 1))))  # homogeneous coordinates (N, 4)
    kp2ds = []
    for nv in range(nViews):
        kp2d = Pall[nv] @ kp3d.T  # project to 2D (3, N)
        kp2d[:2, :] /= kp2d[2:, :]  # normalise the homogeneous coordinates
        kp2ds.append(kp2d.T[None, :, :])  # add a view dimension (1, N, 3)
    kp2ds = np.vstack(kp2ds)  # stack all views (nViews, N, 3)
    if kpts3d.shape[-1] == 4:
        kp2ds[..., -1] = kp2ds[..., -1] * (kpts3d[None, :, -1] > 0.)  # keep the confidence information
    return kp2ds


# Input: intrinsics, extrinsics and the 2D points annotated on each image
# Triangulate the input 2D points into 3D points, project them back onto the images and compare them
# with the annotated 2D points to compute the reprojection error
# Output: reprojection errors, mean error, max error
# One image per camera view
def check_match(pattern):
    # read the intrinsics and extrinsics
    cameras = read_cameras(DataPath.intri_json_path, DataPath.extri_json_path)
    # Format: {"cam1": [[x1, y1, conf], [x2, y2, conf], ...], "cam2": [...], ...}, one image per camera
    kpts2d = detect_chessboard(DataPath.check_data, pattern)

    # undistort
    for cam in cameras:
        K = np.array(cameras[cam]['intri']['K'])
        dist = np.array(cameras[cam]['intri']['dist'])
        points2d = np.array(kpts2d[cam])[:, :2]
        # expand points2d along a new axis so its shape goes from (N, 2) to (N, 1, 2)
        points2d_undistorted = cv.undistortPoints(np.expand_dims(points2d, axis=1), K, dist)
        # stack the undistorted points with the original confidence column
        kpts2d[cam] = np.hstack((points2d_undistorted.squeeze(), np.array(kpts2d[cam])[:, 2:]))

    # triangulation
    # Prepare projection matrices (Pall)
    Pall = []
    keypoints = []
    for cam in cameras:
        K = np.array(cameras[cam]['intri']['K'])
        R = np.array(cameras[cam]['extri']['R'])
        T = np.array(cameras[cam]['extri']['T']).reshape(3, 1)
        P = K @ np.hstack((R, T))
        Pall.append(P)
        keypoints.append(kpts2d[cam])

    Pall = np.array(Pall)
    keypoints = np.array(keypoints)

    # Triangulate 3D points
    keypoints3d = batch_triangulate(keypoints, Pall)

    # Calculate reprojection error and plot results
    reprojection_errors = []
    for i, cam in enumerate(cameras):
        P = Pall[i]
        kpts2d_proj = keypoints3d[:, :3] @ P[:, :3].T + P[:, 3]  # project the 3D keypoints onto the image plane
        kpts2d_proj /= kpts2d_proj[:, 2:3]  # Normalize by z

        # Compare with original 2D keypoints
        kpts2d_actual = keypoints[i, :, :2]
        kpts2d_error = np.linalg.norm(kpts2d_proj[:, :2] - kpts2d_actual, axis=1)
        reprojection_errors.append(kpts2d_error)

        # Plot reprojection results
        # img = np.zeros((480, 640, 3), dtype=np.uint8)  # Placeholder for the actual image
        # use the original image instead
        img_path = osp.join(DataPath.check_data, f"{cam}.jpg")
        img = cv.imread(img_path)
        plot_points2d(img, np.hstack((kpts2d_proj[:, :2], np.ones((kpts2d_proj.shape[0], 1)))), [], col=(0, 255, 0))
        mean_error_per_image = np.mean(kpts2d_error)
        font_scale = img.shape[0] / 1000
        cv.putText(img, f'Mean Error: {mean_error_per_image:.2f}', (50, 50), cv.FONT_HERSHEY_SIMPLEX, font_scale,
                   (0, 0, 255), 2)
        cv.imwrite(osp.join(DataPath.check_vis, f"{cam}.jpg"), img)

    # Combine errors for statistics
    reprojection_errors = np.hstack(reprojection_errors)
    mean_error = np.mean(reprojection_errors)
    max_error = np.max(reprojection_errors)

    return {
        'mean_error': mean_error,
        'max_error': max_error
    }


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Check camera calibration")
    parser.add_argument("--pattern", type=str, default="11,8",
                        help="Chessboard pattern size (columns, rows), e.g., '11,8'")
    args = parser.parse_args()

    pattern = tuple(map(int, args.pattern.split(',')))  # Convert pattern string to tuple
    result = check_match(pattern)

    print(f"Mean Reprojection Error: {result['mean_error']:.2f}")
    print(f"Max Reprojection Error: {result['max_error']:.2f}")
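As a quick illustration of how batch_triangulate and reprojectN3 fit together outside of check_match, here is a minimal synthetic sketch; the camera parameters and the point are made up for the example and are not part of the repository:

    import numpy as np
    from check_calibrate import batch_triangulate, reprojectN3

    # two toy views: identity rotation, cameras shifted along x, a shared pinhole K
    K = np.array([[1000., 0., 320.], [0., 1000., 240.], [0., 0., 1.]])
    Pall = np.stack([K @ np.hstack((np.eye(3), np.array([[t], [0.], [5.]]))) for t in (-0.5, 0.5)])

    # one 3D point at the origin, observed in both views
    X = np.array([[0., 0., 0., 1.]])
    obs = reprojectN3(X, Pall)            # (2, 1, 3): projected 2D points; the positive third column acts as confidence
    X_rec = batch_triangulate(obs, Pall)  # recovers roughly [0, 0, 0]; the fourth column carries the confidence
    print(X_rec)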
Deleted file (the old standalone chessboard detection script):
@@ -1,185 +0,0 @@
import os
import numpy as np
import cv2 as cv
import glob
import os.path as osp
import json
from tqdm import tqdm


# Think the folder structure through first
# extri folder: chessboard photos named cam1.jpg, cam2.jpg, cam3.jpg, ...
# Another mode only detects 2D points without generating 3D points; it needs an input folder and an output path


def write_json(data, output_path):
    with open(output_path, "w") as f:
        json.dump(data, f)


def read_json(input):
    with open(input, "r") as f:
        data = json.load(f)
    return data


def read_img_paths(imgFolder):
    imgPaths = []
    for extension in ["jpg", "png", "jpeg", "bmp"]:
        imgPaths += glob.glob(osp.join(imgFolder, "*.{}".format(extension)))
    return imgPaths


def create_output_folder(baseFolder, outputFolder):
    folder = osp.join(baseFolder, outputFolder)
    if not osp.exists(folder):
        os.makedirs(folder)
    return folder


base_path = "data"
extri_img_path = osp.join(base_path, "chessboard", "extri")
extri_vis_path = osp.join(base_path, "vis", "extri")
json_output_path = osp.join(base_path, 'output_json')


def _findChessboardCorners(img, pattern):
    "basic function"
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    retval, corners = cv.findChessboardCorners(img, pattern,
        flags=cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_FILTER_QUADS)
    if not retval:
        return False, None
    corners = cv.cornerSubPix(img, corners, (11, 11), (-1, -1), criteria)
    corners = corners.squeeze()
    return True, corners


def _findChessboardCornersAdapt(img, pattern):
    "Adapt mode"
    img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, \
        cv.THRESH_BINARY, 21, 2)
    return _findChessboardCorners(img, pattern)


# Detect the chessboard corners and visualise them
def findChessboardCorners(img_path, pattern, show=False):
    img = cv.imread(img_path)
    if img is None:
        raise FileNotFoundError(f"Image not found at {img_path}")
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    # Find the chess board corners
    for func in [_findChessboardCorners, _findChessboardCornersAdapt]:
        ret, corners = func(gray, pattern)
        if ret: break
    else:
        return None
    # Append a confidence of 1.0 and return
    kpts2d = np.hstack([corners, np.ones((corners.shape[0], 1))])

    if show:
        # Draw and display the corners
        img_with_corners = cv.drawChessboardCorners(img, pattern, corners, ret)
        # Mark the chessboard origin
        origin = tuple(corners[0].astype(int))  # pixel coordinates of the origin
        cv.circle(img_with_corners, origin, 10, (0, 0, 255), -1)  # draw the origin
        cv.putText(img_with_corners, "Origin", (origin[0] + 10, origin[1] - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # Mark the last corner
        last_point = tuple(corners[-1].astype(int))  # last point of the corner array
        cv.circle(img_with_corners, last_point, 10, (0, 255, 0), -1)  # green dot
        cv.putText(img_with_corners, "Last", (last_point[0] + 15, last_point[1] - 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)  # add the label "Last"

        # Save the visualisation
        cv.imwrite(osp.join(extri_vis_path, osp.basename(img_path)), img_with_corners)
    return kpts2d


# Generate the 3D chessboard coordinates; the board origin is its top-left corner (which is also the global origin)
# The board z-axis points up; 'yx' means the board lies in the yx plane
# easymocap
# Note: with an 11x8 board the long side (11) is the y-axis and the short side (8) is the x-axis; this can be checked with OpenCV
def getChessboard3d(pattern, gridSize, axis='yx'):
    # Note: the short side is x and the long side is y so that the board z-axis points up
    template = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)  # chessboard grid coordinates
    object_points = np.zeros((pattern[1] * pattern[0], 3), np.float32)  # 3D coordinates; the up axis defaults to 0
    # long side is x, short side is z
    if axis == 'xz':
        object_points[:, 0] = template[:, 0]
        object_points[:, 2] = template[:, 1]
    elif axis == 'yx':
        object_points[:, 0] = template[:, 1]
        object_points[:, 1] = template[:, 0]
    else:
        raise NotImplementedError
    object_points = object_points * gridSize
    return object_points


# Detect the chessboard in every image in the folder, build the 3D and 2D points and save them to a JSON file
# Images must be named cam0.jpg, cam1.jpg, cam2.jpg, ... to match the intrinsic folders
def detect_chessboard(pattern, gridSize):
    imgPaths = read_img_paths(extri_img_path)
    if len(imgPaths) == 0:
        print("No images found!")
        return

    data = {}
    for imgPath in tqdm(imgPaths):
        camname = osp.basename(imgPath).split(".")[0]
        keypoints2d = findChessboardCorners(imgPath, pattern, show=True)
        if keypoints2d is not None:
            keypoints3d = getChessboard3d(pattern, gridSize)
            data[camname] = {
                "keypoints2d": keypoints2d.tolist(),
                "keypoints3d": keypoints3d.tolist(),
                "pattern": pattern,
                "gridSize": gridSize
            }
    json_path = osp.join(json_output_path, "chessboard_keypoints.json")
    write_json(data, json_path)
    print(f"Saved keypoints to {json_path}")


# Only detect the 2D points and save them to a JSON file
def detect_chessboard_2d(imgFolder, pattern, outJsonPath):
    pass


def test_findChessboardCorners(img_path, pattern, saveDir):
    imgpaths = read_img_paths(img_path)
    for imgpath in imgpaths:
        img = cv.imread(imgpath)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        ret, corners = cv.findChessboardCorners(gray, pattern)
        if ret:
            # Draw the corners on the chessboard
            img_with_corners = cv.drawChessboardCorners(img, pattern, corners, ret)

            # Mark the origin
            origin = tuple(corners[0][0])  # the first corner is taken as the origin
            cv.circle(img_with_corners, (int(origin[0]), int(origin[1])), 10, (0, 0, 255), -1)  # red dot
            cv.putText(img_with_corners, "Origin", (int(origin[0]) + 15, int(origin[1]) - 15),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)  # add the label "Origin"

            # Mark the last corner
            last_point = tuple(corners[-1][0])  # last point of the corner array
            cv.circle(img_with_corners, (int(last_point[0]), int(last_point[1])), 10, (0, 255, 0), -1)  # green dot
            cv.putText(img_with_corners, "Last", (int(last_point[0]) + 15, int(last_point[1]) - 15),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)  # add the label "Last"

            # Save the image with the drawn corners
            cv.imwrite(osp.join(saveDir, osp.basename(imgpath)), img_with_corners)
        else:
            print(f"Failed to detect chessboard corners in {imgpath}")
    print(f"Saved images to {saveDir}")


if __name__ == '__main__':
    # test1
    img_path = "data/chessboard/extri"
    pattern = (11, 8)
    # saveDir = "data/test1"
    # os.makedirs(saveDir, exist_ok=True)
    # test_findChessboardCorners(img_path, pattern, saveDir)
    detect_chessboard(pattern, 60)
easymocap_tools/geometry.py (new file, 222 lines)
@@ -0,0 +1,222 @@
'''
@ Date: 2021-01-17 22:44:34
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2021-08-24 16:28:15
@ FilePath: /EasyMocap/easymocap/visualize/geometry.py
'''
import numpy as np
import cv2
import numpy as np
from tqdm import tqdm
from os.path import join

def load_sphere():
    cur_dir = os.path.dirname(__file__)
    faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=int)
    vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))
    return vertices, faces

def load_cylinder():
    cur_dir = os.path.dirname(__file__)
    faces = np.loadtxt(join(cur_dir, 'cylinder_faces_20.txt'), dtype=int)
    vertices = np.loadtxt(join(cur_dir, 'cylinder_vertices_20.txt'))
    return vertices, faces

def create_point(points, r=0.01):
    """ create sphere

    Args:
        points (array): (N, 3)/(N, 4)
        r (float, optional): radius. Defaults to 0.01.
    """
    points = np.array(points)
    nPoints = points.shape[0]
    vert, face = load_sphere()
    vert = vert * r
    nVerts = vert.shape[0]
    vert = vert[None, :, :].repeat(points.shape[0], 0)
    vert = vert + points[:, None, :3]
    verts = np.vstack(vert)
    face = face[None, :, :].repeat(points.shape[0], 0)
    face = face + nVerts * np.arange(nPoints).reshape(nPoints, 1, 1)
    faces = np.vstack(face)
    return {'vertices': verts, 'faces': faces, 'name': 'points'}

def calRot(axis, direc):
    direc = direc/np.linalg.norm(direc)
    axis = axis/np.linalg.norm(axis)
    rotdir = np.cross(axis, direc)
    rotdir = rotdir/np.linalg.norm(rotdir)
    rotdir = rotdir * np.arccos(np.dot(direc, axis))
    rotmat, _ = cv2.Rodrigues(rotdir)
    return rotmat

def create_line(start, end, r=0.01, col=None):
    length = np.linalg.norm(end[:3] - start[:3])
    vertices, faces = load_cylinder()
    vertices[:, :2] *= r
    vertices[:, 2] *= length/2
    rotmat = calRot(np.array([0, 0, 1]), end - start)
    vertices = vertices @ rotmat.T + (start + end)/2
    ret = {'vertices': vertices, 'faces': faces, 'name': 'line'}
    if col is not None:
        ret['colors'] = col.reshape(-1, 3).repeat(vertices.shape[0], 0)
    return ret

def create_ground(
    center=[0, 0, 0], xdir=[1, 0, 0], ydir=[0, 1, 0],  # position
    step=1, xrange=10, yrange=10,  # size
    white=[1., 1., 1.], black=[0., 0., 0.],  # colors
    two_sides=True
):
    if isinstance(center, list):
        center = np.array(center)
        xdir = np.array(xdir)
        ydir = np.array(ydir)
    print('[Vis Info] {}, x: {}, y: {}'.format(center, xdir, ydir))
    xdir = xdir * step
    ydir = ydir * step
    vertls, trils, colls = [], [], []
    cnt = 0
    min_x = -xrange if two_sides else 0
    min_y = -yrange if two_sides else 0
    for i in range(min_x, xrange):
        for j in range(min_y, yrange):
            point0 = center + i*xdir + j*ydir
            point1 = center + (i+1)*xdir + j*ydir
            point2 = center + (i+1)*xdir + (j+1)*ydir
            point3 = center + (i)*xdir + (j+1)*ydir
            if (i%2==0 and j%2==0) or (i%2==1 and j%2==1):
                col = white
            else:
                col = black
            vert = np.stack([point0, point1, point2, point3])
            col = np.stack([col for _ in range(vert.shape[0])])
            tri = np.array([[2, 3, 0], [0, 1, 2]]) + vert.shape[0] * cnt
            cnt += 1
            vertls.append(vert)
            trils.append(tri)
            colls.append(col)
    vertls = np.vstack(vertls)
    trils = np.vstack(trils)
    colls = np.vstack(colls)
    return {'vertices': vertls, 'faces': trils, 'colors': colls, 'name': 'ground'}


def get_rotation_from_two_directions(direc0, direc1):
    direc0 = direc0/np.linalg.norm(direc0)
    direc1 = direc1/np.linalg.norm(direc1)
    rotdir = np.cross(direc0, direc1)
    if np.linalg.norm(rotdir) < 1e-2:
        return np.eye(3)
    rotdir = rotdir/np.linalg.norm(rotdir)
    rotdir = rotdir * np.arccos(np.dot(direc0, direc1))
    rotmat, _ = cv2.Rodrigues(rotdir)
    return rotmat

PLANE_VERTICES = np.array([
    [0., 0., 0.],
    [1., 0., 0.],
    [0., 0., 1.],
    [1., 0., 1.],
    [0., 1., 0.],
    [1., 1., 0.],
    [0., 1., 1.],
    [1., 1., 1.]])
PLANE_FACES = np.array([
    [4, 7, 5],
    [4, 6, 7],
    [0, 2, 4],
    [2, 6, 4],
    [0, 1, 2],
    [1, 3, 2],
    [1, 5, 7],
    [1, 7, 3],
    [2, 3, 7],
    [2, 7, 6],
    [0, 4, 1],
    [1, 4, 5]], dtype=np.int32)

def create_plane(normal, center, dx=1, dy=1, dz=0.005, color=[0.8, 0.8, 0.8]):
    vertices = PLANE_VERTICES.copy()
    vertices[:, 0] = vertices[:, 0]*dx - dx/2
    vertices[:, 1] = vertices[:, 1]*dy - dy/2
    vertices[:, 2] = vertices[:, 2]*dz - dz/2
    # compute the rotation from the normal
    rotmat = get_rotation_from_two_directions(
        np.array([0, 0, 1]), np.array(normal))
    vertices = vertices @ rotmat.T
    vertices += np.array(center).reshape(-1, 3)
    return {'vertices': vertices, 'faces': PLANE_FACES.copy(), 'name': 'plane'}

def merge_meshes(meshes):
    verts = []
    faces = []
    # TODO: add colors
    nVerts = 0
    for mesh in meshes:
        verts.append(mesh['vertices'])
        faces.append(mesh['faces'] + nVerts)
        nVerts += mesh['vertices'].shape[0]
    return {'vertices': np.vstack(verts), 'faces': np.vstack(faces), 'name': 'compose_{}'.format(meshes[0]['name'])}

def create_cameras(cameras):
vertex = np.array([[0.203982,0.061435,0.00717595],[-0.116019,0.061435,0.00717595],[-0.116019,-0.178565,0.00717595],[0.203982,-0.178565,0.00717595],[0.203982,0.061435,-0.092824],[-0.116019,0.061435,-0.092824],[-0.116019,-0.178565,-0.092824],[0.203982,-0.178565,-0.092824],[0.131154,-0.0361827,0.00717595],[0.131154,-0.0361827,0.092176],[0.122849,-0.015207,0.00717595],[0.122849,-0.015207,0.092176],[0.109589,0.00304419,0.00717595],[0.109589,0.00304419,0.092176],[0.092206,0.0174247,0.00717595],[0.092206,0.0174247,0.092176],[0.071793,0.0270302,0.00717595],[0.071793,0.0270302,0.092176],[0.0496327,0.0312577,0.00717595],[0.0496327,0.0312577,0.092176],[0.0271172,0.0298412,0.00717595],[0.0271172,0.0298412,0.092176],[0.00566135,0.0228697,0.00717595],[0.00566135,0.0228697,0.092176],[-0.0133865,0.0107812,0.00717595],[-0.0133865,0.0107812,0.092176],[-0.02883,-0.0056643,0.00717595],[-0.02883,-0.0056643,0.092176],[-0.0396985,-0.0254336,0.00717595],[-0.0396985,-0.0254336,0.092176],[-0.045309,-0.0472848,0.00717595],[-0.045309,-0.0472848,0.092176],[-0.045309,-0.069845,0.00717595],[-0.045309,-0.069845,0.092176],[-0.0396985,-0.091696,0.00717595],[-0.0396985,-0.091696,0.092176],[-0.02883,-0.111466,0.00717595],[-0.02883,-0.111466,0.092176],[-0.0133865,-0.127911,0.00717595],[-0.0133865,-0.127911,0.092176],[0.00566135,-0.14,0.00717595],[0.00566135,-0.14,0.092176],[0.0271172,-0.146971,0.00717595],[0.0271172,-0.146971,0.092176],[0.0496327,-0.148388,0.00717595],[0.0496327,-0.148388,0.092176],[0.071793,-0.14416,0.00717595],[0.071793,-0.14416,0.092176],[0.092206,-0.134554,0.00717595],[0.092206,-0.134554,0.092176],[0.109589,-0.120174,0.00717595],[0.109589,-0.120174,0.092176],[0.122849,-0.101923,0.00717595],[0.122849,-0.101923,0.092176],[0.131154,-0.080947,0.00717595],[0.131154,-0.080947,0.092176],[0.133982,-0.058565,0.00717595],[0.133982,-0.058565,0.092176],[-0.0074325,0.061435,-0.0372285],[-0.0074325,0.074435,-0.0372285],[-0.0115845,0.061435,-0.0319846],[-0.0115845,0.074435,-0.0319846],[-0.018215,0.061435,-0.0274218],[-0.018215,0.074435,-0.0274218],[-0.0269065,0.061435,-0.0238267],[-0.0269065,0.074435,-0.0238267],[-0.0371125,0.061435,-0.0214253],[-0.0371125,0.074435,-0.0214253],[-0.048193,0.061435,-0.0203685],[-0.048193,0.074435,-0.0203685],[-0.0594505,0.061435,-0.0207226],[-0.0594505,0.074435,-0.0207226],[-0.0701785,0.061435,-0.0224655],[-0.0701785,0.074435,-0.0224655],[-0.0797025,0.061435,-0.0254875],[-0.0797025,0.074435,-0.0254875],[-0.0874245,0.061435,-0.0295989],[-0.0874245,0.074435,-0.0295989],[-0.0928585,0.061435,-0.0345412],[-0.0928585,0.074435,-0.0345412],[-0.0956635,0.061435,-0.040004],[-0.0956635,0.074435,-0.040004],[-0.0956635,0.061435,-0.045644],[-0.0956635,0.074435,-0.045644],[-0.0928585,0.061435,-0.051107],[-0.0928585,0.074435,-0.051107],[-0.0874245,0.061435,-0.056049],[-0.0874245,0.074435,-0.056049],[-0.0797025,0.061435,-0.0601605],[-0.0797025,0.074435,-0.0601605],[-0.0701785,0.061435,-0.0631825],[-0.0701785,0.074435,-0.0631825],[-0.0594505,0.061435,-0.0649255],[-0.0594505,0.074435,-0.0649255],[-0.048193,0.061435,-0.0652795],[-0.048193,0.074435,-0.0652795],[-0.0371125,0.061435,-0.064223],[-0.0371125,0.074435,-0.064223],[-0.0269065,0.061435,-0.0618215],[-0.0269065,0.074435,-0.0618215],[-0.018215,0.061435,-0.0582265],[-0.018215,0.074435,-0.0582265],[-0.0115845,0.061435,-0.0536635],[-0.0115845,0.074435,-0.0536635],[-0.0074325,0.061435,-0.0484195],[-0.0074325,0.074435,-0.0484195],[-0.0060185,0.061435,-0.0428241],[-0.0060185,0.074435,-0.0428241]])*0.5
tri = [[4,3,2],[1,4,2],[6,1,2],[6,5,1],[8,4,1],[5,8,1],[3,7,2],[7,6,2],[4,7,3],[8,7,4],[6,7,5],[7,8,5],[43,42,44],[42,43,41],[43,46,45],[46,43,44],[58,9,57],[9,58,10],[55,58,57],[56,58,55],[53,54,55],[54,56,55],[12,11,9],[12,9,10],[21,20,22],[20,21,19],[34,33,32],[32,33,31],[35,36,37],[37,36,38],[33,36,35],[36,33,34],[29,30,31],[30,32,31],[40,39,37],[40,37,38],[39,40,41],[40,42,41],[47,48,49],[49,48,50],[48,47,45],[46,48,45],[49,52,51],[52,49,50],[52,53,51],[52,54,53],[14,15,13],[15,14,16],[11,14,13],[12,14,11],[18,17,15],[18,15,16],[17,18,19],[18,20,19],[27,35,37],[17,27,15],[27,53,55],[27,49,51],[11,27,9],[27,47,49],[27,33,35],[23,27,21],[27,39,41],[27,55,57],[9,27,57],[15,27,13],[39,27,37],[47,27,45],[53,27,51],[27,11,13],[43,27,41],[27,29,31],[27,43,45],[27,17,19],[21,27,19],[33,27,31],[27,23,25],[23,24,25],[25,24,26],[24,21,22],[24,23,21],[28,36,34],[42,28,44],[28,58,56],[54,28,56],[52,28,54],[28,34,32],[28,46,44],[18,28,20],[20,28,22],[30,28,32],[40,28,42],[58,28,10],[28,48,46],[28,12,10],[28,14,12],[36,28,38],[28,24,22],[28,40,38],[48,28,50],[28,52,50],[14,28,16],[28,18,16],[24,28,26],[28,27,25],[28,25,26],[28,30,29],[27,28,29],[108,59,60],[59,108,107],[62,59,61],[59,62,60],[103,102,101],[102,103,104],[64,61,63],[64,62,61],[70,67,69],[67,70,68],[70,71,72],[71,70,69],[83,84,82],[83,82,81],[86,85,87],[86,87,88],[86,83,85],[83,86,84],[77,78,75],[75,78,76],[105,106,103],[103,106,104],[108,106,107],[106,105,107],[97,96,95],[96,97,98],[96,93,95],[93,96,94],[93,92,91],[92,93,94],[79,105,103],[59,79,61],[79,93,91],[83,79,85],[85,79,87],[61,79,63],[79,103,101],[65,79,67],[79,99,97],[89,79,91],[79,77,75],[79,59,107],[67,79,69],[79,89,87],[79,73,71],[105,79,107],[79,97,95],[79,71,69],[79,83,81],[99,79,101],[93,79,95],[79,65,63],[73,79,75],[99,100,97],[97,100,98],[102,100,101],[100,99,101],[89,90,87],[87,90,88],[90,89,91],[92,90,91],[66,67,68],[66,65,67],[66,64,63],[65,66,63],[74,75,76],[74,73,75],[71,74,72],[73,74,71],[80,106,108],[74,80,72],[86,80,84],[84,80,82],[64,80,62],[80,108,60],[80,100,102],[62,80,60],[66,80,64],[80,70,72],[80,102,104],[96,80,94],[80,90,92],[70,80,68],[80,86,88],[78,80,76],[106,80,104],[80,96,98],[80,92,94],[100,80,98],[90,80,88],[80,66,68],[80,74,76],[82,80,81],[80,79,81],[80,78,77],[79,80,77]]
    tri = [a[::-1] for a in tri]
    triangles = np.array(tri) - 1
    meshes = []
    for nv, (key, camera) in enumerate(cameras.items()):
        vertices = (camera['R'].T @ (vertex.T - camera['T'])).T
        meshes.append({
            'vertices': vertices, 'faces': triangles, 'name': 'camera_{}'.format(nv), 'vid': nv
        })
    meshes = merge_meshes(meshes)
    return meshes

import os
current_dir = os.path.dirname(os.path.realpath(__file__))

def create_cameras_texture(cameras, imgnames, scale=5e-3):
    import trimesh
    import pyrender
    from PIL import Image
    from os.path import join
    cam_path = join(current_dir, 'objs', 'background.obj')
    meshes = []
    for nv, (key, camera) in enumerate(tqdm(cameras.items(), desc='loading images')):
        cam_trimesh = trimesh.load(cam_path, process=False)
        vert = np.asarray(cam_trimesh.vertices)
        K, R, T = camera['K'], camera['R'], camera['T']
        img = Image.open(imgnames[nv])
        height, width = img.height, img.width
        vert[:, 0] *= width
        vert[:, 1] *= height
        vert[:, 2] *= 0
        vert[:, 0] -= vert[:, 0]*0.5
        vert[:, 1] -= vert[:, 1]*0.5
        vert[:, 1] = - vert[:, 1]
        vert[:, :2] *= scale
        # vert[:, 2] = 1
        cam_trimesh.vertices = (vert - T.T) @ R
        cam_trimesh.visual.material.image = img
        cam_mesh = pyrender.Mesh.from_trimesh(cam_trimesh, smooth=True)
        meshes.append(cam_mesh)
    return meshes

def create_mesh_pyrender(vert, faces, col):
    import trimesh
    import pyrender
    mesh = trimesh.Trimesh(vert, faces, process=False)
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.0,
        alphaMode='OPAQUE',
        baseColorFactor=col)
    mesh = pyrender.Mesh.from_trimesh(
        mesh,
        material=material)
    return mesh

if __name__ == "__main__":
    pass
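A small usage sketch for the mesh helpers above. It assumes easymocap_tools is importable as a package and that the sphere/cylinder .txt assets referenced by load_sphere/load_cylinder sit next to the module, which this commit does not show:

    import numpy as np
    from easymocap_tools.geometry import create_ground, create_point, merge_meshes

    ground = create_ground(center=[0, 0, 0], step=1, xrange=5, yrange=5)   # checkerboard ground plane
    points = create_point(np.array([[0., 0., 1.], [1., 0., 1.]]), r=0.05)  # two small spheres
    combined = merge_meshes([ground, points])                              # one {'vertices', 'faces', 'name'} dict
    print(combined['vertices'].shape, combined['faces'].shape)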
137
easymocap_tools/o3dwrapper.py
Normal file
137
easymocap_tools/o3dwrapper.py
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
'''
|
||||||
|
@ Date: 2021-04-25 15:52:01
|
||||||
|
@ Author: Qing Shuai
|
||||||
|
@ LastEditors: Qing Shuai
|
||||||
|
@ LastEditTime: 2022-09-02 14:27:41
|
||||||
|
@ FilePath: /EasyMocapPublic/easymocap/visualize/o3dwrapper.py
|
||||||
|
'''
|
||||||
|
import open3d as o3d
|
||||||
|
import numpy as np
|
||||||
|
from .geometry import create_ground as create_ground_
|
||||||
|
from .geometry import create_point as create_point_
|
||||||
|
from .geometry import create_line as create_line_
|
||||||
|
from os.path import join
|
||||||
|
|
||||||
|
Vector3dVector = o3d.utility.Vector3dVector
|
||||||
|
Vector3iVector = o3d.utility.Vector3iVector
|
||||||
|
Vector2iVector = o3d.utility.Vector2iVector
|
||||||
|
TriangleMesh = o3d.geometry.TriangleMesh
|
||||||
|
load_mesh = o3d.io.read_triangle_mesh
|
||||||
|
load_pcd = o3d.io.read_point_cloud
|
||||||
|
vis = o3d.visualization.draw_geometries
|
||||||
|
write_mesh = o3d.io.write_triangle_mesh
|
||||||
|
|
||||||
|
def _create_cylinder():
|
||||||
|
# create_cylinder(radius=1.0, height=2.0, resolution=20, split=4, create_uv_map=False)
|
||||||
|
pass
|
||||||
|
|
||||||
|
def read_mesh(filename):
|
||||||
|
mesh = load_mesh(filename)
|
||||||
|
mesh.compute_vertex_normals()
|
||||||
|
return mesh
|
||||||
|
|
||||||
|
def create_mesh(vertices, faces, colors=None, normal=True, **kwargs):
|
||||||
|
mesh = TriangleMesh()
|
||||||
|
mesh.vertices = Vector3dVector(vertices)
|
||||||
|
mesh.triangles = Vector3iVector(faces)
|
||||||
|
if colors is not None and isinstance(colors, np.ndarray):
|
||||||
|
mesh.vertex_colors = Vector3dVector(colors)
|
||||||
|
elif colors is not None and isinstance(colors, list):
|
||||||
|
mesh.paint_uniform_color(colors)
|
||||||
|
else:
|
||||||
|
mesh.paint_uniform_color([1., 0.8, 0.8])
|
||||||
|
if normal:
|
||||||
|
mesh.compute_vertex_normals()
|
||||||
|
return mesh

def create_pcd(xyz, color=None, colors=None):
    pcd = o3d.geometry.PointCloud()
    pcd.points = Vector3dVector(xyz[:, :3])
    if color is not None:
        pcd.paint_uniform_color(color)
    if colors is not None:
        pcd.colors = Vector3dVector(colors)
    return pcd

def create_point(**kwargs):
    return create_mesh(**create_point_(**kwargs))

def create_line(**kwargs):
    return create_mesh(**create_line_(**kwargs))

def create_ground(**kwargs):
    ground = create_ground_(**kwargs)
    return create_mesh(**ground)

def create_coord(camera = [0,0,0], radius=1, scale=1):
    camera_frame = TriangleMesh.create_coordinate_frame(
        size=radius, origin=camera)
    if scale != 1:
        camera_frame.scale(scale)
    return camera_frame

def create_bbox(min_bound=(-3., -3., 0), max_bound=(3., 3., 2), flip=False):
    if flip:
        min_bound_ = min_bound.copy()
        max_bound_ = max_bound.copy()
        min_bound = [min_bound_[0], -max_bound_[1], -max_bound_[2]]
        max_bound = [max_bound_[0], -min_bound_[1], -min_bound_[2]]
    bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
    bbox.color = [0., 0., 0.]
    return bbox

def get_bound_corners(bounds):
    min_x, min_y, min_z = bounds[0]
    max_x, max_y, max_z = bounds[1]
    corners_3d = np.array([
        [min_x, min_y, min_z],
        [min_x, min_y, max_z],
        [min_x, max_y, min_z],
        [min_x, max_y, max_z],
        [max_x, min_y, min_z],
        [max_x, min_y, max_z],
        [max_x, max_y, min_z],
        [max_x, max_y, max_z],
    ])
    return corners_3d

def create_rt_bbox(rtbbox):
    corners = get_bound_corners(rtbbox.aabb)
    corners = corners @ rtbbox.R.T + rtbbox.T
    lines = []
    for (i, j) in [(0, 1), (0, 2), (2, 3), (3, 1),
                   (4, 5), (4, 6), (6, 7), (5, 7),
                   (0, 4), (2, 6), (1, 5), (3, 7)]:
        line = create_line(start=corners[i], end=corners[j], r=0.001)
        line.paint_uniform_color([0., 0., 0.])
        lines.append(line)
    return lines

def create_my_bbox(min_bound=(-3., -3., 0), max_bound=(3., 3., 2)):
    # create the box mesh from cylinders
    bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound, max_bound)
    return bbox

# def create_camera(path=None, cameras=None):
#     if cameras is None:
#         from ..mytools.camera_utils import read_cameras
#         cameras = read_cameras(path)
#     from .geometry import create_cameras
#     meshes = create_cameras(cameras)
#     return create_mesh(**meshes)

def read_and_vis(filename):
    mesh = load_mesh(filename)
    mesh.compute_vertex_normals()
    # if not mesh.has_texture:
    vis([mesh])

if __name__ == "__main__":
    for res in [2, 4, 8, 20]:
        mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=1.0, resolution=res)
        mesh_sphere.paint_uniform_color([0.6, 0.7, 0.8])
        outname = 'easymocap/visualize/assets/sphere_faces_{}.txt'.format(res)
        np.savetxt(outname, np.asarray(mesh_sphere.triangles), fmt='%6d')
        outname = outname.replace('faces', 'vertices')
        np.savetxt(outname, np.asarray(mesh_sphere.vertices), fmt='%7.3f')
        vis([mesh_sphere])
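
The wrappers above are meant to be composed into a list and handed straight to vis; a small sketch (not part of the file) that shows the world frame together with the default capture volume from create_bbox:

scene = [create_coord(radius=1.0),                                        # world coordinate frame at the origin
         create_bbox(min_bound=(-3., -3., 0.), max_bound=(3., 3., 2.))]   # black wireframe box
vis(scene)
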
314
easymocap_tools/vis_base.py
Normal file
@ -0,0 +1,314 @@
'''
@ Date: 2020-11-28 17:23:04
@ Author: Qing Shuai
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-10-27 15:13:56
@ FilePath: /EasyMocapPublic/easymocap/mytools/vis_base.py
'''
import cv2
import numpy as np
import json

def generate_colorbar(N = 20, cmap = 'jet', rand=True,
    ret_float=False, ret_array=False, ret_rgb=False):
    bar = ((np.arange(N)/(N-1))*255).astype(np.uint8).reshape(-1, 1)
    colorbar = cv2.applyColorMap(bar, cv2.COLORMAP_JET).squeeze()
    if False:
        colorbar = np.clip(colorbar + 64, 0, 255)
    if rand:
        import random
        random.seed(666)
        index = [i for i in range(N)]
        random.shuffle(index)
        rgb = colorbar[index, :]
    else:
        rgb = colorbar
    if ret_rgb:
        rgb = rgb[:, ::-1]
    if ret_float:
        rgb = rgb/255.
    if not ret_array:
        rgb = rgb.tolist()
    return rgb
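
The output flags are easiest to read from an example; a sketch (not part of the file) of two typical calls:

# five distinct colors, one per camera; OpenCV's colormap output is BGR, 0-255 ints
cam_cols_bgr = generate_colorbar(N=5, rand=False)
# the same colors flipped to RGB and scaled to [0, 1], e.g. for open3d materials
cam_cols_rgb01 = generate_colorbar(N=5, rand=False, ret_rgb=True, ret_float=True)
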
# colors_bar_rgb = generate_colorbar(cmap='hsv')
colors_bar_rgb = [
    (94, 124, 226), # cyan
    (255, 200, 87), # yellow
    (74, 189, 172), # green
    (8, 76, 97), # blue
    (219, 58, 52), # red
    (77, 40, 49), # brown
]

colors_table = {
    'b': [0.65098039, 0.74117647, 0.85882353],
    '_pink': [.9, .7, .7],
    '_mint': [ 166/255., 229/255., 204/255.],
    '_mint2': [ 202/255., 229/255., 223/255.],
    '_green': [ 153/255., 216/255., 201/255.],
    '_green2': [ 171/255., 221/255., 164/255.],
    'r': [ 251/255., 128/255., 114/255.],
    '_orange': [ 253/255., 174/255., 97/255.],
    'y': [ 250/255., 230/255., 154/255.],
    'g':[0,255/255,0],
    'k':[0,0,0],
    '_r':[255/255,0,0],
    '_g':[0,255/255,0],
    '_b':[0,0,255/255],
    '_k':[0,0,0],
    '_y':[255/255,255/255,0],
    'purple':[128/255,0,128/255],
    'smap_b':[51/255,153/255,255/255],
    'smap_r':[255/255,51/255,153/255],
    'person': [255/255,255/255,255/255],
    'handl': [255/255,51/255,153/255],
    'handr': [51/255,255/255,153/255],
}

def get_rgb(index):
    if isinstance(index, int):
        if index == -1:
            return (255, 255, 255)
        if index < -1:
            return (0, 0, 0)
        # elif index == 0:
        #     return (245, 150, 150)
        col = list(colors_bar_rgb[index%len(colors_bar_rgb)])[::-1]
    elif isinstance(index, str):
        col = colors_table.get(index, (1, 0, 0))
        col = tuple([int(c*255) for c in col[::-1]])
    else:
        raise TypeError('index should be int or str')
    return col

def get_rgb_01(index):
    col = get_rgb(index)
    return [i*1./255 for i in col[:3]]
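
get_rgb is the single place where person ids and color names become OpenCV colors; a sketch (not part of the file) of its three branches:

col_person = get_rgb(0)     # [226, 124, 94]: colors_bar_rgb[0] reversed into BGR order
col_white = get_rgb(-1)     # (255, 255, 255) is reserved for id -1
col_named = get_rgb('r')    # a colors_table entry scaled from [0, 1] to 0-255 BGR
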
def plot_point(img, x, y, r, col, pid=-1, font_scale=-1, circle_type=-1):
    cv2.circle(img, (int(x+0.5), int(y+0.5)), r, col, circle_type)
    if font_scale == -1:
        font_scale = img.shape[0]/4000
    if pid != -1:
        cv2.putText(img, '{}'.format(pid), (int(x+0.5), int(y+0.5)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, col, 1)


def plot_line(img, pt1, pt2, lw, col):
    cv2.line(img, (int(pt1[0]+0.5), int(pt1[1]+0.5)), (int(pt2[0]+0.5), int(pt2[1]+0.5)),
        col, lw)

def plot_cross(img, x, y, col, width=-1, lw=-1):
    if lw == -1:
        lw = max(1, int(round(img.shape[0]/1000)))
        width = lw * 5
    cv2.line(img, (int(x-width), int(y)), (int(x+width), int(y)), col, lw)
    cv2.line(img, (int(x), int(y-width)), (int(x), int(y+width)), col, lw)

def plot_bbox(img, bbox, pid, scale=1, vis_id=True):
    # draw the bbox: (l, t, r, b)
    x1, y1, x2, y2, c = bbox
    if c < 0.01: return img
    x1 = int(round(x1*scale))
    x2 = int(round(x2*scale))
    y1 = int(round(y1*scale))
    y2 = int(round(y2*scale))
    color = get_rgb(pid)
    lw = max(img.shape[0]//300, 2)
    cv2.rectangle(img, (x1, y1), (x2, y2), color, lw)
    if vis_id:
        font_scale = img.shape[0]/1000
        cv2.putText(img, '{}'.format(pid), (x1, y1+int(25*font_scale)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, color, 2)
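
A short sketch (not part of the file) of the drawing primitives on a blank canvas; the bbox layout assumed by plot_bbox is (x1, y1, x2, y2, confidence), and the coordinates are made up:

canvas = np.zeros((1080, 1920, 3), dtype=np.uint8)
plot_bbox(canvas, (100, 200, 400, 800, 0.97), pid=0)
plot_point(canvas, 250, 500, r=8, col=get_rgb(0), pid=0)
cv2.imwrite('debug_canvas.jpg', canvas)
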
def plot_keypoints(img, points, pid, config, vis_conf=False, use_limb_color=True, lw=2, fliplr=False):
    lw = max(lw, 2)
    H, W = img.shape[:2]
    for ii, (i, j) in enumerate(config['kintree']):
        if i >= len(points) or j >= len(points):
            continue
        if (i > 25 or j > 25) and config['nJoints'] != 42:
            _lw = max(int(lw/4), 1)
        else:
            _lw = lw
        pt1, pt2 = points[i], points[j]
        if fliplr:
            pt1 = (W-pt1[0], pt1[1])
            pt2 = (W-pt2[0], pt2[1])
        if use_limb_color:
            col = get_rgb(config['colors'][ii])
        else:
            col = get_rgb(pid)
        if pt1[-1] > 0.01 and pt2[-1] > 0.01:
            image = cv2.line(
                img, (int(pt1[0]+0.5), int(pt1[1]+0.5)), (int(pt2[0]+0.5), int(pt2[1]+0.5)),
                col, _lw)
    for i in range(min(len(points), config['nJoints'])):
        x, y = points[i][0], points[i][1]
        if fliplr:
            x = W - x
        c = points[i][-1]
        if c > 0.01:
            text_size = img.shape[0]/1000
            col = get_rgb(pid)
            radius = int(lw/1.5)
            if i > 25 and config['nJoints'] != 42:
                radius = max(int(radius/4), 1)
            cv2.circle(img, (int(x+0.5), int(y+0.5)), radius, col, -1)
            if vis_conf:
                cv2.putText(img, '{:.1f}'.format(c), (int(x), int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX, text_size, col, 2)
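
plot_keypoints only needs a config dict with 'kintree', 'colors' and 'nJoints' plus an (N, 3) array of (x, y, confidence) rows; a sketch (not part of the file) with a made-up three-joint skeleton:

toy_config = {'nJoints': 3, 'kintree': [[0, 1], [1, 2]], 'colors': [0, 1]}
toy_points = np.array([[500., 300., 0.9], [520., 400., 0.8], [480., 500., 0.7]])
toy_img = np.zeros((1080, 1920, 3), dtype=np.uint8)
plot_keypoints(toy_img, toy_points, pid=1, config=toy_config)
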
def plot_keypoints_auto(img, points, pid, vis_conf=False, use_limb_color=True, scale=1, lw=-1, config_name=None, lw_factor=1):
    from ..dataset.config import CONFIG
    if config_name is None:
        config_name = {25: 'body25', 15: 'body15', 21: 'hand', 42:'handlr', 17: 'coco', 1:'points', 67:'bodyhand', 137: 'total', 79:'up',
            19:'ochuman'}[len(points)]
    config = CONFIG[config_name]
    if lw == -1:
        lw = img.shape[0]//200
        if config_name == 'hand':
            lw = img.shape[0]//100
        lw = max(lw, 1)
    for ii, (i, j) in enumerate(config['kintree']):
        if i >= len(points) or j >= len(points):
            continue
        if i >= 25 and config_name in ['bodyhand', 'total']:
            lw = max(img.shape[0]//400, 1)
        pt1, pt2 = points[i], points[j]
        if use_limb_color:
            col = get_rgb(config['colors'][ii])
        else:
            col = get_rgb(pid)
        if pt1[0] < -10000 or pt1[1] < -10000 or pt1[0] > 10000 or pt1[1] > 10000:
            continue
        if pt2[0] < -10000 or pt2[1] < -10000 or pt2[0] > 10000 or pt2[1] > 10000:
            continue
        if pt1[-1] > 0.01 and pt2[-1] > 0.01:
            image = cv2.line(
                img, (int(pt1[0]*scale+0.5), int(pt1[1]*scale+0.5)), (int(pt2[0]*scale+0.5), int(pt2[1]*scale+0.5)),
                col, lw)
    lw = img.shape[0]//200
    if config_name == 'hand':
        lw = img.shape[0]//500
    lw = max(lw, 1)
    for i in range(len(points)):
        x, y = points[i][0]*scale, points[i][1]*scale
        if x < 0 or y < 0 or x > 10000 or y > 10000:
            continue
        if i >= 25 and config_name in ['bodyhand', 'total']:
            lw = max(img.shape[0]//400, 1)
        c = points[i][-1]
        if c > 0.01:
            col = get_rgb(pid)
            if len(points) == 1:
                _lw = max(0, int(lw * lw_factor))
                cv2.circle(img, (int(x+0.5), int(y+0.5)), _lw*2, col, lw*2)
                plot_cross(img, int(x+0.5), int(y+0.5), width=_lw, col=col, lw=lw*2)
            else:
                cv2.circle(img, (int(x+0.5), int(y+0.5)), lw*2, col, -1)
            if vis_conf:
                cv2.putText(img, '{:.1f}'.format(c), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, col, 2)

def plot_keypoints_total(img, annots, scale, pid_offset=0):
    _lw = img.shape[0] // 150
    for annot in annots:
        pid = annot['personID'] + pid_offset
        for key in ['keypoints', 'handl2d', 'handr2d']:
            if key not in annot.keys(): continue
            if key in ['handl2d', 'handr2d', 'face2d']:
                lw = _lw // 2
            else:
                lw = _lw
            lw = max(lw, 1)
            plot_keypoints_auto(img, annot[key], pid, vis_conf=False, use_limb_color=False, scale=scale, lw=lw)
        if 'bbox' not in annot.keys() or (annot['bbox'][0] < 0 or annot['bbox'][0] > 10000):
            continue
        plot_bbox(img, annot['bbox'], pid, scale=scale, vis_id=True)
    return img
def plot_points2d(img, points2d, lines, lw=-1, col=(0, 255, 0), putText=True, style='+'):
    # draw the 2D points onto the image
    if points2d.shape[1] == 2:
        points2d = np.hstack([points2d, np.ones((points2d.shape[0], 1))])
    if lw == -1:
        lw = img.shape[0]//200
    for i, (x, y, v) in enumerate(points2d):
        if v < 0.01:
            continue
        c = col
        if '+' in style:
            plot_cross(img, x, y, width=10, col=c, lw=lw*2)
        if 'o' in style:
            cv2.circle(img, (int(x), int(y)), 10, c, lw*2)
            cv2.circle(img, (int(x), int(y)), lw, c, lw)
        if putText:
            c = col[::-1]
            font_scale = img.shape[0]/1000
            cv2.putText(img, '{}'.format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, c, 2)
    for i, j in lines:
        if points2d[i][2] < 0.01 or points2d[j][2] < 0.01:
            continue
        plot_line(img, points2d[i], points2d[j], max(1, lw//2), col)

row_col_ = {
    2: (2, 1),
    7: (2, 4),
    8: (2, 4),
    9: (3, 3),
    26: (4, 7)
}

row_col_square = {
    2: (2, 1),
    7: (3, 3),
    8: (3, 3),
    9: (3, 3),
    26: (5, 5)
}

def get_row_col(l, square):
    if square and l in row_col_square.keys():
        return row_col_square[l]
    if l in row_col_.keys():
        return row_col_[l]
    else:
        from math import sqrt
        row = int(sqrt(l) + 0.5)
        col = int(l / row + 0.5)
        if row*col < l:
            col = col + 1
        if row > col:
            row, col = col, row
        return row, col

def merge(images, row=-1, col=-1, resize=False, ret_range=False, square=False, **kwargs):
    if row == -1 and col == -1:
        row, col = get_row_col(len(images), square)
    height = images[0].shape[0]
    width = images[0].shape[1]
    # special case
    if height > width:
        if len(images) == 3:
            row, col = 1, 3
    if len(images[0].shape) > 2:
        ret_img = np.zeros((height * row, width * col, images[0].shape[2]), dtype=np.uint8) + 255
    else:
        ret_img = np.zeros((height * row, width * col), dtype=np.uint8) + 255
    ranges = []
    for i in range(row):
        for j in range(col):
            if i*col + j >= len(images):
                break
            img = images[i * col + j]
            # resize the image size
            img = cv2.resize(img, (width, height))
            ret_img[height * i: height * (i+1), width * j: width * (j+1)] = img
            ranges.append((width*j, height*i, width*(j+1), height*(i+1)))
    if resize:
        min_height = 1000
        if ret_img.shape[0] > min_height:
            scale = min_height/ret_img.shape[0]
            ret_img = cv2.resize(ret_img, None, fx=scale, fy=scale)
    if ret_range:
        return ret_img, ranges
    return ret_img
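
merge tiles a list of same-size images into one grid; a sketch (not part of the file) with dummy frames standing in for per-camera views:

views = [np.zeros((720, 1280, 3), dtype=np.uint8) for _ in range(8)]
grid = merge(views, resize=True)   # 8 views -> a 2 x 4 grid, then capped at 1000 px height
cv2.imwrite('merged_views.jpg', grid)
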
6
requirements.txt
Normal file
@ -0,0 +1,6 @@
numpy~=1.24.4
opencv-python~=4.8.1.78
tqdm~=4.66.1
open3d~=0.18.0
pyrender~=0.1.45
trimesh~=4.5.3
148
vis_camera_by_open3d.py
Normal file
@ -0,0 +1,148 @@
# this script is used to visualize the camera locations and the point clouds
# from easymocap

'''
@ Date: 2022-09-26 16:32:19
@ Author: Qing Shuai
@ Mail: s_q@zju.edu.cn
@ LastEditors: Qing Shuai
@ LastEditTime: 2022-10-17 13:05:28
@ FilePath: /EasyMocapPublic/apps/calibration/vis_camera_by_open3d.py
'''
import open3d as o3d
import os
import cv2
import numpy as np
from easymocap_tools.o3dwrapper import Vector3dVector, create_pcd
from easymocap_tools.vis_base import generate_colorbar

from calib_tools import read_json

from calib_tools import DataPath

def read_cameras(extri_path):
    extri_data = read_json(extri_path)
    cameras = {}
    for cam_name, extri in extri_data.items():
        extri = extri_data[cam_name]
        R = np.array(extri['R']).reshape(3, 3)
        T = np.array(extri['T']).reshape(3, 1)
        Rvec = None
        if 'Rvec' in extri:
            Rvec = np.array(extri['Rvec']).reshape(3, 1)
        cameras[cam_name] = {
            'R': R,
            'T': T,
            'Rvec': Rvec if Rvec is not None else ''
        }
    return cameras
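
read_cameras only assumes that extri.json maps each camera name to a dict with a 3x3 'R', a length-3 'T' and an optional 'Rvec'; an illustrative, made-up layout of DataPath.extri_json_path that satisfies this:

example_extri = {
    "cam0": {"R": [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
             "T": [0.0, 0.0, 2.5],
             "Rvec": [0.0, 0.0, 0.0]},
    "cam1": {"R": [[0, -1, 0], [1, 0, 0], [0, 0, 1]],
             "T": [0.5, 0.0, 2.5]},
}
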
# apply a global transformation to the rotation and translation of every camera,
# i.e. move all camera R/T matrices into a new global coordinate frame
def transform_cameras(cameras):
    dims = {'x': 0, 'y': 1, 'z': 2}
    # initialize the global transformation
    R_global = np.eye(3)
    T_global = np.zeros((3, 1))
    # order: trans0, rot, trans
    # initial translation
    if len(args.trans0) == 3:
        trans = np.array(args.trans0).reshape(3, 1)
        T_global += trans
    # apply the rotations, e.g. args.rot = ['x', 90, 'y', 45]
    if len(args.rot) > 0:
        for i in range(len(args.rot) // 2):  # iterate over (axis, angle) pairs
            dim = args.rot[2 * i]  # rotation axis ('x', 'y' or 'z')
            val = float(args.rot[2 * i + 1])  # rotation angle in degrees
            rvec = np.zeros((3,))  # 3D zero vector used as the rotation vector
            # build the rotation vector
            rvec[dims[dim]] = np.deg2rad(val)  # degrees -> radians on the chosen axis
            # convert to a rotation matrix
            R = cv2.Rodrigues(rvec)[0]  # OpenCV Rodrigues: rotation vector -> rotation matrix
            # accumulate the rotation
            R_global = R @ R_global  # left-multiply the current rotation onto the global one
        T_global = R_global @ T_global  # rotate the accumulated translation as well
    # translate the cameras
    if len(args.trans) == 3:
        trans = np.array(args.trans).reshape(3, 1)
        T_global += trans
    trans = np.eye(4)  # assemble the 4x4 global transformation
    trans[:3, :3] = R_global
    trans[:3, 3:] = T_global
    # apply the transformation to each camera
    for key, cam in cameras.items():
        RT = np.eye(4)
        RT[:3, :3] = cam['R']
        RT[:3, 3:] = cam['T']
        RT = RT @ np.linalg.inv(trans)
        cam.pop('Rvec', '')
        cam['R'] = RT[:3, :3]
        cam['T'] = RT[:3, 3:]
    return cameras, trans
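
As a concrete illustration (not part of the script), the global transform built for --rot z 90 --trans 0 0 1 would amount to:

rvec = np.zeros(3)
rvec[2] = np.deg2rad(90)                      # rotate 90 degrees about z
R_global = cv2.Rodrigues(rvec)[0]
T_global = R_global @ np.zeros((3, 1)) + np.array([[0.], [0.], [1.]])
trans = np.eye(4)
trans[:3, :3] = R_global
trans[:3, 3:] = T_global
# each camera's 4x4 [R|T] is then right-multiplied by np.linalg.inv(trans), as above
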

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    # parser.add_argument('--subs', type=str, default=[], nargs='+')  # subset of cameras to visualize; to be removed later
    # parser.add_argument('--pcd', type=str, default=[], nargs='+')  # list of point-cloud / mesh files to load
    parser.add_argument('--trans0', type=float, nargs=3,
        default=[], help='translation')
    parser.add_argument('--rot', type=str, nargs='+',
        default=[], help='control the rotation')
    parser.add_argument('--trans', type=float, nargs=3,
        default=[], help='translation')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()

    grids = []
    cameras = read_cameras(DataPath.extri_json_path)
    cameras, trans = transform_cameras(cameras)

    print(repr(trans))
    # for pcd in args.pcd:
    #     if not os.path.exists(pcd):
    #         print(pcd, ' not exist')
    #         continue
    #     if pcd.endswith('.npy'):
    #         data = np.load(pcd)
    #         points = data[:, :3]
    #         colors = data[:, 3:]
    #         points = (trans[:3, :3] @ points.T + trans[:3, 3:]).T
    #         p = create_pcd(points, colors=data[:, 3:])
    #         grids.append(p)
    #     elif pcd.endswith('.ply'):
    #         print('Load pcd: ', pcd)
    #         p = o3d.io.read_point_cloud(pcd)
    #         print(p)
    #         grids.append(p)
    #     elif pcd.endswith('.pkl'):
    #         p = o3d.io.read_triangle_mesh(pcd)
    #         grids.append(p)
    #     elif pcd.endswith('.obj'):
    #         p = o3d.io.read_triangle_mesh(pcd)
    #         vertices = np.asarray(p.vertices)
    #         print(vertices.shape)
    #         print(vertices.min(axis=0))
    #         print(vertices.max(axis=0))
    #         grids.append(p)

    center = o3d.geometry.TriangleMesh.create_coordinate_frame(
        size=1, origin=[0, 0, 0])  # coordinate frame at the world origin
    grids.append(center)
    colorbar = generate_colorbar(len(cameras), rand=False)  # generate a color bar
    camera_locations = []
    for ic, (cam, camera) in enumerate(cameras.items()):
        # if len(args.subs) > 0 and cam not in args.subs: continue
        center = - camera['R'].T @ camera['T']  # camera center in world coordinates
        print('{}: {}'.format(cam, center.T[0]))
        camera_locations.append(center)
        # create and draw a coordinate frame for each camera
        center = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=0.5, origin=[center[0, 0], center[1, 0], center[2, 0]])
        center.rotate(camera['R'].T)
        grids.append(center)
    camera_locations = np.stack(camera_locations).reshape(-1, 3)
    o3d.visualization.draw_geometries(grids)