calib intrinsics ok

davidpagnon 2023-07-23 23:38:50 +02:00
parent b2f5edb743
commit 054c8373d6
6 changed files with 179 additions and 77 deletions

.gitignore

@@ -3,4 +3,5 @@
logs.txt*
*.egg-info/
dist/
**/*.avi
**/*.mp4

Config.toml

@@ -54,7 +54,7 @@ calibration_type = 'convert' # 'convert' or 'calculate'
[calibration.calculate.board.extrinsics] # camera placement, needs to be done every time
extrinsics_board_type = 'checkerboard' # 'checkerboard', 'scene' ('charucoboard' not supported yet)
# 'board' should be large enough to be detected when laid on the floor.
# 'scene' involves manually clicking any point of know coordinates on scene. Usually more accurate
# 'scene' involves manually clicking any point of known coordinates on the scene. Usually more accurate if points are spread out
calculate_extrinsic = true # true or false (lowercase)
extrinsics_extension = 'avi' # any video or image extension
@@ -62,7 +62,7 @@ calibration_type = 'convert' # 'convert' or 'calculate'
# if extrinsics_board_type = 'checkerboard' or 'charucoboard'
extrinsics_corners_nb = [6,9] # [H,W] rather than [w,h]
extrinsics_square_size = 80 # mm # [h,w] if square is actually a rectangle
extrinsics_marker_size = 60 # mm # only checked if 'charucoboard'
extrinsics_marker_size = 60 # mm # only checked if 'charucoboard' (not supported yet)
extrinsics_aruco_dict = 'DICT_6X6_250' # only checked if 'charucoboard' # see https://docs.opencv.org/3.4/dc/df7/dictionary_8hpp.html
# if extrinsics_board_type = 'scene'
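For context, the `[H,W]` corner count and square size above translate into a grid of 3D object points in the usual OpenCV fashion. A minimal sketch of that mapping (variable names are illustrative, not taken from the Pose2Sim source):

```python
import numpy as np

# Illustrative values mirroring the Config.toml keys above
corners_nb = [6, 9]   # [H, W] internal corners
square_size = 80      # mm (a single value; a rectangle would use [h, w])

# One planar 3D point per internal corner (z = 0), in mm
objp = np.zeros((corners_nb[0] * corners_nb[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:corners_nb[0], 0:corners_nb[1]].T.reshape(-1, 2) * square_size
```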


@@ -0,0 +1,30 @@
[cam_1]
name = "cam_01"
size = [ 1920.0, 1080.0]
matrix = [ [ 1203.246243533484, 0.0, 959.5], [ 0.0, 1198.3711959207449, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ -0.044787602246347216, -0.5273833010005556, 0.009327766682582903, 0.00034371130233083687]
rotation = [ 0, 0, 0]
translation = [ 0, 0, 0]
fisheye = false
[cam_2]
name = "cam_02"
size = [ 1920.0, 1080.0]
matrix = [ [ 1201.3535871331092, 0.0, 959.5], [ 0.0, 1296.9584035037935, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ 0.016997873879159856, 0.15076005731825853, -0.07849748162325841, 0.0031187917923049886]
rotation = [ 0, 0, 0]
translation = [ 0, 0, 0]
fisheye = false
[cam_3]
name = "cam_03"
size = [ 1920.0, 1080.0]
matrix = [ [ 1375.7450722547337, 0.0, 959.5], [ 0.0, 1367.6433832166495, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ -0.008547555195961013, -0.1321001559843561, 0.002017158533123475, 0.0033830082027901435]
rotation = [ 0, 0, 0]
translation = [ 0, 0, 0]
fisheye = false
[metadata]
adjusted = false
error = 0.0
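A calibration file in this format can be loaded back into OpenCV-ready arrays in a few lines. A minimal sketch, assuming the third-party `toml` package and a hypothetical file name:

```python
import numpy as np
import toml  # third-party package: pip install toml

calib = toml.load('Calib.toml')  # hypothetical path
for cam, params in calib.items():
    if cam == 'metadata':
        continue
    K = np.array(params['matrix'])          # 3x3 intrinsic matrix
    dist = np.array(params['distortions'])  # 4 distortion coefficients
    print(params['name'], 'fx =', K[0, 0], 'fy =', K[1, 1])
```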


@@ -1,8 +1,8 @@
[cam_1]
name = "cam_01"
size = [ 1920.0, 1080.0]
matrix = [ [ 1203.246243533484, 0.0, 959.5], [ 0.0, 1198.3711959207449, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ -0.044787602246347216, -0.5273833010005556, 0.009327766682582903, 0.00034371130233083687]
matrix = [ [ 1206.3039773711387, 0.0, 959.5], [ 0.0, 1201.3900241712472, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ -0.048195523248209984, -0.49433115008284967, 0.009146097668361275, 0.00023449575614186346]
rotation = [ 0, 0, 0]
translation = [ 0, 0, 0]
fisheye = false
@@ -19,8 +19,8 @@ fisheye = false
[cam_3]
name = "cam_03"
size = [ 1920.0, 1080.0]
matrix = [ [ 1375.7450722547337, 0.0, 959.5], [ 0.0, 1367.6433832166495, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ -0.008547555195961013, -0.1321001559843561, 0.002017158533123475, 0.0033830082027901435]
matrix = [ [ 1334.9930217175306, 0.0, 959.5], [ 0.0, 1527.484911404606, 539.5], [ 0.0, 0.0, 1.0]]
distortions = [ 0.03903616767043438, -0.21066010320288212, -0.024219751825038247, 0.004221082947531837]
rotation = [ 0, 0, 0]
translation = [ 0, 0, 0]
fisheye = false

calibration.py

@@ -293,7 +293,7 @@ def calibrate_intrinsics(calib_dir, intrinsics_config_dict):
intrinsics_board_type = intrinsics_config_dict.get('intrinsics_board_type')
intrinsics_extension = intrinsics_config_dict.get('intrinsics_extension')
extract_every_N_sec = intrinsics_config_dict.get('extract_every_N_sec')
overwrite_extraction=False
overwrite_extraction = False
show_detection_intrinsics = intrinsics_config_dict.get('show_detection_intrinsics')
intrinsics_corners_nb = intrinsics_config_dict.get('intrinsics_corners_nb')
intrinsics_square_size = intrinsics_config_dict.get('intrinsics_square_size')
@@ -319,7 +319,7 @@ def calibrate_intrinsics(calib_dir, intrinsics_config_dict):
cap.read()
if cap.read()[0] == False:
raise ValueError('No video in the folder or wrong extension.')
## extract frames from video
# extract frames from video
extract_frames(img_vid_files[0], extract_every_N_sec, overwrite_extraction)
img_vid_files = glob.glob(os.path.join(calib_dir, 'intrinsics', cam, f'*.png'))
img_vid_files = sorted(img_vid_files, key=lambda c: [int(n) for n in re.findall(r'\d+', c)])
@@ -328,10 +328,16 @@ def calibrate_intrinsics(calib_dir, intrinsics_config_dict):
# find corners
for img_path in img_vid_files:
imgp_confirmed, objp_confirmed = findCorners(img_path, intrinsics_corners_nb, show_detection_intrinsics=show_detection_intrinsics, objp=objp)
if isinstance(imgp_confirmed, np.ndarray):
objpoints.append(objp_confirmed)
imgpoints.append(imgp_confirmed)
if show_detection_intrinsics == True:
imgp_confirmed, objp_confirmed = findCorners(img_path, intrinsics_corners_nb, objp=objp, show=show_detection_intrinsics)
if isinstance(imgp_confirmed, np.ndarray):
imgpoints.append(imgp_confirmed)
objpoints.append(objp_confirmed)
else:
imgp_confirmed = findCorners(img_path, intrinsics_corners_nb, objp=objp, show=show_detection_intrinsics)
if isinstance(imgp_confirmed, np.ndarray):
imgpoints.append(imgp_confirmed)
objpoints.append(objp)
if len(imgpoints) <= 10:
logging.info(f'Corners were detected only on {len(imgpoints)} images for camera {cam}. Calibration of intrinsic parameters may not be accurate with less than 20 good images of the board.')
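For readers unfamiliar with the underlying OpenCV calls, corner detection of this kind typically pairs `cv2.findChessboardCorners` with sub-pixel refinement. A minimal sketch of the pattern (not the actual `findCorners` implementation):

```python
import cv2

def detect_corners(img_path, corners_nb):
    # Detect internal checkerboard corners, then refine them to sub-pixel accuracy
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, tuple(corners_nb), None)
    if not ret:
        return None
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    return cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
```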
@@ -388,7 +394,7 @@ def calibrate_extrinsics(calib_dir, extrinsics_config_dict):
if extrinsics_board_type == 'checkerboard':
# find corners
imgp = findCorners(img_vid_files[0], extrinsics_corners_nb, show_detection_extrinsics)
imgp = findCorners(img_vid_files[0], extrinsics_corners_nb, objp=[], show=show_detection_extrinsics)
# CHANGE FINDCORNERS: 'O' for okay and next, 'D' for delete, 'C' for click
# DEFINE OBJECT POINTS
@@ -536,7 +542,7 @@ def calibrate_extrinsics(calib_dir, extrinsics_config_dict):
imgpoints.append(imgp)
# Calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1],
r, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1],
None, None, flags=(cv2.CALIB_FIX_K3 + cv2.CALIB_FIX_PRINCIPAL_POINT))
h, w = [np.float32(i) for i in img.shape[:-1]]
print(ret, repr(mtx), repr(dist))
@@ -557,7 +563,6 @@ def calibrate_extrinsics(calib_dir, extrinsics_config_dict):
R = [np.array(cv2.Rodrigues(r)[0]).flatten() for r in R]
T = np.array(T)/1000
return ret, C, S, D, K, R, T
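As a side note, the RMS value returned by `cv2.calibrateCamera` can be recomputed per image with `cv2.projectPoints`, which helps spot bad calibration views. A minimal sketch with hypothetical variable names:

```python
import cv2

def per_view_errors(objpoints, imgpoints, rvecs, tvecs, mtx, dist):
    # Approximate mean reprojection error (px) for each calibration image
    errors = []
    for objp, imgp, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
        proj, _ = cv2.projectPoints(objp, rvec, tvec, mtx, dist)
        errors.append(cv2.norm(imgp, proj, cv2.NORM_L2) / len(proj))
    return errors
```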
@@ -591,7 +596,7 @@ def extract_frames(video_path, extract_every_N_sec=1, overwrite_extraction=False
break
def findCorners(img_path, corner_nb, show=True, objp=[]):
def findCorners(img_path, corner_nb, objp=[], show=True):
'''
Find corners in the photo of a checkerboard.
Press 'Y' to accept detection, 'N' to dismiss this image, 'C' to click points by hand.
@@ -608,11 +613,11 @@ def findCorners(img_path, corner_nb, show=True, objp=[]):
- img_path: path to image (or video)
- corner_nb: [H, W] internal corners in checkerboard: list of two integers [9,6]
- optional: show: choose whether to show corner detections
- optionnal: objp: array [[3d corner coordinates]]
- optional: objp: array [3d corner coordinates]
OUTPUTS:
- imgp_confirmed: array of [[2d corner coordinates]]
- only if objp!=[]: objp_confirmed: array of [[3d corner coordinates]]
- only if objp!=[]: objp_confirmed: array of [3d corner coordinates]
'''
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # stop refining after 30 iterations or if error less than 0.001px
@@ -641,14 +646,16 @@ def findCorners(img_path, corner_nb, show=True, objp=[]):
# Add corner index
for i, corner in enumerate(imgp):
x, y = corner.ravel()
cv2.putText(img, str(i), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 2)
cv2.putText(img, str(i), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1)
cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 2)
cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1)
# Visualizer and key press event handler
for var_to_delete in ['imgp_confirmed', 'objp_confirmed']:
if var_to_delete in globals():
del var_to_delete
del globals()[var_to_delete]
imgp_objp_confirmed = imgp_objp_visualizer_clicker(img, imgp=imgp, objp=objp, img_path=img_path)
else:
imgp_objp_confirmed = imgp
# If corners are not found, dismiss or click points by hand
else:
@@ -656,6 +663,8 @@ def findCorners(img_path, corner_nb, show=True, objp=[]):
if show:
# Visualizer and key press event handler
imgp_objp_confirmed = imgp_objp_visualizer_clicker(img, imgp=[], objp=objp, img_path=img_path)
else:
imgp_objp_confirmed = []
# if len(imgp_objp_confirmed) == 1:
# imgp_confirmed = imgp_objp_confirmed
@@ -684,12 +693,12 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
INPUTS:
- img: image opened with openCV
- optional: imgp: detected image points, to be accepted or not. Array of [[2d corner coordinates]]
- optionnal: objp: array of [[3d corner coordinates]]
- optionnal: objp: array of [3d corner coordinates]
- optional: img_path: path to image
OUTPUTS:
- imgp_confirmed: image points that have been correctly identified. array of [[2d corner coordinates]]
- only if objp!=[]: objp_confirmed: array of [[3d corner coordinates]]
- only if objp!=[]: objp_confirmed: array of [3d corner coordinates]
'''
def on_key(event):
@@ -707,42 +716,47 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
# If objp is given, objp_confirmed is returned in addition
if 'scat' not in globals():
imgp_confirmed = imgp
objp_confirmed = objp
else:
imgp_confirmed = [imgp.astype('float32') for imgp in imgp_confirmed]
objp_confirmed = np.array(objp_confirmed)
# close all, del all global variables except imgp_confirmed and objp_confirmed
plt.close('all')
for var_to_delete in ['events', 'count', 'scat', 'fig_3d', 'ax_3d', 'objp_confirmed_notok']:
if var_to_delete in globals():
del var_to_delete
if objp == []:
if len(objp) == 0:
if 'objp_confirmed' in globals():
del objp_confirmed
if event.key == 'n' or event.key == 'q':
# If 'n', close all and return nothing
plt.close('all')
for var_to_delete in ['events', 'count', 'scat', 'fig_3d', 'ax_3d', 'objp_confirmed_notok', 'imgp_confirmed', 'objp_confirmed']:
if var_to_delete in globals():
del var_to_delete
imgp_confirmed = []
if len(objp) == 0:
objp_confirmed = []
if event.key == 'c':
# If 'c', allows retrieving imgp_confirmed by clicking them on the image
scat = ax.scatter([],[],marker='+',color='g')
scat = ax.scatter([],[],s=100,marker='+',color='g')
plt.connect('button_press_event', on_click)
# If objp is given, display 3D object points in black
if objp != [] and not plt.fignum_exists(2):
if len(objp) != 0 and not plt.fignum_exists(2):
fig_3d = plt.figure()
fig_3d.tight_layout()
fig_3d.canvas.manager.set_window_title('Object points to be clicked')
ax_3d = fig_3d.add_subplot(projection='3d')
plt.rc('xtick', labelsize=5)
plt.rc('ytick', labelsize=5)
for i, (xs,ys,zs) in enumerate(np.float32(objp)):
ax_3d.scatter(xs,ys,zs, marker='.', color='k')
ax_3d.text(xs,ys,zs, f'{str(i+1)}', size=10, zorder=1, color='k')
set_axes_equal(ax_3d)
if np.all(objp[:,2] == 0):
ax_3d.view_init(elev=90, azim=-90)
fig_3d.show()
if event.key == 'h':
# If 'h', indicates that one of the objp is not visible on image
# Displays it in red on 3D plot
if objp != [] and 'ax_3d' in globals():
if len(objp) != 0 and 'ax_3d' in globals():
count = [0 if 'count' not in globals() else count+1][0]
if 'events' not in globals():
# retrieve first objp_confirmed_notok and plot 3D
@@ -752,10 +766,11 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
fig_3d.canvas.draw()
elif count == len(objp)-1:
# if all objp have been clicked or indicated as not visible, close all
imgp_confirmed = [imgp.astype('float32') for imgp in imgp_confirmed]
plt.close('all')
for var_to_delete in ['events', 'count', 'scat', 'fig_3d', 'ax_3d', 'objp_confirmed_notok']:
if var_to_delete in globals():
del var_to_delete
del globals()[var_to_delete]
else:
# retrieve other objp_confirmed_notok and plot 3D
events.append(event)
@@ -791,7 +806,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
plt.draw()
# Add clicked point to 3D object points if given
if objp != []:
if len(objp) != 0:
count = [0 if 'count' not in globals() else count+1][0]
if count==0:
# retrieve objp_confirmed and plot 3D
@@ -801,11 +816,12 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
elif count == len(objp)-1:
# retrieve objp_confirmed
objp_confirmed = [[objp[count]] if 'objp_confirmed' not in globals() else objp_confirmed+[objp[count]]][0]
imgp_confirmed = [imgp.astype('float32') for imgp in imgp_confirmed]
# close all, delete all
plt.close('all')
for var_to_delete in ['events', 'count', 'scat', 'scat_3d', 'fig_3d', 'ax_3d', 'objp_confirmed_notok']:
if var_to_delete in globals():
del var_to_delete
del globals()[var_to_delete]
else:
# retrieve objp_confirmed and plot 3D
objp_confirmed = [[objp[count]] if 'objp_confirmed' not in globals() else objp_confirmed+[objp[count]]][0]
@@ -825,7 +841,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
plt.draw()
# Remove last point from imgp_confirmed
imgp_confirmed = imgp_confirmed[:-1]
if objp != []:
if len(objp) != 0:
if count >= 1: count -= 1
# Remove last point from objp_confirmed
objp_confirmed = objp_confirmed[:-1]
@@ -836,7 +852,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
# If last event was 'h' key
elif events[-1].key == 'h':
if objp != []:
if len(objp) != 0:
if count >= 1: count -= 1
# Remove last point from objp_confirmed_notok
objp_confirmed_notok = objp_confirmed_notok[:-1]
@@ -899,7 +915,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
ax.axis("off")
for corner in imgp:
x, y = corner.ravel()
cv2.drawMarker(img, (int(x),int(y)), (0,255,0), cv2.MARKER_CROSS, 15, 2)
cv2.drawMarker(img, (int(x),int(y)), (128,128,128), cv2.MARKER_CROSS, 10, 2)
ax.imshow(img)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
@@ -908,7 +924,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
# Allow for zoom and pan in image
zoom_factory(ax)
ph = panhandler(fig, button=2)
# Handles key presses to Accept, dismiss, or click points by hand
cid = fig.canvas.mpl_connect('key_press_event', on_key)
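The interactive confirmation above relies on Matplotlib's event system; the general pattern, independent of the Pose2Sim specifics, is a callback registered on the canvas. A minimal sketch (the key bindings are illustrative):

```python
import matplotlib.pyplot as plt

def on_key(event):
    # e.g. accept on 'y', switch to manual clicking on 'c'
    if event.key == 'y':
        plt.close(event.canvas.figure)
    elif event.key == 'c':
        event.canvas.mpl_connect('button_press_event',
                                 lambda e: print(e.xdata, e.ydata))

fig, ax = plt.subplots()
fig.canvas.mpl_connect('key_press_event', on_key)
plt.show()
```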
@@ -918,6 +934,10 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
warnings.simplefilter("ignore")
plt.rcParams['toolbar'] = 'toolmanager'
for var_to_delete in ['events', 'count', 'scat', 'fig_3d', 'ax_3d', 'objp_confirmed_notok']:
if var_to_delete in globals():
del globals()[var_to_delete]
if 'imgp_confirmed' in globals() and 'objp_confirmed' in globals():
return imgp_confirmed, objp_confirmed
elif 'imgp_confirmed' in globals() and not 'objp_confirmed' in globals():
@@ -927,6 +947,9 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
def calib_points_fun(config):
'''
Not implemented yet
'''
pass

README.md

@@ -145,7 +145,7 @@ Results are stored as .trc files in the `Demo/pose-3d` directory.
## 2D pose estimation
> _**Estimate 2D pose from images with OpenPose or another pose estimation solution.**_ \
N.B.: First film a short static pose that will be used for scaling the OpenSim model (A-pose for example), and then film your motions of interest
N.B.: First film a short static pose that will be used for scaling the OpenSim model (A-pose for example), and then film your motions of interest.\
N.B.: The names of your camera folders must follow the same order as in the calibration file, and must end with '_json'.
### With OpenPose:
@@ -201,9 +201,6 @@ If you need to detect specific points on a human being, an animal, or an object,
```
* Make sure you change the `pose_model` and the `tracked_keypoint` in the `User\Config.toml` file.
<img src="Content/Pose2D.png" width="760">
N.B.: Markers are not needed in Pose2Sim and were used here for validation
@@ -236,13 +233,11 @@ N.B.: Markers are not needed in Pose2Sim and were used here for validation
</details>
## Camera calibration
> _**Convert a preexisting calibration file, or calculate intrinsic and extrinsic parameters from scratch.**_ \
> _Intrinsic parameters:_ camera properties (focal length, optical center, distortion), usually need to be calculated only once in their lifetime\
> _Extrinsic parameters:_ camera placement in space (position and orientation), need to be calculated every time a camera is moved
> _**Convert a preexisting calibration file, or calculate intrinsic and extrinsic parameters from scratch.**_
### Convert file
If you already have a calibration file, set `calibration_type` type to 'convert' in your `Config.toml` file.
If you already have a calibration file, set `calibration_type` to `convert` in your `Config.toml` file.
- **From Qualisys:**
- Export calibration to `.qca.txt` within QTM
- Copy it in the `calibration` folder
@@ -255,27 +250,35 @@ If you already have a calibration file, set `calibration_type` type to 'convert'
### Calculate from scratch
> *N.B.:* Try the calibration tool on the Demo by changing `calibration_type` to `calculate` instead of `convert` in `Config.toml`.\
Then try changing `extrinsics_board_type` from `checkerboard` to `scene`.
- **With a board:**
- **Calculate intrinsic parameters:**
> *N.B.:* _Intrinsic parameters:_ camera properties (focal length, optical center, distortion); they usually need to be calculated only once in a camera's lifetime\
> *N.B.:* If you already calculated intrinsic parameters earlier, you can skip this step: copy your intrinsic parameters (`size`, `matrix`, and `distortions`) into a new `Calib*.toml` file, and set `overwrite_intrinsics` to false (a sketch of this copy follows this list). Run the Demo to obtain an example Calib.toml file.
- Create a folder for each camera in your `calibration\intrinsics` folder.
- For each camera, film a checkerboard or a charucoboard. Either the board or the camera can be moved.
- Adjust parameters in the `Config.toml` file.
- Make sure that the board:\
is filmed from different angles, covers a large part of the video frame, and is in focus.\
is flat, without reflections, surrounded by a white border, that it is not rotationally invariant (Nrows ≠ Ncols, and Nrows odd if Ncols even).
*N.B.:* If you already calculated intrinsic parameters earlier, you can skip this step. Create a Calib*.toml file following the same model as earlier, and copy there your intrinsic parameters (extrinsic parameters can be randomly filled).
is flat, without reflections, surrounded by a white border, and is not rotationally invariant (Nrows ≠ Ncols, and Nrows odd if Ncols even).
- **Calculate extrinsic parameters:**
> *N.B.:* _Extrinsic parameters:_ camera placement in space (position and orientation); they need to be recalculated every time a camera is moved
- Create a folder for each camera in your `calibration\extrinsics` folder.
- Once your cameras are in place, shortly film a board laid on the floor or the raw scene \
(only one frame is needed, but do not just take one single photo unless you are sure it does not change the image format).
(only one frame is needed, but do not just take a photo unless you are sure it does not change the image format).
- Adjust parameters in the `Config.toml` file.
- If you film a board:\
Make sure that it is seen by all cameras. \
It should preferably be larger than the one used for intrinsics, as results will not be very accurate outside of the covered zone.
- If you film the raw scene (potentially more accurate):\
Manually measure the 3D coordinates of 10 or more points in the scene (tiles, lines on wall, treadmill, etc). They should cover as large of a space as possible.\
- If you film the raw scene (potentially more accurate if points are spread out):\
Manually measure the 3D coordinates of 10 or more points in the scene (tiles, lines on wall, boxes, treadmill dimensions, etc). These points should be as spread out as possible.\
Then you will click on the corresponding image points for each view (a sketch of the underlying computation follows this list).
- **With points:**
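Regarding the N.B. above on reusing previously calculated intrinsic parameters, the copy can be scripted. A minimal sketch, assuming the third-party `toml` package and hypothetical file names:

```python
import toml  # third-party package: pip install toml

old = toml.load('Calib_old.toml')  # hypothetical: a previous calibration
new = {}
for cam, params in old.items():
    if cam == 'metadata':
        continue
    new[cam] = dict(params)  # keeps size, matrix, distortions
    # Extrinsics will be recalculated, so placeholders are fine here
    new[cam]['rotation'] = [0, 0, 0]
    new[cam]['translation'] = [0, 0, 0]

with open('Calib_new.toml', 'w') as f:
    toml.dump(new, f)
```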
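As for the scene-based extrinsic calibration above, pairing measured 3D scene points with their clicked 2D image points is a classic perspective-n-point problem. A minimal sketch of the kind of computation involved (hypothetical data, not the exact Pose2Sim code path):

```python
import cv2
import numpy as np

# Measured scene points (mm) and their clicked pixel coordinates
# (hypothetical values; use 10 or more well-spread points in practice)
obj_points = np.float32([[0, 0, 0], [600, 0, 0], [600, 400, 0], [0, 400, 0],
                         [0, 0, 250], [600, 0, 250]])
img_points = np.float32([[410, 820], [1350, 835], [1380, 560],
                         [400, 545], [415, 300], [1355, 310]])
K = np.float32([[1200, 0, 960], [0, 1200, 540], [0, 0, 1]])  # from intrinsics
dist = np.zeros(4)

ok, rvec, tvec = cv2.solvePnP(obj_points, img_points, K, dist)
R, _ = cv2.Rodrigues(rvec)  # rvec/tvec map scene coordinates to the camera frame
```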
@@ -736,27 +739,72 @@ If you use this code or data, please cite [Pagnon et al., 2022b](https://doi.org
I would happily welcome any proposal for new features, code improvement, and more!\
If you want to contribute to Pose2Sim, please follow [this guide](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) on how to fork, modify and push code, and submit a pull request. I would appreciate it if you provided as much useful information as possible about how you modified the code, and a rationale for why you're making this pull request. Please also specify on which operating system and on which python version you have tested the code.
- Supervised my PhD: @lreveret (INRIA, Université Grenoble Alpes), and @mdomalai (Université de Poitiers).
- Provided the Demo data: @aaiaueil from Université Gustave Eiffel.
</br>
*Here is a to-do list, for general guidance purposes only:*
> <li> <b>pose:</b> Support <a href='https://github.com/google/mediapipe/blob/master/docs/solutions/holistic.md'>Mediapipe holistic</a> for pronation/supination</li>
> <li> <b>calibration:</b> Calculate calibration with points rather than board. (1) SBA calibration with wand (cf <a href='https://argus.web.unc.edu/'>Argus</a>, see converter <a href='https://github.com/backyardbiomech/DLCconverterDLT/blob/master/DLTcameraPosition.py'>here</a>), or (2) with <a href='https://ietresearch.onlinelibrary.wiley.com/doi/full/10.1049/cvi2.12130'>OpenPose keypoints</a>. Set world reference frame in the end.</li>
> <li> <b>synchronization:</b> Synchronize cameras on 2D keypoint speeds.</li>
> <li> <b>personAssociation:</b> Multiple persons association. See <a href="https://arxiv.org/pdf/1901.04111.pdf">Dong 2021</a>. With a neural network instead of brute force?</li>
> <li> <b>triangulation:</b> Multiple persons kinematics (output multiple .trc coordinates files).</li>
> <li> <b>GUI:</b> Blender add-on, or webapp (e.g., with <a href="https://napari.org/stable/">Napari</a>). See <a href="https://github.com/davidpagnon/Maya-Mocap">Maya-Mocap</a> and <a href="https://github.com/JonathanCamargo/BlendOsim">BlendOSim</a>.</li>
> <li> <b>Tutorials:</b> Make video tutorials.</li>
> <li> <b>Doc:</b> Use <a href="https://www.sphinx-doc.org/en/master/">Sphinx</a> or <a href="https://www.mkdocs.org/">MkDocs</a> for clearer documentation.</li>
> </br>
> <li> Catch errors</li>
> <li> Conda package and Docker image</li>
> <li> Copy-paste muscles from OpenSim <a href="https://simtk.org/projects/lfbmodel">lifting full-body model</a> for inverse dynamics and more</li>
> <li> Implement optimal fixed-interval Kalman smoothing for inverse kinematics (<a href='https://github.com/pyomeca/biorbd/blob/f776fe02e1472aebe94a5c89f0309360b52e2cbc/src/RigidBody/KalmanReconsMarkers.cpp'>Biorbd</a> or <a href='https://github.com/antoinefalisse/opensim-core/blob/kalman_smoother/OpenSim/Tools/InverseKinematicsKSTool.cpp'>OpenSim fork</a>)</li>
> </br>
> <li> <a href="https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#ga887960ea1bde84784e7f1710a922b93c">Undistort</a> 2D points before triangulating (and <a href="https://github.com/lambdaloop/aniposelib/blob/d03b485c4e178d7cff076e9fe1ac36837db49158/aniposelib/cameras.py#L301">distort</a> them before computing reprojection error).</li>
> <li> Offer the possibility of triangulating with Sparse Bundle Adjustment (SBA), Extended Kalman Filter (EKF), Full Trajectory Estimation (FTE) (see <a href="https://github.com/African-Robotics-Unit/AcinoSet">AcinoSet</a>). </li>
> <li> Implement SLEAP as an other 2D pose estimation solution (converter, skeleton.py, OpenSim model and setup files).</li>
> <li> Outlier rejection (sliding z-score?) Also solve limb swapping</li>
> <li> Implement normalized DLT and RANSAC triangulation, as well as a triangulation refinement step (cf DOI:10.1109/TMM.2022.3171102)</li>
> <li> Utilities: convert Vicon xcp calibration file to toml</li>
> <li> Run from command line via click or typer</li>
> - [x] **Pose:** Support OpenPose [body_25b](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models#body_25b-model---option-2-recommended) for more accuracy, [body_135](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models#single-network-whole-body-pose-estimation-model) for pronation/supination.
> - [x] **Pose:** Support [BlazePose](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) for faster inference (on mobile device).
> - [x] **Pose:** Support [DeepLabCut](http://www.mackenziemathislab.org/deeplabcut) for training on custom datasets.
> - [x] **Pose:** Support [AlphaPose](https://github.com/MVIG-SJTU/AlphaPose) as an alternative to OpenPose.
> - [ ] **Pose:** Support [MediaPipe holistic](https://github.com/google/mediapipe/blob/master/docs/solutions/holistic.md) for pronation/supination (converter, skeleton.py, OpenSim model and setup files).
> - [ ] **Pose:** Support [MMPose](https://github.com/open-mmlab/mmpose), [SLEAP](https://sleap.ai/), etc.
</br>
> - [x] **Calibration:** Convert [Qualisys](https://www.qualisys.com) .qca.txt calibration file.
> - [x] **Calibration:** Easier and clearer calibration procedure: separate intrinsic and extrinsic parameter calculation, edit corner detection if some are wrongly detected (or not visible).
> - [x] **Calibration:** Possibility to evaluate extrinsic parameters from cues on scene.
> - [ ] **Calibration:** Calculate calibration with points rather than board. (1) SBA calibration with wand (cf [Argus](https://argus.web.unc.edu), see converter [here](https://github.com/backyardbiomech/DLCconverterDLT/blob/master/DLTcameraPosition.py)). Set world reference frame in the end.
> - [ ] **Calibration:** Alternatively, calibrate with [OpenPose keypoints](https://ietresearch.onlinelibrary.wiley.com/doi/full/10.1049/cvi2.12130). Set world reference frame in the end.
> - [ ] **Calibration:** Smoother Optitrack calibration file conversion.
> - [ ] **Calibration:** Convert Vicon .xcp calibration file.
</br>
> - [ ] **Synchronization:** Synchronize cameras on 2D keypoint speeds. Cf [this draft script](https://github.com/perfanalytics/pose2sim/blob/draft/Pose2Sim/Utilities/synchronize_cams.py).
> - [x] **Person Association:** Automatically choose the main person to triangulate.
> - [ ] **Person Association:** Multiple persons association. See [Dong 2021](https://arxiv.org/pdf/1901.04111.pdf). With a neural network instead of brute force?
</br>
> - [x] **Triangulation:** Triangulation weighted with confidence.
> - [x] **Triangulation:** Set thresholds for triangulation from a camera on likelihood, reprojection error, and set minimum number of cameras allowed for triangulating a point.
> - [x] **Triangulation:** Show mean reprojection error in px and in mm for each point.
> - [x] **Triangulation:** Evaluate which cameras were the least reliable.
> - [x] **Triangulation:** Show which frames had to be interpolated for each point.
> - [ ] **Triangulation:** [Undistort](https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#ga887960ea1bde84784e7f1710a922b93c) 2D points before triangulating (and [distort](https://github.com/lambdaloop/aniposelib/blob/d03b485c4e178d7cff076e9fe1ac36837db49158/aniposelib/cameras.py#L301) them before computing reprojection error).
> - [ ] **Triangulation:** Multiple person kinematics (output multiple .trc coordinates files).
> - [ ] **Triangulation:** Offer the possibility of triangulating with Sparse Bundle Adjustment (SBA), Extended Kalman Filter (EKF), Full Trajectory Estimation (FTE) (see [AcinoSet](https://github.com/African-Robotics-Unit/AcinoSet)).
> - [ ] **Triangulation:** Outlier rejection (sliding z-score?). Also solve limb swapping.
> - [ ] **Triangulation:** Implement normalized DLT and RANSAC triangulation, as well as a [triangulation refinement step](https://doi.org/10.1109/TMM.2022.3171102).
> - [x] **Filtering:** Available filtering methods: Butterworth, Butterworth on speed, LOESS, Gaussian, Median.
> - [ ] **Filtering:** Add Kalman smoothing filter.
</br>
> - [x] **OpenSim:** Integrate the better spine from the [lifting fullbody model](https://pubmed.ncbi.nlm.nih.gov/30714401) into the [gait full-body model](https://nmbl.stanford.edu/wp-content/uploads/07505900.pdf), which is more accurate for the knee.
> - [x] **OpenSim:** Optimize model marker positions as compared to ground-truth marker-based positions.
> - [x] **OpenSim:** Add scaling and inverse kinematics setup files.
> - [ ] **OpenSim:** Add muscles from OpenSim [lifting full-body model](https://simtk.org/projects/lfbmodel), add Hertzian footground contacts, for inverse dynamics and more.
> - [ ] **OpenSim:** Implement optimal fixed-interval Kalman smoothing for inverse kinematics (cf [this OpenSim fork](https://github.com/antoinefalisse/opensim-core/blob/kalman_smoother/OpenSim/Tools/InverseKinematicsKSTool.cpp) or [Biorbd](https://github.com/pyomeca/biorbd/blob/f776fe02e1472aebe94a5c89f0309360b52e2cbc/src/RigidBody/KalmanReconsMarkers.cpp)).
</br>
> - [ ] **GUI:** 3D plot of cameras and of triangulated keypoints.
> - [ ] **GUI:** Blender add-on, or webapp (e.g., with [Napari](https://napari.org/stable)). See my draft project [Maya-Mocap](https://github.com/davidpagnon/Maya-Mocap) and [BlendOsim](https://github.com/JonathanCamargo/BlendOsim).
</br>
> - [x] **Demo:** Provide Demo data for users to test the code.
> - [ ] **Tutorials:** Make video tutorials.
> - [ ] **Doc:** Use [Sphinx](https://www.sphinx-doc.org/en/master) or [MkDocs](https://www.mkdocs.org) for clearer documentation.
</br>
> - [ ] **Catch errors**
> - [ ] **Conda package and Docker image**
> - [ ] **Run from command line via click or typer**