diff --git a/Content/Calib2D.png b/Content/Calib2D.png
index d16f120..4498986 100644
Binary files a/Content/Calib2D.png and b/Content/Calib2D.png differ
diff --git a/Content/Calib_ext.png b/Content/Calib_ext.png
new file mode 100644
index 0000000..3572937
Binary files /dev/null and b/Content/Calib_ext.png differ
diff --git a/Content/Calib_int.png b/Content/Calib_int.png
index 2f5aa3c..3a66981 100644
Binary files a/Content/Calib_int.png and b/Content/Calib_int.png differ
diff --git a/Content/Filter3D.png b/Content/Filter3D.png
index 0d0613c..72973df 100644
Binary files a/Content/Filter3D.png and b/Content/Filter3D.png differ
diff --git a/Content/Track2D.png b/Content/Track2D.png
index 9f2326f..e949963 100644
Binary files a/Content/Track2D.png and b/Content/Track2D.png differ
diff --git a/Content/Triangulate3D.png b/Content/Triangulate3D.png
index 0815ad0..57a905f 100644
Binary files a/Content/Triangulate3D.png and b/Content/Triangulate3D.png differ
diff --git a/Pose2Sim/calibration.py b/Pose2Sim/calibration.py
index 2144947..5032997 100644
--- a/Pose2Sim/calibration.py
+++ b/Pose2Sim/calibration.py
@@ -545,14 +545,14 @@ def calibrate_extrinsics(calib_dir, extrinsics_config_dict, C, S, K, D):
cv2.circle(img, (int(o[0]), int(o[1])), 8, (0,0,255), -1)
for i in imgp:
cv2.drawMarker(img, (int(i[0][0]), int(i[0][1])), (0,255,0), cv2.MARKER_CROSS, 15, 2)
- cv2.putText(img, 'Verify calibration results, then close window.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, 'Verify calibration results, then close window.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1, lineType = cv2.LINE_AA)
+ cv2.putText(img, 'Verify calibration results, then close window.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
+ cv2.putText(img, 'Verify calibration results, then close window.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
cv2.drawMarker(img, (20,40), (0,255,0), cv2.MARKER_CROSS, 15, 2)
- cv2.putText(img, ' Clicked points', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, ' Clicked points', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' Clicked points', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' Clicked points', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
cv2.circle(img, (20,60), 8, (0,0,255), -1)
- cv2.putText(img, ' Reprojected object points', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, ' Reprojected object points', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' Reprojected object points', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' Reprojected object points', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
im_pil = Image.fromarray(img)
im_pil.show(title = os.path.basename(img_vid_files[0]))
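
Note: the captions in this hunk are all drawn with the same two-pass `cv2.putText` pattern (a thick white stroke first, then a thin dark stroke on top, so the text stays legible on any background). A minimal sketch of a helper that would factor this out; `put_outlined_text` is a hypothetical name, not part of Pose2Sim:

```python
import cv2

def put_outlined_text(img, text, org, scale=0.7, thickness=2, outline=7):
    """Draw dark text over a thicker light stroke so it reads on any background."""
    cv2.putText(img, text, org, cv2.FONT_HERSHEY_SIMPLEX, scale,
                (255, 255, 255), outline, lineType=cv2.LINE_AA)  # light outline pass
    cv2.putText(img, text, org, cv2.FONT_HERSHEY_SIMPLEX, scale,
                (0, 0, 0), thickness, lineType=cv2.LINE_AA)      # dark foreground pass

# Usage: put_outlined_text(img, 'Verify calibration results, then close window.', (20, 20))
```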
@@ -617,8 +617,8 @@ def findCorners(img_path, corner_nb, objp=[], show=True):
# Add corner index
for i, corner in enumerate(imgp):
x, y = corner.ravel()
- cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 2)
- cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1)
+ cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .8, (255, 255, 255), 7)
+ cv2.putText(img, str(i+1), (int(x)-5, int(y)-5), cv2.FONT_HERSHEY_SIMPLEX, .8, (0,0,0), 2)
# Visualizer and key press event handler
for var_to_delete in ['imgp_confirmed', 'objp_confirmed']:
@@ -677,6 +677,7 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
global imgp_confirmed, objp_confirmed, objp_confirmed_notok, scat, ax_3d, fig_3d, events, count
if event.key == 'y':
+ # TODO: detect when the window is closed
# If 'y', close all
# If points have been clicked, imgp_confirmed is returned, else imgp
# If objp is given, objp_confirmed is returned in addition
@@ -869,19 +870,19 @@ def imgp_objp_visualizer_clicker(img, imgp=[], objp=[], img_path=''):
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
# Write instructions
- cv2.putText(img, 'Type "Y" to accept point detection.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, 'Type "Y" to accept point detection.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, 'Type "Y" to accept point detection.', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, 'If points are wrongfully (or not) detected:', (20, 43), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, 'If points are wrongfully (or not) detected:', (20, 43), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, 'If points are wrongfully (or not) detected:', (20, 43), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, '- type "N" to dismiss this image,', (20, 66), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, '- type "N" to dismiss this image,', (20, 66), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, '- type "N" to dismiss this image,', (20, 66), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, '- type "C" to click points by hand (beware of their order).', (20, 89), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, '- type "C" to click points by hand (beware of their order).', (20, 89), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, '- type "C" to click points by hand (beware of their order).', (20, 89), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, ' left click to add a point, right click to remove it, "H" to indicate it is not visible. ', (20, 112), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' left click to add a point, right click to remove it, "H" to indicate it is not visible. ', (20, 112), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, ' left click to add a point, right click to remove it, "H" to indicate it is not visible. ', (20, 112), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, ' Confirm with "Y", cancel with "N".', (20, 135), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, ' Confirm with "Y", cancel with "N".', (20, 135), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, ' Confirm with "Y", cancel with "N".', (20, 135), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
- cv2.putText(img, 'Use mouse wheel to zoom in and out and to pan', (20, 158), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 3, lineType = cv2.LINE_AA)
+ cv2.putText(img, 'Use mouse wheel to zoom in and out and to pan', (20, 158), cv2.FONT_HERSHEY_SIMPLEX, .7, (255,255,255), 7, lineType = cv2.LINE_AA)
cv2.putText(img, 'Use mouse wheel to zoom in and out and to pan', (20, 158), cv2.FONT_HERSHEY_SIMPLEX, .7, (0,0,0), 2, lineType = cv2.LINE_AA)
# Put image in a matplotlib figure for more controls
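
For context, the "Y"/"N"/"C" instructions written on the image above are dispatched through a matplotlib key-press callback. A minimal sketch of that pattern, assuming a figure `fig` displaying the image; the handler body is illustrative and simplifies Pose2Sim's actual key-press logic:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

def on_key(event):
    # 'y' accepts the detected points, 'n' dismisses the image,
    # 'c' switches to clicking points by hand (see the instructions above)
    if event.key in ('y', 'n'):
        plt.close(fig)
    elif event.key == 'c':
        print('Click points by hand; beware of their order.')

fig.canvas.mpl_connect('key_press_event', on_key)
plt.show()
```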
diff --git a/README.md b/README.md
index 665e1c2..7640745 100644
--- a/README.md
+++ b/README.md
@@ -250,13 +250,14 @@ If you already have a calibration file, set `calibration_type` type to `convert`
- **From Vicon:**
- Not possible yet. [Want to contribute?](#how-to-contribute)
+
### Calculate from scratch
> Calculate calibration parameters with a board, or with points (such as detected on a wand or a human body).
- **With a board:**
- > *N.B.:* Try the calibration tool on the Demo by changing `calibration_type` to `calculate` instead of `convert` in `Config.toml`.\
- For the sake of practicality, there are voluntarily few images for intrinsics, and few clicked points for extrinsics. *You should use more of them.* In spite of this, your reprojection error should be under 1-2 cm, which [does not hinder the quality of kinematic results in practice](https://www.mdpi.com/1424-8220/21/19/6530/htm).
+ > *N.B.:* Try the calibration tool on the Demo by changing `calibration_type` to `calculate` in `Config.toml`.\
+ For practicality, there are deliberately few board images for intrinsics, and few points to click for extrinsics. *You should use more of them.* Even so, your reprojection error should be under 1-2 cm, which [does not hinder the quality of kinematic results in practice](https://www.mdpi.com/1424-8220/21/19/6530/htm).
- **Calculate intrinsic parameters:**
@@ -269,6 +270,8 @@ If you already have a calibration file, set `calibration_type` type to `convert`
- Make sure that the board:\
is filmed from different angles, covers a large part of the video frame, and is in focus.\
is flat, without reflections, surrounded by a white border, and is not rotationally invariant (Nrows ≠ Ncols, and Nrows odd if Ncols even).
+
+
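
As a reference for the intrinsics step above, a minimal OpenCV checkerboard calibration sketch; the folder, board size, and square size are placeholder values, and this simplifies what `Pose2Sim.calibration()` actually runs:

```python
import glob
import cv2
import numpy as np

corner_nb = (9, 6)      # inner corners per row and column (placeholder, Nrows != Ncols)
square_size = 30.0      # square edge length in mm (placeholder)

# 3D board coordinates, identical for every image
objp = np.zeros((corner_nb[0] * corner_nb[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:corner_nb[0], 0:corner_nb[1]].T.reshape(-1, 2) * square_size

objpoints, imgpoints = [], []
for path in glob.glob('intrinsics/*.png'):      # placeholder folder
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, corner_nb)
    if found:
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                    (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-3))
        objpoints.append(objp)
        imgpoints.append(corners)

ret, K, D, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(f'RMS reprojection error: {ret:.3f} px')
```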
- **Calculate extrinsic parameters:**
@@ -284,6 +287,8 @@ If you already have a calibration file, set `calibration_type` type to `convert`
- If you film the raw scene (potentially more accurate if points are spread out):\
Manually measure the 3D coordinates of 10 or more points in the scene (tiles, lines on wall, boxes, treadmill dimensions, etc). These points should be as spread out as possible.\
Then you will click on the corresponding image points for each view.
+
+
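
The measured 3D coordinates and the clicked image points described above essentially feed a perspective-n-point solve. A minimal self-contained sketch with `cv2.solvePnP`; the intrinsics and all point coordinates below are placeholders (in practice, use the `K`, `D` from the intrinsics step and your own measurements and clicks):

```python
import cv2
import numpy as np

# Placeholder intrinsics -- use the K, D computed in the intrinsics step instead
K = np.array([[1000., 0., 640.], [0., 1000., 360.], [0., 0., 1.]])
D = np.zeros(5)

# 10 hand-measured scene points in metres (placeholder coordinates, well spread out)
object_points = np.array([[0, 0, 0], [2, 0, 0], [2, 2, 0], [0, 2, 0], [1, 1, 0],
                          [0, 0, 1], [2, 0, 1], [2, 2, 1], [0, 2, 1], [1, 1, 1]], np.float32)

# For this sketch, simulate the clicked pixel coordinates from a known pose
rvec_true, tvec_true = np.array([[0.1], [0.2], [0.]]), np.array([[-1.], [-1.], [5.]])
image_points, _ = cv2.projectPoints(object_points, rvec_true, tvec_true, K, D)

# Recover the camera pose (extrinsics) from the 3D-2D correspondences
success, rvec, tvec = cv2.solvePnP(object_points, image_points, K, D)

# Sanity check: reproject and measure the pixel error
proj, _ = cv2.projectPoints(object_points, rvec, tvec, K, D)
err = np.linalg.norm(proj.squeeze() - image_points.squeeze(), axis=1).mean()
print(f'Mean reprojection error: {err:.3f} px')
```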
- **With points:**
- Points can be detected from a wand.\
@@ -302,7 +307,6 @@ Pose2Sim.calibration()
Output:\
-
@@ -753,7 +757,12 @@ If you want to contribute to Pose2Sim, please follow [this guide](https://docs.g
-*Here is a to-do list, for general guidance purposes only:*
+**Here is a to-do list, for general guidance only:**\
+*The main projects are (see details below):*\
+*- Graphical User Interface*\
+*- Multiple person triangulation*\
+*- Synchronization*\
+*- Self-calibration based on keypoint detection*\
> - [x] **Pose:** Support OpenPose [body_25b](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models#body_25b-model---option-2-recommended) for more accuracy, [body_135](https://github.com/CMU-Perceptual-Computing-Lab/openpose_train/tree/master/experimental_models#single-network-whole-body-pose-estimation-model) for pronation/supination.
> - [x] **Pose:** Support [BlazePose](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) for faster inference (on mobile device).