From e6ea2c50c3268ca563ec67710f1c2961b5eaf85b Mon Sep 17 00:00:00 2001 From: shuaiqing Date: Mon, 28 Jun 2021 19:37:15 +0800 Subject: [PATCH] :rocket: update mvmp --- Readme.md | 21 +++++++-- config/vis3d/o3d_scene.yml | 1 + config/vis3d/o3d_scene_manol.yml | 30 +++++++++++- doc/mvmp.md | 77 +++++++++++++++++++++++++++++++ doc/realtime_visualization.md | 46 ++++++++++++++---- easymocap/config/vis_socket.py | 5 +- easymocap/smplmodel/body_model.py | 10 ++-- easymocap/socket/o3d.py | 6 ++- 8 files changed, 174 insertions(+), 22 deletions(-) create mode 100644 doc/mvmp.md diff --git a/Readme.md b/Readme.md index 71a6782..ebfa075 100644 --- a/Readme.md +++ b/Readme.md @@ -2,7 +2,7 @@ * @Date: 2021-01-13 20:32:12 * @Author: Qing Shuai * @LastEditors: Qing Shuai - * @LastEditTime: 2021-06-28 11:13:59 + * @LastEditTime: 2021-06-28 14:08:02 * @FilePath: /EasyMocapRelease/Readme.md --> @@ -61,7 +61,7 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to ### Multiple views of multiple people -[![report](https://img.shields.io/badge/CVPR20-mvpose-red)](https://arxiv.org/pdf/1901.04111.pdf) [![quickstart](https://img.shields.io/badge/quickstart-green)](./doc/todo.md) +[![report](https://img.shields.io/badge/CVPR20-mvpose-red)](https://arxiv.org/pdf/1901.04111.pdf) [![quickstart](https://img.shields.io/badge/quickstart-green)](./doc/mvmp.md)

@@ -77,14 +77,27 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to
     Captured with 8 consumer cameras
 </div>
-## Other features
+### 3D Realtime visualization
+[![quickstart](https://img.shields.io/badge/quickstart-green)](./doc/realtime_visualization.md)
+
+### Other
 - [Camera calibration](apps/calibration/Readme.md): a simple calibration tool based on OpenCV
 - [Pose guided synchronization](./doc/todo.md) (coming soon)
 - [Annotator](apps/calibration/Readme.md): a simple GUI annotator based on OpenCV
 - [Exporting of multiple data formats(bvh, asf/amc, ...)](./doc/02_output.md)
-- [Real-time visualization](./doc/realtime_visualization.md)
 
 ## Updates
diff --git a/config/vis3d/o3d_scene.yml b/config/vis3d/o3d_scene.yml
index 72c71fb..979d9fc 100644
--- a/config/vis3d/o3d_scene.yml
+++ b/config/vis3d/o3d_scene.yml
@@ -8,6 +8,7 @@ max_human: 5
 track: True
 filter: True
 block: True # block visualization or not, True for visualize each frame, False in realtime applications
+rotate: False
 debug: False
 write: False
 out: 'none'
diff --git a/config/vis3d/o3d_scene_manol.yml b/config/vis3d/o3d_scene_manol.yml
index ec99f63..eacbd8e 100644
--- a/config/vis3d/o3d_scene_manol.yml
+++ b/config/vis3d/o3d_scene_manol.yml
@@ -8,4 +8,32 @@ body_model:
   model_type: "mano"
   gender: "neutral"
   device: "cuda"
-  regressor_path: "data/smplx/J_regressor_mano_LEFT.txt"
\ No newline at end of file
+  regressor_path: "data/smplx/J_regressor_mano_LEFT.txt"
+
+scene:
+  _no_merge_: True
+  "easymocap.visualize.o3dwrapper.create_coord":
+    camera: [-0.5, -0.5, -0.5]
+    radius: 1.
+    scale: 1.
+  "easymocap.visualize.o3dwrapper.create_bbox":
+    min_bound: [-0.5, -0.5, -0.5]
+    max_bound: [0.5, 0.5, 0.5]
+    flip: False
+  "easymocap.visualize.o3dwrapper.create_ground":
+    center: [-0.5, -0.5, -0.5]
+    xdir: [1, 0, 0]
+    ydir: [0, 1, 0]
+    step: 0.2
+    xrange: 5
+    yrange: 5
+    white: [1., 1., 1.]
+    black: [0.,0.,0.]
+    two_sides: False
+
+camera:
+  phi: 0
+  theta: -90
+  cx: 0
+  cy: 1
+  cz: 0.5
\ No newline at end of file
diff --git a/doc/mvmp.md b/doc/mvmp.md
new file mode 100644
index 0000000..5fbce59
--- /dev/null
+++ b/doc/mvmp.md
@@ -0,0 +1,77 @@
+# EasyMocap - mvmp
+
+This code aims to reconstruct multiple people from multiple calibrated cameras. The released code is an easy-to-use version. See [Advanced](#advanced) for more details.
+
+## 0. Preparation
+
+Prepare your calibrated and synchronized camera system yourself.
+
+You can download our dataset [here](https://zjueducn-my.sharepoint.com/:u:/g/personal/s_q_zju_edu_cn/EZFGgpK2Y6RBkPbGvny_PC0BIS08qJvxGYEHYopjhHX_TQ?e=LY3pgm).
+
+```bash
+├── intri.yml
+├── extri.yml
+├── annots
+│   ├── 0
+│   ├── 1
+│   ├── 2
+│   ├── 3
+│   ├── 4
+│   ├── 5
+│   ├── 6
+│   └── 7
+└── videos
+    ├── 0.mp4
+    ├── 1.mp4
+    ├── 2.mp4
+    ├── 3.mp4
+    ├── 4.mp4
+    ├── 5.mp4
+    ├── 6.mp4
+    └── 7.mp4
+```
+
+Extract the images from the videos:
+```bash
+data=/path/to/data
+python3 scripts/preprocess/extract_video.py ${data} --no2d
+```
+
+## 1. Reconstructing human pose
+This step reconstructs the human pose in each frame.
+```bash
+python3 apps/demo/mvmp.py ${data} --out ${data}/output --annot annots --cfg config/exp/mvmp1f.yml --undis --vis_det --vis_repro
+```
+
+## 2. Recovering SMPL body model
+First we should track the human pose across frames. This step will track each person and interpolate missing frames.
+```bash +python3 apps/demo/auto_track.py ${data}/output ${data}/output-track --track3d +``` + +Then we can fit SMPL model to the tracked keyponts: + +```bash +python3 apps/demo/smpl_from_keypoints.py ${data} --skel ${data}/output-track/keypoints3d --out ${data}/output-track/smpl --verbose --opts smooth_poses 1e1 +``` + +To visualize the results, see [visualization tutorial](./doc/realtime_visualization.md) + + +## Advanced + +For more complicated scenes, our lab has a real-time version of this algorithm, which can perform 3D reconstruction and tracking simultaneously. + +If you want to use this part for commercial queries, please contact [Xiaowei Zhou](mailto:xwzhou@zju.edu.cn). + + + +https://user-images.githubusercontent.com/22812405/123629197-968c0080-d846-11eb-8417-4e6d3a65466d.mp4 + diff --git a/doc/realtime_visualization.md b/doc/realtime_visualization.md index 817caf4..e7ad843 100644 --- a/doc/realtime_visualization.md +++ b/doc/realtime_visualization.md @@ -2,7 +2,7 @@ * @Date: 2021-06-04 15:56:55 * @Author: Qing Shuai * @LastEditors: Qing Shuai - * @LastEditTime: 2021-06-28 12:11:58 + * @LastEditTime: 2021-06-28 13:56:41 * @FilePath: /EasyMocapRelease/doc/realtime_visualization.md --> # EasyMoCap -> Real-time Visualization @@ -31,7 +31,7 @@ This step will open the visualization window: ![](./assets/vis_server.png) -You can alternate the viewpoints free. The configuration file `config/vis/o3d_scene.yml` defines the scene and other properties. In the default setting, we define the xyz-axis in the origin, the bounding box of the scene and a chessboard in the ground. +You can alternate the viewpoints free. Press `a` to automatic rotate the scene. The configuration file `config/vis/o3d_scene.yml` defines the scene and other properties. In the default setting, we define the xyz-axis in the origin, the bounding box of the scene and a chessboard in the ground. ## Send the data @@ -80,60 +80,90 @@ In the configuration file, we main define the `body_model` and `scene`. You can To understand our code, we provide lots of results for visualization. +First download the data [here](https://zjueducn-my.sharepoint.com/:u:/g/personal/s_q_zju_edu_cn/EQO5cILlYS1BgTT1ufDv2N4Bj41cvhstgmw_tMyi-6smWA?e=wSS9FB) and set the path to `vis`: + +```bash +vis=/path/to/vis/data +``` + ### 1. Skeletons Basic skeletons: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel-body25 camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/smpl/keypoints3d ``` +
+ Body+Face+Hand: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_total.yml write True out ${vis}/output/skel/total camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_total.yml write True out ${vis}/output/skel-total camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/smplx/keypoints3d ``` +
+ Multiple Person: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel-multi camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/multi/keypoints3d --step 4 ``` +
+ ### 2. Mesh SMPL: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smpl.yml write True out ${vis}/output/smpl/base camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smpl.yml write True out ${vis}/output/mesh-smpl camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/smpl/smpl --smpl ``` +
+ SMPLX: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smplx.yml write True out ${vis}/output/smpl/smplx camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smplx.yml write True out ${vis}/output/mesh-smplx camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/smplx/smpl --smpl ``` +
+ MANO: ```bash # Start the server: -python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_manol.yml write True out ${vis}/output/smpl/manol camera.cz 3. camera.cy 0.5 +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_manol.yml write True out ${vis}/output/mesh-manol camera.cz 3. camera.cy 0.5 # Send the keypoints: python3 apps/vis/vis_client.py --path ${vis}/manol/smpl --smpl ``` +
+ ## Advanced ### 1. Camera Setting diff --git a/easymocap/config/vis_socket.py b/easymocap/config/vis_socket.py index b3dcbd9..34a2196 100644 --- a/easymocap/config/vis_socket.py +++ b/easymocap/config/vis_socket.py @@ -2,8 +2,8 @@ @ Date: 2021-05-30 11:17:18 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-22 10:35:26 - @ FilePath: /EasyMocap/easymocap/config/vis_socket.py + @ LastEditTime: 2021-06-28 13:03:37 + @ FilePath: /EasyMocapRelease/easymocap/config/vis_socket.py ''' from .baseconfig import CN from .baseconfig import Config as BaseConfig @@ -22,6 +22,7 @@ class Config(BaseConfig): cfg.max_human = 5 cfg.track = True cfg.block = True # block visualization or not, True for visualize each frame, False in realtime applications + cfg.rotate = False cfg.debug = False cfg.write = False cfg.out = '/' diff --git a/easymocap/smplmodel/body_model.py b/easymocap/smplmodel/body_model.py index bc9d7db..3524302 100644 --- a/easymocap/smplmodel/body_model.py +++ b/easymocap/smplmodel/body_model.py @@ -2,8 +2,8 @@ @ Date: 2020-11-18 14:04:10 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-22 13:44:10 - @ FilePath: /EasyMocap/easymocap/smplmodel/body_model.py + @ LastEditTime: 2021-06-28 11:55:00 + @ FilePath: /EasyMocapRelease/easymocap/smplmodel/body_model.py ''' import torch import torch.nn as nn @@ -39,9 +39,9 @@ def load_regressor(regressor_path): import ipdb; ipdb.set_trace() return X_regressor -def load_bodydata(model_path, gender): +def load_bodydata(model_type, model_path, gender): if osp.isdir(model_path): - model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl') + model_fn = '{}_{}.{ext}'.format(model_type.upper(), gender.upper(), ext='pkl') smpl_path = osp.join(model_path, model_fn) else: smpl_path = model_path @@ -73,7 +73,7 @@ class SMPLlayer(nn.Module): self.device = device self.model_type = model_type # create the SMPL model - data = load_bodydata(model_path, gender) + data = load_bodydata(model_type, model_path, gender) if with_color: self.color = data['vertex_colors'] else: diff --git a/easymocap/socket/o3d.py b/easymocap/socket/o3d.py index ddc2d74..011fd85 100644 --- a/easymocap/socket/o3d.py +++ b/easymocap/socket/o3d.py @@ -2,7 +2,7 @@ @ Date: 2021-05-25 11:15:53 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-25 21:16:02 + @ LastEditTime: 2021-06-28 19:36:58 @ FilePath: /EasyMocapRelease/easymocap/socket/o3d.py ''' import open3d as o3d @@ -19,7 +19,7 @@ from ..assignment.criterion import CritRange import copy rotate = False -def o3d_callback_rotate(vis): +def o3d_callback_rotate(vis=None): global rotate rotate = not rotate return False @@ -36,6 +36,8 @@ class VisOpen3DSocket(BaseSocket): # scene vis = o3d.visualization.VisualizerWithKeyCallback() vis.register_key_callback(ord('A'), o3d_callback_rotate) + if cfg.rotate: + o3d_callback_rotate() vis.create_window(window_name='Visualizer', width=cfg.width, height=cfg.height) self.vis = vis # load the scene
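As an editorial aside (not part of the patch): the `load_bodydata` change above makes the model filename depend on the model type as well as the gender, so MANO and SMPL-X pickles are resolved in addition to SMPL. A minimal sketch that mirrors that lookup; the example paths are hypothetical:

```python
import os.path as osp

def resolve_model_file(model_type, model_path, gender):
    # Mirrors the updated load_bodydata(): when model_path is a directory,
    # the file name is built from both the model type and the gender.
    if osp.isdir(model_path):
        model_fn = '{}_{}.pkl'.format(model_type.upper(), gender.upper())
        return osp.join(model_path, model_fn)
    # Otherwise model_path already points at a concrete .pkl file.
    return model_path

# Hypothetical locations, for illustration only.
print(resolve_model_file('smpl', 'data/smplx/smpl', 'neutral'))   # .../SMPL_NEUTRAL.pkl
print(resolve_model_file('mano', 'data/smplx/mano', 'neutral'))   # .../MANO_NEUTRAL.pkl
```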
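Also as an editorial aside: the new `rotate` config flag and the `vis=None` default on `o3d_callback_rotate` let the viewer start auto-rotating without a key press. A rough standalone sketch of how such a toggle can drive an Open3D window; the global flag, callback, and key binding follow the patch, while the render loop and rotation step size are assumptions for illustration:

```python
import open3d as o3d

rotate = False  # toggled by the 'A' key, or once at start-up when cfg.rotate is True

def o3d_callback_rotate(vis=None):
    global rotate
    rotate = not rotate
    return False

# Stand-in for the new config flag added in this patch.
class cfg:
    rotate = True

vis = o3d.visualization.VisualizerWithKeyCallback()
vis.register_key_callback(ord('A'), o3d_callback_rotate)
if cfg.rotate:
    o3d_callback_rotate()
vis.create_window(window_name='Visualizer', width=1280, height=720)
vis.add_geometry(o3d.geometry.TriangleMesh.create_coordinate_frame())

while vis.poll_events():
    if rotate:
        # Assumed behaviour: nudge the view a little every frame while rotation is on.
        vis.get_view_control().rotate(4.0, 0.0)
    vis.update_renderer()
```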