diff --git a/Readme.md b/Readme.md index f59a0d9..71a6782 100644 --- a/Readme.md +++ b/Readme.md @@ -2,7 +2,7 @@ * @Date: 2021-01-13 20:32:12 * @Author: Qing Shuai * @LastEditors: Qing Shuai - * @LastEditTime: 2021-06-14 16:41:00 + * @LastEditTime: 2021-06-28 11:13:59 * @FilePath: /EasyMocapRelease/Readme.md --> @@ -59,13 +59,13 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to Internet videos of Roger Federer's serving -### Multiple views of multiple people (Coming soon) +### Multiple views of multiple people [![report](https://img.shields.io/badge/CVPR20-mvpose-red)](https://arxiv.org/pdf/1901.04111.pdf) [![quickstart](https://img.shields.io/badge/quickstart-green)](./doc/todo.md)
-
- Captured with 4 consumer cameras +
+ Captured with 8 consumer cameras
### Novel view synthesis from sparse views @@ -88,6 +88,7 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to ## Updates +- 06/28/2021: The **Multi-view Multi-person** part is released! - 06/10/2021: The **real-time 3D visualization** part is released! - 04/11/2021: The calibration tool and the annotator are released. - 04/11/2021: **Mirrored-Human** part is released. diff --git a/apps/vis/vis_client.py b/apps/vis/vis_client.py index 08e12e3..2893f66 100644 --- a/apps/vis/vis_client.py +++ b/apps/vis/vis_client.py @@ -2,9 +2,10 @@ @ Date: 2021-05-24 18:57:48 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-04 16:43:00 + @ LastEditTime: 2021-06-28 11:30:45 @ FilePath: /EasyMocapRelease/apps/vis/vis_client.py ''' +from easymocap.mytools.reader import read_smpl import socket import time from easymocap.socket.base_client import BaseSocketClient @@ -12,35 +13,48 @@ import os def send_rand(client): import numpy as np - for _ in range(1000): - k3d = np.random.rand(25, 4) - data = [ - { - 'id': 0, - 'keypoints3d': k3d - } - ] - client.send(data) + N_person = 10 + datas = [] + for i in range(N_person): + transl = (np.random.rand(1, 3) - 0.5) * 3 + kpts = np.random.rand(25, 4) + kpts[:, :3] += transl + data = { + 'id': i, + 'keypoints3d': kpts + } + datas.append(data) + for _ in range(1): + for i in range(N_person): + move = (np.random.rand(1, 3) - 0.5) * 0.1 + datas[i]['keypoints3d'][:, :3] += move + client.send(datas) time.sleep(0.005) client.close() -def send_dir(client, path): +def send_dir(client, path, step): from os.path import join from glob import glob from tqdm import tqdm from easymocap.mytools.reader import read_keypoints3d results = sorted(glob(join(path, '*.json'))) - for result in tqdm(results): - data = read_keypoints3d(result) - client.send(data) + for result in tqdm(results[::step]): + if args.smpl: + data = read_smpl(result) + client.send_smpl(data) + else: + data = read_keypoints3d(result) + client.send(data) time.sleep(0.005) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() - parser.add_argument('--host', type=str, default='auto') + parser.add_argument('--host', type=str, default='127.0.0.1') parser.add_argument('--port', type=int, default=9999) + parser.add_argument('--step', type=int, default=1) parser.add_argument('--path', type=str, default=None) + parser.add_argument('--smpl', action='store_true') parser.add_argument('--debug', action='store_true') args = parser.parse_args() @@ -49,6 +63,6 @@ if __name__ == "__main__": client = BaseSocketClient(args.host, args.port) if args.path is not None and os.path.isdir(args.path): - send_dir(client, args.path) + send_dir(client, args.path, step=args.step) else: send_rand(client) \ No newline at end of file diff --git a/config/vis3d/o3d_scene.yml b/config/vis3d/o3d_scene.yml index 2839736..72c71fb 100644 --- a/config/vis3d/o3d_scene.yml +++ b/config/vis3d/o3d_scene.yml @@ -18,6 +18,13 @@ body_model: body_type: "body25" joint_radius: 0.02 +camera: + phi: 0 + theta: -30 + cx: 0. + cy: 0. + cz: 6. 
+ scene: "easymocap.visualize.o3dwrapper.create_coord": camera: [0, 0, 0] diff --git a/config/vis3d/o3d_scene_manol.yml b/config/vis3d/o3d_scene_manol.yml new file mode 100644 index 0000000..ec99f63 --- /dev/null +++ b/config/vis3d/o3d_scene_manol.yml @@ -0,0 +1,11 @@ +parent: "config/vis3d/o3d_scene.yml" + +body_model: + module: "easymocap.smplmodel.body_model.SMPLlayer" + args: + _no_merge_: True + model_path: "data/smplx/smplh/MANO_LEFT.pkl" + model_type: "mano" + gender: "neutral" + device: "cuda" + regressor_path: "data/smplx/J_regressor_mano_LEFT.txt" \ No newline at end of file diff --git a/config/vis3d/o3d_scene_smplx.yml b/config/vis3d/o3d_scene_smplx.yml new file mode 100644 index 0000000..f41a8e7 --- /dev/null +++ b/config/vis3d/o3d_scene_smplx.yml @@ -0,0 +1,11 @@ +parent: "config/vis3d/o3d_scene.yml" + +body_model: + module: "easymocap.smplmodel.body_model.SMPLlayer" + args: + _no_merge_: True + model_path: "data/smplx/smplx" + model_type: "smplx" + gender: "neutral" + device: "cuda" + regressor_path: "data/smplx/J_regressor_body25_smplx.txt" \ No newline at end of file diff --git a/config/vis3d/o3d_scene_total.yml b/config/vis3d/o3d_scene_total.yml new file mode 100644 index 0000000..eb3ba32 --- /dev/null +++ b/config/vis3d/o3d_scene_total.yml @@ -0,0 +1,6 @@ +parent: "config/vis3d/o3d_scene.yml" + +body_model: + args: + body_type: "bodyhandface" + joint_radius: 0.02 \ No newline at end of file diff --git a/doc/assets/mvmp1f.gif b/doc/assets/mvmp1f.gif new file mode 100644 index 0000000..dc2d243 Binary files /dev/null and b/doc/assets/mvmp1f.gif differ diff --git a/doc/installation.md b/doc/installation.md index 61316d1..8c773cf 100644 --- a/doc/installation.md +++ b/doc/installation.md @@ -2,7 +2,7 @@ * @Date: 2021-04-02 11:52:33 * @Author: Qing Shuai * @LastEditors: Qing Shuai - * @LastEditTime: 2021-04-13 17:15:49 + * @LastEditTime: 2021-06-21 21:18:45 * @FilePath: /EasyMocapRelease/doc/installation.md --> # EasyMocap - Installation @@ -27,6 +27,8 @@ data ├── J_regressor_body25.npy ├── J_regressor_body25_smplh.txt ├── J_regressor_body25_smplx.txt + ├── J_regressor_mano_LEFT.txt + ├── J_regressor_mano_RIGHT.txt ├── smpl │   ├── SMPL_FEMALE.pkl │   ├── SMPL_MALE.pkl @@ -47,7 +49,7 @@ This part is used in `1v1p*.py`. You can skip this step if you only use the mult Download pretrained SPIN model [here](http://visiondata.cis.upenn.edu/spin/model_checkpoint.pt) and place it to `data/models/spin_checkpoints.pt`. -Fetch the extra data [here](http://visiondata.cis.upenn.edu/spin/dataset_extras.tar.gz) and place the `smpl_mean_params.npz` to `data/models/smpl_mean_params.npz`. +Fetch the extra data [here](http://visiondata.cis.upenn.edu/spin/data.tar.gz) and place the `smpl_mean_params.npz` at `data/models/smpl_mean_params.npz`. ## 0.3 (Optional) 2D model diff --git a/doc/realtime_visualization.md b/doc/realtime_visualization.md index 900f05c..817caf4 100644 --- a/doc/realtime_visualization.md +++ b/doc/realtime_visualization.md @@ -2,7 +2,7 @@ * @Date: 2021-06-04 15:56:55 * @Author: Qing Shuai * @LastEditors: Qing Shuai - * @LastEditTime: 2021-06-12 15:29:23 + * @LastEditTime: 2021-06-28 12:11:58 * @FilePath: /EasyMocapRelease/doc/realtime_visualization.md --> # EasyMoCap -> Real-time Visualization @@ -74,4 +74,84 @@ data = [ ## Define your scene -In the configuration file, we main define the `body_model` and `scene`. You can replace them for your data. \ No newline at end of file +In the configuration file, we mainly define the `body_model` and `scene`.
You can replace them with your own data. + +## Examples + +To help you understand the code, we provide several example results for visualization. + +### 1. Skeletons + +Basic skeletons: + +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5 +# Send the keypoints: +python3 apps/vis/vis_client.py --path ${vis}/smpl/keypoints3d +``` + +Body+Face+Hand: +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_total.yml write True out ${vis}/output/skel/total camera.cz 3. camera.cy 0.5 +# Send the keypoints: +python3 apps/vis/vis_client.py --path ${vis}/smplx/keypoints3d +``` + +Multiple people: + +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5 +# Send the keypoints: +python3 apps/vis/vis_client.py --path ${vis}/multi/keypoints3d --step 4 +``` + +### 2. Mesh + +SMPL: +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smpl.yml write True out ${vis}/output/smpl/base camera.cz 3. camera.cy 0.5 +# Send the SMPL parameters: +python3 apps/vis/vis_client.py --path ${vis}/smpl/smpl --smpl +``` + +SMPLX: +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smplx.yml write True out ${vis}/output/smpl/smplx camera.cz 3. camera.cy 0.5 +# Send the SMPL-X parameters: +python3 apps/vis/vis_client.py --path ${vis}/smplx/smpl --smpl +``` + +MANO: +```bash +# Start the server: +python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_manol.yml write True out ${vis}/output/smpl/manol camera.cz 3. camera.cy 0.5 +# Send the MANO parameters: +python3 apps/vis/vis_client.py --path ${vis}/manol/smpl --smpl +``` + +## Advanced + +### 1. Camera Setting + +Modify these keys to control the location and rotation of the camera. +```yml +camera: + phi: 0 + theta: -30 + cx: 0. + cy: 0. + cz: 6. +``` + +### 2. Scene Setting + +We provide some useful meshes in `easymocap.visualize.o3dwrapper`. If you want to add your own 3D mesh, add it under the key `scene`. + +### 3. Body Model + +At present, we only allow one type of body model in the scene to keep visualization fast, so you must set the body model before running `apps/vis/vis_server.py`. If you want to use different models in one scene, you can implement it and submit a pull request.
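The example commands above all replay saved JSON files through `apps/vis/vis_client.py`. If you want to stream your own keypoints instead, the following is a minimal sketch of a custom client built only on the interfaces that appear in this diff (`BaseSocketClient`, `send`, `close`); the host, port, joint count, and frame rate are assumptions taken from the defaults shown above, not fixed by the repository.

```python
# Minimal sketch of a custom client, assuming a server started with
# apps/vis/vis_server.py listening on the default 127.0.0.1:9999 and a
# body25 skeleton (25 joints, last column = confidence).
import time
import numpy as np
from easymocap.socket.base_client import BaseSocketClient

client = BaseSocketClient('127.0.0.1', 9999)
for frame in range(100):
    kpts = np.random.rand(25, 4)                   # replace with your own keypoints
    kpts[:, 3] = 1.                                # confidences
    client.send([{'id': 0, 'keypoints3d': kpts}])  # one dict per person
    time.sleep(0.03)                               # roughly 30 fps
client.close()
```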
\ No newline at end of file diff --git a/easymocap/assignment/track.py b/easymocap/assignment/track.py index 3c18e95..14b605b 100644 --- a/easymocap/assignment/track.py +++ b/easymocap/assignment/track.py @@ -2,7 +2,7 @@ @ Date: 2021-06-27 16:21:50 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-28 10:32:40 + @ LastEditTime: 2021-06-28 10:59:59 @ FilePath: /EasyMocapRelease/easymocap/assignment/track.py ''' from tqdm import tqdm @@ -146,6 +146,8 @@ class BaseTrack: for pid in range(occupancy.shape[0]): if occupancy[pid].sum() > self.MIN_FRAMES: pids.append(pid) + else: + print('[track] remove {} with {} frames'.format(pid, occupancy[pid].sum())) occupancy = occupancy[pids] for nf in range(nFrames): result = results[nf] @@ -171,6 +173,7 @@ class BaseTrack: right = right.min() + nf + 1 else: continue + print('[interp] {} in [{}, {}]'.format(pid, left, right)) # find valid (left, right) # interpolate 3d pose info_left = [res for res in results[left] if res['id'] == pid][0] @@ -180,7 +183,7 @@ class BaseTrack: res = self._interpolate(info_left, info_right, weight) res['id'] = pid results[nf_i].append(res) - occupancy[pid, nf_i] = pid + occupancy[pid, nf_i] = 1 return results, occupancy def smooth(self, results, occupancy): @@ -227,9 +230,9 @@ class Track3D(BaseTrack): import ipdb; ipdb.set_trace() return results - def write(self, results, mapid): + def write(self, results, occupancy): os.makedirs(self.out, exist_ok=True) - for nf, res in enumerate(results): + for nf, res in enumerate(tqdm(results)): outname = join(self.out, 'keypoints3d', '{:06d}.json'.format(nf)) result = results[nf] write_keypoints3d(outname, result) diff --git a/easymocap/mytools/reader.py b/easymocap/mytools/reader.py index 158a274..6333c26 100644 --- a/easymocap/mytools/reader.py +++ b/easymocap/mytools/reader.py @@ -2,8 +2,8 @@ @ Date: 2021-04-21 15:19:21 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-26 17:37:07 - @ FilePath: /EasyMocap/easymocap/mytools/reader.py + @ LastEditTime: 2021-06-28 11:55:27 + @ FilePath: /EasyMocapRelease/easymocap/mytools/reader.py ''' # function to read data """ @@ -44,8 +44,9 @@ def read_smpl(filename): datas = read_json(filename) outputs = [] for data in datas: - for key in ['Rh', 'Th', 'poses', 'shapes']: - data[key] = np.array(data[key]) + for key in ['Rh', 'Th', 'poses', 'shapes', 'expression']: + if key in data.keys(): + data[key] = np.array(data[key]) # for smplx results outputs.append(data) return outputs diff --git a/easymocap/socket/utils.py b/easymocap/socket/utils.py index cde700c..1c1d0bc 100644 --- a/easymocap/socket/utils.py +++ b/easymocap/socket/utils.py @@ -2,8 +2,8 @@ @ Date: 2021-05-24 20:07:34 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-16 14:42:23 - @ FilePath: /EasyMocap/easymocap/socket/utils.py + @ LastEditTime: 2021-06-28 12:05:35 + @ FilePath: /EasyMocapRelease/easymocap/socket/utils.py ''' import cv2 import numpy as np @@ -15,7 +15,7 @@ def encode_detect(data): return res.encode('ascii') def encode_smpl(data): - res = write_common_results(None, data, ['poses', 'shapes', 'Rh', 'Th']) + res = write_common_results(None, data, ['poses', 'shapes', 'expression', 'Rh', 'Th']) res = res.replace('\r', '').replace('\n', '').replace(' ', '') return res.encode('ascii') diff --git a/easymocap/visualize/skelmodel.py b/easymocap/visualize/skelmodel.py index e2683e6..fd39ce4 100644 --- a/easymocap/visualize/skelmodel.py +++ b/easymocap/visualize/skelmodel.py @@ -2,8 +2,8 @@ @ Date: 2021-01-17 
21:38:19 @ Author: Qing Shuai @ LastEditors: Qing Shuai - @ LastEditTime: 2021-06-18 18:48:37 - @ FilePath: /EasyMocap/easymocap/visualize/skelmodel.py + @ LastEditTime: 2021-06-28 11:43:00 + @ FilePath: /EasyMocapRelease/easymocap/visualize/skelmodel.py ''' import numpy as np import cv2 @@ -47,6 +47,7 @@ class SkelModel: config = CONFIG[body_type] self.nJoints = config['nJoints'] self.kintree = config['kintree'] + self.body_type = body_type self.device = 'none' cur_dir = os.path.dirname(__file__) faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=np.int) @@ -76,22 +77,30 @@ class SkelModel: for nper in range(keypoints3d.shape[0]): vertices_all = [] kpts3d = keypoints3d[nper] - for nj in range(self.nJoints): - if kpts3d[nj, -1] < min_conf: - vertices_all.append(self.vertices*0.001) - continue - vertices_all.append(self.vertices*r + kpts3d[nj:nj+1, :3]) # limb + closet_joints = [] for nk, (i, j) in enumerate(self.kintree): if kpts3d[i][-1] < min_conf or kpts3d[j][-1] < min_conf: vertices_all.append(self.vertices*0.001) continue T, _, length = calTransformation(kpts3d[i, :3], kpts3d[j, :3], r=1) - if length > 2: # 超过两米的 + if length > 2: # skip limbs longer than 2 meters vertices_all.append(self.vertices*0.001) continue + if length < self.joint_radius * 5: + closet_joints.append(i) + closet_joints.append(j) vertices = self.vertices @ T[:3, :3].T + T[:3, 3:].T vertices_all.append(vertices) + for nj in range(self.nJoints): + if self.body_type in ['bodyhand', 'bodyhandface'] and nj > 25: # smaller spheres for hand/face joints + r_ = r / 2 + else: + r_ = r + if kpts3d[nj, -1] < min_conf: + vertices_all.append(self.vertices*0.001) + continue + vertices_all.append(self.vertices*r_ + kpts3d[nj:nj+1, :3]) vertices = np.vstack(vertices_all) verts_final.append(vertices) verts_final = np.stack(verts_final)
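For the `--smpl` path enabled by the reader and socket changes above, the sketch below shows how fitted SMPL/SMPL-X parameters can be streamed to the visualization server: `read_smpl` converts `Rh`, `Th`, `poses`, `shapes` and, when present, `expression` to numpy arrays, and `send_smpl`/`encode_smpl` forward exactly these keys. The directory path and sleep interval are placeholders, not repository defaults.

```python
# Sketch of streaming SMPL/SMPL-X parameters, mirroring the --smpl branch of
# apps/vis/vis_client.py. 'path/to/smpl' is a placeholder directory containing
# per-frame parameter JSON files.
import time
from glob import glob
from os.path import join

from easymocap.mytools.reader import read_smpl
from easymocap.socket.base_client import BaseSocketClient

client = BaseSocketClient('127.0.0.1', 9999)
for name in sorted(glob(join('path/to/smpl', '*.json'))):
    data = read_smpl(name)   # list of per-person dicts; 'expression' is kept if present
    client.send_smpl(data)
    time.sleep(0.005)
client.close()
```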