[vis] update realtime visualization

shuaiqing 2021-06-28 12:14:56 +08:00
parent 6cce25792c
commit be1155aa7d
13 changed files with 188 additions and 43 deletions

View File: Readme.md

@@ -2,7 +2,7 @@
  * @Date: 2021-01-13 20:32:12
  * @Author: Qing Shuai
  * @LastEditors: Qing Shuai
- * @LastEditTime: 2021-06-14 16:41:00
+ * @LastEditTime: 2021-06-28 11:13:59
  * @FilePath: /EasyMocapRelease/Readme.md
 -->
@@ -59,13 +59,13 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to
   <sup>Internet videos of Roger Federer's serving<sup/>
 </div>

-### Multiple views of multiple people (Coming soon)
+### Multiple views of multiple people

 [![report](https://img.shields.io/badge/CVPR20-mvpose-red)](https://arxiv.org/pdf/1901.04111.pdf) [![quickstart](https://img.shields.io/badge/quickstart-green)](./doc/todo.md)

 <div align="center">
-  <img src="doc/imocap/mvpose.gif" width="80%"><br/>
-  <sup>Captured with 4 consumer cameras<sup/>
+  <img src="doc/assets/mvmp1f.gif" width="80%"><br/>
+  <sup>Captured with 8 consumer cameras<sup/>
 </div>

 ### Novel view synthesis from sparse views
@@ -88,6 +88,7 @@ This is the basic code for fitting SMPL[1]/SMPL+H[2]/SMPL-X[3]/MANO[2] model to
 ## Updates
+- 06/28/2021: The **Multi-view Multi-person** part is released!
 - 06/10/2021: The **real-time 3D visualization** part is released!
 - 04/11/2021: The calibration tool and the annotator are released.
 - 04/11/2021: **Mirrored-Human** part is released.

View File: apps/vis/vis_client.py

@@ -2,9 +2,10 @@
 @ Date: 2021-05-24 18:57:48
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-04 16:43:00
+@ LastEditTime: 2021-06-28 11:30:45
 @ FilePath: /EasyMocapRelease/apps/vis/vis_client.py
 '''
+from easymocap.mytools.reader import read_smpl
 import socket
 import time
 from easymocap.socket.base_client import BaseSocketClient
@@ -12,35 +13,48 @@ import os

 def send_rand(client):
     import numpy as np
-    for _ in range(1000):
-        k3d = np.random.rand(25, 4)
-        data = [
-            {
-                'id': 0,
-                'keypoints3d': k3d
-            }
-        ]
-        client.send(data)
+    N_person = 10
+    datas = []
+    for i in range(N_person):
+        transl = (np.random.rand(1, 3) - 0.5) * 3
+        kpts = np.random.rand(25, 4)
+        kpts[:, :3] += transl
+        data = {
+            'id': i,
+            'keypoints3d': kpts
+        }
+        datas.append(data)
+    for _ in range(1):
+        for i in range(N_person):
+            move = (np.random.rand(1, 3) - 0.5) * 0.1
+            datas[i]['keypoints3d'][:, :3] += move
+        client.send(datas)
         time.sleep(0.005)
     client.close()

-def send_dir(client, path):
+def send_dir(client, path, step):
     from os.path import join
     from glob import glob
     from tqdm import tqdm
     from easymocap.mytools.reader import read_keypoints3d
     results = sorted(glob(join(path, '*.json')))
-    for result in tqdm(results):
-        data = read_keypoints3d(result)
-        client.send(data)
+    for result in tqdm(results[::step]):
+        if args.smpl:
+            data = read_smpl(result)
+            client.send_smpl(data)
+        else:
+            data = read_keypoints3d(result)
+            client.send(data)
         time.sleep(0.005)

 if __name__ == "__main__":
     import argparse
     parser = argparse.ArgumentParser()
-    parser.add_argument('--host', type=str, default='auto')
+    parser.add_argument('--host', type=str, default='127.0.0.1')
     parser.add_argument('--port', type=int, default=9999)
+    parser.add_argument('--step', type=int, default=1)
     parser.add_argument('--path', type=str, default=None)
+    parser.add_argument('--smpl', action='store_true')
     parser.add_argument('--debug', action='store_true')
     args = parser.parse_args()
@@ -49,6 +63,6 @@ if __name__ == "__main__":
     client = BaseSocketClient(args.host, args.port)
     if args.path is not None and os.path.isdir(args.path):
-        send_dir(client, args.path)
+        send_dir(client, args.path, step=args.step)
     else:
         send_rand(client)

View File: config/vis3d/o3d_scene.yml

@@ -18,6 +18,13 @@ body_model:
     body_type: "body25"
     joint_radius: 0.02

+camera:
+  phi: 0
+  theta: -30
+  cx: 0.
+  cy: 0.
+  cz: 6.
+
 scene:
   "easymocap.visualize.o3dwrapper.create_coord":
     camera: [0, 0, 0]
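For orientation: each key under `scene` names an importable factory function, and its value supplies the keyword arguments. A minimal sketch of how such a block could be instantiated, under that assumption; this loop is an illustration, not the actual `vis_server.py` code:

```python
# Sketch (assumed mechanics): build scene geometries by dynamically importing
# each factory named in the config and calling it with the configured kwargs.
import importlib

scene_cfg = {"easymocap.visualize.o3dwrapper.create_coord": {"camera": [0, 0, 0]}}
geometries = []
for name, kwargs in scene_cfg.items():
    module_name, func_name = name.rsplit('.', 1)
    func = getattr(importlib.import_module(module_name), func_name)
    geometries.append(func(**kwargs))
```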

View File: config/vis3d/o3d_scene_manol.yml (new file)

@@ -0,0 +1,11 @@
+parent: "config/vis3d/o3d_scene.yml"
+body_model:
+  module: "easymocap.smplmodel.body_model.SMPLlayer"
+  args:
+    _no_merge_: True
+    model_path: "data/smplx/smplh/MANO_LEFT.pkl"
+    model_type: "mano"
+    gender: "neutral"
+    device: "cuda"
+    regressor_path: "data/smplx/J_regressor_mano_LEFT.txt"

View File: config/vis3d/o3d_scene_smplx.yml (new file)

@@ -0,0 +1,11 @@
+parent: "config/vis3d/o3d_scene.yml"
+body_model:
+  module: "easymocap.smplmodel.body_model.SMPLlayer"
+  args:
+    _no_merge_: True
+    model_path: "data/smplx/smplx"
+    model_type: "smplx"
+    gender: "neutral"
+    device: "cuda"
+    regressor_path: "data/smplx/J_regressor_body25_smplx.txt"

View File: config/vis3d/o3d_scene_total.yml (new file)

@@ -0,0 +1,6 @@
+parent: "config/vis3d/o3d_scene.yml"
+body_model:
+  args:
+    body_type: "bodyhandface"
+    joint_radius: 0.02

BIN: doc/assets/mvmp1f.gif — new binary file (2.3 MiB, not shown)

View File: doc/installation.md

@@ -2,7 +2,7 @@
  * @Date: 2021-04-02 11:52:33
  * @Author: Qing Shuai
  * @LastEditors: Qing Shuai
- * @LastEditTime: 2021-04-13 17:15:49
+ * @LastEditTime: 2021-06-21 21:18:45
  * @FilePath: /EasyMocapRelease/doc/installation.md
 -->
 # EasyMocap - Installation
@@ -27,6 +27,8 @@ data
 ├── J_regressor_body25.npy
 ├── J_regressor_body25_smplh.txt
 ├── J_regressor_body25_smplx.txt
+├── J_regressor_mano_LEFT.txt
+├── J_regressor_mano_RIGHT.txt
 ├── smpl
 │   ├── SMPL_FEMALE.pkl
 │   ├── SMPL_MALE.pkl
@@ -47,7 +49,7 @@ This part is used in `1v1p*.py`. You can skip this step if you only use the mult
 Download pretrained SPIN model [here](http://visiondata.cis.upenn.edu/spin/model_checkpoint.pt) and place it to `data/models/spin_checkpoints.pt`.

-Fetch the extra data [here](http://visiondata.cis.upenn.edu/spin/dataset_extras.tar.gz) and place the `smpl_mean_params.npz` to `data/models/smpl_mean_params.npz`.
+Fetch the extra data [here](http://visiondata.cis.upenn.edu/spin/data.tar.gz) and place the `smpl_mean_params.npz` to `data/models/smpl_mean_params.npz`.

 ## 0.3 (Optional) 2D model

View File: doc/realtime_visualization.md

@@ -2,7 +2,7 @@
  * @Date: 2021-06-04 15:56:55
  * @Author: Qing Shuai
  * @LastEditors: Qing Shuai
- * @LastEditTime: 2021-06-12 15:29:23
+ * @LastEditTime: 2021-06-28 12:11:58
  * @FilePath: /EasyMocapRelease/doc/realtime_visualization.md
 -->
 # EasyMoCap -> Real-time Visualization
@@ -74,4 +74,84 @@ data = [

 ## Define your scene

 In the configuration file, we mainly define the `body_model` and the `scene`. You can replace them for your data.
+## Examples
+
+To help you understand our code, we provide several example results for visualization.
+
+### 1. Skeletons
+
+Basic skeletons:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5
+# Send the keypoints:
+python3 apps/vis/vis_client.py --path ${vis}/smpl/keypoints3d
+```
+
+Body + face + hands:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_total.yml write True out ${vis}/output/skel/total camera.cz 3. camera.cy 0.5
+# Send the keypoints:
+python3 apps/vis/vis_client.py --path ${vis}/smplx/keypoints3d
+```
+
+Multiple people:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene.yml write True out ${vis}/output/skel/base camera.cz 3. camera.cy 0.5
+# Send the keypoints:
+python3 apps/vis/vis_client.py --path ${vis}/multi/keypoints3d --step 4
+```
+
+### 2. Mesh
+
+SMPL:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smpl.yml write True out ${vis}/output/smpl/base camera.cz 3. camera.cy 0.5
+# Send the SMPL parameters:
+python3 apps/vis/vis_client.py --path ${vis}/smpl/smpl --smpl
+```
+
+SMPL-X:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_smplx.yml write True out ${vis}/output/smpl/smplx camera.cz 3. camera.cy 0.5
+# Send the SMPL-X parameters:
+python3 apps/vis/vis_client.py --path ${vis}/smplx/smpl --smpl
+```
+
+MANO:
+```bash
+# Start the server:
+python3 apps/vis/vis_server.py --cfg config/vis3d/o3d_scene_manol.yml write True out ${vis}/output/smpl/manol camera.cz 3. camera.cy 0.5
+# Send the MANO parameters:
+python3 apps/vis/vis_client.py --path ${vis}/manol/smpl --smpl
+```
+## Advanced
+
+### 1. Camera Setting
+
+Modify these keys to control the location and rotation of the camera:
+```yml
+camera:
+  phi: 0
+  theta: -30
+  cx: 0.
+  cy: 0.
+  cz: 6.
+```
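For intuition about the `camera` keys added above, here is a rough sketch of how spherical angles and a position could be turned into a camera pose. The actual mapping lives in `vis_server.py` and may differ, so treat every line here as an assumption:

```python
import numpy as np

def camera_pose_sketch(phi=0., theta=-30., cx=0., cy=0., cz=6.):
    """Hypothetical: phi/theta as azimuth/elevation (degrees), (cx, cy, cz)
    as the camera center; returns a world-to-camera rotation and translation."""
    phi, theta = np.deg2rad(phi), np.deg2rad(theta)
    Rz = np.array([[np.cos(phi), -np.sin(phi), 0.],
                   [np.sin(phi),  np.cos(phi), 0.],
                   [0., 0., 1.]])
    Rx = np.array([[1., 0., 0.],
                   [0., np.cos(theta), -np.sin(theta)],
                   [0., np.sin(theta),  np.cos(theta)]])
    R = Rx @ Rz                        # yaw around z, then tilt around x
    t = -R @ np.array([cx, cy, cz])    # world-to-camera translation
    return R, t
```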
+### 2. Scene Setting
+
+We provide some useful meshes in `easymocap.visualize.o3dwrapper`. If you want to add your own 3D mesh, add it under the `scene` key.
+### 3. Body Model
+
+At present, we only allow one type of body model per scene, for fast visualization, so you must set the body model before running `apps/vis/vis_server.py`. If you want to mix different models in one scene, you can implement it and open a pull request.
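Beyond the bundled `vis_client.py`, the protocol shown in this commit is small enough to drive directly. A minimal sketch of a custom client, using only calls that appear in `apps/vis/vis_client.py` (host and port are that script's defaults):

```python
# Send one frame of body25 keypoints for a single person, then close.
import numpy as np
from easymocap.socket.base_client import BaseSocketClient

client = BaseSocketClient('127.0.0.1', 9999)
kpts = np.zeros((25, 4))   # rows are (x, y, z, confidence) per body25 joint
kpts[:, 3] = 1.            # mark every joint as confident
client.send([{'id': 0, 'keypoints3d': kpts}])
client.close()
```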

View File: easymocap/assignment/track.py

@@ -2,7 +2,7 @@
 @ Date: 2021-06-27 16:21:50
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-28 10:32:40
+@ LastEditTime: 2021-06-28 10:59:59
 @ FilePath: /EasyMocapRelease/easymocap/assignment/track.py
 '''
 from tqdm import tqdm
@@ -146,6 +146,8 @@ class BaseTrack:
         for pid in range(occupancy.shape[0]):
             if occupancy[pid].sum() > self.MIN_FRAMES:
                 pids.append(pid)
+            else:
+                print('[track] remove {} with {} frames'.format(pid, occupancy[pid].sum()))
         occupancy = occupancy[pids]
         for nf in range(nFrames):
             result = results[nf]
@@ -171,6 +173,7 @@ class BaseTrack:
                 right = right.min() + nf + 1
             else:
                 continue
+            print('[interp] {} in [{}, {}]'.format(pid, left, right))
             # find valid (left, right)
             # interpolate 3d pose
             info_left = [res for res in results[left] if res['id'] == pid][0]
@@ -180,7 +183,7 @@ class BaseTrack:
             res = self._interpolate(info_left, info_right, weight)
             res['id'] = pid
             results[nf_i].append(res)
-            occupancy[pid, nf_i] = pid
+            occupancy[pid, nf_i] = 1
         return results, occupancy

     def smooth(self, results, occupancy):
@@ -227,9 +230,9 @@ class Track3D(BaseTrack):
             import ipdb; ipdb.set_trace()
         return results

-    def write(self, results, mapid):
+    def write(self, results, occupancy):
         os.makedirs(self.out, exist_ok=True)
-        for nf, res in enumerate(results):
+        for nf, res in enumerate(tqdm(results)):
             outname = join(self.out, 'keypoints3d', '{:06d}.json'.format(nf))
             result = results[nf]
             write_keypoints3d(outname, result)
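`_interpolate` itself is not shown in this diff; for readers of the hunks above, here is a plausible sketch of what it computes, assuming simple linear blending of the bracketing detections (an assumption, not the repository's implementation):

```python
import copy
import numpy as np

def interpolate_sketch(info_left, info_right, weight):
    # Hypothetical: blend the same person's keypoints between the two
    # bracketing frames; weight = 0 returns left, weight = 1 returns right.
    res = copy.deepcopy(info_left)
    kl = np.asarray(info_left['keypoints3d'])
    kr = np.asarray(info_right['keypoints3d'])
    res['keypoints3d'] = (1 - weight) * kl + weight * kr
    return res
```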

View File: easymocap/mytools/reader.py

@@ -2,8 +2,8 @@
 @ Date: 2021-04-21 15:19:21
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-26 17:37:07
-@ FilePath: /EasyMocap/easymocap/mytools/reader.py
+@ LastEditTime: 2021-06-28 11:55:27
+@ FilePath: /EasyMocapRelease/easymocap/mytools/reader.py
 '''
 # function to read data
 """
@@ -44,8 +44,9 @@ def read_smpl(filename):
     datas = read_json(filename)
     outputs = []
     for data in datas:
-        for key in ['Rh', 'Th', 'poses', 'shapes']:
-            data[key] = np.array(data[key])
+        for key in ['Rh', 'Th', 'poses', 'shapes', 'expression']:
+            if key in data.keys():
+                data[key] = np.array(data[key])
         # for smplx results
         outputs.append(data)
     return outputs
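For context, `read_json(filename)` yields one record per person, and only the listed keys are converted to arrays. A hypothetical record illustrating the shape of the input (the field sizes are assumptions; `expression` appears only in SMPL-X results, hence the new key-existence check):

```python
# Hypothetical per-person record as read_smpl might receive it:
record = {
    'id': 0,
    'Rh': [[0.1, 0.0, 0.0]],     # global rotation (axis-angle)
    'Th': [[0.0, 0.0, 3.0]],     # global translation
    'poses': [[0.0] * 72],       # pose parameters; length depends on the model
    'shapes': [[0.0] * 10],      # shape coefficients
    'expression': [[0.0] * 10],  # SMPL-X only
}
```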

View File: easymocap/socket/utils.py

@@ -2,8 +2,8 @@
 @ Date: 2021-05-24 20:07:34
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-16 14:42:23
-@ FilePath: /EasyMocap/easymocap/socket/utils.py
+@ LastEditTime: 2021-06-28 12:05:35
+@ FilePath: /EasyMocapRelease/easymocap/socket/utils.py
 '''
 import cv2
 import numpy as np
@@ -15,7 +15,7 @@ def encode_detect(data):
     return res.encode('ascii')

 def encode_smpl(data):
-    res = write_common_results(None, data, ['poses', 'shapes', 'Rh', 'Th'])
+    res = write_common_results(None, data, ['poses', 'shapes', 'expression', 'Rh', 'Th'])
     res = res.replace('\r', '').replace('\n', '').replace(' ', '')
     return res.encode('ascii')
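Judging from the stripping of whitespace and newlines, the wire payload is compact single-line ASCII JSON (assuming `write_common_results` returns the serialized string when its filename argument is `None`). A hypothetical round-trip on the receiving side:

```python
import json

# Hypothetical payload shape; real parameter arrays are much longer.
payload = b'[{"id":0,"poses":[[0.0]],"shapes":[[0.0]],"Rh":[[0.0]],"Th":[[0.0]]}]'
datas = json.loads(payload.decode('ascii'))
print(datas[0]['id'])  # -> 0
```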

View File: easymocap/visualize/skelmodel.py

@@ -2,8 +2,8 @@
 @ Date: 2021-01-17 21:38:19
 @ Author: Qing Shuai
 @ LastEditors: Qing Shuai
-@ LastEditTime: 2021-06-18 18:48:37
-@ FilePath: /EasyMocap/easymocap/visualize/skelmodel.py
+@ LastEditTime: 2021-06-28 11:43:00
+@ FilePath: /EasyMocapRelease/easymocap/visualize/skelmodel.py
 '''
 import numpy as np
 import cv2
@@ -47,6 +47,7 @@ class SkelModel:
         config = CONFIG[body_type]
         self.nJoints = config['nJoints']
         self.kintree = config['kintree']
+        self.body_type = body_type
         self.device = 'none'
         cur_dir = os.path.dirname(__file__)
         faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=np.int)
@@ -76,22 +77,30 @@ class SkelModel:
         for nper in range(keypoints3d.shape[0]):
             vertices_all = []
             kpts3d = keypoints3d[nper]
-            for nj in range(self.nJoints):
-                if kpts3d[nj, -1] < min_conf:
-                    vertices_all.append(self.vertices*0.001)
-                    continue
-                vertices_all.append(self.vertices*r + kpts3d[nj:nj+1, :3])
             # limb
+            closet_joints = []
             for nk, (i, j) in enumerate(self.kintree):
                 if kpts3d[i][-1] < min_conf or kpts3d[j][-1] < min_conf:
                     vertices_all.append(self.vertices*0.001)
                     continue
                 T, _, length = calTransformation(kpts3d[i, :3], kpts3d[j, :3], r=1)
-                if length > 2: # 超过两米的
+                if length > 2: # longer than 2 meters
                     vertices_all.append(self.vertices*0.001)
                     continue
+                if length < self.joint_radius * 5:
+                    closet_joints.append(i)
+                    closet_joints.append(j)
                 vertices = self.vertices @ T[:3, :3].T + T[:3, 3:].T
                 vertices_all.append(vertices)
+            for nj in range(self.nJoints):
+                if self.body_type in ['bodyhand', 'bodyhandface'] and nj > 25:
+                    r_ = r / 2
+                else:
+                    r_ = r
+                if kpts3d[nj, -1] < min_conf:
+                    vertices_all.append(self.vertices*0.001)
+                    continue
+                vertices_all.append(self.vertices*r_ + kpts3d[nj:nj+1, :3])
             vertices = np.vstack(vertices_all)
             verts_final.append(vertices)
         verts_final = np.stack(verts_final)
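`calTransformation` is only used, not defined, in this hunk. A sketch of the transform it plausibly returns, mapping a unit, z-aligned limb cylinder onto the segment between two joints; every detail here is an assumption and the repository's implementation may differ:

```python
import numpy as np

def cal_transformation_sketch(p0, p1, r=1):
    # Hypothetical: 4x4 transform whose rotation columns scale and orient a
    # unit cylinder (radius 1, height 1, z-aligned, centered at the origin).
    direction = p1 - p0
    length = float(np.linalg.norm(direction))
    z = direction / max(length, 1e-8)
    up = np.array([0., 0., 1.]) if abs(z[2]) < 0.99 else np.array([1., 0., 0.])
    x = np.cross(up, z)
    x /= np.linalg.norm(x)
    y = np.cross(z, x)
    T = np.eye(4)
    T[:3, :3] = np.stack([x * r, y * r, z * length], axis=1)
    T[:3, 3] = (p0 + p1) / 2            # midpoint of the limb
    return T, z, length                 # matches the `T, _, length` unpacking
```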