commit 4f18dd770d
Author: davidpagnon
Date:   2024-11-14 17:36:09 +01:00
2 changed files with 27 additions and 21 deletions


@@ -33,7 +33,7 @@ import toml
 import cv2
 import json
 import re
-from anytree import Node, RenderTree
+import hashlib
 from copy import deepcopy
 import argparse
@@ -56,11 +56,15 @@ biocvplus_markers = ['ACROM_R', 'ACROM_L', 'C7', 'T10', 'CLAV', 'XIP_PROC', 'UA_
 ## FUNCTIONS
-def str_to_id(string):
+def str_to_id(string, length=8):
     '''
     Convert a string to an integer id
     '''
-    return ''.join([str(abs(ord(char) - 96)) for char in string])
+    # return ''.join([str(abs(ord(char) - 96)) for char in string])
+    hash_int = int(hashlib.md5(string.encode()).hexdigest(), 16)
+    return hash_int % (10 ** length) # Trim to desired length
 def computeP(calib_file, undistort=False):
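For reference (not part of the commit), a self-contained sketch of the new hashing scheme; the function body is copied from the hunk above and the example string is made up:

```
import hashlib

def str_to_id(string, length=8):
    '''Convert a string to a stable integer id of at most `length` digits.'''
    hash_int = int(hashlib.md5(string.encode()).hexdigest(), 16)
    return hash_int % (10 ** length)

# MD5 is deterministic, so a given name always maps to the same id,
# while the old ord()-based concatenation grew longer with every character
print(str_to_id('participant'))      # same value on every run, < 10**8
print(str_to_id('participant', 12))  # trimmed to at most 12 digits
```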
@@ -283,7 +287,7 @@ def dataset_to_mmpose2d(coords_df, mmpose_json_file, img_size, markerset='custom
     # transform first name into an integer, and append the other numbers from persons
     persons = list(set(['_'.join(item.split('_')[:5]) for item in coords_df.columns.levels[1]]))
-    person_ids = [str_to_id(p.split('_')[1]) + ''.join(p.split('_')[3:]) if len(p.split('_'))>=3
+    person_ids = [int(str(str_to_id(p.split('_')[1])) + ''.join(p.split('_')[3:])) if len(p.split('_'))>=3
                   else str_to_id(p.split('_')[0])
                   for p in persons]
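Why the new `int(str(...))` wrapper: `str_to_id` now returns an integer, while the trailing fields are still strings, so the pieces are concatenated as text and cast back once. A sketch with a hypothetical label, assuming the trailing fields are digits:

```
import hashlib

def str_to_id(string, length=8):  # as in the hunk above
    return int(hashlib.md5(string.encode()).hexdigest(), 16) % (10 ** length)

p = 'subject_p1_trial_03_0001'    # hypothetical person label
parts = p.split('_')              # ['subject', 'p1', 'trial', '03', '0001']
# str_to_id(parts[1]) is an int; ''.join(parts[3:]) is the string '030001'
person_id = int(str(str_to_id(parts[1])) + ''.join(parts[3:]))
print(person_id)
```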
@@ -305,11 +309,12 @@ def dataset_to_mmpose2d(coords_df, mmpose_json_file, img_size, markerset='custom
         file_name = coords_df.index[i]
         w, h = img_size
         # id from concatenation of numbers from path
-        file_id = ''.join(re.findall(r'\d+', str(file_name)))
+        # file_id = int(''.join(re.findall(r'\d+', str(file_name))))
+        file_id = int(hashlib.md5(file_name.encode()).hexdigest(), 16) % (10**12) # Keep only 12 digits
         labels2d_json_data['images'] += [{'file_name': file_name,
-                                          'height': str(h),
-                                          'width': str(w),
+                                          'height': h,
+                                          'width': w,
                                           'id': file_id,
                                           'license': 1}]
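Two things change in this hunk: the image id becomes a truncated MD5 of the file name, bounded at 12 digits regardless of how many numbers the path contains, and `height`/`width` are stored as numbers, which is what COCO tooling expects. A sketch with a made-up path:

```
import hashlib

file_name = 'cam01/frame_000123.png'  # hypothetical path
# the old digit concatenation ('01000123' here) grows with every number
# in the path; the truncated hash always fits in 12 digits
file_id = int(hashlib.md5(file_name.encode()).hexdigest(), 16) % (10**12)
print(file_id)
```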
@@ -323,29 +328,28 @@ def dataset_to_mmpose2d(coords_df, mmpose_json_file, img_size, markerset='custom
                 coords_mk = coords.loc[coords.index.get_level_values(2)==marker]
                 coords_list += coords_mk.tolist()+[2] if not np.isnan(coords_mk).any() else [0.0, 0.0, 0]
-            num_keypoints = len(marker_list)
             # bbox
             x_coords = coords.loc[coords.index.get_level_values(3)=='x']
             y_coords = coords.loc[coords.index.get_level_values(3)=='y']
             min_x, min_y, max_x, max_y = np.nanmin(x_coords), np.nanmin(y_coords), np.nanmax(x_coords), np.nanmax(y_coords)
-            bbox = [min_x, min_y, max_x, max_y]
-            # bbox_width = max_x - min_x
-            # bbox_height = max_y - min_y
-            # bbox = [min_x, min_y, bbox_width, bbox_height]
+            bbox_width = np.round(max_x - min_x, decimals=1)
+            bbox_height = np.round(max_y - min_y, decimals=1)
+            # bbox = [min_x, min_y, max_x, max_y]
+            bbox = [min_x, min_y, bbox_width, bbox_height] # COCO format
             # num_keypoints, id, category_id
+            num_keypoints = len(marker_list)
-            id = person_ids[p]
+            person_id = person_ids[p]
             category_id = 1
-            # segmentation and area not filled, and each annotation represents one single person
-            segmentation = []
-            area = 0
-            iscrowd = 0 # 1 if len(persons)>1 else 0
+            segmentation = [[min_x, min_y, min_x, max_y, max_x, max_y, max_x, min_y]] # no real mask: bbox corners as polygon
+            area = np.round(bbox_width * bbox_height, decimals=1)
+            iscrowd = 0 # each annotation represents one single person
             if not np.isnan(bbox).any():
                 labels2d_json_data['annotations'] += [{ 'keypoints': coords_list,
                                                         'num_keypoints': num_keypoints,
                                                         'bbox': bbox,
-                                                        'id': id,
+                                                        'id': person_id,
                                                         'image_id': file_id,
                                                         'category_id': category_id,
                                                         'segmentation': segmentation,
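The substance here is the switch to COCO conventions: `bbox` is `[x_min, y_min, width, height]` rather than two corners, `area` is the box area, and the four box corners stand in for a real segmentation mask; renaming `id` to `person_id` also stops shadowing Python's built-in `id`. A sketch with made-up extremes:

```
import numpy as np

min_x, min_y, max_x, max_y = 104.2, 56.7, 398.9, 712.3  # hypothetical values
bbox_width = np.round(max_x - min_x, decimals=1)
bbox_height = np.round(max_y - min_y, decimals=1)
bbox = [min_x, min_y, bbox_width, bbox_height]          # COCO xywh, not xyxy
area = np.round(bbox_width * bbox_height, decimals=1)   # box area, no mask
# without a real mask, the box corners double as the segmentation polygon
segmentation = [[min_x, min_y, min_x, max_y, max_x, max_y, max_x, min_y]]
print(bbox, area, segmentation)
```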
@@ -447,8 +451,8 @@ def reproj_from_trc_calib_func(**args):
     # Replace by nan when reprojection out of image
     for cam in range(len(P_all_frame)):
-        x_valid = data_proj[cam].iloc[:,::2] < calib_params_size[cam][0]
-        y_valid = data_proj[cam].iloc[:,1::2] < calib_params_size[cam][1]
+        x_valid = (data_proj[cam].iloc[:, ::2] >= 0) & (data_proj[cam].iloc[:, ::2] < calib_params_size[cam][0])
+        y_valid = (data_proj[cam].iloc[:, 1::2] >= 0) & (data_proj[cam].iloc[:, 1::2] < calib_params_size[cam][1])
         data_proj[cam].iloc[:, ::2] = data_proj[cam].iloc[:, ::2].where(x_valid, np.nan)
-        data_proj[cam].iloc[:, ::2] = np.where(y_valid==False, np.nan, data_proj[cam].iloc[:, ::2])
+        data_proj[cam].iloc[:, 1::2] = data_proj[cam].iloc[:, 1::2].where(y_valid, np.nan)
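This hunk fixes two bugs at once: points were only rejected past the right and bottom edges, so negative coordinates slipped through, and the y mask was applied to the x columns. A toy reproduction, assuming x/y columns alternate as in the projected data:

```
import numpy as np
import pandas as pd

w, h = 640, 480                                    # hypothetical image size
df = pd.DataFrame([[-5.0, 100.0, 700.0, 500.0]],   # x0, y0, x1, y1
                  columns=['x0', 'y0', 'x1', 'y1'])

x_valid = (df.iloc[:, ::2] >= 0) & (df.iloc[:, ::2] < w)
y_valid = (df.iloc[:, 1::2] >= 0) & (df.iloc[:, 1::2] < h)
df.iloc[:, ::2] = df.iloc[:, ::2].where(x_valid, np.nan)    # x0, x1 out of frame
df.iloc[:, 1::2] = df.iloc[:, 1::2].where(y_valid, np.nan)  # y1 out, y0 kept
print(df)
```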


@@ -152,6 +152,7 @@ If you don't use Anaconda, type `python -V` in terminal to make sure python>=3.9
 Finally, install ONNX Runtime with GPU support:
 ```
+pip uninstall onnxruntime
 pip install onnxruntime-gpu
 ```
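The uninstall step matters because `onnxruntime` and `onnxruntime-gpu` can conflict when both are installed. To confirm the GPU build is picked up afterwards (a quick check, not part of the commit):

```
import onnxruntime as ort
print(ort.get_available_providers())  # should list 'CUDAExecutionProvider'
```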
@@ -461,6 +462,7 @@ If you already have a calibration file, set `calibration_type` to `convert`
   - Export calibration to `.qca.txt` within QTM.
   - Copy it into the `Calibration` Pose2Sim folder.
   - Set `convert_from` to 'qualisys' in your [Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Demo_SinglePerson/Config.toml) file, as in the snippet after this hunk. Change `binning_factor` to 2 if you film in 540p.
+  - If your cameras are mounted vertically and the videos are rendered sideways, rotate both the videos and the calibration file before running pose estimation. [Use this script](https://github.com/perfanalytics/pose2sim/issues/136#issuecomment-2398110061).
 - **From [Optitrack](https://optitrack.com/):** Exporting calibration will be available in Motive 3.2. In the meantime:
   - Calculate intrinsics with a board (see next section).
   - Use their C++ API [to retrieve extrinsic properties](https://docs.optitrack.com/developer-tools/motive-api/motive-api-function-reference#tt_cameraxlocation). Translation can be copied as is into your `Calib.toml` file, but `TT_CameraOrientationMatrix` first needs to be [converted to a Rodrigues vector](https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga61585db663d9da06b68e70cfbf6a1eac) with OpenCV; see the sketch below and the instructions [here](https://github.com/perfanalytics/pose2sim/issues/28).
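For the Qualisys route, the two keys mentioned above would look like this in your Config.toml (a sketch only; keep them wherever they already appear in the demo file):

```
convert_from = 'qualisys'
binning_factor = 2   # set to 2 if you film in 540p, otherwise keep the default
```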
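And a sketch of the Optitrack orientation conversion; the matrix values below are placeholders for whatever `TT_CameraOrientationMatrix` returns:

```
import numpy as np
import cv2

R = np.array([[1., 0., 0.],   # placeholder 3x3 orientation matrix:
              [0., 1., 0.],   # fill in the TT_CameraOrientationMatrix values
              [0., 0., 1.]])
rvec, _ = cv2.Rodrigues(R)    # 3x1 Rodrigues rotation vector for Calib.toml
print(rvec.flatten())
```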