davidpagnon 2024-04-23 22:46:20 +02:00
commit 14a4c15fb5
3 changed files with 24 additions and 5 deletions

@@ -24,6 +24,7 @@
 import os
 import numpy as np
 import json
+import re
 import matplotlib.pyplot as plt
 from matplotlib.animation import FuncAnimation, FileMovieWriter
 import argparse
@@ -41,6 +42,21 @@ __status__ = "Development"
 ## FUNCTIONS
+def sort_stringlist_by_last_number(string_list):
+    '''
+    Sort a list of strings by the last number that appears in each string.
+    Works even if other numbers appear earlier in the string or if characters
+    follow the last number. Ignores alphabetical order.
+    Example: ['json1', 'js4on2.b', 'eypoints_0000003.json', 'ajson0', 'json10']
+    gives:   ['ajson0', 'json1', 'js4on2.b', 'eypoints_0000003.json', 'json10']
+    '''
+    def sort_by_last_number(s):
+        return int(re.findall(r'\d+', s)[-1])
+    return sorted(string_list, key=sort_by_last_number)
+
 def save_inp_as_output(_img, c_name, dpi=100):
     h, w, _ = _img.shape
     fig, axes = plt.subplots(figsize=(h/dpi, w/dpi))
@@ -66,7 +82,7 @@ def json_display_without_img_func(**args):
     json_folder = os.path.realpath(args.get('json_folder'))
     json_fnames = [f for f in os.listdir(json_folder) if os.path.isfile(os.path.join(json_folder, f))]
-    json_fnames.sort(key=lambda f: int(f.split('_')[0])) # sort by frame number
+    json_fnames = sort_stringlist_by_last_number(json_fnames)
     output_img_folder = args.get('output_img_folder')
     if output_img_folder==None:
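The new helper addresses a limitation of the previous sort key: `int(f.split('_')[0])` assumed every JSON filename starts with a bare frame number, which fails as soon as the name carries a prefix before the first underscore. A minimal standalone sketch of the new behavior (the filenames are made up for illustration, and the helper is re-declared here rather than imported):

```python
# Sort JSON filenames by the last number they contain, regardless of prefixes.
import re

def sort_stringlist_by_last_number(string_list):
    '''Sort strings by the last number found in each string.'''
    return sorted(string_list, key=lambda s: int(re.findall(r'\d+', s)[-1]))

fnames = ['cam01_keypoints_000010.json', 'cam01_keypoints_000002.json', 'cam01_keypoints_000001.json']

# The old key, int(f.split('_')[0]), would raise ValueError here because
# 'cam01' is not a plain integer.
print(sort_stringlist_by_last_number(fnames))
# ['cam01_keypoints_000001.json', 'cam01_keypoints_000002.json', 'cam01_keypoints_000010.json']
```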

@@ -178,7 +178,7 @@ def sort_people(Q_kpt_old, Q_kpt):
     # Compute distance between persons from one frame to another
     frame_by_frame_dist = []
     for comb in personsIDs_comb:
-        frame_by_frame_dist += [euclidean_distance(Q_kpt_old[comb[0]][:3],Q_kpt[comb[1]][:3])]
+        frame_by_frame_dist += [euclidean_distance(Q_kpt_old[comb[0]],Q_kpt[comb[1]])]
     # sort correspondences by distance
     minL, _, associated_tuples = min_with_single_indices(frame_by_frame_dist, personsIDs_comb)
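`euclidean_distance` and `min_with_single_indices` are Pose2Sim helpers that do not appear in this diff; the change above compares whole keypoint arrays between frames instead of only their first three values. As a rough illustration of the association idea, here is a simplified, self-contained sketch (the stand-in distance function and the `associate_persons` helper are hypothetical, not the Pose2Sim implementation):

```python
# Greedy frame-to-frame association of person indices by keypoint distance.
import itertools
import numpy as np

def euclidean_distance(q1, q2):
    # Stand-in: mean Euclidean distance between matching keypoints (N x 3 arrays)
    return np.nanmean(np.linalg.norm(np.asarray(q1) - np.asarray(q2), axis=-1))

def associate_persons(Q_kpt_old, Q_kpt):
    '''Pair each person of the previous frame with the closest person of the current one.'''
    combs = list(itertools.product(range(len(Q_kpt_old)), range(len(Q_kpt))))
    dists = [euclidean_distance(Q_kpt_old[i], Q_kpt[j]) for i, j in combs]
    # Examine candidate pairs from smallest to largest distance,
    # keeping each person index at most once
    pairs, used_old, used_new = [], set(), set()
    for d, (i, j) in sorted(zip(dists, combs)):
        if i not in used_old and j not in used_new:
            pairs.append((i, j, d))
            used_old.add(i)
            used_new.add(j)
    return pairs

# Two persons with 25 keypoints each, whose order is swapped in the new frame
prev = [np.random.rand(25, 3), np.random.rand(25, 3) + 2]
curr = [prev[1] + 0.01, prev[0] + 0.01]
print(associate_persons(prev, curr))  # old person 0 -> new person 1, old person 1 -> new person 0
```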

@@ -659,10 +659,13 @@ Displays X, Y, Z coordinates of each 3D keypoint of a TRC file in a different ma
 <pre>
 [trc_from_easymocap.py](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Utilities/trc_from_easymocap.py)
-Convert EasyMocap results keypoints3d json files to .trc.
+Convert EasyMocap results keypoints3d .json files to .trc.
 
 [c3d_to_trc.py](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Utilities/c3d_to_trc.py)
-Converts 3D point data of a .c3d file to a .trc file compatible with OpenSim. No analog data (force plates, emg) nor computed data (angles, powers, etc.) are retrieved.
+Converts 3D point data from a .c3d file to a .trc file compatible with OpenSim. No analog data (force plates, emg) nor computed data (angles, powers, etc.) are retrieved.
+
+[trc_to_c3d.py](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Utilities/trc_to_c3d.py)
+Converts 3D point data from a .trc file to a .c3d file compatible with Visual3D.
 
 [trc_desample.py](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Utilities/trc_desample.py)
 Undersamples a trc file.
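For the newly listed `trc_to_c3d.py`, here is a hedged sketch of the general .trc-to-.c3d idea using the `ezc3d` library (an illustration only: the actual utility may rely on a different library, and `write_c3d` is a hypothetical helper that assumes the marker data has already been parsed from the .trc file):

```python
# Write already-parsed marker trajectories to a .c3d file with ezc3d.
import numpy as np
import ezc3d

def write_c3d(points_trc, marker_names, rate, c3d_path):
    '''points_trc: (n_frames, n_markers, 3) array of X, Y, Z marker positions.'''
    n_frames, n_markers, _ = points_trc.shape
    c3d = ezc3d.c3d()
    c3d['parameters']['POINT']['RATE']['value'] = [rate]
    c3d['parameters']['POINT']['LABELS']['value'] = list(marker_names)
    # ezc3d stores points as a 4 x n_markers x n_frames array (x, y, z, 1)
    pts = np.ones((4, n_markers, n_frames))
    pts[:3, :, :] = points_trc.transpose(2, 1, 0)
    c3d['data']['points'] = pts
    c3d.write(c3d_path)

# Example with dummy data: 2 markers tracked over 100 frames at 60 Hz
write_c3d(np.random.rand(100, 2, 3), ['RKNE', 'LKNE'], 60, 'example.c3d')
```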
@@ -772,7 +775,7 @@ You will be proposed a to-do list, but please feel absolutely free to propose yo
 &#9634; **Calibration:** Alternatively, self-calibrate with [OpenPose keypoints](https://ietresearch.onlinelibrary.wiley.com/doi/full/10.1049/cvi2.12130). Set world reference frame in the end.
 &#9634; **Calibration:** Convert [fSpy calibration](https://fspy.io/) based on vanishing point.
-&#9634; **Synchronization:** Synchronize cameras on 2D keypoint speeds. Cf [this draft script](https://github.com/perfanalytics/pose2sim/blob/draft/Pose2Sim/Utilities/synchronize_cams.py).
+&#10004; **Synchronization:** Synchronize cameras on keypoint speeds.
 &#10004; **Person Association:** Automatically choose the main person to triangulate.
 &#10004; **Person Association:** Multiple persons association. 1. Triangulate all the persons whose reprojection error is below a certain threshold (instead of only the one with minimum error), and then track in time with speed cf [Slembrouck 2020](https://link.springer.com/chapter/10.1007/978-3-030-40605-9_15)? or 2. Based on affinity matrices [Dong 2021](https://arxiv.org/pdf/1901.04111.pdf)? or 3. Based on occupancy maps [Yildiz 2012](https://link.springer.com/chapter/10.1007/978-3-642-35749-7_10)? or 4. With a neural network [Huang 2023](https://arxiv.org/pdf/2304.09471.pdf)?