revert to model.predict(outputs)

parent 7d13dc4d38
commit 56f55674bf
@@ -3,40 +3,39 @@
'''
###########################################################################
## POSE2SIM ##
###########################################################################

This repository offers a way to perform markerless kinematics, and gives an
example workflow from an Openpose input to an OpenSim result.

It offers tools for:
- 2D pose estimation,
- Cameras calibration,
- Tracking the person of interest,
- Robust triangulation,
- Filtration,
- Marker augmentation,
- OpenSim scaling and inverse kinematics

It has been tested on Windows, Linux and MacOS, and works for any Python version >= 3.8

Installation:
# Open Anaconda prompt. Type:
# - conda create -n Pose2Sim python=3.8
# - conda activate Pose2Sim
# - conda install Pose2Sim

Usage:
# First run Pose estimation and organize your directories (see Readme.md)
from Pose2Sim import Pose2Sim
Pose2Sim.calibration()
Pose2Sim.personAssociation()
Pose2Sim.triangulation()
Pose2Sim.filtering()
Pose2Sim.markerAugmentation()
# Then run OpenSim (see Readme.md)
'''

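Put together, the Usage block of this docstring amounts to a short driver script. The sketch below simply strings the documented calls together; it assumes 2D pose estimation has already been run, the project folders are organized as described in Readme.md, and the default configuration is used:

    # Run the Pose2Sim workflow documented above, step by step.
    from Pose2Sim import Pose2Sim

    Pose2Sim.calibration()         # camera calibration -> .toml file
    Pose2Sim.personAssociation()   # keep only the person of interest in each json
    Pose2Sim.triangulation()       # robust triangulation -> .trc file
    Pose2Sim.filtering()           # filter the 3D coordinates
    Pose2Sim.markerAugmentation()  # marker augmentation with the tf.keras model (see augmentTRC below)
    # OpenSim scaling and inverse kinematics are then run separately (see Readme.md).
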
@@ -3,27 +3,27 @@
'''
###########################################################################
## CAMERAS CALIBRATION ##
###########################################################################

Use this module to calibrate your cameras and save results to a .toml file.

It either converts a Qualisys calibration .qca.txt file,
Or calibrates cameras from checkerboard images.

Checkerboard calibration is based on
https://docs.opencv.org/3.4.15/dc/dbb/tutorial_py_calibration.html.
/!\ Beware that corners must be detected on all frames, or else extrinsic
parameters may be wrong. Set show_corner_detection to 1 to verify.

INPUTS:
- a calibration file in the 'calibration' folder (.qca.txt extension)
- OR folders 'calibration\intrinsics' (populated with video or about 30 images) and 'calibration\extrinsics' (populated with video or one image)
- a Config.toml file in the 'User' folder

OUTPUTS:
- a calibration file in the 'calibration' folder (.toml extension)
'''

# TODO: DETECT WHEN WINDOW IS CLOSED

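For readers unfamiliar with the checkerboard branch, the OpenCV tutorial linked in the docstring boils down to the following kind of loop. This is a generic sketch, not the module's actual code; the folder path, image format and board dimensions are assumptions to be replaced with your own:

    import glob
    import cv2
    import numpy as np

    # Assumed board: 7x10 inner corners, 30 mm squares (set these to your own board).
    corners_nb = (7, 10)
    square_size = 30.0

    # 3D coordinates of the corners in the board's own frame.
    objp = np.zeros((corners_nb[0] * corners_nb[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:corners_nb[0], 0:corners_nb[1]].T.reshape(-1, 2) * square_size

    obj_points, img_points = [], []
    for file in glob.glob(r'calibration\intrinsics\cam01\*.png'):   # hypothetical folder and format
        gray = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, corners_nb, None)
        if found:   # corners must be found on every frame you keep, or extrinsics may be wrong
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
            corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            obj_points.append(objp)
            img_points.append(corners)

    rms, K, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, gray.shape[::-1], None, None)
    print(f'RMS reprojection error: {rms:.2f} px')
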
@@ -3,12 +3,11 @@
'''
###########################################################################
## OTHER SHARED UTILITIES ##
###########################################################################

Functions shared between modules, and other utilities
'''

## INIT

@@ -3,22 +3,21 @@
'''
###########################################################################
## FILTER 3D COORDINATES ##
###########################################################################

Filter trc 3D coordinates.

Available filters: Butterworth, Butterworth on speed, Gaussian, LOESS, Median
Set your parameters in Config.toml

INPUTS:
- a trc file
- filtering parameters in Config.toml

OUTPUT:
- a filtered trc file
'''

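For instance, the Butterworth option can be applied to one marker coordinate with SciPy along the following lines. This is a generic sketch, not the module's actual code; the frame rate, cutoff and order are made-up values standing in for the ones read from Config.toml:

    import numpy as np
    from scipy import signal

    frame_rate = 60.0        # assumed capture rate (Hz)
    cut_off_frequency = 6.0  # assumed value, normally read from Config.toml
    order = 4                # assumed value, normally read from Config.toml

    # One coordinate of one marker over time (dummy data here).
    coords = np.cumsum(np.random.randn(300))

    # Zero-phase low-pass Butterworth (filtfilt runs the filter forwards and backwards).
    b, a = signal.butter(order, cut_off_frequency / (frame_rate / 2), 'low', analog=False)
    coords_filtered = signal.filtfilt(b, a, coords)
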
@@ -17,7 +17,6 @@ INPUTS:

OUTPUT:
- a filtered trc file
'''

@@ -186,7 +185,7 @@ def augmentTRC(config_dict):
    json_file.close()
    model = tf.keras.models.model_from_json(pretrainedModel_json)
    model.load_weights(os.path.join(augmenterModelDir, "weights.h5"))
-    outputs = model(inputs)
+    outputs = model.predict(inputs)
    tf.keras.backend.clear_session()

    # %% Post-process outputs.

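For context: a tf.keras model can either be called directly (`model(inputs)`, which runs eagerly and returns tensors) or through `model.predict(inputs)`, which batches the input and returns NumPy arrays; this commit reverts the augmenter to the latter. A standalone sketch of the load-and-predict pattern visible in the hunk (the model directory, the json file name and the input shape are placeholders, not Pose2Sim's actual values):

    import os
    import numpy as np
    import tensorflow as tf

    augmenterModelDir = 'MarkerAugmenter'   # placeholder directory
    with open(os.path.join(augmenterModelDir, 'model.json')) as json_file:   # placeholder file name
        pretrainedModel_json = json_file.read()

    model = tf.keras.models.model_from_json(pretrainedModel_json)
    model.load_weights(os.path.join(augmenterModelDir, "weights.h5"))

    inputs = np.zeros((1, 63), dtype=np.float32)   # dummy input, shape is an assumption
    outputs = model.predict(inputs)                # NumPy output, batching handled by Keras
    tf.keras.backend.clear_session()
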
@@ -3,28 +3,27 @@
'''
###########################################################################
## TRACKING OF PERSON OF INTEREST ##
###########################################################################

Openpose detects all people in the field of view.
Which is the one of interest?

This module tries all possible triangulations of a chosen anatomical
point. If "multi_person" mode is not used, it chooses the person for
whom the reprojection error is smallest. Otherwise, it selects all
persons with a reprojection error smaller than a threshold, and then
associates them across time frames by minimizing the displacement speed.

INPUTS:
- a calibration file (.toml extension)
- json files from each camera folders with several detected persons
- a Config.toml file
- a skeleton model

OUTPUTS:
- json files for each camera with only one person of interest
'''

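The "smallest reprojection error" rule for the single-person case reduces to an argmin over all combinations of one candidate per camera. The sketch below is schematic only: the triangulation routine is left abstract, and the function names are illustrative, not those of the module:

    import itertools
    import numpy as np

    def reprojection_error(point_3d, points_2d, projection_matrices):
        '''Mean pixel distance between the detected 2D points and the reprojection of point_3d.'''
        errors = []
        for (x, y), P in zip(points_2d, projection_matrices):
            proj = P @ np.append(point_3d, 1.0)
            u, v = proj[:2] / proj[2]
            errors.append(np.hypot(u - x, v - y))
        return float(np.mean(errors))

    def choose_person(candidates_per_camera, projection_matrices, triangulate):
        '''candidates_per_camera: for each camera, the 2D positions of one anatomical point
        for every detected person. triangulate: a DLT routine (left abstract here).'''
        best, best_err = None, np.inf
        for combo in itertools.product(*candidates_per_camera):   # one candidate per camera
            point_3d = triangulate(combo, projection_matrices)
            err = reprojection_error(point_3d, combo, projection_matrices)
            if err < best_err:
                best, best_err = combo, err
        return best, best_err
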
@@ -3,23 +3,23 @@
'''
###########################################################################
## SKELETONS DEFINITIONS ##
###########################################################################

The definition and hierarchy of the following skeletons are available:
- OpenPose BODY_25B, BODY_25, BODY_135, COCO, MPII
- Mediapipe BLAZEPOSE
- AlphaPose HALPE_26, HALPE_68, HALPE_136, COCO_133, COCO, MPII
(for COCO and MPII, AlphaPose must be run with the flag "--format cmu")
- DeepLabCut CUSTOM: the skeleton will be defined in Config.toml

N.B.: Not all face and hand keypoints are reported in the skeleton architecture,
since some are redundant for the orientation of some bodies.

N.B.: The corresponding OpenSim model files are provided in the "Pose2Sim\Empty project" folder.
If you wish to use any other, you will need to adjust the markerset in the .osim model file,
as well as in the scaling and IK setup files.
'''

## INIT

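The triangulation hunk further down imports anytree (RenderTree, DictImporter), which suggests these hierarchies are anytree node trees. The fragment below only illustrates that idea with a few HALPE_26-style joint names; the structure and ids are illustrative, not copied from the skeletons module:

    from anytree import Node, RenderTree

    # Illustrative lower-body fragment; 'id' would map to the pose estimator's keypoint index.
    Hip = Node('Hip', id=19)
    RHip = Node('RHip', id=12, parent=Hip)
    RKnee = Node('RKnee', id=14, parent=RHip)
    RAnkle = Node('RAnkle', id=16, parent=RKnee)
    LHip = Node('LHip', id=11, parent=Hip)
    LKnee = Node('LKnee', id=13, parent=LHip)
    LAnkle = Node('LAnkle', id=15, parent=LKnee)

    for pre, _, node in RenderTree(Hip):
        print(f'{pre}{node.name} (id={node.id})')
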
@@ -3,32 +3,31 @@
'''
###########################################################################
## ROBUST TRIANGULATION OF 2D COORDINATES ##
###########################################################################

This module triangulates 2D json coordinates and builds a .trc file readable
by OpenSim.

The triangulation is weighted by the likelihood of each detected 2D keypoint
(if they meet the likelihood threshold). If the reprojection error is above a
threshold, right and left sides are swapped; if it is still above, a camera
is removed for this point and this frame, until the threshold is met. If more
cameras are removed than a predefined minimum, triangulation is skipped for
the point and this frame. In the end, missing values are interpolated.

In case of multiple subjects detection, make sure you first run the
personAssociation module.

INPUTS:
- a calibration file (.toml extension)
- json files for each camera with only one person of interest
- a Config.toml file
- a skeleton model

OUTPUTS:
- a .trc file with 3D coordinates in Y-up system coordinates
'''

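The likelihood-weighted triangulation described above is essentially a weighted Direct Linear Transform. Below is a generic sketch for a single keypoint seen by several cameras, not the module's exact code; the projection matrices are the 3x4 matrices derived from the calibration file, and a camera below the likelihood threshold can simply be given a weight of zero, which drops it from the system:

    import numpy as np

    def weighted_triangulation(points_2d, likelihoods, projection_matrices):
        '''Weighted DLT: each camera contributes two equations, scaled by its keypoint likelihood.'''
        A = []
        for (x, y), w, P in zip(points_2d, likelihoods, projection_matrices):
            A.append(w * (x * P[2] - P[0]))
            A.append(w * (y * P[2] - P[1]))
        _, _, Vt = np.linalg.svd(np.asarray(A))
        X = Vt[-1]
        return X[:3] / X[3]   # homogeneous -> Euclidean 3D point
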
@@ -44,7 +43,7 @@ import cv2
import toml
from tqdm import tqdm
from scipy import interpolate
-from collections import Counter, OrderedDict
+from collections import Counter
from anytree import RenderTree
from anytree.importer import DictImporter
import logging

@@ -762,7 +761,7 @@ def triangulate_all(config):
    try:
        Q_tot[n] = Q_tot[n].apply(interpolate_zeros_nans, axis=0, args = [interp_gap_smaller_than, interpolation_kind])
    except:
-        logging.info(f'Interpolation was not possible for person {n}. This means that the not enough points are available, which is often due to a bad calibration.')
+        logging.info(f'Interpolation was not possible for person {n}. This means that not enough points are available, which is often due to a bad calibration.')
    # Q_tot.replace(np.nan, 0, inplace=True)

    # Create TRC file

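The `interpolate_zeros_nans` helper itself is not shown in this diff; judging from the call above, it takes a column, a maximum gap length and an interpolation kind. The function below is only a guess at that behaviour (fill zero/NaN gaps shorter than the given length, leave longer gaps alone), not the project's actual implementation:

    import numpy as np
    from scipy import interpolate

    def interpolate_zeros_nans(col, gap_smaller_than, kind='cubic'):
        '''Interpolate zero/NaN values in a pandas Series, but only inside gaps
        shorter than gap_smaller_than frames (assumed behaviour).'''
        good = ~(col.isna() | (col == 0))
        idx_good = np.flatnonzero(good)
        if len(idx_good) < 2:
            return col                      # nothing to interpolate from
        f = interpolate.interp1d(idx_good, col.iloc[idx_good], kind=kind, bounds_error=False)
        out = col.copy()
        idx_bad = np.flatnonzero(~good)
        # Split the bad indices into consecutive runs (gaps) and fill only the short ones.
        for gap in np.split(idx_bad, np.flatnonzero(np.diff(idx_bad) > 1) + 1):
            if 0 < len(gap) < gap_smaller_than:
                out.iloc[gap] = f(gap)
        return out

    # Applied column-wise exactly as in the hunk above:
    # Q_tot[n] = Q_tot[n].apply(interpolate_zeros_nans, axis=0, args=[interp_gap_smaller_than, interpolation_kind])
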