From 450079033af9e3464e251fe41122e23070de369b Mon Sep 17 00:00:00 2001 From: davidpagnon Date: Thu, 21 Sep 2023 17:39:28 +0200 Subject: [PATCH] calib_toml_to_opencap --- Pose2Sim/Utilities/AlphaPose_to_OpenPose.py | 8 +- Pose2Sim/Utilities/Blazepose_runsave.py | 12 +- Pose2Sim/Utilities/DLC_to_OpenPose.py | 8 +- Pose2Sim/Utilities/c3d_to_trc.py | 8 +- Pose2Sim/Utilities/calib_qca_to_toml.py | 8 +- Pose2Sim/Utilities/calib_toml_to_opencap.py | 182 ++++++++++++++++++ Pose2Sim/Utilities/calib_toml_to_qca.py | 8 +- Pose2Sim/Utilities/calib_toml_to_yml.py | 10 +- Pose2Sim/Utilities/calib_yml_to_toml.py | 8 +- Pose2Sim/Utilities/json_display_with_img.py | 8 +- .../Utilities/json_display_without_img.py | 8 +- Pose2Sim/Utilities/trc_Zup_to_Yup.py | 8 +- Pose2Sim/Utilities/trc_combine.py | 8 +- Pose2Sim/Utilities/trc_desample.py | 8 +- Pose2Sim/Utilities/trc_filter.py | 28 +-- Pose2Sim/Utilities/trc_from_mot_osim.py | 8 +- Pose2Sim/Utilities/trc_gaitevents.py | 8 +- Pose2Sim/Utilities/trc_plot.py | 4 +- README.md | 6 +- 19 files changed, 264 insertions(+), 82 deletions(-) create mode 100644 Pose2Sim/Utilities/calib_toml_to_opencap.py diff --git a/Pose2Sim/Utilities/AlphaPose_to_OpenPose.py b/Pose2Sim/Utilities/AlphaPose_to_OpenPose.py index d9732a4..6071f63 100644 --- a/Pose2Sim/Utilities/AlphaPose_to_OpenPose.py +++ b/Pose2Sim/Utilities/AlphaPose_to_OpenPose.py @@ -10,8 +10,8 @@ Converts AlphaPose single json file to OpenPose frame-by-frame files. 
Usage: - python -m AlphaPose_to_OpenPose -i "" -o "" - OR python -m AlphaPose_to_OpenPose -i "" + python -m AlphaPose_to_OpenPose -i input_alphapose_json_file -o output_openpose_json_folder + OR python -m AlphaPose_to_OpenPose -i input_alphapose_json_file OR from Pose2Sim.Utilities import AlphaPose_to_OpenPose; AlphaPose_to_OpenPose.AlphaPose_to_OpenPose_func(r'input_alphapose_json_file', r'output_openpose_json_folder') ''' @@ -39,8 +39,8 @@ def AlphaPose_to_OpenPose_func(*args): Converts AlphaPose single json file to OpenPose frame-by-frame files. Usage: - python -m AlphaPose_to_OpenPose -i "" -o "" - OR python -m AlphaPose_to_OpenPose -i "" + python -m AlphaPose_to_OpenPose -i input_alphapose_json_file -o output_openpose_json_folder + OR python -m AlphaPose_to_OpenPose -i input_alphapose_json_file OR from Pose2Sim.Utilities import AlphaPose_to_OpenPose; AlphaPose_to_OpenPose.AlphaPose_to_OpenPose_func(r'input_alphapose_json_file', r'output_openpose_json_folder') ''' diff --git a/Pose2Sim/Utilities/Blazepose_runsave.py b/Pose2Sim/Utilities/Blazepose_runsave.py index 0edaa51..67671ea 100644 --- a/Pose2Sim/Utilities/Blazepose_runsave.py +++ b/Pose2Sim/Utilities/Blazepose_runsave.py @@ -15,9 +15,9 @@ You may also need to install tables: `pip install tables` Usage: - python -m Blazepose_runsave -i "" --display --save_images --save_video --to_csv --to_h5 --to_json --model_complexity 2 -o "" - OR python -m Blazepose_runsave -i "" --display --to_json --save_images - OR python -m Blazepose_runsave -i "" -dJs + python -m Blazepose_runsave -i input_file --display --save_images --save_video --to_csv --to_h5 --to_json --model_complexity 2 -o output_folder + OR python -m Blazepose_runsave -i input_file --display --to_json --save_images + OR python -m Blazepose_runsave -i input_file -dJs OR from Pose2Sim.Utilities import Blazepose_runsave; Blazepose_runsave.blazepose_detec_func(input_file=r'input_file', save_images=True, to_json=True, model_complexity=2) ''' @@ -133,9 +133,9 @@ 
def blazepose_detec_func(**args): You may also need to install tables: `pip install tables` Usage: - python -m Blazepose_runsave -i "" --display --save_images --save_video --to_csv --to_h5 --to_json --model_complexity 2 -o "" - OR python -m Blazepose_runsave -i "" --display --to_json --save_images - OR python -m Blazepose_runsave -i "" -dJs + python -m Blazepose_runsave -i input_file --display --save_images --save_video --to_csv --to_h5 --to_json --model_complexity 2 -o output_folder + OR python -m Blazepose_runsave -i input_file --display --to_json --save_images + OR python -m Blazepose_runsave -i input_file -dJs OR from Pose2Sim.Utilities import Blazepose_runsave; Blazepose_runsave.blazepose_detec_func(input_file=r'input_file', save_images=True, to_json=True, model_complexity=2) ''' diff --git a/Pose2Sim/Utilities/DLC_to_OpenPose.py b/Pose2Sim/Utilities/DLC_to_OpenPose.py index 5ac325c..a7eb24a 100644 --- a/Pose2Sim/Utilities/DLC_to_OpenPose.py +++ b/Pose2Sim/Utilities/DLC_to_OpenPose.py @@ -11,8 +11,8 @@ You may need to install tables: 'pip install tables' or 'conda install pytables' Usage: - python -m DLC_to_OpenPose -i "" -o "" - OR python -m DLC_to_OpenPose -i "" + python -m DLC_to_OpenPose -i input_h5_file -o output_json_folder + OR python -m DLC_to_OpenPose -i input_h5_file OR from Pose2Sim.Utilities import DLC_to_OpenPose; DLC_to_OpenPose.DLC_to_OpenPose_func(r'input_h5_file', r'output_json_folder') ''' @@ -43,8 +43,8 @@ def DLC_to_OpenPose_func(*args): Translates DeepLabCut (h5) 2D pose estimation files into OpenPose (json) files. 
Usage: - DLC_to_OpenPose -i "" -o "" - OR DLC_to_OpenPose -i "" + DLC_to_OpenPose -i input_h5_file -o output_json_folder + OR DLC_to_OpenPose -i input_h5_file OR import DLC_to_OpenPose; DLC_to_OpenPose.DLC_to_OpenPose_func(r'input_h5_file', r'output_json_folder') ''' diff --git a/Pose2Sim/Utilities/c3d_to_trc.py b/Pose2Sim/Utilities/c3d_to_trc.py index ee73dd6..f9660c5 100644 --- a/Pose2Sim/Utilities/c3d_to_trc.py +++ b/Pose2Sim/Utilities/c3d_to_trc.py @@ -14,8 +14,8 @@ Usage: from Pose2Sim.Utilities import c3d_to_trc; c3d_to_trc.c3d_to_trc_func(r'') - python -m c3d_to_trc -i "" - python -m c3d_to_trc -i "" -o "" + python -m c3d_to_trc -i input_c3d_file + python -m c3d_to_trc -i input_c3d_file -o output_c3d_file ''' @@ -45,8 +45,8 @@ def c3d_to_trc_func(*args): Usage: import c3d_to_trc; c3d_to_trc.c3d_to_trc_func(r'') - c3d_to_trc -i "" - c3d_to_trc -i "" -o "" + c3d_to_trc -i input_c3d_file + c3d_to_trc -i input_c3d_file -o output_c3d_file ''' try: diff --git a/Pose2Sim/Utilities/calib_qca_to_toml.py b/Pose2Sim/Utilities/calib_qca_to_toml.py index fd660f4..a4cb17c 100644 --- a/Pose2Sim/Utilities/calib_qca_to_toml.py +++ b/Pose2Sim/Utilities/calib_qca_to_toml.py @@ -12,8 +12,8 @@ Usage: from Pose2Sim.Utilities import calib_qca_to_toml; calib_qca_to_toml.calib_qca_to_toml_func(r'') - OR python -m calib_qca_to_toml -i "" - OR python -m calib_qca_to_toml -i "" --binning_factor 2 -o "" + OR python -m calib_qca_to_toml -i input_qca_file + OR python -m calib_qca_to_toml -i input_qca_file --binning_factor 2 -o output_toml_file ''' @@ -195,8 +195,8 @@ def calib_qca_to_toml_func(*args): Usage: import calib_qca_to_toml; calib_qca_to_toml.calib_qca_to_toml_func(r'') - OR calib_qca_to_toml -i "" - OR calib_qca_to_toml -i "" --binning_factor 2 -o "" + OR calib_qca_to_toml -i input_qca_file + OR calib_qca_to_toml -i input_qca_file --binning_factor 2 -o output_toml_file ''' try: diff --git a/Pose2Sim/Utilities/calib_toml_to_opencap.py b/Pose2Sim/Utilities/calib_toml_to_opencap.py 
new file mode 100644 index 0000000..ba7a020 --- /dev/null +++ b/Pose2Sim/Utilities/calib_toml_to_opencap.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +''' + ################################################## + ## TOML CALIBRATION TO OPENCAP CALIBRATION ## + ################################################## + + Convert an OpenCV .toml calibration file + to OpenCap .pickle calibration files. + One file will be created for each camera. + + Usage: + from Pose2Sim.Utilities import calib_toml_to_opencap; calib_toml_to_opencap.calib_toml_to_opencap_func(r'') + OR python -m calib_toml_to_opencap -t input_toml_file + OR python -m calib_toml_to_opencap -t input_toml_file -o output_calibration_folder +''' + +## INIT +import os +import pickle +import argparse +import numpy as np +import toml +import cv2 + + +## AUTHORSHIP INFORMATION +__author__ = "David Pagnon" +__copyright__ = "Copyright 2021, Pose2Sim" +__credits__ = ["David Pagnon"] +__license__ = "BSD 3-Clause License" +__version__ = '0.4' +__maintainer__ = "David Pagnon" +__email__ = "contact@david-pagnon.com" +__status__ = "Development" + + +## FUNCTIONS +def RT_qca2cv(r, t): + ''' + Converts rotation R and translation T + from Qualisys object centered perspective + to OpenCV camera centered perspective + and inversely.
+ + Qc = RQ+T --> Q = R-1.Qc - R-1.T + ''' + + r = r.T + t = - r.dot(t) + + return r, t + + +def rotate_cam(r, t, ang_x=0, ang_y=0, ang_z=0): + ''' + Apply rotations around x, y, z in cameras coordinates + Angle in radians + ''' + + r,t = np.array(r), np.array(t) + if r.shape == (3,3): + rt_h = np.block([[r,t.reshape(3,1)], [np.zeros(3), 1 ]]) + elif r.shape == (3,): + rt_h = np.block([[cv2.Rodrigues(r)[0],t.reshape(3,1)], [np.zeros(3), 1 ]]) + + r_ax_x = np.array([1,0,0, 0,np.cos(ang_x),-np.sin(ang_x), 0,np.sin(ang_x),np.cos(ang_x)]).reshape(3,3) + r_ax_y = np.array([np.cos(ang_y),0,np.sin(ang_y), 0,1,0, -np.sin(ang_y),0,np.cos(ang_y)]).reshape(3,3) + r_ax_z = np.array([np.cos(ang_z),-np.sin(ang_z),0, np.sin(ang_z),np.cos(ang_z),0, 0,0,1]).reshape(3,3) + r_ax = r_ax_z.dot(r_ax_y).dot(r_ax_x) + + r_ax_h = np.block([[r_ax,np.zeros(3).reshape(3,1)], [np.zeros(3), 1]]) + r_ax_h__rt_h = r_ax_h.dot(rt_h) + + r = r_ax_h__rt_h[:3,:3] + t = r_ax_h__rt_h[:3,3] + + return r, t + + +def read_toml(toml_path): + ''' + Read an OpenCV .toml calibration file + Returns 5 lists of size N (N=number of cameras): + - S (image size), + - D (distorsion), + - K (intrinsic parameters), + - R (extrinsic rotation), + - T (extrinsic translation) + ''' + + calib = toml.load(toml_path) + C, S, D, K, R, T = [], [], [], [], [], [] + for cam in list(calib.keys()): + if cam != 'metadata': + C += [calib[cam]['name']] + S += [np.array(calib[cam]['size'])] + D += [np.array(calib[cam]['distortions'])] + K += [np.array(calib[cam]['matrix'])] + R += [np.array(calib[cam]['rotation'])] + T += [np.array(calib[cam]['translation'])] + + return C, S, D, K, R, T + + +def write_opencap_pickle(output_calibration_folder, C, S, D, K, R, T): + ''' + Writes OpenCap .pickle calibration files + + Extrinsics in OpenCap are calculated with a vertical board for the world frame. + As we want the world frame to be horizontal, we need to rotate cameras by -Pi/2 around x in the world frame. + T is good the way it is. 
+ + INPUTS: + - Path of the output calibration folder + - C: list of camera names + - S: list of image sizes + - D: list of distortion coefficients + - K: list of intrinsic parameters + - R (extrinsic rotation), + - T (extrinsic translation) + ''' + + for i in range(len(C)): + # Transform rotation for vertical frame of reference (checkerboard vertical with OpenCap) + R_mat = cv2.Rodrigues(R[i])[0] # transform in matrix + R_w, T_w = RT_qca2cv(R_mat, T[i]) # transform in world centered perspective + R_w_90, T_w_90 = rotate_cam(R_w, T_w, ang_x=-np.pi/2, ang_y=0, ang_z=np.pi) # rotate cam wrt world frame + R_c, T_c = RT_qca2cv(R_w_90, T_w_90) # transform in camera centered perspective + + # retrieve data + calib_data = {'distortion': np.append(D[i],np.array([0])), + 'intrinsicMat': K[i], + 'imageSize': np.expand_dims(S[i][::-1], axis=1), + 'rotation': R_c, + 'translation': np.expand_dims(T[i], axis=1)*1000, + 'rotation_EulerAngles': cv2.Rodrigues(R_c)[0] # OpenCap calls these Euler angles but they are actually the Rodrigues vector (Euler is ambiguous) + } + + # write pickle + with open(os.path.join(output_calibration_folder, f'cam{i:02d}.pickle'), 'wb') as f_out: + pickle.dump(calib_data, f_out) + + +def calib_toml_to_opencap_func(*args): + ''' + Convert an OpenCV .toml calibration file + to OpenCap .pickle calibration files. + One file will be created for each camera. 
+ + Usage: + from Pose2Sim.Utilities import calib_toml_to_opencap; calib_toml_to_opencap.calib_toml_to_opencap_func(r'') + OR python -m calib_toml_to_opencap -t input_toml_file + OR python -m calib_toml_to_opencap -t input_toml_file -o output_calibration_folder + ''' + + try: + toml_path = os.path.realpath(args[0].get('toml_file')) # invoked with argparse + if args[0]['output_calibration_folder'] == None: + output_calibration_folder = os.path.dirname(toml_path) + else: + output_calibration_folder = os.path.realpath(args[0]['output_calibration_folder']) + except: + toml_path = os.path.realpath(args[0]) # invoked as a function + output_calibration_folder = os.path.dirname(toml_path) + + C, S, D, K, R, T = read_toml(toml_path) + write_opencap_pickle(output_calibration_folder, C, S, D, K, R, T) + + print(f'OpenCap calibration files generated at {output_calibration_folder}.\n') + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--toml_file', required = True, help='Input OpenCV .toml calibration file') + parser.add_argument('-o', '--output_calibration_folder', required = False, help='OpenCap calibration folder') + args = vars(parser.parse_args()) + + calib_toml_to_opencap_func(args) diff --git a/Pose2Sim/Utilities/calib_toml_to_qca.py b/Pose2Sim/Utilities/calib_toml_to_qca.py index 8fbfac7..a50aaa8 100644 --- a/Pose2Sim/Utilities/calib_toml_to_qca.py +++ b/Pose2Sim/Utilities/calib_toml_to_qca.py @@ -12,8 +12,8 @@ Usage: from Pose2Sim.Utilities import calib_toml_to_qca; calib_toml_to_qca.calib_toml_to_qca_func(r'') - OR python -m calib_toml_to_qca -i "" - OR python -m calib_toml_to_qca -i "" --binning_factor 2 --pixel_size 5.54e-3 -o "" + OR python -m calib_toml_to_qca -i input_toml_file + OR python -m calib_toml_to_qca -i input_toml_file --binning_factor 2 --pixel_size 5.54e-3 -o output_qca_file ''' @@ -152,8 +152,8 @@ def calib_toml_to_qca_func(**args): Usage: import calib_toml_to_qca; 
calib_toml_to_qca.calib_toml_to_qca_func(input_file=r'') - OR calib_toml_to_qca -i "" - OR calib_toml_to_qca -i "" --binning_factor 2 --pixel_size 5.54e-3 -o "" + OR calib_toml_to_qca -i input_toml_file + OR calib_toml_to_qca -i input_toml_file --binning_factor 2 --pixel_size 5.54e-3 -o output_qca_file ''' toml_path = args.get('input_file') diff --git a/Pose2Sim/Utilities/calib_toml_to_yml.py b/Pose2Sim/Utilities/calib_toml_to_yml.py index bf0cf4e..9edb004 100644 --- a/Pose2Sim/Utilities/calib_toml_to_yml.py +++ b/Pose2Sim/Utilities/calib_toml_to_yml.py @@ -12,8 +12,8 @@ Usage: from Pose2Sim.Utilities import calib_toml_to_yml; calib_toml_to_yml.calib_toml_to_yml_func(r'') - OR python -m calib_yml_to_toml -t "" - OR python -m calib_yml_to_toml -t "" -i "" -e "" + OR python -m calib_yml_to_toml -t input_toml_file + OR python -m calib_yml_to_toml -t input_toml_file -i intrinsic_yml_file -e extrinsic_yml_file ''' ## INIT @@ -116,8 +116,8 @@ def calib_toml_to_yml_func(*args): Usage: import calib_toml_to_yml; calib_toml_to_yml.calib_toml_to_yml_func(r'') - OR python -m calib_toml_to_yml -t "" - OR python -m calib_toml_to_yml -t "" -i "" -e "" + OR python -m calib_toml_to_yml -t input_toml_file + OR python -m calib_toml_to_yml -t input_toml_file -i intrinsic_yml_file -e extrinsic_yml_file ''' try: @@ -142,7 +142,7 @@ def calib_toml_to_yml_func(*args): if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('-t', '--toml_file', required = True, help='OpenCV intrinsic .yml calibration file') + parser.add_argument('-t', '--toml_file', required = True, help='Input OpenCV .toml calibration file') parser.add_argument('-i', '--intrinsic_yml_file', required = False, help='OpenCV intrinsic .yml calibration file') parser.add_argument('-e', '--extrinsic_yml_file', required = False, help='OpenCV extrinsic .yml calibration file') args = vars(parser.parse_args()) diff --git a/Pose2Sim/Utilities/calib_yml_to_toml.py b/Pose2Sim/Utilities/calib_yml_to_toml.py 
index 6a54fe8..a7eb6e1 100644 --- a/Pose2Sim/Utilities/calib_yml_to_toml.py +++ b/Pose2Sim/Utilities/calib_yml_to_toml.py @@ -15,8 +15,8 @@ Usage: import calib_yml_to_toml; calib_yml_to_toml.calib_yml_to_toml_func(r'', r'') - OR python -m calib_yml_to_toml -i -e - OR python -m calib_yml_to_toml -i -e -o "" + OR python -m calib_yml_to_toml -i intrinsic_yml_file -e extrinsic_yml_file + OR python -m calib_yml_to_toml -i intrinsic_yml_file -e extrinsic_yml_file -o output_toml_file ''' @@ -123,8 +123,8 @@ def calib_yml_to_toml_func(*args): Usage: import calib_yml_to_toml; calib_yml_to_toml.calib_yml_to_toml_func(r'', r'') - OR python -m calib_yml_to_toml -i -e - OR python -m calib_yml_to_toml -i -e -o "" + OR python -m calib_yml_to_toml -i intrinsic_yml_file -e extrinsic_yml_file + OR python -m calib_yml_to_toml -i intrinsic_yml_file -e extrinsic_yml_file -o output_toml_file ''' try: intrinsic_path = os.path.realpath(args[0].get('intrinsic_file')) # invoked with argparse diff --git a/Pose2Sim/Utilities/json_display_with_img.py b/Pose2Sim/Utilities/json_display_with_img.py index a8bb915..26fd81d 100644 --- a/Pose2Sim/Utilities/json_display_with_img.py +++ b/Pose2Sim/Utilities/json_display_with_img.py @@ -17,8 +17,8 @@ images. Usage: - python -m json_display_with_img -j "" -i "" - python -m json_display_with_img -j "" -i "" -o "" -d True -s True + python -m json_display_with_img -j json_folder -i raw_img_folder + python -m json_display_with_img -j json_folder -i raw_img_folder -o output_img_folder -d True -s True from Pose2Sim.Utilities import json_display_with_img; json_display_with_img.json_display_with_img_func(json_folder=r'', raw_img_folder=r'') ''' @@ -54,8 +54,8 @@ def json_display_with_img_func(**args): images. 
Usage: - json_display_with_img -j "" -i "" - json_display_with_img -j "" -i "" -o "" -d True -s True + json_display_with_img -j json_folder -i raw_img_folder + json_display_with_img -j json_folder -i raw_img_folder -o output_img_folder -d True -s True import json_display_with_img; json_display_with_img.json_display_with_img_func(json_folder=r'', raw_img_folder=r'') ''' diff --git a/Pose2Sim/Utilities/json_display_without_img.py b/Pose2Sim/Utilities/json_display_without_img.py index 401cc4a..dad124a 100644 --- a/Pose2Sim/Utilities/json_display_without_img.py +++ b/Pose2Sim/Utilities/json_display_without_img.py @@ -14,8 +14,8 @@ coordinates on the original images. Usage: - python -m json_display_without_img -j "" - python -m json_display_without_img -j "" -o "" -d True -s True + python -m json_display_without_img -j json_folder + python -m json_display_without_img -j json_folder -o output_img_folder -d True -s True from Pose2Sim.Utilities import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'') ''' @@ -80,8 +80,8 @@ def json_display_without_img_func(**args): coordinates on the original images. 
Usage: - json_display_without_img -j "" - json_display_without_img -j "" -o "" -d True -s True + json_display_without_img -j json_folder + json_display_without_img -j json_folder -o output_img_folder -d True -s True import json_display_without_img; json_display_without_img.json_display_without_img_func(json_folder=r'') ''' diff --git a/Pose2Sim/Utilities/trc_Zup_to_Yup.py b/Pose2Sim/Utilities/trc_Zup_to_Yup.py index 8d90cd3..b416665 100644 --- a/Pose2Sim/Utilities/trc_Zup_to_Yup.py +++ b/Pose2Sim/Utilities/trc_Zup_to_Yup.py @@ -11,8 +11,8 @@ Usage: from Pose2Sim.Utilities import trc_Zup_to_Yup; trc_Zup_to_Yup.trc_Zup_to_Yup_func(r'', r'') - python -m trc_Zup_to_Yup -i "" - python -m trc_Zup_to_Yup -i "" -o "" + python -m trc_Zup_to_Yup -i input_trc_file + python -m trc_Zup_to_Yup -i input_trc_file -o output_trc_file ''' @@ -40,8 +40,8 @@ def trc_Zup_to_Yup_func(*args): Usage: import trc_Zup_to_Yup; trc_Zup_to_Yup.trc_Zup_to_Yup_func(r'', r'') - trcZup_to_Yup -i "" - trcZup_to_Yup -i "" -o "" + trcZup_to_Yup -i input_trc_file + trcZup_to_Yup -i input_trc_file -o output_trc_file ''' try: diff --git a/Pose2Sim/Utilities/trc_combine.py b/Pose2Sim/Utilities/trc_combine.py index 76bd9df..e544962 100644 --- a/Pose2Sim/Utilities/trc_combine.py +++ b/Pose2Sim/Utilities/trc_combine.py @@ -14,8 +14,8 @@ Usage: from Pose2Sim.Utilities import trc_combine; trc_combine.trc_combine_func(r'', r'', r'') - OR python -m trc_combine -i "" -j "" -o "" - OR python -m trc_combine -i "" -j "" + OR python -m trc_combine -i first_path -j second_path -o output_path + OR python -m trc_combine -i first_path -j second_path ''' @@ -133,8 +133,8 @@ def trc_combine_func(*args): Usage: from Pose2Sim.Utilities import trc_combine; trc_combine.trc_combine_func(r'', r'', r'') - OR python -m trc_combine -i "" -j "" -o "" - OR python -m trc_combine -i "" -j "" + OR python -m trc_combine -i first_path -j second_path -o output_path + OR python -m trc_combine -i first_path -j second_path ''' try: diff --git 
a/Pose2Sim/Utilities/trc_desample.py b/Pose2Sim/Utilities/trc_desample.py index 8a3e735..217da52 100644 --- a/Pose2Sim/Utilities/trc_desample.py +++ b/Pose2Sim/Utilities/trc_desample.py @@ -10,8 +10,8 @@ Undersample a trc file Usage: - python -m trc_desample -i "" -f - python -m trc_desample -i "" -f -o "" + python -m trc_desample -i input_trc_file -f + python -m trc_desample -i input_trc_file -f -o output_trc_file from Pose2Sim.Utilities import trc_desample; trc_desample.trc_desample_func(r'input_trc_file', output_frequency, r'output_trc_file') ''' @@ -39,8 +39,8 @@ def trc_desample_func(*args): Undersample a trc file Usage: - trc_desample -i "" -f - trc_desample -i "" -f -o "" + trc_desample -i input_trc_file -f + trc_desample -i input_trc_file -f -o output_trc_file import trc_desample; trc_desample.trc_desample_func(r'input_trc_file', output_frequency, r'output_trc_file') ''' diff --git a/Pose2Sim/Utilities/trc_filter.py b/Pose2Sim/Utilities/trc_filter.py index b047e02..a650644 100644 --- a/Pose2Sim/Utilities/trc_filter.py +++ b/Pose2Sim/Utilities/trc_filter.py @@ -12,18 +12,18 @@ Usage examples: Butterworth filter, low-pass, 4th order, cut off frequency 6 Hz: - from Pose2Sim.Utilities import trc_filter; trc_filter.trc_filter_func(input_file = r"", output_file = r"", + from Pose2Sim.Utilities import trc_filter; trc_filter.trc_filter_func(input_file = input_trc_file, output_file = output_trc_file, display=True, type='butterworth', pass_type = 'low', order=4, cut_off_frequency=6) - OR python -m trc_filter -i "" -o "" -d True -t butterworth -p low -n 4 -f 6 - OR python -m trc_filter -i "" -t butterworth -p low -n 4 -f 6 + OR python -m trc_filter -i input_trc_file -o output_trc_file -d True -t butterworth -p low -n 4 -f 6 + OR python -m trc_filter -i input_trc_file -t butterworth -p low -n 4 -f 6 Butterworth filter on speed, low-pass, 4th order, cut off frequency 6 Hz: - python -m trc_filter -i "" -t butterworth_on_speed -p low -n 4 -f 6 + python -m trc_filter -i 
input_trc_file -t butterworth_on_speed -p low -n 4 -f 6 Gaussian filter, kernel 5: - python -m trc_filter -i "" -t gaussian, -k 5 + python -m trc_filter -i input_trc_file -t gaussian, -k 5 LOESS filter, kernel 5: NB: frac = kernel * frames_number - python -m trc_filter -i "" -t loess, -k 5 + python -m trc_filter -i input_trc_file -t loess, -k 5 Median filter, kernel 5: - python -m trc_filter -i "" -t gaussian, -k 5 + python -m trc_filter -i input_trc_file -t gaussian, -k 5 ''' @@ -298,18 +298,18 @@ def trc_filter_func(**args): Usage examples: Butterworth filter, low-pass, 4th order, cut off frequency 6 Hz: - import trc_filter; trc_filter.trc_filter_func(input_file = r"", output_file = r"", + import trc_filter; trc_filter.trc_filter_func(input_file = input_trc_file, output_file = output_trc_file, display=True, type='butterworth', pass_type = 'low', order=4, cut_off_frequency=6) - OR python -m trc_filter -i "" -o "" -d True -t butterworth -p low -n 4 -f 6 - OR python -m trc_filter -i "" -t butterworth, -p low -n 4 -f 6 + OR python -m trc_filter -i input_trc_file -o output_trc_file -d True -t butterworth -p low -n 4 -f 6 + OR python -m trc_filter -i input_trc_file -t butterworth, -p low -n 4 -f 6 Butterworth filter on speed, low-pass, 4th order, cut off frequency 6 Hz: - python -m trc_filter -i "" -t butterworth_on_speed, -p low -n 4 -f 6 + python -m trc_filter -i input_trc_file -t butterworth_on_speed, -p low -n 4 -f 6 Gaussian filter, kernel 5: - python -m trc_filter -i "" -t gaussian, -k 5 + python -m trc_filter -i input_trc_file -t gaussian, -k 5 LOESS filter, kernel 5: NB: frac = kernel * frames_number - python -m trc_filter -i "" -t loess, -k 5 + python -m trc_filter -i input_trc_file -t loess, -k 5 Median filter, kernel 5: - python -m trc_filter -i "" -t gaussian, -k 5 + python -m trc_filter -i input_trc_file -t gaussian, -k 5 ''' # Read trc header diff --git a/Pose2Sim/Utilities/trc_from_mot_osim.py b/Pose2Sim/Utilities/trc_from_mot_osim.py index 
ffcf787..790b2c4 100644 --- a/Pose2Sim/Utilities/trc_from_mot_osim.py +++ b/Pose2Sim/Utilities/trc_from_mot_osim.py @@ -12,8 +12,8 @@ Usage: from Pose2Sim.Utilities import trc_from_mot_osim; trc_from_mot_osim.trc_from_mot_osim_func(r'', r'', r'') - python -m trc_from_mot_osim -m "" -o "" - python -m trc_from_mot_osim -m "" -o "" -t "" + python -m trc_from_mot_osim -m input_mot_file -o input_osim_file + python -m trc_from_mot_osim -m input_mot_file -o input_osim_file -t output_trc_file ''' @@ -86,8 +86,8 @@ def trc_from_mot_osim_func(*args): Usage: from Pose2Sim.Utilities import trc_from_mot_osim; trc_from_mot_osim.trc_from_mot_osim_func(r'', r'', r'') - python -m trc_from_mot_osim -m "" -o "" - python -m trc_from_mot_osim -m "" -o "" -t "" + python -m trc_from_mot_osim -m input_mot_file -o input_osim_file + python -m trc_from_mot_osim -m input_mot_file -o input_osim_file -t trc_output_file ''' try: diff --git a/Pose2Sim/Utilities/trc_gaitevents.py b/Pose2Sim/Utilities/trc_gaitevents.py index db5a1b2..d09ad33 100644 --- a/Pose2Sim/Utilities/trc_gaitevents.py +++ b/Pose2Sim/Utilities/trc_gaitevents.py @@ -24,8 +24,8 @@ eg -d=-Z or --gait_direction=-Z from Pose2Sim.Utilities import trc_gaitevents; trc_gaitevents.trc_gaitevents_func(r'', '') - OR python -m trc_gaitevents -i "" - OR python -m trc_gaitevents -i "" --gait_direction=-Z + OR python -m trc_gaitevents -i input_trc_file + OR python -m trc_gaitevents -i input_trc_file --gait_direction=-Z ''' @@ -142,8 +142,8 @@ def trc_gaitevents_func(*args): eg -d=-Z or --gait_direction=-Z import trc_gaitevents; trc_gaitevents.trc_gaitevents_func(r'', '') - OR trc_gaitevents -i "" --gait_direction Z - OR trc_gaitevents -i "" --gait_direction=-Z + OR trc_gaitevents -i input_trc_file --gait_direction Z + OR trc_gaitevents -i input_trc_file --gait_direction=-Z ''' try: diff --git a/Pose2Sim/Utilities/trc_plot.py b/Pose2Sim/Utilities/trc_plot.py index 6fd8707..3ed25c1 100644 --- a/Pose2Sim/Utilities/trc_plot.py +++ 
b/Pose2Sim/Utilities/trc_plot.py @@ -11,7 +11,7 @@ Usage: from Pose2Sim.Utilities import trc_plot; trc_plot.trc_plot_func(r'') - OR python -m trc_plot -i "" + OR python -m trc_plot -i input_trc_file ''' @@ -139,7 +139,7 @@ def trc_plot_func(*args): Usage: import trc_plot; trc_plot.trc_plot_func(r'') - OR trc_plot -i "" + OR trc_plot -i input_trc_file ''' try: diff --git a/README.md b/README.md index 0ef57a9..6c0c93a 100644 --- a/README.md +++ b/README.md @@ -182,7 +182,7 @@ Make sure you modify the [User\Config.toml](https://github.com/perfanalytics/pos However, it is less robust and accurate than OpenPose, and can only detect a single person. * Use the script `Blazepose_runsave.py` (see [Utilities](#utilities)) to run BlazePose under Python, and store the detected coordinates in OpenPose (json) or DeepLabCut (h5 or csv) format: ``` - python -m Blazepose_runsave -i r"" -dJs + python -m Blazepose_runsave -i input_file -dJs ``` Type in `python -m Blazepose_runsave -h` for explanation on parameters and for additional ones. * Make sure you change the `pose_model` and the `tracked_keypoint` in the [User\Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Empty_project/User/Config.toml) file. @@ -192,7 +192,7 @@ If you need to detect specific points on a human being, an animal, or an object, 1. Train your DeepLabCut model and run it on your images or videos (more instruction on their repository) 2. Translate the h5 2D coordinates to json files (with `DLC_to_OpenPose.py` script, see [Utilities](#utilities)): ``` - python -m DLC_to_OpenPose -i r"" + python -m DLC_to_OpenPose -i input_h5_file ``` 3. Report the model keypoints in the [skeleton.py](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/skeletons.py) file, and make sure you change the `pose_model` and the `tracked_keypoint` in the [User\Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Empty_project/User/Config.toml) file. 4.
Create an OpenSim model if you need 3D joint angles. @@ -202,7 +202,7 @@ If you need to detect specific points on a human being, an animal, or an object, * Install and run AlphaPose on your videos (more instruction on their repository) * Translate the AlphaPose single json file to OpenPose frame-by-frame files (with `AlphaPose_to_OpenPose.py` script, see [Utilities](#utilities)): ``` - python -m AlphaPose_to_OpenPose -i r"" + python -m AlphaPose_to_OpenPose -i input_alphapose_json_file ``` * Make sure you change the `pose_model` and the `tracked_keypoint` in the [User\Config.toml](https://github.com/perfanalytics/pose2sim/blob/main/Pose2Sim/Empty_project/User/Config.toml) file.