no torch dependency if running on CPU

This commit is contained in:
davidpagnon 2024-08-06 13:13:31 +02:00
parent 676c6825fb
commit 28e06edd58
4 changed files with 13 additions and 8 deletions

1
.gitignore vendored
View File

@@ -1,4 +1,5 @@
**/__pycache__/
**/build/
*.pyc
logs.txt*
**/*.log

View File

@@ -408,4 +408,4 @@ if __name__ == '__main__':
parser.add_argument('-O', '--output_file_root', required=False, help='output file root path, without extension')
args = vars(parser.parse_args())
reproj_from_trc_calib_func(**args)
reproj_from_trc_calib_func(**args)

View File

@@ -39,7 +39,6 @@ import logging
from tqdm import tqdm
import numpy as np
import cv2
import torch
import onnxruntime as ort
from rtmlib import PoseTracker, Body, Wholebody, BodyWithFeet, draw_skeleton
@@ -359,10 +358,15 @@ def rtm_estimator(config_dict):
frame_rate = 60
# If CUDA is available, use it with ONNXRuntime backend; else use CPU with openvino
if 'CUDAExecutionProvider' in ort.get_available_providers() and torch.cuda.is_available():
device = 'cuda'
backend = 'onnxruntime'
logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
if 'CUDAExecutionProvider' in ort.get_available_providers():
try:
import torch
if torch.cuda.is_available():
device = 'cuda'
backend = 'onnxruntime'
logging.info(f"\nValid CUDA installation found: using ONNXRuntime backend with GPU.")
except ImportError:
pass  # torch not installed: fall through to CPU/other providers
elif 'MPSExecutionProvider' in ort.get_available_providers() or 'CoreMLExecutionProvider' in ort.get_available_providers():
device = 'mps'
backend = 'onnxruntime'

View File

@@ -36,7 +36,7 @@ install_requires =
lxml==4.9.4
matplotlib
mpl_interactions
Pillow
# Pillow
PyQt5
tqdm
anytree
@@ -47,7 +47,7 @@ install_requires =
ipython
c3d
tensorflow
torch
# torch
rtmlib
onnxruntime
openvino