Using TensorRT backend.
This commit is contained in:
@@ -13,9 +13,12 @@ class BaseModel(ABC):
|
||||
def __init__(self, model_path: str, warmup: int):
|
||||
self.opt = ort.SessionOptions()
|
||||
providers = ort.get_available_providers()
|
||||
# ort.set_default_logger_severity(1)
|
||||
|
||||
provider = ""
|
||||
if "CUDAExecutionProvider" in providers:
|
||||
if "TensorrtExecutionProvider" in providers:
|
||||
provider = "TensorrtExecutionProvider"
|
||||
elif "CUDAExecutionProvider" in providers:
|
||||
provider = "CUDAExecutionProvider"
|
||||
else:
|
||||
provider = "CPUExecutionProvider"
|
||||
@@ -328,10 +331,10 @@ def load_model():
|
||||
print("Loading onnx model ...")
|
||||
|
||||
model = TopDown(
|
||||
"/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_with-norm.onnx",
|
||||
# "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_fp16_with-norm.onnx",
|
||||
"/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_with-norm.onnx",
|
||||
# "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_fp16_with-norm.onnx",
|
||||
# "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_with-norm.onnx",
|
||||
"/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_fp16_with-norm.onnx",
|
||||
# "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_with-norm.onnx",
|
||||
"/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_fp16_with-norm.onnx",
|
||||
conf_threshold=0.3,
|
||||
iou_threshold=0.3,
|
||||
warmup=30,
|
||||
|
||||
Reference in New Issue
Block a user