Some small updates: compute Letterbox parameters from the input shape instead of the image tensor, replace torch.stack with in-place column writes in the postprocess modules, cast the dummy bbox inputs to int32 for ONNX export, and build the ONNX Runtime provider list (TensorRT/CUDA/CPU) explicitly.

@@ -43,5 +43,6 @@ mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/expor
 ```
 
 ```bash
 python3 /RapidPoseTriangulation/extras/mmdeploy/make_extra_graphs.py
+python3 /RapidPoseTriangulation/extras/mmdeploy/add_extra_steps.py
 ```

@@ -20,8 +20,8 @@ class Letterbox(nn.Module):
         self.target_size = target_size
         self.fill_value = fill_value
 
-    def calc_params_and_crop(self, img, bbox=None):
-        ih0, iw0 = img.shape[1:3]
+    def calc_params_and_crop(self, ishape, bbox=None):
+        ih0, iw0 = ishape[1], ishape[2]
         th, tw = self.target_size
 
         if bbox is not None:
@@ -44,8 +44,6 @@ class Letterbox(nn.Module):
             y2 = torch.min(y2, ih0).to(torch.int64)
             bbox = torch.stack((x1, y1, x2, y2), dim=0).unsqueeze(0)
 
-            img = img.to(torch.float32)
-            img = img[:, y1:y2, x1:x2, :]
             ih = y2 - y1
             iw = x2 - x1
         else:
@@ -63,10 +61,16 @@ class Letterbox(nn.Module):
         pad_bottom = pad_h - pad_top
         paddings = (pad_left, pad_right, pad_top, pad_bottom)
 
-        return img, paddings, scale, (nw, nh), bbox
+        return paddings, scale, (nw, nh), bbox
 
     def forward(self, img, bbox=None):
-        img, paddings, _, (nw, nh), _ = self.calc_params_and_crop(img, bbox)
+        paddings, _, (nw, nh), bbox = self.calc_params_and_crop(img.shape, bbox)
 
+        # Optional: Crop the image
+        if bbox is not None:
+            x1, y1, x2, y2 = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
+            img = img.to(torch.float32)
+            img = img[:, y1:y2, x1:x2, :]
+
         # Resize the image
         img = img.to(torch.float32)
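
The Letterbox refactor above turns `calc_params_and_crop` into a pure function of the input shape, so the postprocess modules below can recover the same paddings and scale from `img.shape` alone, and the crop itself now happens once in `forward`. A minimal runnable sketch of the shape-only pattern (simplified letterbox math, NHWC layout assumed; the bbox crop path is omitted and `MiniLetterbox` is illustrative, not the repository's class):

```python
# Hedged sketch: shape-only letterbox parameter computation. The padding and
# scale formulas are the usual letterbox math and may differ in detail from
# the repository's implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F


class MiniLetterbox(nn.Module):
    def __init__(self, target_size=(256, 192), fill_value=114.0):
        super().__init__()
        self.target_size = target_size
        self.fill_value = fill_value

    def calc_params_and_crop(self, ishape, bbox=None):
        # Only the static shape is needed here, so callers can pass img.shape
        # and the image tensor itself never enters this computation.
        ih0, iw0 = ishape[1], ishape[2]  # NHWC
        th, tw = self.target_size
        scale = min(th / ih0, tw / iw0)
        nh, nw = int(ih0 * scale), int(iw0 * scale)
        pad_h, pad_w = th - nh, tw - nw
        pad_top, pad_left = pad_h // 2, pad_w // 2
        paddings = (pad_left, pad_w - pad_left, pad_top, pad_h - pad_top)
        return paddings, scale, (nw, nh), bbox

    def forward(self, img, bbox=None):
        paddings, _, (nw, nh), _ = self.calc_params_and_crop(img.shape, bbox)
        img = img.to(torch.float32).permute(0, 3, 1, 2)  # NHWC -> NCHW
        img = F.interpolate(img, size=(nh, nw), mode="bilinear", align_corners=False)
        return F.pad(img, paddings, value=self.fill_value)


# (1, 480, 640, 3) in -> (1, 3, 256, 192) letterboxed out
out = MiniLetterbox()(torch.zeros(1, 480, 640, 3))
```
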
@@ -117,7 +121,7 @@ class DetPostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)
 
     def forward(self, img, boxes):
-        _, paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img, None)
+        paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img.shape, None)
 
         boxes = boxes.float()
         boxes[:, :, 0] -= paddings[0]
@@ -141,7 +145,10 @@ class DetPostprocess(nn.Module):
         b1 = torch.min(b1, max_h)
         b2 = torch.min(b2, max_w)
         b3 = torch.min(b3, max_h)
-        boxes = torch.stack((b0, b1, b2, b3, boxes[:, :, 4]), dim=2)
+        boxes[:, :, 0] = b0
+        boxes[:, :, 1] = b1
+        boxes[:, :, 2] = b2
+        boxes[:, :, 3] = b3
 
         boxes[:, :, 0:4] /= scale
         return boxes
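
The `torch.stack` replacement here (and the matching keypoint change below) writes the clipped coordinates back column by column, which presumably avoids rebuilding the whole tensor and, notably, leaves the untouched score column `boxes[:, :, 4]` alone instead of threading it through the stack. A toy check that the two forms agree (illustrative shapes only):

```python
# Toy equivalence check: in-place column writes vs. the old torch.stack form.
import torch

boxes = torch.rand(1, 5, 5)  # (batch, num_boxes, [x1, y1, x2, y2, score])
b0 = boxes[:, :, 0].clamp(min=0.1)  # stand-in for the min/max clipping above
b1, b2, b3 = boxes[:, :, 1], boxes[:, :, 2], boxes[:, :, 3]

stacked = torch.stack((b0, b1, b2, b3, boxes[:, :, 4]), dim=2)  # old form

inplace = boxes.clone()
inplace[:, :, 0] = b0  # new form: write back only the edited columns
assert torch.equal(stacked, inplace)
```
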
@@ -173,7 +180,7 @@ class PosePostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)
 
     def forward(self, img, bbox, keypoints):
-        _, paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img, bbox)
+        paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img.shape, bbox)
 
         kp = keypoints.float()
         kp[:, :, 0] -= paddings[0]
@@ -191,7 +198,8 @@ class PosePostprocess(nn.Module):
         k1 = kp[:, :, 1]
         k0 = torch.min(k0, max_w)
         k1 = torch.min(k1, max_h)
-        kp = torch.stack((k0, k1), dim=2)
+        kp[:, :, 0] = k0
+        kp[:, :, 1] = k1
 
         kp[:, :, 0:2] /= scale
 
@@ -249,7 +257,7 @@ def main():
     # Initialize the PosePreprocess module
     preprocess_model = PosePreprocess(target_size=pose_target_size)
     det_dummy_input_c0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)
 
     # Export to ONNX
     torch.onnx.export(
@@ -268,7 +276,7 @@ def main():
     # Initialize the PosePostprocess module
     postprocess_model = PosePostprocess(target_size=pose_target_size)
     det_dummy_input_d0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)
     det_dummy_input_d2 = torch.rand(1, 17, 3)
 
     # Export to ONNX

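Casting the dummy bbox inputs to `int32` in the two `main()` hunks above matters because `torch.onnx.export` derives the graph's input dtypes from the example tensors it traces with; a plain `torch.tensor([[10, 10, 90, 40]])` would default to int64. A minimal hedged illustration with a hypothetical module (not the repository's exporter call):

```python
# Hypothetical minimal export: the dummy input's dtype becomes the ONNX
# input type, so the bbox is cast to int32 before tracing.
import torch
import torch.nn as nn


class Crop(nn.Module):
    def forward(self, img, bbox):
        x1, y1, x2, y2 = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
        return img[:, y1:y2, x1:x2, :]


img = torch.rand(1, 100, 100, 3)
bbox = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)  # int32, as in the diff
torch.onnx.export(
    Crop(), (img, bbox), "crop.onnx",
    input_names=["image", "bbox"], output_names=["crop"],
)
```
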
@@ -9,22 +9,23 @@ from tqdm import tqdm
 
 
 class BaseModel(ABC):
-    def __init__(self, model_path: str, warmup: int):
+    def __init__(
+        self, model_path: str, warmup: int, usetrt: bool = True, usegpu: bool = True
+    ):
         self.opt = ort.SessionOptions()
         providers = ort.get_available_providers()
         # ort.set_default_logger_severity(1)
 
-        provider = ""
-        if "CUDAExecutionProvider" in providers:
-            provider = "CUDAExecutionProvider"
-        else:
-            provider = "CPUExecutionProvider"
-        self.provider = provider
-        print("Found providers:", providers)
-        print("Using:", provider)
+        self.providers = []
+        if usetrt and "TensorrtExecutionProvider" in providers:
+            self.providers.append("TensorrtExecutionProvider")
+        if usegpu and "CUDAExecutionProvider" in providers:
+            self.providers.append("CUDAExecutionProvider")
+        self.providers.append("CPUExecutionProvider")
+        print("Using providers:", self.providers)
 
         self.session = ort.InferenceSession(
-            model_path, providers=[provider], sess_options=self.opt
+            model_path, providers=self.providers, sess_options=self.opt
         )
 
         self.input_names = [input.name for input in self.session.get_inputs()]
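
The provider change above adopts ONNX Runtime's ordered-fallback convention: `InferenceSession` walks the providers list in order and assigns each graph node to the first provider that supports it, so TensorRT (when requested and available) is tried first, then CUDA, with CPU always appended as the final fallback. The same pattern, condensed (assuming `onnxruntime` is installed; the flags mirror the diff's `usetrt`/`usegpu`):

```python
# Condensed sketch of ordered provider selection with a guaranteed CPU fallback.
import onnxruntime as ort


def build_providers(usetrt: bool = True, usegpu: bool = True) -> list:
    available = ort.get_available_providers()
    providers = []
    if usetrt and "TensorrtExecutionProvider" in available:
        providers.append("TensorrtExecutionProvider")
    if usegpu and "CUDAExecutionProvider" in available:
        providers.append("CUDAExecutionProvider")
    providers.append("CPUExecutionProvider")  # always present as a fallback
    return providers


# session = ort.InferenceSession("model.onnx", providers=build_providers())
```
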
@@ -65,7 +66,7 @@ class BaseModel(ABC):
            if "image" in iname:
                ishape = self.input_shapes[i]
                if "batch_size" in ishape:
-                    if self.provider == "TensorrtExecutionProvider":
+                    if "TensorrtExecutionProvider" in self.providers:
                        # Using different image sizes for TensorRT warmup takes too long
                        ishape = [1, 1000, 1000, 3]
                    else:
@@ -89,7 +90,7 @@ class BaseModel(ABC):
                    ]
                )
            else:
-                raise ValueError("Undefined input type")
+                raise ValueError("Undefined input type:", iname)
 
            tensor = tensor.astype(self.input_types[i])
            inputs[iname] = tensor
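
Finally, the warmup hunks follow from the providers list: whether TensorRT is active is now a membership test rather than an equality check against a single stored provider. Per the comment in the context, warmup pins one fixed image size under TensorRT because each new input shape can trigger a slow engine rebuild. A hedged sketch of that branch (hypothetical helper, not the repository's warmup loop):

```python
# Hypothetical helper: pick a warmup image shape, pinning a single size when
# TensorRT is active since new shapes can force a costly engine rebuild.
import random


def warmup_image_shape(providers):
    if "TensorrtExecutionProvider" in providers:
        return [1, 1000, 1000, 3]  # one fixed size, as in the diff
    # CPU/CUDA warmup can afford varied sizes; pick one at random.
    return [1, random.randint(480, 1080), random.randint(640, 1920), 3]
```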