Some small updates.
@@ -43,5 +43,6 @@ mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/expor
 ```

 ```bash
+python3 /RapidPoseTriangulation/extras/mmdeploy/make_extra_graphs.py
 python3 /RapidPoseTriangulation/extras/mmdeploy/add_extra_steps.py
 ```
@@ -20,8 +20,8 @@ class Letterbox(nn.Module):
         self.target_size = target_size
         self.fill_value = fill_value

-    def calc_params_and_crop(self, img, bbox=None):
-        ih0, iw0 = img.shape[1:3]
+    def calc_params_and_crop(self, ishape, bbox=None):
+        ih0, iw0 = ishape[1], ishape[2]
         th, tw = self.target_size

         if bbox is not None:
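With this change `calc_params_and_crop` consumes only the input shape, so the letterbox parameters can be computed without touching pixel data. A minimal shape-only sketch, assuming the usual letterbox formulation (names like `th`, `tw`, `scale`, and the paddings ordering come from the diff; the centered-padding split is an assumption):

```python
# Sketch of shape-only letterbox parameters; the real method also handles
# the optional bbox branch shown in the next hunk.
def letterbox_params(ishape, target_size):
    ih0, iw0 = ishape[1], ishape[2]            # NHWC input height/width
    th, tw = target_size                       # target height/width
    scale = min(th / ih0, tw / iw0)            # uniform scale that fits target
    nh, nw = round(ih0 * scale), round(iw0 * scale)
    pad_h, pad_w = th - nh, tw - nw
    pad_top, pad_left = pad_h // 2, pad_w // 2           # assumed centering
    pad_bottom, pad_right = pad_h - pad_top, pad_w - pad_left
    return (pad_left, pad_right, pad_top, pad_bottom), scale, (nw, nh)

print(letterbox_params((1, 480, 640, 3), (256, 192)))
# ((0, 0, 56, 56), 0.3, (192, 144))
```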
@@ -44,8 +44,6 @@ class Letterbox(nn.Module):
             y2 = torch.min(y2, ih0).to(torch.int64)
             bbox = torch.stack((x1, y1, x2, y2), dim=0).unsqueeze(0)

-            img = img.to(torch.float32)
-            img = img[:, y1:y2, x1:x2, :]
             ih = y2 - y1
             iw = x2 - x1
         else:
@@ -63,10 +61,16 @@ class Letterbox(nn.Module):
         pad_bottom = pad_h - pad_top
         paddings = (pad_left, pad_right, pad_top, pad_bottom)

-        return img, paddings, scale, (nw, nh), bbox
+        return paddings, scale, (nw, nh), bbox

     def forward(self, img, bbox=None):
-        img, paddings, _, (nw, nh), _ = self.calc_params_and_crop(img, bbox)
+        paddings, _, (nw, nh), bbox = self.calc_params_and_crop(img.shape, bbox)

+        # Optional: Crop the image
+        if bbox is not None:
+            x1, y1, x2, y2 = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
+            img = img.to(torch.float32)
+            img = img[:, y1:y2, x1:x2, :]
+
         # Resize the image
         img = img.to(torch.float32)
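The crop itself now lives in `forward`, keyed off the bbox returned by the parameter computation. A runnable sketch of that NHWC crop, using the same `[[x1, y1, x2, y2]]` integer bbox layout as the dummy inputs later in the diff:

```python
import torch

img = torch.zeros(1, 480, 640, 3, dtype=torch.uint8)     # dummy NHWC frame
bbox = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)  # x1, y1, x2, y2
x1, y1, x2, y2 = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
img = img.to(torch.float32)
img = img[:, y1:y2, x1:x2, :]    # rows are y, columns are x in NHWC
print(img.shape)                 # torch.Size([1, 30, 80, 3])
```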
@@ -117,7 +121,7 @@ class DetPostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)

     def forward(self, img, boxes):
-        _, paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img, None)
+        paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img.shape, None)

         boxes = boxes.float()
         boxes[:, :, 0] -= paddings[0]
@@ -141,7 +145,10 @@ class DetPostprocess(nn.Module):
         b1 = torch.min(b1, max_h)
         b2 = torch.min(b2, max_w)
         b3 = torch.min(b3, max_h)
-        boxes = torch.stack((b0, b1, b2, b3, boxes[:, :, 4]), dim=2)
+        boxes[:, :, 0] = b0
+        boxes[:, :, 1] = b1
+        boxes[:, :, 2] = b2
+        boxes[:, :, 3] = b3

         boxes[:, :, 0:4] /= scale
         return boxes
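The commit message doesn't say why `torch.stack` gives way to per-column in-place writes here; plausibly it avoids the extra stack node in the exported graph while producing identical values, since the score column (index 4) is untouched either way. A quick equivalence check:

```python
import torch

boxes = torch.rand(1, 3, 5) * 100            # x1, y1, x2, y2, score
b0 = torch.clamp(boxes[:, :, 0], max=50.0)   # stand-ins for the min() clipping
b1 = torch.clamp(boxes[:, :, 1], max=50.0)
b2 = torch.clamp(boxes[:, :, 2], max=50.0)
b3 = torch.clamp(boxes[:, :, 3], max=50.0)
stacked = torch.stack((b0, b1, b2, b3, boxes[:, :, 4]), dim=2)
boxes[:, :, 0], boxes[:, :, 1], boxes[:, :, 2], boxes[:, :, 3] = b0, b1, b2, b3
print(torch.equal(stacked, boxes))           # True
```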
@@ -173,7 +180,7 @@ class PosePostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)

     def forward(self, img, bbox, keypoints):
-        _, paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img, bbox)
+        paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img.shape, bbox)

         kp = keypoints.float()
         kp[:, :, 0] -= paddings[0]
@@ -191,7 +198,8 @@ class PosePostprocess(nn.Module):
         k1 = kp[:, :, 1]
         k0 = torch.min(k0, max_w)
         k1 = torch.min(k1, max_h)
-        kp = torch.stack((k0, k1), dim=2)
+        kp[:, :, 0] = k0
+        kp[:, :, 1] = k1

         kp[:, :, 0:2] /= scale

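For keypoints the same rewrite changes behavior, not just form: the dummy input below is `torch.rand(1, 17, 3)` (x, y, confidence), and the old `torch.stack((k0, k1), dim=2)` returned a (1, 17, 2) tensor that dropped the confidence channel. In-place writes keep the full tensor, which looks like the motivation here:

```python
import torch

kp = torch.rand(1, 17, 3)                  # x, y, confidence per keypoint
k0 = torch.clamp(kp[:, :, 0], max=1.0)     # stand-ins for the min() clipping
k1 = torch.clamp(kp[:, :, 1], max=1.0)
print(torch.stack((k0, k1), dim=2).shape)  # torch.Size([1, 17, 2]), conf dropped
kp[:, :, 0], kp[:, :, 1] = k0, k1
print(kp.shape)                            # torch.Size([1, 17, 3]), conf kept
```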
@@ -249,7 +257,7 @@ def main():
     # Initialize the PosePreprocess module
     preprocess_model = PosePreprocess(target_size=pose_target_size)
     det_dummy_input_c0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)

     # Export to ONNX
     torch.onnx.export(
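`torch.tensor([[10, 10, 90, 40]])` defaults to int64, and `torch.onnx.export` takes each input's dtype from the example tensor, so the `.to(torch.int32)` cast pins the exported bbox input to int32. The commit doesn't state the reason; int32 index tensors are often the safer choice for TensorRT and similar runtimes. Verifying the dtypes:

```python
import torch

bbox = torch.tensor([[10, 10, 90, 40]])
print(bbox.dtype)                   # torch.int64, the default for Python ints
print(bbox.to(torch.int32).dtype)   # torch.int32, as now fed to the exporter
```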
@@ -268,7 +276,7 @@ def main():
     # Initialize the PosePostprocess module
     postprocess_model = PosePostprocess(target_size=pose_target_size)
     det_dummy_input_d0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)
     det_dummy_input_d2 = torch.rand(1, 17, 3)

     # Export to ONNX
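Both export hunks truncate before the arguments of `torch.onnx.export`. Purely as an illustration of the shape of such a call, continuing the `main()` context above (the output path and input names below are assumptions, not taken from the diff):

```python
# Hypothetical export call; the file name and tensor names are illustrative.
torch.onnx.export(
    postprocess_model,
    (det_dummy_input_d0, det_dummy_input_d1, det_dummy_input_d2),
    "pose_postprocess.onnx",
    input_names=["image", "bbox", "keypoints"],
)
```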
@@ -9,22 +9,23 @@ from tqdm import tqdm


 class BaseModel(ABC):
-    def __init__(self, model_path: str, warmup: int):
+    def __init__(
+        self, model_path: str, warmup: int, usetrt: bool = True, usegpu: bool = True
+    ):
         self.opt = ort.SessionOptions()
         providers = ort.get_available_providers()
         # ort.set_default_logger_severity(1)

-        provider = ""
-        if "CUDAExecutionProvider" in providers:
-            provider = "CUDAExecutionProvider"
-        else:
-            provider = "CPUExecutionProvider"
-        self.provider = provider
-        print("Found providers:", providers)
-        print("Using:", provider)
+        self.providers = []
+        if usetrt and "TensorrtExecutionProvider" in providers:
+            self.providers.append("TensorrtExecutionProvider")
+        if usegpu and "CUDAExecutionProvider" in providers:
+            self.providers.append("CUDAExecutionProvider")
+        self.providers.append("CPUExecutionProvider")
+        print("Using providers:", self.providers)

         self.session = ort.InferenceSession(
-            model_path, providers=[provider], sess_options=self.opt
+            model_path, providers=self.providers, sess_options=self.opt
         )

         self.input_names = [input.name for input in self.session.get_inputs()]
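In a separate file, the ONNX Runtime wrapper moves from a single provider string to an ordered preference list. ONNX Runtime tries the `providers` list in order when assigning graph nodes, so appending `CPUExecutionProvider` unconditionally guarantees a working fallback. The same selection logic as a self-contained sketch:

```python
import onnxruntime as ort

def pick_providers(usetrt: bool = True, usegpu: bool = True) -> list:
    """Ordered provider preference: TensorRT, then CUDA, then CPU."""
    available = ort.get_available_providers()
    chosen = []
    if usetrt and "TensorrtExecutionProvider" in available:
        chosen.append("TensorrtExecutionProvider")
    if usegpu and "CUDAExecutionProvider" in available:
        chosen.append("CUDAExecutionProvider")
    chosen.append("CPUExecutionProvider")  # always present as a fallback
    return chosen

# session = ort.InferenceSession("model.onnx", providers=pick_providers())
```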
@@ -65,7 +66,7 @@ class BaseModel(ABC):
             if "image" in iname:
                 ishape = self.input_shapes[i]
                 if "batch_size" in ishape:
-                    if self.provider == "TensorrtExecutionProvider":
+                    if "TensorrtExecutionProvider" in self.providers:
                         # Using different images sizes for TensorRT warmup takes too long
                         ishape = [1, 1000, 1000, 3]
                     else:
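The membership test replaces the old equality check because `self.providers` is now a list. The fixed shape matters because TensorRT compiles an engine per input shape, so warming up with varying sizes would trigger repeated engine builds. A matching warmup tensor might look like:

```python
import numpy as np

# One fixed NHWC shape keeps TensorRT warmup to a single engine build.
ishape = [1, 1000, 1000, 3]
dummy = np.random.randint(0, 256, size=ishape).astype(np.uint8)
```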
@@ -89,7 +90,7 @@ class BaseModel(ABC):
                     ]
                 )
             else:
-                raise ValueError("Undefined input type")
+                raise ValueError("Undefined input type:", iname)

             tensor = tensor.astype(self.input_types[i])
             inputs[iname] = tensor
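One small nit on the new raise: `ValueError("Undefined input type:", iname)` stringifies its arguments as a tuple. If clean error text matters, an f-string renders better:

```python
raise ValueError(f"Undefined input type: {iname}")
```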