Some small updates.

Daniel
2024-12-04 11:41:12 +01:00
parent 97ff32b9ce
commit 6452d20ec8
3 changed files with 34 additions and 24 deletions

View File

@@ -43,5 +43,6 @@ mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/expor
 ```
 ```bash
 python3 /RapidPoseTriangulation/extras/mmdeploy/make_extra_graphs.py
+python3 /RapidPoseTriangulation/extras/mmdeploy/add_extra_steps.py
 ```

View File

@@ -20,8 +20,8 @@ class Letterbox(nn.Module):
         self.target_size = target_size
         self.fill_value = fill_value
 
-    def calc_params_and_crop(self, img, bbox=None):
-        ih0, iw0 = img.shape[1:3]
+    def calc_params_and_crop(self, ishape, bbox=None):
+        ih0, iw0 = ishape[1], ishape[2]
         th, tw = self.target_size
 
         if bbox is not None:
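Taking the shape instead of the tensor means the parameter computation no longer touches image data at all, and a plain tuple works the same as a `torch.Size`. A quick sanity sketch of that equivalence (sizes invented for illustration):

```python
import torch

img = torch.zeros(1, 480, 640, 3)  # NHWC layout, as used elsewhere in this file

# ishape[1], ishape[2] read identically from a torch.Size and from a tuple
for ishape in (img.shape, (1, 480, 640, 3)):
    ih0, iw0 = ishape[1], ishape[2]
    assert (ih0, iw0) == (480, 640)
```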
@@ -44,8 +44,6 @@ class Letterbox(nn.Module):
             y2 = torch.min(y2, ih0).to(torch.int64)
             bbox = torch.stack((x1, y1, x2, y2), dim=0).unsqueeze(0)
 
-            img = img.to(torch.float32)
-            img = img[:, y1:y2, x1:x2, :]
             ih = y2 - y1
             iw = x2 - x1
         else:
@@ -63,10 +61,16 @@ class Letterbox(nn.Module):
         pad_bottom = pad_h - pad_top
         paddings = (pad_left, pad_right, pad_top, pad_bottom)
 
-        return img, paddings, scale, (nw, nh), bbox
+        return paddings, scale, (nw, nh), bbox
 
     def forward(self, img, bbox=None):
-        img, paddings, _, (nw, nh), _ = self.calc_params_and_crop(img, bbox)
+        paddings, _, (nw, nh), bbox = self.calc_params_and_crop(img.shape, bbox)
+
+        # Optional: Crop the image
+        if bbox is not None:
+            x1, y1, x2, y2 = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
+            img = img.to(torch.float32)
+            img = img[:, y1:y2, x1:x2, :]
 
         # Resize the image
         img = img.to(torch.float32)
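With this split, `calc_params_and_crop` is purely shape-driven and the crop itself lives in `forward`, so the letterbox parameters can be computed without an image tensor at hand. A minimal sketch of calling the new signature directly (the target size and input shape are invented, and `Letterbox` is assumed to be constructible with just `target_size`, as `DetPostprocess` does below):

```python
import torch

letterbox = Letterbox(target_size=(256, 192))  # assumed constructor, mirroring DetPostprocess

# No image needed: any NHWC shape-like value is enough to get the params.
paddings, scale, (nw, nh), bbox = letterbox.calc_params_and_crop((1, 480, 640, 3), None)
print(paddings, scale, (nw, nh))  # bbox is expected to pass through as None here
```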
@@ -117,7 +121,7 @@ class DetPostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)
 
     def forward(self, img, boxes):
-        _, paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img, None)
+        paddings, scale, _, _ = self.letterbox.calc_params_and_crop(img.shape, None)
 
         boxes = boxes.float()
         boxes[:, :, 0] -= paddings[0]
@@ -141,7 +145,10 @@ class DetPostprocess(nn.Module):
         b1 = torch.min(b1, max_h)
         b2 = torch.min(b2, max_w)
         b3 = torch.min(b3, max_h)
-        boxes = torch.stack((b0, b1, b2, b3, boxes[:, :, 4]), dim=2)
+        boxes[:, :, 0] = b0
+        boxes[:, :, 1] = b1
+        boxes[:, :, 2] = b2
+        boxes[:, :, 3] = b3
 
         boxes[:, :, 0:4] /= scale
         return boxes
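The switch from `torch.stack` to in-place column writes leaves the score column of `boxes` untouched instead of rebuilding the whole tensor; the same pattern is applied to the keypoints in `PosePostprocess` below. A toy illustration with invented values:

```python
import torch

# (batch, num_boxes, 5): x1, y1, x2, y2, score
boxes = torch.tensor([[[12.0, 7.0, 700.0, 500.0, 0.93]]])
max_w, max_h = torch.tensor(640.0), torch.tensor(480.0)

b0 = torch.min(boxes[:, :, 0], max_w)
b1 = torch.min(boxes[:, :, 1], max_h)
b2 = torch.min(boxes[:, :, 2], max_w)
b3 = torch.min(boxes[:, :, 3], max_h)

# Write the clamped coordinates back; column 4 (score) is never touched.
boxes[:, :, 0] = b0
boxes[:, :, 1] = b1
boxes[:, :, 2] = b2
boxes[:, :, 3] = b3
print(boxes)  # coordinates clamped to 640x480, score still 0.93
```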
@@ -173,7 +180,7 @@ class PosePostprocess(nn.Module):
         self.letterbox = Letterbox(target_size)
 
     def forward(self, img, bbox, keypoints):
-        _, paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img, bbox)
+        paddings, scale, _, bbox = self.letterbox.calc_params_and_crop(img.shape, bbox)
 
         kp = keypoints.float()
         kp[:, :, 0] -= paddings[0]
@@ -191,7 +198,8 @@ class PosePostprocess(nn.Module):
         k1 = kp[:, :, 1]
         k0 = torch.min(k0, max_w)
         k1 = torch.min(k1, max_h)
-        kp = torch.stack((k0, k1), dim=2)
+        kp[:, :, 0] = k0
+        kp[:, :, 1] = k1
 
         kp[:, :, 0:2] /= scale
@@ -249,7 +257,7 @@ def main():
     # Initialize the PosePreprocess module
     preprocess_model = PosePreprocess(target_size=pose_target_size)
     det_dummy_input_c0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_c1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)
 
     # Export to ONNX
     torch.onnx.export(
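Casting the dummy bbox to `int32` matters because `torch.onnx.export` takes the graph's input dtypes from the example tensors it traces, and `torch.tensor([[10, 10, 90, 40]])` defaults to `int64`. A one-line check of that default (illustrative only):

```python
import torch

print(torch.tensor([[10, 10, 90, 40]]).dtype)                  # torch.int64 (default)
print(torch.tensor([[10, 10, 90, 40]]).to(torch.int32).dtype)  # torch.int32
```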
@@ -268,7 +276,7 @@ def main():
     # Initialize the PosePostprocess module
     postprocess_model = PosePostprocess(target_size=pose_target_size)
     det_dummy_input_d0 = torch.from_numpy(image).unsqueeze(0)
-    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]])
+    det_dummy_input_d1 = torch.tensor([[10, 10, 90, 40]]).to(torch.int32)
     det_dummy_input_d2 = torch.rand(1, 17, 3)
 
     # Export to ONNX