Some further speedups.

Daniel
2024-12-02 17:00:04 +01:00
parent c8c48e4bf2
commit dc44a71b2c
4 changed files with 202 additions and 179 deletions


@@ -220,7 +220,7 @@ def update_sample(sample, new_dir=""):
 def load_image(path: str):
     image = cv2.imread(path, 3)
     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    image = np.array(image, dtype=np.float32)
+    image = np.asarray(image, dtype=np.uint8)
     return image
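Keeping the decoded frame as uint8 instead of casting to float32 avoids a 4x larger copy per image before inference; the float conversion presumably happens inside the exported graph now. A minimal sketch of the memory difference (illustrative frame size, not from the repo):

import numpy as np

# A 1080p RGB frame as uint8 vs. the float32 copy the old code produced.
frame_u8 = np.zeros((1080, 1920, 3), dtype=np.uint8)
frame_f32 = frame_u8.astype(np.float32)
print(frame_u8.nbytes)   # 6220800 bytes (~6 MB)
print(frame_f32.nbytes)  # 24883200 bytes (~24 MB)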


@@ -35,6 +35,8 @@ class BaseModel(ABC):
         input_type = self.session.get_inputs()[0].type
         if input_type == "tensor(float16)":
             self.input_type = np.float16
+        elif input_type == "tensor(uint8)":
+            self.input_type = np.uint8
         else:
             self.input_type = np.float32
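With the extra branch, the host-side cast in preprocess() matches whatever dtype the ONNX export declares, so astype(..., copy=False) becomes a no-op for uint8-input models. A rough sketch of the lookup (the model path is a placeholder):

import numpy as np
import onnxruntime as ort

# Map the ONNX input dtype string to a numpy dtype; unknown types fall back to float32.
sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
onnx_type = sess.get_inputs()[0].type          # e.g. "tensor(uint8)"
dtype_map = {"tensor(float16)": np.float16, "tensor(uint8)": np.uint8}
input_dtype = dtype_map.get(onnx_type, np.float32)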
@@ -152,12 +154,12 @@ class RTMDet(BaseModel):
         return boxes[keep_indices]
     def preprocess(self, image: np.ndarray):
-        th, tw = self.input_shape[2:]
+        th, tw = self.input_shape[1:3]
         image, self.dx, self.dy, self.scale = self.letterbox(
             image, (tw, th), fill_value=114
         )
         tensor = np.asarray(image).astype(self.input_type, copy=False)[..., ::-1]
-        tensor = np.expand_dims(tensor, axis=0).transpose((0, 3, 1, 2))
+        tensor = np.expand_dims(tensor, axis=0)
         return tensor
     def postprocess(self, tensor: List[np.ndarray]):
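Reading the target size from input_shape[1:3] and dropping the transpose suggests the export now takes NHWC uint8 tensors, so preprocessing reduces to letterboxing plus a batch dimension. A sketch under that assumption (shapes are illustrative):

import numpy as np

# Assumed NHWC export, e.g. input shape (1, 320, 320, 3).
input_shape = (1, 320, 320, 3)
th, tw = input_shape[1:3]                                # 320, 320
letterboxed = np.full((th, tw, 3), 114, dtype=np.uint8)  # stand-in for the letterboxed frame
tensor = np.expand_dims(letterboxed, axis=0)             # (1, 320, 320, 3), still uint8, no transpose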
@@ -274,10 +276,10 @@ class RTMPose(BaseModel):
         return extracted_region, new_box, scale
     def preprocess(self, image: np.ndarray, bbox: np.ndarray):
-        th, tw = self.input_shape[2:]
+        th, tw = self.input_shape[1:3]
         region, self.bbox, _ = self.region_of_interest_warped(image, bbox, (tw, th))
         tensor = np.asarray(region).astype(self.input_type, copy=False)
-        tensor = np.expand_dims(tensor, axis=0).transpose((0, 3, 1, 2))
+        tensor = np.expand_dims(tensor, axis=0)
         return tensor
     def postprocess(self, tensor: List[np.ndarray], **kwargs):
@@ -285,7 +287,7 @@ class RTMPose(BaseModel):
         kp = np.concatenate([tensor[0][0], np.expand_dims(scores, axis=-1)], axis=-1)
         # See: /mmpose/models/pose_estimators/topdown.py - add_pred_to_datasample()
-        th, tw = self.input_shape[2:]
+        th, tw = self.input_shape[1:3]
         bw, bh = [self.bbox[2] - self.bbox[0], self.bbox[3] - self.bbox[1]]
         kp[:, :2] /= np.array([tw, th])
         kp[:, :2] *= np.array([bw, bh])
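The rescaling above maps keypoints from the tw x th crop the model saw back into the detection box; the shift by the box origin falls outside the lines shown in this hunk. Worked through with hypothetical numbers:

import numpy as np

# One keypoint predicted at the crop centre, with its score in the last column.
tw, th = 288, 384
bbox = np.array([100.0, 50.0, 244.0, 242.0])   # hypothetical x1, y1, x2, y2
bw, bh = bbox[2] - bbox[0], bbox[3] - bbox[1]  # 144, 192
kp = np.array([[144.0, 192.0, 0.9]])
kp[:, :2] /= np.array([tw, th])                # -> (0.5, 0.5)
kp[:, :2] *= np.array([bw, bh])                # -> (72.0, 96.0), box-relative pixels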
@@ -331,10 +333,10 @@ def load_model():
     print("Loading onnx model ...")
     model = TopDown(
-        # "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_with-norm.onnx",
-        "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_fp16_with-norm.onnx",
-        # "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_with-norm.onnx",
-        "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_fp16_with-norm.onnx",
+        # "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_extra-steps.onnx",
+        "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_fp16_extra-steps.onnx",
+        # "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_extra-steps.onnx",
+        "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_384x288_fp16_extra-steps.onnx",
         conf_threshold=0.3,
         iou_threshold=0.3,
         warmup=30,
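The switch from the *_with-norm exports to the *_extra-steps variants suggests that normalisation, layout and dtype handling were folded into the ONNX graphs, which is what makes the slimmer preprocess() above possible. A minimal sketch of driving such an export directly with onnxruntime (the uint8 NHWC input and the provider list are assumptions):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession(
    "/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_320x320_fp16_extra-steps.onnx",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
frame = np.full((1, 320, 320, 3), 114, dtype=np.uint8)  # letterboxed dummy frame
outputs = sess.run(None, {sess.get_inputs()[0].name: frame})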