Splitting 2D and 3D predictions into separate loops to prevent CPU priority issues.
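The change replaces one combined loop (load images, estimate 2D poses, triangulate, per label) with two passes: a first loop that caches 2D poses and per-stage timings, and a second loop that triangulates from that cache. Below is a minimal sketch of that structure, with load_images, run_2d_pose, and triangulate as hypothetical stand-ins for the repository's test_triangulate.load_image, utils_2d_pose.get_2d_pose, and rpt.Triangulator calls; the per-scene reset logic and the zero-detection guard from the diff are omitted for brevity.

# Minimal sketch of the two-pass structure introduced by this commit.
# load_images, run_2d_pose, and triangulate are hypothetical stand-ins
# for the repository's own helpers.
import time

def predict(labels, load_images, run_2d_pose, triangulate):
    # Pass 1: image loading and 2D pose estimation only.
    all_poses_2d, times = [], []
    for label in labels:
        t0 = time.time()
        images = load_images(label["imgpaths"])
        t1 = time.time()
        poses_2d = run_2d_pose(images)
        t2 = time.time()
        all_poses_2d.append(poses_2d)
        times.append([t1 - t0, t2 - t1, 0.0])  # [img, 2d, 3d placeholder]

    # Pass 2: triangulation, reading the cached 2D results.
    all_poses_3d, all_ids = [], []
    for i, label in enumerate(labels):
        t0 = time.time()
        poses_3d = triangulate(all_poses_2d[i])
        times[i][2] = time.time() - t0  # fill in the 3D slot
        all_poses_3d.append(poses_3d)
        all_ids.append(label["id"])
    return all_poses_3d, all_ids, times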
@@ -344,32 +344,19 @@ def main():
    # Print a dataset sample for debugging
    print(labels[0])

    print("\nRunning predictions ...")
    all_poses = []
    all_ids = []
    print("\nCalculating 2D predictions ...")
    all_poses_2d = []
    times = []
    triangulator = rpt.Triangulator(
        min_match_score=min_match_score, min_group_size=min_group_size
    )
    old_scene = ""
    old_index = -1
    for label in tqdm.tqdm(labels):
        images_2d = []

        if old_scene != label.get("scene", "") or (
            old_index + datasets[dataset_use]["take_interval"] < label["index"]
        ):
            # Reset last poses if scene changes
            old_scene = label.get("scene", "")
            triangulator.reset()

        try:
            start = time.time()
            for i in range(len(label["imgpaths"])):
                imgpath = label["imgpaths"][i]
                img = test_triangulate.load_image(imgpath)
                images_2d.append(img)
            print("IMG time:", time.time() - start)
            time_imgs = time.time() - start
        except cv2.error:
            print("One of the paths not found:", label["imgpaths"])
            continue
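The reset condition in this hunk, repeated in the 3D loop below, fires when the scene name changes or when the label index jumps by more than the dataset's take interval, so the triangulator does not carry matches across unrelated takes. A small sketch of that condition, with take_interval assumed to equal datasets[dataset_use]["take_interval"]:

# Sketch of the per-take reset condition used in the loops of this diff.
# take_interval is assumed to come from datasets[dataset_use]["take_interval"].
def needs_reset(label, old_scene, old_index, take_interval):
    scene_changed = old_scene != label.get("scene", "")
    index_jumped = old_index + take_interval < label["index"]
    return scene_changed or index_jumped

When the condition holds, the diff updates old_scene and calls triangulator.reset() before processing the label.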
@@ -391,7 +378,28 @@ def main():
        poses_2d = utils_2d_pose.get_2d_pose(kpt_model, images_2d)
        poses_2d = test_triangulate.update_keypoints(poses_2d, joint_names_2d)
        time_2d = time.time() - start
        print("2D time:", time_2d)

        all_poses_2d.append(poses_2d)
        times.append([time_imgs, time_2d, 0])

    print("\nCalculating 3D predictions ...")
    all_poses_3d = []
    all_ids = []
    triangulator = rpt.Triangulator(
        min_match_score=min_match_score, min_group_size=min_group_size
    )
    old_scene = ""
    old_index = -1
    for i in tqdm.tqdm(range(len(labels))):
        label = labels[i]
        poses_2d = all_poses_2d[i]

        if old_scene != label.get("scene", "") or (
            old_index + datasets[dataset_use]["take_interval"] < label["index"]
        ):
            # Reset last poses if scene changes
            old_scene = label.get("scene", "")
            triangulator.reset()

        start = time.time()
        if sum(np.sum(p) for p in poses_2d) == 0:
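The 3D loop guards triangulation with the zero-detection check above; the body of that branch falls outside this hunk. A hedged sketch of how such a guard is commonly used, not necessarily what this script does in that branch:

# Sketch of a zero-detection guard. What the script actually does in this
# branch is not shown in the hunk, so the empty-list return is an assumption.
import numpy as np

def maybe_triangulate(poses_2d, triangulate):
    if sum(np.sum(p) for p in poses_2d) == 0:
        return []  # no 2D keypoints detected in any view; skip the 3D step
    return triangulate(poses_2d)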
@@ -402,14 +410,12 @@ def main():
        poses3D = triangulator.triangulate_poses(
            poses_2d, rpt_cameras, roomparams, joint_names_2d
        )

        time_3d = time.time() - start
        print("3D time:", time_3d)

        old_index = label["index"]
        all_poses.append(np.array(poses3D).tolist())
        all_poses_3d.append(np.array(poses3D).tolist())
        all_ids.append(label["id"])
        times.append((time_2d, time_3d))
        times[i][2] = time_3d

        # Print per-step triangulation timings
        print("")
@@ -418,9 +424,11 @@ def main():
    warmup_iters = 10
    if len(times) > warmup_iters:
        times = times[warmup_iters:]
    avg_time_2d = np.mean([t[0] for t in times])
    avg_time_3d = np.mean([t[1] for t in times])
    avg_time_im = np.mean([t[0] for t in times])
    avg_time_2d = np.mean([t[1] for t in times])
    avg_time_3d = np.mean([t[2] for t in times])
    tstats = {
        "img_loading": avg_time_im,
        "avg_time_2d": avg_time_2d,
        "avg_time_3d": avg_time_3d,
        "avg_fps": 1.0 / (avg_time_2d + avg_time_3d),
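The timing summary drops the first warmup_iters entries and then averages each column of the [time_imgs, time_2d, time_3d] triples; as in the diff, the FPS figure is taken over the 2D and 3D stages only. A compact sketch of the same computation:

# Sketch of the warmup-trimmed timing summary, assuming times holds
# [time_imgs, time_2d, time_3d] triples as built in the loops above.
import numpy as np

def summarize_times(times, warmup_iters=10):
    if len(times) > warmup_iters:
        times = times[warmup_iters:]  # discard warm-up iterations
    avg_im, avg_2d, avg_3d = np.mean(np.asarray(times), axis=0)
    return {
        "img_loading": float(avg_im),
        "avg_time_2d": float(avg_2d),
        "avg_time_3d": float(avg_3d),
        "avg_fps": 1.0 / (avg_2d + avg_3d),
    }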
@@ -430,7 +438,7 @@ def main():

    _ = evals.mpjpe.run_eval(
        labels,
        all_poses,
        all_poses_3d,
        all_ids,
        joint_names_net=joint_names_3d,
        joint_names_use=eval_joints,
@@ -438,7 +446,7 @@ def main():
    )
    _ = evals.pcp.run_eval(
        labels,
        all_poses,
        all_poses_3d,
        all_ids,
        joint_names_net=joint_names_3d,
        joint_names_use=eval_joints,