Merge branch 'upds' into 'master'

Various updates and improvements

See merge request Percipiote/RapidPoseTriangulation!2
This commit is contained in:
DANBER
2024-10-29 15:54:38 +00:00
24 changed files with 4152 additions and 4090 deletions

4
.gitignore vendored
View File

@ -1,5 +1,5 @@
spt_wrap.* rpt_wrap.*
spt.py rpt.py
*.bin *.bin
# Prerequisites # Prerequisites

View File

@ -1,4 +1,4 @@
FROM nvcr.io/nvidia/tensorflow:22.08-tf2-py3 FROM nvcr.io/nvidia/pytorch:23.02-py3
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8 ENV LANG=C.UTF-8
@ -15,11 +15,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends python3-tk
# Update pip to allow installation of skelda in editable mode # Update pip to allow installation of skelda in editable mode
RUN pip3 install --upgrade --no-cache-dir pip RUN pip3 install --upgrade --no-cache-dir pip
# Install pytorch
RUN pip3 install --upgrade --no-cache-dir "torch<2.0"
RUN pip3 install --upgrade --no-cache-dir "torchvision<0.15"
# Install MMPose # Install MMPose
ENV FORCE_CUDA="1"
ENV MMCV_WITH_OPS=1
RUN pip3 install --upgrade --no-cache-dir openmim RUN pip3 install --upgrade --no-cache-dir openmim
RUN mim install mmengine RUN mim install mmengine
RUN mim install "mmcv>=2,<2.2.0" RUN mim install "mmcv>=2,<2.2.0"
@ -45,5 +43,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends libopencv-dev
COPY ./skelda/ /skelda/ COPY ./skelda/ /skelda/
RUN pip3 install --no-cache-dir -e /skelda/ RUN pip3 install --no-cache-dir -e /skelda/
WORKDIR /SimplePoseTriangulation/ WORKDIR /RapidPoseTriangulation/
CMD ["/bin/bash"] CMD ["/bin/bash"]

View File

@ -1,11 +1,14 @@
# SimplePoseTriangulation # RapidPoseTriangulation
Triangulation of multiple persons from multiple camera views. Fast triangulation of multiple persons from multiple camera views.
<div align="center"> <div align="center">
<img src="media/2d-k.png" alt="2D detections" width="30%"/> <img src="media/2d-k.jpg" alt="2D detections" width="65%"/>
<img src="media/3d-p.png" alt="3D detections" width="35%"/> <b>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</b>
<img src="media/2d-p.png" alt="3D to 2D projection" width="30%"/> <img src="media/3d-p.jpg" alt="3D detections" width="30%"/>
<br>
<br>
<img src="media/2d-p.jpg" alt="3D to 2D projection" width="95%"/>
</div> </div>
<br> <br>
@ -14,25 +17,25 @@ Triangulation of multiple persons from multiple camera views.
- Clone this project with submodules: - Clone this project with submodules:
```bash ```bash
git clone --recurse-submodules https://gitlab.com/Percipiote/SimplePoseTriangulation.git git clone --recurse-submodules https://gitlab.com/Percipiote/RapidPoseTriangulation.git
cd SimplePoseTriangulation/ cd RapidPoseTriangulation/
``` ```
- Build docker container: - Build docker container:
```bash ```bash
docker build --progress=plain -t simpleposetriangulation . docker build --progress=plain -t rapidposetriangulation .
./run_container.sh ./run_container.sh
``` ```
- Test with samples: - Test with samples:
```bash ```bash
python3 /SimplePoseTriangulation/scripts/test_triangulate.py python3 /RapidPoseTriangulation/scripts/test_triangulate.py
``` ```
- Test with _skelda_ dataset: - Test with _skelda_ dataset:
```bash ```bash
export CUDA_VISIBLE_DEVICES=0 export CUDA_VISIBLE_DEVICES=0
python3 /SimplePoseTriangulation/scripts/test_skelda_dataset.py python3 /RapidPoseTriangulation/scripts/test_skelda_dataset.py
``` ```
<br> <br>
@ -40,5 +43,5 @@ Triangulation of multiple persons from multiple camera views.
## Debugging ## Debugging
```bash ```bash
cd /SimplePoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py cd /RapidPoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py
``` ```

BIN
media/2d-k.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 329 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

BIN
media/2d-p.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 334 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

BIN
media/3d-p.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 229 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 542 KiB

File diff suppressed because it is too large Load Diff

View File

@ -1220,8 +1220,7 @@ std::vector<std::tuple<cv::Point3f, cv::Mat, std::vector<int>>> TriangulatorInte
cv::Mat &group_pose = std::get<1>(group); cv::Mat &group_pose = std::get<1>(group);
// Calculate average joint distance // Calculate average joint distance
float dist_sum = 0.0; std::vector<float> dists;
size_t count = 0;
for (size_t row = 0; row < num_joints; ++row) for (size_t row = 0; row < num_joints; ++row)
{ {
const float *pose_3d_ptr = pose_3d.ptr<float>(row); const float *pose_3d_ptr = pose_3d.ptr<float>(row);
@ -1236,15 +1235,20 @@ std::vector<std::tuple<cv::Point3f, cv::Mat, std::vector<int>>> TriangulatorInte
float dy = pose_3d_ptr[1] - group_pose_ptr[1]; float dy = pose_3d_ptr[1] - group_pose_ptr[1];
float dz = pose_3d_ptr[2] - group_pose_ptr[2]; float dz = pose_3d_ptr[2] - group_pose_ptr[2];
float dist_sq = dx * dx + dy * dy + dz * dz; float dist_sq = dx * dx + dy * dy + dz * dz;
dist_sum += std::sqrt(dist_sq); dists.push_back(std::sqrt(dist_sq));
count++;
} }
} }
if (dists.size() >= 5)
if (count > 0) {
// Drop highest value to reduce influence of outliers
auto max_it = std::max_element(dists.begin(), dists.end());
dists.erase(max_it);
}
if (dists.size() > 0)
{ {
// Check if the average joint distance is close enough // Check if the average joint distance is close enough
float avg_dist = dist_sum / count; float avg_dist = std::accumulate(dists.begin(), dists.end(), 0.0);
avg_dist /= static_cast<float>(dists.size());
if (avg_dist < max_joint_avg_dist && avg_dist < best_dist) if (avg_dist < max_joint_avg_dist && avg_dist < best_dist)
{ {
best_dist = avg_dist; best_dist = avg_dist;
@ -1310,7 +1314,113 @@ std::vector<std::tuple<cv::Point3f, cv::Mat, std::vector<int>>> TriangulatorInte
} }
} }
return groups; // Merge close groups
// Depending on the initial group creation, one or more groups can be created that in the end
// share the same persons, even if they had a larger distance at the beginning
// So merge them similar to the group assignment before
std::vector<std::tuple<cv::Point3f, cv::Mat, std::vector<int>>> merged_groups;
for (size_t i = 0; i < groups.size(); ++i)
{
size_t num_joints = std::get<1>(groups[i]).rows;
auto &group = groups[i];
auto &group_visible_counts = per_group_visible_counts[i];
float best_dist = std::numeric_limits<float>::infinity();
int best_group = -1;
for (size_t j = 0; j < merged_groups.size(); ++j)
{
auto &merged_group = merged_groups[j];
// Calculate average joint distance
std::vector<float> dists;
for (size_t row = 0; row < num_joints; ++row)
{
const float *group_pose_ptr = std::get<1>(group).ptr<float>(row);
const float *merged_pose_ptr = std::get<1>(merged_group).ptr<float>(row);
float score1 = group_pose_ptr[3];
float score2 = merged_pose_ptr[3];
if (score1 > min_score && score2 > min_score)
{
float dx = group_pose_ptr[0] - merged_pose_ptr[0];
float dy = group_pose_ptr[1] - merged_pose_ptr[1];
float dz = group_pose_ptr[2] - merged_pose_ptr[2];
float dist_sq = dx * dx + dy * dy + dz * dz;
dists.push_back(std::sqrt(dist_sq));
}
}
if (dists.size() >= 5)
{
// Drop highest value to reduce influence of outliers
auto max_it = std::max_element(dists.begin(), dists.end());
dists.erase(max_it);
}
if (dists.size() > 0)
{
// Check if the average joint distance is close enough
float avg_dist = std::accumulate(dists.begin(), dists.end(), 0.0);
avg_dist /= static_cast<float>(dists.size());
if (avg_dist < max_joint_avg_dist && avg_dist < best_dist)
{
best_dist = avg_dist;
best_group = static_cast<int>(j);
}
}
}
if (best_group == -1)
{
// Create a new group
merged_groups.push_back(group);
}
else
{
// Update existing group
auto &merged_group = merged_groups[best_group];
cv::Point3f &merged_center = std::get<0>(merged_group);
cv::Mat &merged_group_pose = std::get<1>(merged_group);
std::vector<int> &merged_group_indices = std::get<2>(merged_group);
float n_elems1 = static_cast<float>(merged_group_indices.size());
float n_elems2 = static_cast<float>(std::get<2>(group).size());
float inv1 = n_elems1 / (n_elems1 + n_elems2);
float inv2 = n_elems2 / (n_elems1 + n_elems2);
// Update group center
merged_center.x = (merged_center.x * inv1 + std::get<0>(group).x * inv2);
merged_center.y = (merged_center.y * inv1 + std::get<0>(group).y * inv2);
merged_center.z = (merged_center.z * inv1 + std::get<0>(group).z * inv2);
// Update group pose
for (size_t row = 0; row < num_joints; ++row)
{
const float *group_pose_ptr = std::get<1>(group).ptr<float>(row);
float *merged_pose_ptr = merged_group_pose.ptr<float>(row);
if (group_pose_ptr[3] > min_score)
{
float j_elems1 = static_cast<float>(group_visible_counts[row]);
float j_elems2 = static_cast<float>(per_group_visible_counts[best_group][row]);
float inv1 = j_elems1 / (j_elems1 + j_elems2);
float inv2 = j_elems2 / (j_elems1 + j_elems2);
merged_pose_ptr[0] = (merged_pose_ptr[0] * inv1 + group_pose_ptr[0] * inv2);
merged_pose_ptr[1] = (merged_pose_ptr[1] * inv1 + group_pose_ptr[1] * inv2);
merged_pose_ptr[2] = (merged_pose_ptr[2] * inv1 + group_pose_ptr[2] * inv2);
merged_pose_ptr[3] = (merged_pose_ptr[3] * inv1 + group_pose_ptr[3] * inv2);
group_visible_counts[row]++;
}
}
// Merge indices
merged_group_indices.insert(
merged_group_indices.end(), std::get<2>(group).begin(), std::get<2>(group).end());
}
}
return merged_groups;
} }
// ================================================================================================= // =================================================================================================
@ -1424,7 +1534,7 @@ cv::Mat TriangulatorInternal::merge_group(const std::vector<cv::Mat> &poses_3d,
} }
// Select the best-k proposals for each joint that are closest to the initial pose // Select the best-k proposals for each joint that are closest to the initial pose
int keep_best = 3; int keep_best = std::max(3, num_poses / 3);
cv::Mat best_k_mask = cv::Mat::zeros(num_poses, num_joints, CV_8U); cv::Mat best_k_mask = cv::Mat::zeros(num_poses, num_joints, CV_8U);
for (int j = 0; j < num_joints; ++j) for (int j = 0; j < num_joints; ++j)
{ {

4
run_container.sh Normal file → Executable file
View File

@ -3,9 +3,9 @@
xhost + xhost +
docker run --privileged --rm --network host -it \ docker run --privileged --rm --network host -it \
--gpus all --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 \ --gpus all --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 \
--volume "$(pwd)"/:/SimplePoseTriangulation/ \ --volume "$(pwd)"/:/RapidPoseTriangulation/ \
--volume "$(pwd)"/../datasets/:/datasets/ \ --volume "$(pwd)"/../datasets/:/datasets/ \
--volume "$(pwd)"/skelda/:/skelda/ \ --volume "$(pwd)"/skelda/:/skelda/ \
--volume /tmp/.X11-unix:/tmp/.X11-unix \ --volume /tmp/.X11-unix:/tmp/.X11-unix \
--env DISPLAY --env QT_X11_NO_MITSHM=1 \ --env DISPLAY --env QT_X11_NO_MITSHM=1 \
simpleposetriangulation rapidposetriangulation

View File

@ -1,46 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
from skelda import utils_view
# ==================================================================================================
def show_poses2d(bodies, images, joint_names, title=""):
num_imgs = len(images)
rowbreak = int(num_imgs / 2.0 + 0.5)
fig, axs = plt.subplots(2, rowbreak, figsize=(30, 20))
fig.suptitle(title, fontsize=20)
if isinstance(bodies, np.ndarray):
bodies = bodies.tolist()
# Draw skeletons into images
for i, image in enumerate(images):
colors = plt.cm.hsv(np.linspace(0, 1, len(bodies[i]), endpoint=False)).tolist()
colors = [[int(c[0] * 255), int(c[1] * 255), int(c[2] * 255)] for c in colors]
for j, body in enumerate(bodies[i]):
image = utils_view.draw_body_in_image(image, body, joint_names, colors[j])
# Rescale image range for plotting
images = [img / 255.0 for img in images]
if rowbreak == 1:
axs[0].imshow(images[0])
if len(images) == 2:
axs[1].imshow(images[1])
else:
# Optionally delete last empty plot
fig.delaxes(axs[1])
else:
for i in range(rowbreak):
axs[0][i].imshow(images[i])
if i + rowbreak < num_imgs:
axs[1][i].imshow(images[i + rowbreak])
else:
# Optionally delete last empty plot
fig.delaxes(axs[1][i])
return fig

View File

@ -10,10 +10,10 @@ import tqdm
import test_triangulate import test_triangulate
import utils_2d_pose import utils_2d_pose
from skelda import evals, utils_pose from skelda import evals
sys.path.append("/SimplePoseTriangulation/swig/") sys.path.append("/RapidPoseTriangulation/swig/")
import spt import rpt
# ================================================================================================== # ==================================================================================================
@ -100,7 +100,7 @@ if dataset_use.endswith("_wb"):
# eval_joints[eval_joints.index("head")] = "nose" # eval_joints[eval_joints.index("head")] = "nose"
eval_joints = list(joint_names_2d) eval_joints = list(joint_names_2d)
# output_dir = "/SimplePoseTriangulation/data/testoutput/" # output_dir = "/RapidPoseTriangulation/data/testoutput/"
output_dir = "" output_dir = ""
# ================================================================================================== # ==================================================================================================
@ -197,10 +197,6 @@ def load_labels(dataset: dict):
if take_interval > 1: if take_interval > 1:
labels = [l for i, l in enumerate(labels) if i % take_interval == 0] labels = [l for i, l in enumerate(labels) if i % take_interval == 0]
# Filter joints
fj_func = lambda x: utils_pose.filter_joints_3d(x, eval_joints)
labels = list(map(fj_func, labels))
return labels return labels
@ -273,14 +269,16 @@ def main():
print("\nRunning predictions ...") print("\nRunning predictions ...")
all_poses = [] all_poses = []
all_ids = [] all_ids = []
all_paths = []
times = [] times = []
triangulator = spt.Triangulator(min_score=minscore, min_group_size=min_group_size) triangulator = rpt.Triangulator(min_score=minscore, min_group_size=min_group_size)
old_scene = "" old_scene = ""
old_index = -1
for label in tqdm.tqdm(labels): for label in tqdm.tqdm(labels):
images_2d = [] images_2d = []
if old_scene != label.get("scene", "") or dataset_use == "human36m_wb": if old_scene != label.get("scene", "") or (
old_index + datasets[dataset_use]["take_interval"] < label["index"]
):
# Reset last poses if scene changes # Reset last poses if scene changes
old_scene = label.get("scene", "") old_scene = label.get("scene", "")
triangulator.reset() triangulator.reset()
@ -319,18 +317,18 @@ def main():
if sum(np.sum(p) for p in poses_2d) == 0: if sum(np.sum(p) for p in poses_2d) == 0:
poses3D = np.zeros([1, len(joint_names_3d), 4]).tolist() poses3D = np.zeros([1, len(joint_names_3d), 4]).tolist()
else: else:
spt_cameras = spt.convert_cameras(label["cameras"]) rpt_cameras = rpt.convert_cameras(label["cameras"])
roomparams = [label["room_size"], label["room_center"]] roomparams = [label["room_size"], label["room_center"]]
poses3D = triangulator.triangulate_poses( poses3D = triangulator.triangulate_poses(
poses_2d, spt_cameras, roomparams, joint_names_2d poses_2d, rpt_cameras, roomparams, joint_names_2d
) )
time_3d = time.time() - start time_3d = time.time() - start
print("3D time:", time_3d) print("3D time:", time_3d)
all_poses.append(np.array(poses3D)) old_index = label["index"]
all_poses.append(np.array(poses3D).tolist())
all_ids.append(label["id"]) all_ids.append(label["id"])
all_paths.append(label["imgpaths"])
times.append((time_2d, time_3d)) times.append((time_2d, time_3d))
# Print per-step triangulation timings # Print per-step triangulation timings

View File

@ -9,12 +9,11 @@ import cv2
import matplotlib import matplotlib
import numpy as np import numpy as np
import draw_utils
import utils_2d_pose import utils_2d_pose
from skelda import utils_pose from skelda import utils_pose, utils_view
sys.path.append("/SimplePoseTriangulation/swig/") sys.path.append("/RapidPoseTriangulation/swig/")
import spt import rpt
# ================================================================================================== # ==================================================================================================
@ -324,14 +323,14 @@ def main():
print("2D time:", time.time() - stime) print("2D time:", time.time() - stime)
# print([np.array(p).round(6).tolist() for p in poses_2d]) # print([np.array(p).round(6).tolist() for p in poses_2d])
fig1 = draw_utils.show_poses2d( fig1 = utils_view.draw_many_images(
poses_2d, np.array(images_2d), joint_names_2d, "2D detections" sample["imgpaths_color"], [], [], poses_2d, joint_names_2d, "2D detections"
) )
fig1.savefig(os.path.join(dirpath, "2d-k.png"), dpi=fig1.dpi) fig1.savefig(os.path.join(dirpath, "2d-k.png"), dpi=fig1.dpi)
# draw_utils.utils_view.show_plots() # draw_utils.utils_view.show_plots()
if len(images_2d) == 1: if len(images_2d) == 1:
draw_utils.utils_view.show_plots() utils_view.show_plots()
continue continue
# Get 3D poses # Get 3D poses
@ -339,9 +338,9 @@ def main():
poses3D = np.zeros([1, len(joint_names_3d), 4]) poses3D = np.zeros([1, len(joint_names_3d), 4])
poses2D = np.zeros([len(images_2d), 1, len(joint_names_3d), 3]) poses2D = np.zeros([len(images_2d), 1, len(joint_names_3d), 3])
else: else:
cameras = spt.convert_cameras(camparams) cameras = rpt.convert_cameras(camparams)
roomp = [roomparams["room_size"], roomparams["room_center"]] roomp = [roomparams["room_size"], roomparams["room_center"]]
triangulator = spt.Triangulator(min_score=0.95) triangulator = rpt.Triangulator(min_score=0.95)
stime = time.time() stime = time.time()
poses_3d = triangulator.triangulate_poses( poses_3d = triangulator.triangulate_poses(
@ -361,15 +360,13 @@ def main():
# print(poses2D) # print(poses2D)
# print(poses3D.round(3).tolist()) # print(poses3D.round(3).tolist())
fig2 = draw_utils.utils_view.show_poses3d( fig2 = utils_view.draw_poses3d(poses3D, joint_names_3d, roomparams, camparams)
poses3D, joint_names_3d, roomparams, camparams fig3 = utils_view.draw_many_images(
) sample["imgpaths_color"], [], [], poses2D, joint_names_3d, "2D projections"
fig3 = draw_utils.show_poses2d(
poses2D, np.array(images_2d), joint_names_3d, "2D reprojections"
) )
fig2.savefig(os.path.join(dirpath, "3d-p.png"), dpi=fig2.dpi) fig2.savefig(os.path.join(dirpath, "3d-p.png"), dpi=fig2.dpi)
fig3.savefig(os.path.join(dirpath, "2d-p.png"), dpi=fig3.dpi) fig3.savefig(os.path.join(dirpath, "2d-p.png"), dpi=fig3.dpi)
draw_utils.utils_view.show_plots() utils_view.show_plots()
# ================================================================================================== # ==================================================================================================

2
skelda

Submodule skelda updated: 87caa796fd...0127e9de87

View File

@ -7,7 +7,7 @@ PYTHONL = -Xlinker -export-dynamic
# Default super-target # Default super-target
all: all:
cd ../spt/ && g++ $(FLAGS) -std=c++2a -I/usr/include/opencv4 -c *.cpp ; cd ../swig/ cd ../rpt/ && g++ $(FLAGS) -std=c++2a -I/usr/include/opencv4 -c *.cpp ; cd ../swig/
swig -c++ -python -keyword -o spt_wrap.cxx spt.i swig -c++ -python -keyword -o rpt_wrap.cxx rpt.i
g++ $(FLAGS) $(PYTHONI) -c spt_wrap.cxx -o spt_wrap.o g++ $(FLAGS) $(PYTHONI) -c rpt_wrap.cxx -o rpt_wrap.o
g++ $(FLAGS) $(PYTHONL) -shared ../spt/*.o spt_wrap.o -lopencv_core -lopencv_imgproc -lopencv_calib3d -o _spt.so g++ $(FLAGS) $(PYTHONL) -shared ../rpt/*.o rpt_wrap.o -lopencv_core -lopencv_imgproc -lopencv_calib3d -o _rpt.so

View File

@ -1,8 +1,8 @@
%module spt %module rpt
%{ %{
// Includes the header in the wrapper code // Includes the header in the wrapper code
#include "../spt/camera.hpp" #include "../rpt/camera.hpp"
#include "../spt/interface.hpp" #include "../rpt/interface.hpp"
%} %}
// Some modules need extra imports beside the main .hpp file // Some modules need extra imports beside the main .hpp file
@ -47,8 +47,8 @@ namespace std {
#pragma SWIG nowarn=511 #pragma SWIG nowarn=511
// Parse the header file to generate wrappers // Parse the header file to generate wrappers
%include "../spt/camera.hpp" %include "../rpt/camera.hpp"
%include "../spt/interface.hpp" %include "../rpt/interface.hpp"
// Add additional Python code to the module // Add additional Python code to the module
%pythoncode %{ %pythoncode %{

View File

@ -5,7 +5,7 @@ import time
import numpy as np import numpy as np
sys.path.append("../swig/") sys.path.append("../swig/")
import spt import rpt
# ================================================================================================== # ==================================================================================================
@ -14,7 +14,7 @@ def main():
print("") print("")
# Test camera structure # Test camera structure
camera = spt.Camera() camera = rpt.Camera()
camera.name = "Camera 1" camera.name = "Camera 1"
camera.K = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] camera.K = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
camera.DC = [0, 0, 0, 0, 0] camera.DC = [0, 0, 0, 0, 0]
@ -49,18 +49,18 @@ def main():
"shoulder_middle", "shoulder_middle",
"head", "head",
] ]
cpath = "/SimplePoseTriangulation/data/h1/sample.json" cpath = "/RapidPoseTriangulation/data/h1/sample.json"
ppath = "/SimplePoseTriangulation/tests/poses_h1.json" ppath = "/RapidPoseTriangulation/tests/poses_h1.json"
with open(cpath, "r") as file: with open(cpath, "r") as file:
cdata = json.load(file) cdata = json.load(file)
with open(ppath, "r") as file: with open(ppath, "r") as file:
pdata = json.load(file) pdata = json.load(file)
cams = cdata["cameras"] cams = cdata["cameras"]
poses_2d = pdata["2D"] poses_2d = pdata["2D"]
cameras = spt.convert_cameras(cams) cameras = rpt.convert_cameras(cams)
# Run triangulation # Run triangulation
triangulator = spt.Triangulator(min_score=0.95) triangulator = rpt.Triangulator(min_score=0.95)
stime = time.time() stime = time.time()
poses_3d = triangulator.triangulate_poses( poses_3d = triangulator.triangulate_poses(
poses_2d, cameras, roomparams, joint_names poses_2d, cameras, roomparams, joint_names
@ -71,15 +71,15 @@ def main():
# Load input data # Load input data
roomparams = [[5.6, 6.4, 2.4], [0, -0.5, 1.2]] roomparams = [[5.6, 6.4, 2.4], [0, -0.5, 1.2]]
cpath = "/SimplePoseTriangulation/data/p1/sample.json" cpath = "/RapidPoseTriangulation/data/p1/sample.json"
ppath = "/SimplePoseTriangulation/tests/poses_p1.json" ppath = "/RapidPoseTriangulation/tests/poses_p1.json"
with open(cpath, "r") as file: with open(cpath, "r") as file:
cdata = json.load(file) cdata = json.load(file)
with open(ppath, "r") as file: with open(ppath, "r") as file:
pdata = json.load(file) pdata = json.load(file)
cams = cdata["cameras"] cams = cdata["cameras"]
poses_2d = pdata["2D"] poses_2d = pdata["2D"]
cameras = spt.convert_cameras(cams) cameras = rpt.convert_cameras(cams)
# Run triangulation # Run triangulation
triangulator.reset() triangulator.reset()
@ -102,15 +102,15 @@ def main():
# Load input data # Load input data
roomparams = [[6.0, 5.0, 2.0], [1.5, 1.0, -0.5]] roomparams = [[6.0, 5.0, 2.0], [1.5, 1.0, -0.5]]
cpath = "/SimplePoseTriangulation/data/e1/sample.json" cpath = "/RapidPoseTriangulation/data/e1/sample.json"
ppath = "/SimplePoseTriangulation/tests/poses_e1.json" ppath = "/RapidPoseTriangulation/tests/poses_e1.json"
with open(cpath, "r") as file: with open(cpath, "r") as file:
cdata = json.load(file) cdata = json.load(file)
with open(ppath, "r") as file: with open(ppath, "r") as file:
pdata = json.load(file) pdata = json.load(file)
cams = cdata["cameras"] cams = cdata["cameras"]
poses_2d = pdata["2D"] poses_2d = pdata["2D"]
cameras = spt.convert_cameras(cams) cameras = rpt.convert_cameras(cams)
# Run triangulation # Run triangulation
triangulator.reset() triangulator.reset()