Implemented custom midpoint triangulation.

Daniel
2025-02-28 10:29:25 +01:00
parent 608f89d6b6
commit 0f2d597899
2 changed files with 298 additions and 196 deletions

@@ -412,7 +412,6 @@ std::vector<std::vector<std::array<float, 4>>> TriangulatorInternal::triangulate
// Calculate pair scores
std::vector<std::pair<std::vector<std::array<float, 4>>, float>> all_scored_poses;
all_scored_poses.resize(all_pairs.size());
#pragma omp parallel for
for (size_t i = 0; i < all_pairs.size(); ++i)
{
const auto &pids = all_pairs[i].first;
@@ -479,7 +478,6 @@ std::vector<std::vector<std::array<float, 4>>> TriangulatorInternal::triangulate
// Calculate full 3D poses
std::vector<std::vector<std::array<float, 4>>> all_full_poses;
all_full_poses.resize(all_pairs.size());
#pragma omp parallel for
for (size_t i = 0; i < all_pairs.size(); ++i)
{
const auto &pids = all_pairs[i].first;
@@ -1010,6 +1008,155 @@ std::vector<float> TriangulatorInternal::score_projection(
// =================================================================================================
/* Compute the inverse using the adjugate method */
std::array<std::array<float, 3>, 3> invert3x3(const std::array<std::array<float, 3>, 3> &M)
{
// See: https://scicomp.stackexchange.com/a/29206
std::array<std::array<float, 3>, 3> adj = {
{{
M[1][1] * M[2][2] - M[1][2] * M[2][1],
M[0][2] * M[2][1] - M[0][1] * M[2][2],
M[0][1] * M[1][2] - M[0][2] * M[1][1],
},
{
M[1][2] * M[2][0] - M[1][0] * M[2][2],
M[0][0] * M[2][2] - M[0][2] * M[2][0],
M[0][2] * M[1][0] - M[0][0] * M[1][2],
},
{
M[1][0] * M[2][1] - M[1][1] * M[2][0],
M[0][1] * M[2][0] - M[0][0] * M[2][1],
M[0][0] * M[1][1] - M[0][1] * M[1][0],
}}};
float det = M[0][0] * adj[0][0] + M[0][1] * adj[1][0] + M[0][2] * adj[2][0];
if (std::fabs(det) < 1e-6f)
{
return {{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}};
}
float idet = 1.0f / det;
std::array<std::array<float, 3>, 3> inv = {
{{
adj[0][0] * idet,
adj[0][1] * idet,
adj[0][2] * idet,
},
{
adj[1][0] * idet,
adj[1][1] * idet,
adj[1][2] * idet,
},
{
adj[2][0] * idet,
adj[2][1] * idet,
adj[2][2] * idet,
}}};
return inv;
}
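A quick sanity check for the adjugate-based inverse is to multiply the result back onto the input and compare against the identity. A minimal sketch, not part of this diff, using only the helper above:

// Sketch (illustrative, not in the commit): verify invert3x3 by checking M * M^-1 ≈ I.
static bool check_inverse(const std::array<std::array<float, 3>, 3> &M)
{
    const auto inv = invert3x3(M);
    for (int r = 0; r < 3; ++r)
    {
        for (int c = 0; c < 3; ++c)
        {
            float acc = 0.0f;
            for (int k = 0; k < 3; ++k)
                acc += M[r][k] * inv[k][c];
            const float expected = (r == c) ? 1.0f : 0.0f;
            if (std::fabs(acc - expected) > 1e-4f)
                return false; // also rejects singular M, where invert3x3 returns all zeros
        }
    }
    return true;
}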
std::array<std::array<float, 3>, 3> transpose3x3(const std::array<std::array<float, 3>, 3> &M)
{
return {{{M[0][0], M[1][0], M[2][0]},
{M[0][1], M[1][1], M[2][1]},
{M[0][2], M[1][2], M[2][2]}}};
}
float dot(const std::array<float, 3> &a, const std::array<float, 3> &b)
{
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
std::array<float, 3> cross(const std::array<float, 3> &a, const std::array<float, 3> &b)
{
return {a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]};
}
std::array<float, 3> add(const std::array<float, 3> &a, const std::array<float, 3> &b)
{
return {a[0] + b[0], a[1] + b[1], a[2] + b[2]};
}
std::array<float, 3> subtract(const std::array<float, 3> &a, const std::array<float, 3> &b)
{
return {a[0] - b[0], a[1] - b[1], a[2] - b[2]};
}
std::array<float, 3> multiply(const std::array<float, 3> &a, float s)
{
return {a[0] * s, a[1] * s, a[2] * s};
}
std::array<float, 3> normalize(const std::array<float, 3> &v)
{
float norm = std::sqrt(dot(v, v));
if (norm < 1e-8f)
return v;
return multiply(v, 1.0f / norm);
}
std::array<float, 3> mat_mul_vec(
const std::array<std::array<float, 3>, 3> &M, const std::array<float, 3> &v)
{
std::array<float, 3> res = {M[0][0] * v[0] + M[0][1] * v[1] + M[0][2] * v[2],
M[1][0] * v[0] + M[1][1] * v[1] + M[1][2] * v[2],
M[2][0] * v[0] + M[2][1] * v[1] + M[2][2] * v[2]};
return res;
}
/* Compute camera center and corresponding ray direction */
std::tuple<std::array<float, 3>, std::array<float, 3>> calc_center_and_ray(
const CameraInternal &icam,
const std::array<float, 2> &pt)
{
// Compute Rᵀ and t
auto R_transpose = transpose3x3(icam.cam.R);
std::array<float, 3> t = {icam.cam.T[0][0], icam.cam.T[1][0], icam.cam.T[2][0]};
t = mat_mul_vec(icam.cam.R, multiply(t, -1.0f));
// Camera center: C = -Rᵀ * t
auto C = multiply(mat_mul_vec(R_transpose, t), -1.0f);
// Compute ray direction:
std::array<float, 3> uv1 = {pt[0], pt[1], 1.0f};
auto K_inv = invert3x3(icam.cam.K);
auto d = mat_mul_vec(R_transpose, mat_mul_vec(K_inv, uv1));
auto rayDir = normalize(d);
return std::make_tuple(C, rayDir);
}
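The ray here is the standard pinhole back-projection d = Rᵀ · K⁻¹ · (u, v, 1)ᵀ. A standalone sketch, not part of this diff and using hypothetical intrinsics, showing that the principal point back-projects onto the optical axis:

// Sketch (illustrative): with identity rotation and principal point (320, 240),
// the back-projected ray direction is approximately (0, 0, 1).
static std::array<float, 3> demo_backprojection()
{
    const std::array<std::array<float, 3>, 3> K = {
        {{800.0f, 0.0f, 320.0f},
         {0.0f, 800.0f, 240.0f},
         {0.0f, 0.0f, 1.0f}}};
    const std::array<std::array<float, 3>, 3> R = {
        {{1.0f, 0.0f, 0.0f},
         {0.0f, 1.0f, 0.0f},
         {0.0f, 0.0f, 1.0f}}};
    const std::array<float, 3> uv1 = {320.0f, 240.0f, 1.0f}; // pixel at the principal point
    return normalize(mat_mul_vec(transpose3x3(R), mat_mul_vec(invert3x3(K), uv1)));
}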
/* Triangulate a 3D point from two 2D observations by back-projecting their rays and taking the midpoint of their closest approach */
std::array<float, 3> triangulate_midpoint(
const CameraInternal &icam1,
const CameraInternal &icam2,
const std::array<float, 2> &pt1,
const std::array<float, 2> &pt2)
{
// See: https://en.wikipedia.org/wiki/Skew_lines#Nearest_points
// Obtain the camera centers and ray directions for both views
auto [p1, d1] = calc_center_and_ray(icam1, pt1);
auto [p2, d2] = calc_center_and_ray(icam2, pt2);
// Common perpendicular n and the normals of the planes spanned by each ray and n
std::array<float, 3> n = cross(d1, d2);
std::array<float, 3> n1 = cross(d1, n);
std::array<float, 3> n2 = cross(d2, n);
// Calculate point on Line 1 nearest to Line 2
float t1 = dot(subtract(p2, p1), n2) / dot(d1, n2);
std::array<float, 3> c1 = add(p1, multiply(d1, t1));
// Calculate point on Line 2 nearest to Line 1
float t2 = dot(subtract(p1, p2), n1) / dot(d2, n1);
std::array<float, 3> c2 = add(p2, multiply(d2, t2));
// Compute midpoint between c1 and c2.
std::array<float, 3> midpoint = multiply(add(c1, c2), 0.5);
return midpoint;
}
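The closest-approach construction only needs two centers and ray directions, so it can be exercised without a CameraInternal. A minimal sketch, not part of this diff, on two hand-picked rays that intersect exactly at (0, 0, 1):

// Sketch (illustrative): both rays pass through (0, 0, 1), so the midpoint recovers that point.
static std::array<float, 3> demo_midpoint()
{
    const std::array<float, 3> p1 = {0.0f, 0.0f, 0.0f};
    const std::array<float, 3> d1 = {0.0f, 0.0f, 1.0f};
    const std::array<float, 3> p2 = {1.0f, 0.0f, 0.0f};
    const std::array<float, 3> d2 = normalize({-1.0f, 0.0f, 1.0f});
    const auto n = cross(d1, d2);
    const auto n1 = cross(d1, n);
    const auto n2 = cross(d2, n);
    const auto c1 = add(p1, multiply(d1, dot(subtract(p2, p1), n2) / dot(d1, n2)));
    const auto c2 = add(p2, multiply(d2, dot(subtract(p1, p2), n1) / dot(d2, n1)));
    return multiply(add(c1, c2), 0.5f); // ≈ (0, 0, 1)
}

Note that for (near-)parallel rays dot(d1, n2) and dot(d2, n1) approach zero, so these divisions, like the ones in triangulate_midpoint, degenerate and may need guarding by the caller.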
// =================================================================================================
std::pair<std::vector<std::array<float, 4>>, float> TriangulatorInternal::triangulate_and_score(
const std::vector<std::array<float, 3>> &pose1,
const std::vector<std::array<float, 3>> &pose2,
@@ -1052,63 +1199,18 @@ std::pair<std::vector<std::array<float, 4>>, float> TriangulatorInternal::triang
return std::make_pair(empty, score);
}
// Extract coordinates of visible joints
std::vector<std::array<float, 2>> points1;
std::vector<std::array<float, 2>> points2;
points1.reserve(num_visible);
points2.reserve(num_visible);
for (size_t i = 0; i < num_joints; ++i)
{
if (mask[i])
{
points1.push_back({pose1[i][0], pose1[i][1]});
points2.push_back({pose2[i][0], pose2[i][1]});
}
}
// Convert vectors to mats
cv::Mat points1_mat(2, num_visible, CV_32F);
cv::Mat points2_mat(2, num_visible, CV_32F);
float *p1_ptr = points1_mat.ptr<float>(0);
float *p2_ptr = points2_mat.ptr<float>(0);
for (int i = 0; i < num_visible; ++i)
{
p1_ptr[i + 0 * num_visible] = points1[i][0];
p1_ptr[i + 1 * num_visible] = points1[i][1];
p2_ptr[i + 0 * num_visible] = points2[i][0];
p2_ptr[i + 1 * num_visible] = points2[i][1];
}
// Triangulate points
cv::Mat points4d_h;
cv::triangulatePoints(cam1.P, cam2.P, points1_mat, points2_mat, points4d_h);
// Convert homogeneous coordinates to 3D
std::vector<std::array<float, 3>> points_3d;
points_3d.reserve(num_visible);
const float *p4_ptr = points4d_h.ptr<float>(0);
for (int i = 0; i < points4d_h.cols; ++i)
{
float w = p4_ptr[i + 3 * num_visible];
std::array<float, 3> pt = {
p4_ptr[i + 0 * num_visible] / w,
p4_ptr[i + 1 * num_visible] / w,
p4_ptr[i + 2 * num_visible] / w};
points_3d.push_back(std::move(pt));
}
// Create the 3D pose
// Use midpoint triangulation instead of cv::triangulatePoints because it is much faster,
// while having almost the same accuracy.
std::vector<std::array<float, 4>> pose3d(num_joints, {0.0, 0.0, 0.0, 0.0});
int idx = 0;
for (size_t i = 0; i < num_joints; ++i)
{
if (mask[i])
{
pose3d[i][0] = points_3d[idx][0];
pose3d[i][1] = points_3d[idx][1];
pose3d[i][2] = points_3d[idx][2];
pose3d[i][3] = 1.0;
++idx;
auto &pt1 = pose1[i];
auto &pt2 = pose2[i];
std::array<float, 3> pt3d = triangulate_midpoint(
cam1, cam2, {pt1[0], pt1[1]}, {pt2[0], pt2[1]});
pose3d[i] = {pt3d[0], pt3d[1], pt3d[2], 1.0};
}
}
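Since the DLT path above was removed in favour of the midpoint method, a per-joint cross-check is an easy way to confirm the accuracy remark during development. A sketch, not part of this diff, assuming CameraInternal still exposes the projection matrix P used by the removed code and that the OpenCV headers remain included:

// Sketch (illustrative): DLT triangulation of a single joint for comparison with
// triangulate_midpoint(cam1, cam2, pt1, pt2).
static std::array<float, 3> triangulate_dlt_single(
    const CameraInternal &cam1,
    const CameraInternal &cam2,
    const std::array<float, 2> &pt1,
    const std::array<float, 2> &pt2)
{
    cv::Mat p1 = (cv::Mat_<float>(2, 1) << pt1[0], pt1[1]);
    cv::Mat p2 = (cv::Mat_<float>(2, 1) << pt2[0], pt2[1]);
    cv::Mat x_h;
    cv::triangulatePoints(cam1.P, cam2.P, p1, p2, x_h); // P assumed, as in the removed code
    const float w = x_h.at<float>(3, 0);
    return {x_h.at<float>(0, 0) / w,
            x_h.at<float>(1, 0) / w,
            x_h.at<float>(2, 0) / w};
}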