13 Commits

86 changed files with 3252 additions and 35950 deletions
+1
View File
@@ -1,4 +1,5 @@
rpt_wrap.*
rptPYTHON_wrap.*
rpt.py
*.bin
-3
View File
@@ -1,3 +0,0 @@
[submodule "skelda"]
path = skelda
url = https://gitlab.com/Percipiote/skelda.git
+17
View File
@@ -0,0 +1,17 @@
# Must be the first command: it sets policy defaults. The range form keeps
# 3.18 as the hard minimum while opting in to new-policy behavior up to the
# newest CMake this project has been exercised with.
cmake_minimum_required(VERSION 3.18...3.29)
project(RapidPoseTriangulation
    VERSION 0.1.0
    LANGUAGES CXX
    DESCRIPTION "Rapid Pose Triangulation library with Python bindings"
)
# Global settings
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF) # -std=c++23 instead of -std=gnu++23
set(CMAKE_EXPORT_COMPILE_COMMANDS ON) # emit compile_commands.json for tooling
# Add subdirectories
add_subdirectory(rpt)      # core C++ library (provides target rpt_core)
add_subdirectory(bindings) # nanobind Python extension (rpt._core)
+6 -51
View File
@@ -16,10 +16,10 @@ A general overview can be found in the paper [RapidPoseTriangulation: Multi-view
## Build
- Clone this project with submodules:
- Clone this project:
```bash
git clone --recurse-submodules https://gitlab.com/Percipiote/RapidPoseTriangulation.git
git clone https://gitlab.com/Percipiote/RapidPoseTriangulation.git
cd RapidPoseTriangulation/
```
@@ -53,57 +53,12 @@ A general overview can be found in the paper [RapidPoseTriangulation: Multi-view
- Build triangulator:
```bash
cd /RapidPoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py && cd ..
cd /RapidPoseTriangulation/scripts/ && \
g++ -std=c++2a -fPIC -O3 -march=native -Wall -Werror -flto=auto \
-I /RapidPoseTriangulation/rpt/ \
-isystem /usr/include/opencv4/ \
-isystem /onnxruntime/include/ \
-isystem /onnxruntime/include/onnxruntime/core/session/ \
-isystem /onnxruntime/include/onnxruntime/core/providers/tensorrt/ \
-L /onnxruntime/build/Linux/Release/ \
test_skelda_dataset.cpp \
/RapidPoseTriangulation/rpt/*.cpp \
-o test_skelda_dataset.bin \
-Wl,--start-group \
-lonnxruntime_providers_tensorrt \
-lonnxruntime_providers_shared \
-lonnxruntime_providers_cuda \
-lonnxruntime \
-Wl,--end-group \
$(pkg-config --libs opencv4) \
-Wl,-rpath,/onnxruntime/build/Linux/Release/ \
&& cd ..
cd /RapidPoseTriangulation/
uv sync --group dev
uv run pytest tests/test_interface.py
uv build
```
- Download _ONNX_ models from [model registry](https://gitlab.com/Percipiote/RapidPoseTriangulation/-/ml/models) and save them to `mmdeploy/extras/exports/`.
Upon the first usage, they will be converted to _TensorRT_ models, which will take a few minutes. \
(Note that this conversion is not deterministic and will each time result in slightly different models and therefore also slightly different benchmark results.)
- Test with samples:
```bash
python3 /RapidPoseTriangulation/scripts/test_triangulate.py
```
- Test with [skelda](https://gitlab.com/Percipiote/skelda/) dataset:
```bash
export CUDA_VISIBLE_DEVICES=0
python3 /RapidPoseTriangulation/scripts/test_skelda_dataset.py
```
<br>
## Extras
- Exporting tools for 2D models are at [mmdeploy](extras/mmdeploy/README.md) directory.
- For usage in combination with ROS2 see [ros](extras/ros/README.md) directory.
- Running on a Nvidia Jetson is also possible following [jetson](extras/jetson/README.md) directory.
<br>
## Citation
+45
View File
@@ -0,0 +1,45 @@
# Python used to build (and later load) the extension module
find_package(Python 3.10 REQUIRED COMPONENTS Interpreter Development.Module)
# Resolve nanobind's CMake package directory from the active Python
# environment unless the caller supplied -Dnanobind_DIR explicitly.
if(NOT nanobind_DIR)
    execute_process(
        COMMAND "${Python_EXECUTABLE}" -c "import nanobind; print(nanobind.cmake_dir())"
        OUTPUT_VARIABLE nanobind_DIR
        OUTPUT_STRIP_TRAILING_WHITESPACE
        RESULT_VARIABLE nanobind_dir_result
    )
    if(NOT nanobind_dir_result EQUAL 0 OR NOT nanobind_DIR)
        message(FATAL_ERROR "Failed to resolve nanobind CMake directory from Python.")
    endif()
endif()
find_package(nanobind CONFIG REQUIRED)
# Assemble the importable rpt/ package layout inside the build tree
set(RPT_PYTHON_PACKAGE_DIR "${CMAKE_CURRENT_BINARY_DIR}/rpt")
file(MAKE_DIRECTORY "${RPT_PYTHON_PACKAGE_DIR}")
configure_file("${PROJECT_SOURCE_DIR}/src/rpt/__init__.py" "${RPT_PYTHON_PACKAGE_DIR}/__init__.py" COPYONLY)
configure_file("${PROJECT_SOURCE_DIR}/src/rpt/__init__.pyi" "${RPT_PYTHON_PACKAGE_DIR}/__init__.pyi" COPYONLY)
configure_file("${PROJECT_SOURCE_DIR}/src/rpt/_helpers.py" "${RPT_PYTHON_PACKAGE_DIR}/_helpers.py" COPYONLY)
configure_file("${PROJECT_SOURCE_DIR}/src/rpt/py.typed" "${RPT_PYTHON_PACKAGE_DIR}/py.typed" COPYONLY)
# The compiled extension is emitted into the package as rpt/_core.<abi>.so
nanobind_add_module(rpt_core_ext "${CMAKE_CURRENT_SOURCE_DIR}/rpt_module.cpp")
set_target_properties(rpt_core_ext PROPERTIES
    OUTPUT_NAME "_core"
    LIBRARY_OUTPUT_DIRECTORY "${RPT_PYTHON_PACKAGE_DIR}"
)
target_link_libraries(rpt_core_ext PRIVATE rpt_core)
target_include_directories(rpt_core_ext PRIVATE
    "${PROJECT_SOURCE_DIR}/rpt"
)
# Generate the .pyi type stub from the built extension module
nanobind_add_stub(rpt_core_stub
    MODULE rpt._core
    OUTPUT "${RPT_PYTHON_PACKAGE_DIR}/_core.pyi"
    PYTHON_PATH "${CMAKE_CURRENT_BINARY_DIR}"
    DEPENDS rpt_core_ext
)
# Install the extension and the type stubs into the rpt/ package
install(TARGETS rpt_core_ext LIBRARY DESTINATION rpt)
install(FILES "${RPT_PYTHON_PACKAGE_DIR}/__init__.pyi" DESTINATION rpt)
install(FILES "${RPT_PYTHON_PACKAGE_DIR}/_core.pyi" DESTINATION rpt)
+454
View File
@@ -0,0 +1,454 @@
#include <algorithm>
#include <array>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <vector>

#include <nanobind/nanobind.h>
#include <nanobind/ndarray.h>
#include <nanobind/stl/array.h>
#include <nanobind/stl/string.h>
#include <nanobind/stl/vector.h>

#include "interface.hpp"
namespace nb = nanobind;
using namespace nb::literals;
namespace
{
using PoseArray2D =
nb::ndarray<nb::numpy, const float, nb::shape<-1, -1, -1, 3>, nb::c_contig>;
using CountArray = nb::ndarray<nb::numpy, const uint32_t, nb::shape<-1>, nb::c_contig>;
using TrackIdArray = nb::ndarray<nb::numpy, const int64_t, nb::shape<-1>, nb::c_contig>;
using PoseArray3DConst =
nb::ndarray<nb::numpy, const float, nb::shape<-1, -1, 4>, nb::c_contig>;
using PoseArray3D = nb::ndarray<nb::numpy, float, nb::shape<-1, -1, 4>, nb::c_contig>;
using PoseArray2DOut = nb::ndarray<nb::numpy, float, nb::shape<-1, 4>, nb::c_contig>;
// Wrap a numpy (views, persons, joints, 3) batch plus the per-view person
// counts as a non-owning PoseBatch2DView, validating that the arrays agree.
PoseBatch2DView pose_batch_view_from_numpy(const PoseArray2D &poses_2d, const CountArray &person_counts)
{
    if (poses_2d.shape(0) != person_counts.shape(0))
    {
        throw std::invalid_argument("poses_2d and person_counts must have the same number of views.");
    }
    const size_t num_views = static_cast<size_t>(person_counts.shape(0));
    for (size_t view = 0; view < num_views; ++view)
    {
        // Each count addresses rows inside the zero-padded person dimension.
        if (person_counts(view) > poses_2d.shape(1))
        {
            throw std::invalid_argument("person_counts entries must not exceed the padded person dimension.");
        }
    }
    return PoseBatch2DView {
        poses_2d.data(),
        person_counts.data(),
        static_cast<size_t>(poses_2d.shape(0)),
        static_cast<size_t>(poses_2d.shape(1)),
        static_cast<size_t>(poses_2d.shape(2)),
    };
}
// Wrap a numpy (persons, joints, 4) pose array as a non-owning PoseBatch3DView.
PoseBatch3DView pose_batch3d_view_from_numpy(const PoseArray3DConst &poses_3d)
{
    const auto num_persons = static_cast<size_t>(poses_3d.shape(0));
    const auto num_joints = static_cast<size_t>(poses_3d.shape(1));
    return PoseBatch3DView{poses_3d.data(), num_persons, num_joints};
}
// Combine previous 3D poses with their track ids into a non-owning view.
// Both arrays must describe the same number of tracks.
TrackedPoseBatch3DView tracked_pose_batch_view_from_numpy(
    const PoseArray3DConst &poses_3d,
    const TrackIdArray &track_ids)
{
    const bool counts_match = poses_3d.shape(0) == track_ids.shape(0);
    if (!counts_match)
    {
        throw std::invalid_argument(
            "previous_poses_3d and previous_track_ids must have the same number of tracks.");
    }
    return TrackedPoseBatch3DView{
        track_ids.data(),
        poses_3d.data(),
        static_cast<size_t>(poses_3d.shape(0)),
        static_cast<size_t>(poses_3d.shape(1)),
    };
}
// Move the batch's flat float storage to the heap and hand ownership to
// numpy via a capsule, so no copy of the pose data is made. The capsule's
// deleter frees the vector when the last array referencing it is collected.
PoseArray3D pose_batch_to_numpy(PoseBatch3D batch)
{
    // NOTE: storage->data() must be read from the heap vector (not batch.data,
    // which was just moved-from) when constructing the ndarray below.
    auto *storage = new std::vector<float>(std::move(batch.data));
    nb::capsule owner(storage, [](void *value) noexcept
    {
        delete static_cast<std::vector<float> *>(value);
    });
    const size_t shape[3] = {batch.num_persons, batch.num_joints, 4};
    return PoseArray3D(storage->data(), 3, shape, owner);
}
// Copying variant: duplicates the batch so the caller keeps its data intact,
// then reuses the move-based conversion above.
PoseArray3D pose_batch_to_numpy_copy(const PoseBatch3D &batch)
{
    PoseBatch3D duplicate = batch;
    return pose_batch_to_numpy(std::move(duplicate));
}
// Copy a list of 4-float rows into a freshly-allocated (N, 4) numpy array.
// The heap vector backing the array is owned by a capsule; numpy frees it
// when the array is garbage collected.
PoseArray2DOut pose_rows_to_numpy_copy(const std::vector<std::array<float, 4>> &rows)
{
    // unique_ptr keeps the buffer exception-safe: the original raw `new`
    // leaked if capsule or ndarray construction threw before returning.
    auto storage = std::make_unique<std::vector<float>>(rows.size() * 4, 0.0f);
    float *dst = storage->data();
    for (const auto &row : rows)
    {
        dst = std::copy(row.begin(), row.end(), dst);
    }
    nb::capsule owner(storage.get(), [](void *value) noexcept
    {
        delete static_cast<std::vector<float> *>(value);
    });
    std::vector<float> *raw = storage.release(); // capsule now owns the buffer
    const size_t shape[2] = {rows.size(), 4};
    return PoseArray2DOut(raw->data(), 2, shape, owner);
}
// Flatten merged poses into a dense (num_poses, num_joints, 4) numpy array.
// Every pose must have the same joint count as the first one; otherwise
// std::invalid_argument is thrown. The backing buffer is capsule-owned.
PoseArray3D merged_poses_to_numpy_copy(const std::vector<std::vector<std::array<float, 4>>> &poses)
{
    const size_t num_poses = poses.size();
    const size_t num_joints = poses.empty() ? 0 : poses[0].size();
    // Validate up front so no allocation needs manual cleanup on failure
    // (the original deleted a raw buffer by hand before throwing).
    for (const auto &pose : poses)
    {
        if (pose.size() != num_joints)
        {
            throw std::invalid_argument("Merged poses must use a consistent joint count.");
        }
    }
    auto storage = std::make_unique<std::vector<float>>(num_poses * num_joints * 4, 0.0f);
    float *dst = storage->data();
    for (const auto &pose : poses)
    {
        for (const auto &joint : pose)
        {
            dst = std::copy(joint.begin(), joint.end(), dst);
        }
    }
    nb::capsule owner(storage.get(), [](void *value) noexcept
    {
        delete static_cast<std::vector<float> *>(value);
    });
    std::vector<float> *raw = storage.release(); // capsule now owns the buffer
    const size_t shape[3] = {num_poses, num_joints, 4};
    return PoseArray3D(raw->data(), 3, shape, owner);
}
} // namespace
// Python module definition for rpt._core: exposes the triangulation types
// (cameras, configuration, debug traces) and the free functions that wrap
// the C++ interface behind numpy-array views.
NB_MODULE(_core, m)
{
    nb::enum_<CameraModel>(m, "CameraModel")
        .value("PINHOLE", CameraModel::Pinhole)
        .value("FISHEYE", CameraModel::Fisheye);
    // Camera is exposed read-only; instances are created via make_camera().
    nb::class_<Camera>(m, "Camera")
        .def_prop_ro("name", [](const Camera &camera)
        {
            return camera.name;
        })
        .def_prop_ro("K", [](const Camera &camera)
        {
            return camera.K;
        })
        .def_prop_ro("DC", [](const Camera &camera)
        {
            return camera.DC;
        })
        .def_prop_ro("R", [](const Camera &camera)
        {
            return camera.R;
        })
        .def_prop_ro("T", [](const Camera &camera)
        {
            return camera.T;
        })
        .def_prop_ro("width", [](const Camera &camera)
        {
            return camera.width;
        })
        .def_prop_ro("height", [](const Camera &camera)
        {
            return camera.height;
        })
        .def_prop_ro("model", [](const Camera &camera)
        {
            return camera.model;
        })
        .def_prop_ro("invR", [](const Camera &camera)
        {
            return camera.invR;
        })
        .def_prop_ro("center", [](const Camera &camera)
        {
            return camera.center;
        })
        .def_prop_ro("newK", [](const Camera &camera)
        {
            return camera.newK;
        })
        .def_prop_ro("invK", [](const Camera &camera)
        {
            return camera.invK;
        })
        .def("__repr__", [](const Camera &camera)
        {
            return camera.to_string();
        });
    // Configuration objects (mutable from Python)
    nb::class_<TriangulationOptions>(m, "TriangulationOptions")
        .def(nb::init<>())
        .def_rw("min_match_score", &TriangulationOptions::min_match_score)
        .def_rw("min_group_size", &TriangulationOptions::min_group_size);
    nb::class_<TriangulationConfig>(m, "TriangulationConfig")
        .def(nb::init<>())
        .def_rw("cameras", &TriangulationConfig::cameras)
        .def_rw("roomparams", &TriangulationConfig::roomparams)
        .def_rw("joint_names", &TriangulationConfig::joint_names)
        .def_rw("options", &TriangulationConfig::options);
    // Debug/trace structures; fields mirror the C++ structs one-to-one.
    nb::class_<PairCandidate>(m, "PairCandidate")
        .def(nb::init<>())
        .def_rw("view1", &PairCandidate::view1)
        .def_rw("view2", &PairCandidate::view2)
        .def_rw("person1", &PairCandidate::person1)
        .def_rw("person2", &PairCandidate::person2)
        .def_rw("global_person1", &PairCandidate::global_person1)
        .def_rw("global_person2", &PairCandidate::global_person2);
    nb::class_<PreviousPoseMatch>(m, "PreviousPoseMatch")
        .def(nb::init<>())
        .def_rw("previous_pose_index", &PreviousPoseMatch::previous_pose_index)
        .def_rw("previous_track_id", &PreviousPoseMatch::previous_track_id)
        .def_rw("score_view1", &PreviousPoseMatch::score_view1)
        .def_rw("score_view2", &PreviousPoseMatch::score_view2)
        .def_rw("matched_view1", &PreviousPoseMatch::matched_view1)
        .def_rw("matched_view2", &PreviousPoseMatch::matched_view2)
        .def_rw("kept", &PreviousPoseMatch::kept)
        .def_rw("decision", &PreviousPoseMatch::decision);
    nb::class_<PreviousPoseFilterDebug>(m, "PreviousPoseFilterDebug")
        .def(nb::init<>())
        .def_rw("used_previous_poses", &PreviousPoseFilterDebug::used_previous_poses)
        .def_rw("matches", &PreviousPoseFilterDebug::matches)
        .def_rw("kept_pair_indices", &PreviousPoseFilterDebug::kept_pair_indices)
        .def_rw("kept_pairs", &PreviousPoseFilterDebug::kept_pairs);
    // pose_3d properties below return fresh numpy copies (rv_policy::move
    // transfers the temporary array without an extra copy).
    nb::class_<CoreProposalDebug>(m, "CoreProposalDebug")
        .def(nb::init<>())
        .def_rw("pair_index", &CoreProposalDebug::pair_index)
        .def_rw("pair", &CoreProposalDebug::pair)
        .def_rw("score", &CoreProposalDebug::score)
        .def_rw("kept", &CoreProposalDebug::kept)
        .def_rw("drop_reason", &CoreProposalDebug::drop_reason)
        .def_prop_ro("pose_3d", [](const CoreProposalDebug &proposal)
        {
            return pose_rows_to_numpy_copy(proposal.pose_3d);
        }, nb::rv_policy::move);
    nb::class_<ProposalGroupDebug>(m, "ProposalGroupDebug")
        .def(nb::init<>())
        .def_rw("center", &ProposalGroupDebug::center)
        .def_rw("proposal_indices", &ProposalGroupDebug::proposal_indices)
        .def_prop_ro("pose_3d", [](const ProposalGroupDebug &group)
        {
            return pose_rows_to_numpy_copy(group.pose_3d);
        }, nb::rv_policy::move);
    nb::class_<GroupingDebug>(m, "GroupingDebug")
        .def(nb::init<>())
        .def_rw("initial_groups", &GroupingDebug::initial_groups)
        .def_rw("duplicate_pair_drops", &GroupingDebug::duplicate_pair_drops)
        .def_rw("groups", &GroupingDebug::groups);
    nb::class_<FullProposalDebug>(m, "FullProposalDebug")
        .def(nb::init<>())
        .def_rw("source_core_proposal_index", &FullProposalDebug::source_core_proposal_index)
        .def_rw("pair", &FullProposalDebug::pair)
        .def_prop_ro("pose_3d", [](const FullProposalDebug &proposal)
        {
            return pose_rows_to_numpy_copy(proposal.pose_3d);
        }, nb::rv_policy::move);
    nb::class_<MergeDebug>(m, "MergeDebug")
        .def(nb::init<>())
        .def_rw("group_proposal_indices", &MergeDebug::group_proposal_indices)
        .def_prop_ro("merged_poses", [](const MergeDebug &merge)
        {
            return merged_poses_to_numpy_copy(merge.merged_poses);
        }, nb::rv_policy::move);
    nb::enum_<AssociationStatus>(m, "AssociationStatus")
        .value("MATCHED", AssociationStatus::Matched)
        .value("NEW", AssociationStatus::New)
        .value("AMBIGUOUS", AssociationStatus::Ambiguous);
    nb::class_<AssociationReport>(m, "AssociationReport")
        .def(nb::init<>())
        .def_rw("pose_previous_indices", &AssociationReport::pose_previous_indices)
        .def_rw("pose_previous_track_ids", &AssociationReport::pose_previous_track_ids)
        .def_rw("pose_status", &AssociationReport::pose_status)
        .def_rw("pose_candidate_previous_indices", &AssociationReport::pose_candidate_previous_indices)
        .def_rw("pose_candidate_previous_track_ids", &AssociationReport::pose_candidate_previous_track_ids)
        .def_rw("unmatched_previous_indices", &AssociationReport::unmatched_previous_indices)
        .def_rw("unmatched_previous_track_ids", &AssociationReport::unmatched_previous_track_ids)
        .def_rw("new_pose_indices", &AssociationReport::new_pose_indices)
        .def_rw("ambiguous_pose_indices", &AssociationReport::ambiguous_pose_indices);
    nb::class_<FinalPoseAssociationDebug>(m, "FinalPoseAssociationDebug")
        .def(nb::init<>())
        .def_rw("final_pose_index", &FinalPoseAssociationDebug::final_pose_index)
        .def_rw("source_core_proposal_indices", &FinalPoseAssociationDebug::source_core_proposal_indices)
        .def_rw("source_pair_indices", &FinalPoseAssociationDebug::source_pair_indices)
        .def_rw("candidate_previous_indices", &FinalPoseAssociationDebug::candidate_previous_indices)
        .def_rw("candidate_previous_track_ids", &FinalPoseAssociationDebug::candidate_previous_track_ids)
        .def_rw("resolved_previous_index", &FinalPoseAssociationDebug::resolved_previous_index)
        .def_rw("resolved_previous_track_id", &FinalPoseAssociationDebug::resolved_previous_track_id)
        .def_rw("status", &FinalPoseAssociationDebug::status);
    nb::class_<TriangulationTrace>(m, "TriangulationTrace")
        .def(nb::init<>())
        .def_rw("pairs", &TriangulationTrace::pairs)
        .def_rw("previous_filter", &TriangulationTrace::previous_filter)
        .def_rw("core_proposals", &TriangulationTrace::core_proposals)
        .def_rw("grouping", &TriangulationTrace::grouping)
        .def_rw("full_proposals", &TriangulationTrace::full_proposals)
        .def_rw("merge", &TriangulationTrace::merge)
        .def_rw("association", &TriangulationTrace::association)
        .def_rw("final_pose_associations", &TriangulationTrace::final_pose_associations)
        .def_prop_ro("final_poses", [](const TriangulationTrace &trace)
        {
            return pose_batch_to_numpy_copy(trace.final_poses);
        }, nb::rv_policy::move);
    nb::class_<TriangulationResult>(m, "TriangulationResult")
        .def(nb::init<>())
        .def_rw("association", &TriangulationResult::association)
        .def_prop_ro("poses_3d", [](const TriangulationResult &result)
        {
            return pose_batch_to_numpy_copy(result.poses);
        }, nb::rv_policy::move);
    // Free functions: all take numpy arrays and convert them to the
    // non-owning C++ views defined in the anonymous namespace above.
    m.def(
        "make_camera",
        &make_camera,
        "name"_a,
        "K"_a,
        "DC"_a,
        "R"_a,
        "T"_a,
        "width"_a,
        "height"_a,
        "model"_a);
    m.def(
        "build_pair_candidates",
        [](const PoseArray2D &poses_2d, const CountArray &person_counts)
        {
            return build_pair_candidates(pose_batch_view_from_numpy(poses_2d, person_counts));
        },
        "poses_2d"_a,
        "person_counts"_a);
    m.def(
        "filter_pairs_with_previous_poses",
        [](const PoseArray2D &poses_2d,
           const CountArray &person_counts,
           const TriangulationConfig &config,
           const PoseArray3DConst &previous_poses_3d,
           const TrackIdArray &previous_track_ids)
        {
            return filter_pairs_with_previous_poses(
                pose_batch_view_from_numpy(poses_2d, person_counts),
                config,
                tracked_pose_batch_view_from_numpy(previous_poses_3d, previous_track_ids));
        },
        "poses_2d"_a,
        "person_counts"_a,
        "config"_a,
        "previous_poses_3d"_a,
        "previous_track_ids"_a);
    // Overload without previous poses
    m.def(
        "triangulate_debug",
        [](const PoseArray2D &poses_2d,
           const CountArray &person_counts,
           const TriangulationConfig &config)
        {
            return triangulate_debug(pose_batch_view_from_numpy(poses_2d, person_counts), config);
        },
        "poses_2d"_a,
        "person_counts"_a,
        "config"_a);
    // Overload with previous tracked poses for temporal filtering
    m.def(
        "triangulate_debug",
        [](const PoseArray2D &poses_2d,
           const CountArray &person_counts,
           const TriangulationConfig &config,
           const PoseArray3DConst &previous_poses_3d,
           const TrackIdArray &previous_track_ids)
        {
            const TrackedPoseBatch3DView previous_view =
                tracked_pose_batch_view_from_numpy(previous_poses_3d, previous_track_ids);
            return triangulate_debug(
                pose_batch_view_from_numpy(poses_2d, person_counts),
                config,
                &previous_view);
        },
        "poses_2d"_a,
        "person_counts"_a,
        "config"_a,
        "previous_poses_3d"_a,
        "previous_track_ids"_a);
    m.def(
        "triangulate_poses",
        [](const PoseArray2D &poses_2d,
           const CountArray &person_counts,
           const TriangulationConfig &config)
        {
            const PoseBatch3D poses_3d =
                triangulate_poses(pose_batch_view_from_numpy(poses_2d, person_counts), config);
            return pose_batch_to_numpy(poses_3d);
        },
        "poses_2d"_a,
        "person_counts"_a,
        "config"_a);
    m.def(
        "triangulate_with_report",
        [](const PoseArray2D &poses_2d,
           const CountArray &person_counts,
           const TriangulationConfig &config,
           const PoseArray3DConst &previous_poses_3d,
           const TrackIdArray &previous_track_ids)
        {
            const TriangulationResult result = triangulate_with_report(
                pose_batch_view_from_numpy(poses_2d, person_counts),
                config,
                tracked_pose_batch_view_from_numpy(previous_poses_3d, previous_track_ids));
            return result;
        },
        "poses_2d"_a,
        "person_counts"_a,
        "config"_a,
        "previous_poses_3d"_a,
        "previous_track_ids"_a);
}
-326
View File
@@ -1,326 +0,0 @@
import json
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skelda import utils_pose, utils_view
# ==================================================================================================
filepath = os.path.dirname(os.path.realpath(__file__)) + "/"
core_triangs = [
[
[0.287, -0.282, 1.264, 1.000],
[0.504, -0.052, 1.272, 1.000],
[0.276, -0.160, 0.764, 1.000],
[0.443, -0.099, 0.768, 1.000],
[0.258, -0.313, 0.999, 1.000],
[0.513, -0.009, 1.008, 1.000],
[0.204, -0.126, 0.439, 1.000],
[0.422, -0.132, 0.436, 1.000],
[0.195, -0.265, 0.807, 1.000],
[0.415, 0.039, 0.823, 1.000],
[0.113, -0.103, 0.096, 1.000],
[0.389, -0.175, 0.097, 1.000],
],
[
[0.322, -0.192, 1.349, 1.000],
[0.268, -0.594, 1.336, 1.000],
[0.272, -0.100, 0.882, 1.000],
[0.281, -0.379, 0.870, 1.000],
[0.336, -0.104, 1.124, 1.000],
[0.249, -0.578, 1.089, 1.000],
[0.229, 0.009, 0.571, 1.000],
[0.269, -0.345, 0.553, 1.000],
[0.289, -0.016, 0.951, 1.000],
[0.216, -0.327, 0.908, 1.000],
[0.188, 0.128, 0.268, 1.000],
[0.267, -0.273, 0.243, 1.000],
],
[
[0.865, 1.058, 1.613, 1.000],
[0.862, 0.870, 1.604, 1.000],
[0.927, 1.562, 1.491, 1.000],
[0.954, 1.505, 1.486, 1.000],
[0.908, 1.309, 1.542, 1.000],
[0.905, 1.170, 1.525, 1.000],
[0.968, 1.911, 1.454, 1.000],
[1.019, 1.919, 1.457, 1.000],
[0.921, 1.542, 1.514, 1.000],
[0.931, 1.539, 1.506, 1.000],
[1.008, 2.230, 1.455, 1.000],
[1.071, 2.271, 1.460, 1.000],
],
[
[-0.260, 0.789, 1.316, 1.000],
[0.039, 1.073, 1.322, 1.000],
[-0.236, 0.798, 0.741, 1.000],
[-0.048, 0.952, 0.759, 1.000],
[-0.315, 0.734, 0.995, 1.000],
[0.080, 1.026, 1.046, 1.000],
[-0.291, 0.721, 0.339, 1.000],
[-0.101, 0.887, 0.366, 1.000],
[-0.300, 0.600, 0.742, 1.000],
[0.066, 0.768, 0.897, 1.000],
[-0.381, 0.685, -0.113, 1.000],
[-0.169, 0.775, -0.040, 1.000],
],
[
[-0.199, 0.854, 1.414, 1.000],
[-0.401, 0.566, 1.409, 1.000],
[-0.242, 0.818, 0.870, 1.000],
[-0.343, 0.654, 0.856, 1.000],
[-0.176, 0.903, 1.140, 1.000],
[-0.398, 0.480, 1.132, 1.000],
[-0.245, 0.812, 0.492, 1.000],
[-0.380, 0.642, 0.471, 1.000],
[-0.145, 0.817, 0.912, 1.000],
[-0.251, 0.396, 0.973, 1.000],
[-0.255, 0.879, 0.107, 1.000],
[-0.383, 0.633, 0.116, 1.000],
],
[
[0.641, 1.796, 1.681, 1.000],
[0.603, 1.719, 1.680, 1.000],
[0.711, 2.000, 1.518, 1.000],
[0.706, 1.970, 1.515, 1.000],
[0.689, 1.920, 1.588, 1.000],
[0.651, 1.784, 1.585, 1.000],
[0.786, 2.190, 1.448, 1.000],
[0.780, 2.167, 1.444, 1.000],
[0.747, 1.994, 1.531, 1.000],
[0.720, 1.783, 1.546, 1.000],
[0.868, 2.432, 1.427, 1.000],
[0.849, 2.341, 1.410, 1.000],
],
]
core_joints = [
"shoulder_left",
"shoulder_right",
"hip_left",
"hip_right",
"elbow_left",
"elbow_right",
"knee_left",
"knee_right",
"wrist_left",
"wrist_right",
"ankle_left",
"ankle_right",
]
poses_2d = [
[
[
[383.443, 144.912, 0.923],
[382.629, 135.143, 0.83],
[374.488, 134.329, 1.0],
[349.251, 136.771, 0.478],
[343.552, 139.213, 1.0],
[356.578, 201.899, 0.73],
[323.2, 201.899, 0.825],
[357.392, 282.494, 0.663],
[324.014, 289.821, 0.854],
[378.558, 339.481, 0.621],
[355.764, 356.577, 0.821],
[370.417, 357.391, 0.714],
[332.155, 359.834, 0.71],
[391.584, 452.641, 0.768],
[331.341, 458.34, 0.789],
[414.379, 547.076, 0.864],
[332.969, 550.333, 0.9],
[351.286, 358.613, 0.71],
[339.889, 201.899, 0.73],
[346.402, 137.992, 0.478],
],
[
[640.948, 116.443, 0.908],
[650.057, 100.249, 0.788],
[642.972, 100.249, 0.681],
[682.445, 103.285, 0.862],
[684.469, 100.249, 0.518],
[707.748, 181.219, 0.836],
[693.578, 180.207, 0.705],
[702.688, 290.528, 0.867],
[664.227, 276.358, 0.786],
[662.202, 375.547, 0.847],
[605.523, 319.88, 0.881],
[692.566, 373.522, 0.758],
[679.409, 372.51, 0.739],
[679.409, 500.038, 0.844],
[672.324, 494.978, 0.837],
[679.409, 635.663, 0.913],
[659.166, 599.226, 0.894],
[685.987, 373.016, 0.739],
[700.663, 180.713, 0.705],
[683.457, 101.767, 0.518],
],
],
[
[
[495.125, 304.671, 0.581],
[492.338, 301.885, 0.462],
[502.091, 301.885, 0.295],
[495.125, 308.851, 0.92],
[528.562, 306.064, 0.754],
[477.013, 359.703, 0.822],
[557.819, 355.523, 0.855],
[466.564, 431.452, 0.855],
[565.481, 425.879, 0.836],
[458.902, 480.911, 0.85],
[544.583, 464.889, 0.596],
[491.642, 490.663, 0.741],
[539.707, 492.056, 0.746],
[480.496, 569.379, 0.779],
[531.348, 577.041, 0.784],
[464.475, 646.005, 0.872],
[518.809, 661.33, 0.913],
[515.675, 491.36, 0.741],
[517.416, 357.613, 0.822],
[511.843, 307.458, 0.754],
],
[
[472.982, 273.983, 0.911],
[477.875, 266.645, 0.848],
[464.421, 266.645, 0.896],
[483.378, 268.48, 0.599],
[448.521, 268.48, 0.88],
[493.163, 308.841, 0.753],
[425.894, 311.899, 0.837],
[502.336, 363.268, 0.625],
[417.944, 368.16, 0.847],
[499.278, 407.91, 0.495],
[438.736, 410.357, 0.844],
[484.602, 426.868, 0.682],
[448.521, 427.48, 0.681],
[485.825, 504.534, 0.745],
[441.794, 505.757, 0.796],
[489.494, 571.803, 0.688],
[442.405, 577.918, 0.867],
[466.561, 427.174, 0.681],
[459.528, 310.37, 0.753],
[465.95, 268.48, 0.599],
],
[
[702.349, 208.747, 0.215],
[705.862, 207.944, 0.212],
[700.341, 207.944, 0.209],
[708.472, 196.399, 0.182],
[699.538, 196.299, 0.193],
[708.071, 196.7, 0.194],
[696.927, 196.098, 0.22],
[709.175, 206.137, 0.191],
[696.526, 206.94, 0.188],
[707.368, 210.052, 0.128],
[699.738, 209.751, 0.145],
[704.658, 215.172, 0.172],
[701.445, 215.273, 0.168],
[705.159, 224.007, 0.179],
[703.654, 225.211, 0.185],
[705.059, 225.914, 0.23],
[704.457, 230.532, 0.241],
[703.051, 215.223, 0.168],
[702.499, 196.399, 0.194],
[704.005, 196.349, 0.182],
],
],
]
joints_2d = [
"nose",
"eye_left",
"eye_right",
"ear_left",
"ear_right",
"shoulder_left",
"shoulder_right",
"elbow_left",
"elbow_right",
"wrist_left",
"wrist_right",
"hip_left",
"hip_right",
"knee_left",
"knee_right",
"ankle_left",
"ankle_right",
"hip_middle",
"shoulder_middle",
"head",
]
# ==================================================================================================
def main():
    """Render the reference 3D triangulations and their 2D reprojections.

    Loads camera/room parameters from sample.json next to this script, draws
    the hard-coded core_triangs in 3D, reprojects them into every camera view,
    overlays the detected 2D poses, and writes two PNGs beside this file.
    """
    # Camera calibration and room geometry from the bundled sample
    with open(os.path.join(filepath, "sample.json"), "r", encoding="utf-8") as file:
        sample = json.load(file)
    camparams = sample["cameras"]
    roomparams = {
        "room_size": sample["room_size"],
        "room_center": sample["room_center"],
    }

    # 3D view of the reference triangulations
    fig2 = utils_view.draw_poses3d(core_triangs, core_joints, roomparams, camparams)
    fig2.axes[0].view_init(elev=30, azim=0)
    fig2.savefig(os.path.join(filepath, "core-triangs.png"), dpi=fig2.dpi)

    # Reproject the 3D poses into every camera view
    core_projections = []
    for i in range(len(camparams)):
        b2d, _ = utils_pose.project_poses(np.array(core_triangs), camparams[i])
        core_projections.append(b2d)

    img_size = [900, 900]
    scale = 0.66
    fig_size = 2  # subplot grid is fig_size x fig_size
    plotsize = (35, 30)
    fig, axs = plt.subplots(fig_size, fig_size, figsize=plotsize)
    fig.suptitle("core reprojections", fontsize=20)
    fig.tight_layout(rect=[0, 0, 1, 0.97])

    # One distinct hue per projected person (max across the views)
    num_persons = max(len(b2d) for b2d in core_projections)
    colors = plt.cm.hsv(np.linspace(0, 1, num_persons, endpoint=False)).tolist()
    colors = [[int(c[0] * 255), int(c[1] * 255), int(c[2] * 255)] for c in colors]

    # Loop-invariant: indices of the core joints within the 2D joint layout
    cjids = [joints_2d.index(j) for j in core_joints]

    for i in range(len(camparams)):
        img = np.ones((img_size[0], img_size[1], 3), dtype=np.uint8) * 255
        # Colored skeletons: reprojected reference poses
        for j in range(len(core_projections[i])):
            color = colors[j]
            body = np.array(core_projections[i][j])
            img = utils_view.draw_body_in_image(img, body, core_joints, color)
        # Black skeletons: detected 2D poses reduced to the core joint subset
        for k in range(len(poses_2d[i])):
            body = np.array(poses_2d[i][k])
            body = body[cjids]
            img = utils_view.draw_body_in_image(
                img, body, core_joints, [0, 0, 0], thickness=2
            )
        img = cv2.resize(img, (int(img.shape[1] * scale), int(img.shape[0] * scale)))
        title = str(i)
        x, y = divmod(i, fig_size)
        axs[x][y].imshow(img)
        axs[x][y].set_title(title)

    # Delete empty plots. Fix: the start index was hardcoded to 2, which only
    # worked for exactly two cameras; use the populated view count instead.
    for i in range(len(camparams), fig_size**2):
        x, y = divmod(i, fig_size)
        fig.delaxes(axs[x][y])

    fig.savefig(os.path.join(filepath, "core-reprojections.png"), dpi=fig.dpi)


# ==================================================================================================


if __name__ == "__main__":
    main()
-52
View File
@@ -1,52 +0,0 @@
# Build environment for RapidPoseTriangulation: TensorRT base image with
# OpenCV, swig and an ONNX Runtime built from source with TensorRT support.
FROM nvcr.io/nvidia/tensorrt:24.10-py3

ARG DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
WORKDIR /

RUN apt-get update && apt-get install -y --no-install-recommends feh
RUN apt-get update && apt-get install -y --no-install-recommends python3-opencv
RUN pip uninstall -y opencv-python && pip install --no-cache-dir "opencv-python<4.3"

# Show matplotlib images
RUN apt-get update && apt-get install -y --no-install-recommends python3-tk

# Update pip to allow installation of skelda in editable mode
RUN pip3 install --upgrade --no-cache-dir pip

# Install swig and later dependencies
RUN apt-get update && apt-get install -y --no-install-recommends build-essential
RUN apt-get update && apt-get install -y --no-install-recommends swig
RUN apt-get update && apt-get install -y --no-install-recommends libopencv-dev

# Install ONNX runtime
# See: https://github.com/microsoft/onnxruntime/blob/main/dockerfiles/Dockerfile.tensorrt
RUN pip3 uninstall -y onnxruntime-gpu
RUN pip3 install --no-cache-dir psutil
RUN git clone --recursive --depth=1 --branch=v1.20.1 https://github.com/Microsoft/onnxruntime.git
# Next line fixes: https://github.com/microsoft/onnxruntime/issues/24861
RUN cat /onnxruntime/cmake/deps.txt && \
    sed -i 's/;be8be39fdbc6e60e94fa7870b280707069b5b81a/;32b145f525a8308d7ab1c09388b2e288312d8eba/g' /onnxruntime/cmake/deps.txt && \
    cat /onnxruntime/cmake/deps.txt
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/cmake-3.30.1-linux-x86_64/bin:${PATH}
ARG CMAKE_CUDA_ARCHITECTURES=75;80;90
ENV TRT_VERSION=10.5.0.18
RUN /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh
# Fix: Docker variable substitution is case-sensitive; ${trt_version}
# expanded to an empty string, so the script ran without the version pin.
RUN /bin/sh onnxruntime/dockerfiles/scripts/checkout_submodules.sh ${TRT_VERSION}
RUN ls
RUN cd onnxruntime && \
    /bin/sh build.sh --allow_running_as_root --parallel --build_shared_lib \
    --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ --use_tensorrt \
    --tensorrt_home /usr/lib/x86_64-linux-gnu/ --config Release --build_wheel --skip_tests \
    --skip_submodule_sync --cmake_extra_defines '"CMAKE_CUDA_ARCHITECTURES='${CMAKE_CUDA_ARCHITECTURES}'"'
RUN cd onnxruntime && pip install build/Linux/Release/dist/*.whl

# Install skelda
RUN pip3 install --upgrade --no-cache-dir scipy
RUN apt-get update && apt-get install -y --no-install-recommends xvfb
COPY ./skelda/ /skelda/
RUN pip3 install --no-cache-dir -e /skelda/

WORKDIR /RapidPoseTriangulation/
CMD ["/bin/bash"]
File diff suppressed because it is too large Load Diff
-142
View File
@@ -1,142 +0,0 @@
# Setup with Nvidia-Jetson-Orin
Initial setup and installation of _RapidPoseTriangulation_ on a _Nvidia Jetson_ device. \
Tested with a _Jetson AGX Orin Developer Kit_ module.
<br>
## Base installation
- Install newest software image: \
(<https://developer.nvidia.com/sdk-manager>)
- Use manual recovery mode setup for first installation
- Find out the _ip-address_ of the _Jetson_ for the runtime component installation with:
```bash
sudo nmap -sn $(ip route get 1 | awk '{print $(NF-2);exit}')/24
```
- Initialize system: \
(<https://developer.nvidia.com/embedded/learn/get-started-jetson-agx-orin-devkit>)
- Connect via _ssh_ (using _screen_ did not work) and skip the _oem-config_ step
- Skip installation of _nvidia-jetpack_
- Install basic tools:
```bash
sudo apt install -y curl nano wget git
```
- Update hostname:
```bash
sudo nano /etc/hostname
sudo nano /etc/hosts
sudo reboot
```
- Enable maximum performance mode:
```bash
sudo nvpmodel -m 0
sudo jetson_clocks
```
- Test docker is working:
```bash
sudo docker run --rm hello-world
```
- Enable _docker_ without _sudo_: \
(<https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user>)
- Enable GPU-access for docker building:
- Run `sudo nano /etc/docker/daemon.json` and add:
```json
{
"runtimes": {
"nvidia": {
"args": [],
"path": "nvidia-container-runtime"
}
},
"default-runtime": "nvidia"
}
```
- Restart docker: `sudo systemctl restart docker`
- Test docker is working:
```bash
docker run --rm hello-world
docker run -it --rm --runtime=nvidia --network=host dustynv/onnxruntime:1.20-r36.4.0
```
<br>
## RPT installation
- Build docker container:
```bash
docker build --progress=plain -f extras/jetson/dockerfile -t rapidposetriangulation .
./run_container.sh
```
- Build _rpt_ package inside container:
```bash
cd /RapidPoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py && cd ..
cd /RapidPoseTriangulation/scripts/ && \
g++ -std=c++2a -fPIC -O3 -march=native -Wall -Werror -flto=auto \
-I /RapidPoseTriangulation/rpt/ \
-isystem /usr/include/opencv4/ \
-isystem /usr/local/include/onnxruntime/ \
-L /usr/local/lib/ \
test_skelda_dataset.cpp \
/RapidPoseTriangulation/rpt/*.cpp \
-o test_skelda_dataset.bin \
-Wl,--start-group \
-lonnxruntime_providers_tensorrt \
-lonnxruntime_providers_shared \
-lonnxruntime_providers_cuda \
-lonnxruntime \
-Wl,--end-group \
$(pkg-config --libs opencv4) \
-Wl,-rpath,/onnxruntime/build/Linux/Release/ \
&& cd ..
```
- Test with samples:
```bash
python3 /RapidPoseTriangulation/scripts/test_triangulate.py
```
<br>
## ROS interface
- Build docker container:
```bash
docker build --progress=plain -f extras/ros/dockerfile_2d -t rapidposetriangulation_ros2d .
```
- Run and test:
```bash
export CAMID="camera01" && docker compose -f extras/jetson/docker-compose-2d.yml up
docker exec -it jetson-test_node-1 bash
export ROS_DOMAIN_ID=18
```
File diff suppressed because it is too large Load Diff
-37
View File
@@ -1,37 +0,0 @@
# Compose file for the 2D ROS interface on Jetson.
# Two containers share the same image: an idle "test_node" for interactive
# docker-exec sessions and an "estimator" that runs the rpt2d wrapper node.
services:
  test_node:
    image: rapidposetriangulation_ros2d
    # Host networking/IPC so ROS 2 DDS discovery works across containers
    network_mode: "host"
    ipc: "host"
    # NVIDIA container runtime for GPU access
    runtime: nvidia
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      # X11 socket for GUI windows
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    # Keep the container alive so it can be entered with `docker exec`
    command: /bin/bash -i -c 'sleep infinity'
  estimator:
    image: rapidposetriangulation_ros2d
    network_mode: "host"
    ipc: "host"
    runtime: nvidia
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    # Fixed ROS_DOMAIN_ID so all nodes end up in the same ROS 2 domain
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2d_wrapper_cpp rpt2d_wrapper'
-25
View File
@@ -1,25 +0,0 @@
# Jetson inference image based on NVIDIA's prebuilt ONNX Runtime container.
FROM dustynv/onnxruntime:1.20-r36.4.0
ARG DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
WORKDIR /
# Lightweight image viewer for quick visual checks
RUN apt-get update && apt-get install -y --no-install-recommends feh
RUN apt-get update && apt-get install -y --no-install-recommends python3-opencv
RUN apt-get update && apt-get install -y --no-install-recommends libatlas-base-dev
# Show matplotlib images
RUN apt-get update && apt-get install -y --no-install-recommends python3-tk
# Install swig and later dependencies
RUN apt-get update && apt-get install -y --no-install-recommends build-essential
RUN apt-get update && apt-get install -y --no-install-recommends swig
RUN apt-get update && apt-get install -y --no-install-recommends libopencv-dev
RUN pip3 install --no-cache-dir scipy
# Install the skelda dataset tools in editable mode
COPY ./skelda/ /skelda/
RUN pip3 install --no-cache-dir -e /skelda/
WORKDIR /RapidPoseTriangulation/
CMD ["/bin/bash"]
-131
View File
@@ -1,131 +0,0 @@
# Exporting MMPose models
```bash
docker build --progress=plain -f extras/mmdeploy/dockerfile -t rpt_mmdeploy .
./extras/mmdeploy/run_container.sh
```
<br>
## ONNX
```bash
cd /mmdeploy/
export withFP16="_fp16"
cp /RapidPoseTriangulation/extras/mmdeploy/configs/detection_onnxruntime_static-320x320"$withFP16".py configs/mmdet/detection/
python3 ./tools/deploy.py \
configs/mmdet/detection/detection_onnxruntime_static-320x320"$withFP16".py \
/mmpose/projects/rtmpose/rtmdet/person/rtmdet_nano_320-8xb32_coco-person.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_1x3x320x320"$withFP16".onnx
python3 ./tools/deploy.py \
configs/mmdet/detection/detection_onnxruntime_static-320x320"$withFP16".py \
/mmpose/projects/rtmpose/rtmdet/person/rtmdet_m_640-8xb32_coco-person.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmpose/rtmdet_m_8xb32-100e_coco-obj365-person-235e8209.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-m_1x3x320x320"$withFP16".onnx
```
```bash
cd /mmdeploy/
export withFP16="_fp16"
cp /RapidPoseTriangulation/extras/mmdeploy/configs/pose-detection_simcc_onnxruntime_static-384x288"$withFP16".py configs/mmpose/
cp /RapidPoseTriangulation/extras/mmdeploy/configs/pose-detection_simcc_onnxruntime_dynamic-384x288"$withFP16".py configs/mmpose/
python3 ./tools/deploy.py \
configs/mmpose/pose-detection_simcc_onnxruntime_static-384x288"$withFP16".py \
/mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_1x3x384x288"$withFP16".onnx
python3 ./tools/deploy.py \
configs/mmpose/pose-detection_simcc_onnxruntime_dynamic-384x288"$withFP16".py \
/mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-m_simcc-body7_pt-body7_420e-384x288-65e718c4_20230504.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_Bx3x384x288"$withFP16".onnx
python3 ./tools/deploy.py \
configs/mmpose/pose-detection_simcc_onnxruntime_static-384x288"$withFP16".py \
/mmpose/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-384x288-2438fd99_20230728.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-l_wb_1x3x384x288"$withFP16".onnx
python3 ./tools/deploy.py \
configs/mmpose/pose-detection_simcc_onnxruntime_dynamic-384x288"$withFP16".py \
/mmpose/projects/rtmpose/rtmpose/wholebody_2d_keypoint/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py \
https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-ucoco_dw-ucoco_270e-384x288-2438fd99_20230728.pth \
/mmpose/projects/rtmpose/examples/onnxruntime/human-pose.jpeg \
--work-dir work_dir \
--show
mv /mmdeploy/work_dir/end2end.onnx /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-l_wb_Bx3x384x288"$withFP16".onnx
```
```bash
python3 /RapidPoseTriangulation/extras/mmdeploy/make_extra_graphs_pt.py
python3 /RapidPoseTriangulation/extras/mmdeploy/make_extra_graphs_tf.py
```
```bash
python3 /RapidPoseTriangulation/extras/mmdeploy/add_extra_steps.py
```
<br>
## TensorRT
Run this directly in the inference container, because the TensorRT version used to build the engine must match the TensorRT version used at inference time
```bash
export withFP16="_fp16"
trtexec --fp16 \
--onnx=/RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_1x320x320x3"$withFP16"_extra-steps.onnx \
--saveEngine=end2end.engine
mv ./end2end.engine /RapidPoseTriangulation/extras/mmdeploy/exports/rtmdet-nano_1x320x320x3"$withFP16"_extra-steps.engine
trtexec --fp16 \
--onnx=/RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_Bx384x288x3"$withFP16"_extra-steps.onnx \
--saveEngine=end2end.engine \
--minShapes=image_input:1x384x288x3 \
--optShapes=image_input:1x384x288x3 \
--maxShapes=image_input:1x384x288x3
mv ./end2end.engine /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_1x384x288x3"$withFP16"_extra-steps.engine
```
<br>
## Benchmark
```bash
cd /mmdeploy/
export withFP16="_fp16"
python3 ./tools/profiler.py \
configs/mmpose/pose-detection_simcc_onnxruntime_static-384x288"$withFP16".py \
/mmpose/projects/rtmpose/rtmpose/body_2d_keypoint/rtmpose-m_8xb256-420e_coco-384x288.py \
/RapidPoseTriangulation/extras/mmdeploy/testimages/ \
--model /RapidPoseTriangulation/extras/mmdeploy/exports/rtmpose-m_1x3x384x288"$withFP16".onnx \
--shape 384x288 \
--device cuda \
--warmup 50 \
--num-iter 200
```
-233
View File
@@ -1,233 +0,0 @@
import re
import numpy as np
import onnx
from onnx import TensorProto, helper, numpy_helper
# ==================================================================================================
# Directory containing the raw MMDeploy ONNX exports to be modified.
base_path = "/RapidPoseTriangulation/extras/mmdeploy/exports/"
det_model_path1 = base_path + "rtmdet-nano_1x3x320x320.onnx"
det_model_path2 = base_path + "rtmdet-m_1x3x320x320.onnx"
pose_model_path1 = base_path + "rtmpose-m_Bx3x384x288.onnx"
pose_model_path2 = base_path + "rtmpose-m_1x3x384x288.onnx"
pose_model_path3 = base_path + "rtmpose-l_wb_Bx3x384x288.onnx"
pose_model_path4 = base_path + "rtmpose-l_wb_1x3x384x288.onnx"
# Normalization folded into add/mul constants so that
# (x + norm_mean) * norm_std == (x / 255 - mean) / std
# with the usual ImageNet mean/std values.
norm_mean = -1 * (np.array([0.485, 0.456, 0.406]) * 255)
norm_std = 1.0 / (np.array([0.229, 0.224, 0.225]) * 255)
# ==================================================================================================
def add_steps_to_onnx(model_path):
    """Inject pre/post-processing nodes into an exported ONNX model in place.

    Adds, in front of the network: a Cast from uint8 to the internal dtype,
    an NHWC->NCHW Transpose, and mean/std normalization (as graph
    initializers). The model input is renamed to "image_input" and declared
    as uint8 NHWC. Every output is cast back to fp32, and for "det"/"pose"
    models the two outputs are merged into a single tensor. The result is
    saved with the spatial dims in the filename reordered to NHWC and an
    "_extra-steps" suffix.
    """
    # Load existing model
    model = onnx.load(model_path)
    graph = model.graph
    mean = norm_mean.astype(np.float32)
    std = norm_std.astype(np.float32)
    # Broadcastable over NCHW tensors
    mean = np.reshape(mean, (1, 3, 1, 1)).astype(np.float32)
    std = np.reshape(std, (1, 3, 1, 1)).astype(np.float32)
    # fp16 variants are detected by filename convention
    use_fp16 = bool("fp16" in model_path)
    if use_fp16:
        mean = mean.astype(np.float16)
        std = std.astype(np.float16)
    # Add the initializers to the graph
    mean_initializer = numpy_helper.from_array(mean, name="norm_mean")
    std_initializer = numpy_helper.from_array(std, name="norm_std")
    graph.initializer.extend([mean_initializer, std_initializer])
    # Define layer names, assuming the first input is the image tensor
    input_name = graph.input[0].name
    # Cast to internal type
    # This has to be the first node, because tensorrt does not support uint8 layers
    # 10 == TensorProto.FLOAT16, 1 == TensorProto.FLOAT
    cast_type = 10 if use_fp16 else 1
    casted_output = "casted_output"
    cast_node = helper.make_node(
        "Cast",
        inputs=[input_name],
        outputs=[casted_output],
        to=cast_type,
        name="Cast_Input",
    )
    # Node to transpose (NHWC -> NCHW)
    transpose_output = "transpose_output"
    transpose_node = helper.make_node(
        "Transpose",
        inputs=[casted_output],
        outputs=[transpose_output],
        perm=[0, 3, 1, 2],
        name="Transpose",
    )
    # Node to add mean
    mean_added_output = "mean_added_output"
    mean_add_node = helper.make_node(
        "Add",
        inputs=[transpose_output, "norm_mean"],
        outputs=[mean_added_output],
        name="Mean_Addition",
    )
    # Node to multiply by std
    std_mult_output = "std_mult_output"
    std_mul_node = helper.make_node(
        "Mul",
        inputs=[mean_added_output, "norm_std"],
        outputs=[std_mult_output],
        name="Std_Multiplication",
    )
    # Replace original input of the model with the output of normalization
    for node in graph.node:
        for idx, input_name_in_node in enumerate(node.input):
            if input_name_in_node == input_name:
                node.input[idx] = std_mult_output
    # Add the new nodes to the graph (prepended in execution order)
    graph.node.insert(0, cast_node)
    graph.node.insert(1, transpose_node)
    graph.node.insert(2, mean_add_node)
    graph.node.insert(3, std_mul_node)
    # Transpose the input shape (declared NCHW dims rewritten as NHWC)
    input_shape = graph.input[0].type.tensor_type.shape.dim
    dims = [dim.dim_value for dim in input_shape]
    for i, j in enumerate([0, 3, 1, 2]):
        input_shape[j].dim_value = dims[i]
    # Set the batch size to a defined string (dim_value 0 means "dynamic" here)
    input_shape = graph.input[0].type.tensor_type.shape.dim
    if input_shape[0].dim_value == 0:
        input_shape[0].dim_param = "batch_size"
    # Rename the input tensor
    main_input_image_name = model.graph.input[0].name
    for node in model.graph.node:
        for idx, name in enumerate(node.input):
            if name == main_input_image_name:
                node.input[idx] = "image_input"
    model.graph.input[0].name = "image_input"
    # Set input image type to int8
    model.graph.input[0].type.tensor_type.elem_type = TensorProto.UINT8
    # Cast all outputs to fp32 to avoid half precision issues in cpp code
    for output in graph.output:
        orig_output_name = output.name
        internal_output_name = orig_output_name + "_internal"
        # Rename the output tensor
        for node in model.graph.node:
            for idx, name in enumerate(node.output):
                if name == orig_output_name:
                    node.output[idx] = internal_output_name
        # Insert a Cast node that casts the internal output to fp32
        # (the cast keeps the original output name, so consumers are unchanged)
        cast_fp32_name = orig_output_name
        cast_node_output = helper.make_node(
            "Cast",
            inputs=[internal_output_name],
            outputs=[cast_fp32_name],
            to=1,
            name="Cast_Output_" + orig_output_name,
        )
        # Append the cast node to the graph
        graph.node.append(cast_node_output)
        # Update the output's data type info
        output.type.tensor_type.elem_type = TensorProto.FLOAT
    # Merge the two outputs into one tensor along the last axis
    if "det" in model_path:
        r1_output = "dets"
        r2_output = "labels"
        out_name = "bboxes"
        out_dim = 6
    if "pose" in model_path:
        r1_output = "kpts"
        r2_output = "scores"
        out_name = "keypoints"
        out_dim = 3
    if "det" in model_path or "pose" in model_path:
        # Node to expand (scores/labels get a trailing axis for concatenation)
        r2_expanded = r2_output + "_expanded"
        unsqueeze_node = helper.make_node(
            "Unsqueeze",
            inputs=[r2_output],
            outputs=[r2_expanded],
            axes=[2],
            name="Unsqueeze",
        )
        # Node to concatenate
        r12_merged = out_name
        concat_node = helper.make_node(
            "Concat",
            inputs=[r1_output, r2_expanded],
            outputs=[r12_merged],
            axis=2,
            name="Merged",
        )
        # Define the new concatenated output (0-sized dims become dynamic/None)
        merged_output = helper.make_tensor_value_info(
            r12_merged,
            TensorProto.FLOAT,
            [
                (
                    graph.input[0].type.tensor_type.shape.dim[0].dim_value
                    if graph.input[0].type.tensor_type.shape.dim[0].dim_value > 0
                    else None
                ),
                (
                    graph.output[0].type.tensor_type.shape.dim[1].dim_value
                    if graph.output[0].type.tensor_type.shape.dim[1].dim_value > 0
                    else None
                ),
                out_dim,
            ],
        )
        # Update the graph: replace the two old outputs with the merged one
        graph.node.append(unsqueeze_node)
        graph.node.append(concat_node)
        graph.output.pop()
        graph.output.pop()
        graph.output.append(merged_output)
    # Reorder the "AxBxCxD" dims in the filename (NCHW -> NHWC) and save
    path = re.sub(r"(x)(\d+)x(\d+)x(\d+)", r"\1\3x\4x\2", model_path)
    path = path.replace(".onnx", "_extra-steps.onnx")
    onnx.save(model, path)
# ==================================================================================================
def main():
    """Inject the extra pre/post-processing steps into every exported model,
    first the fp32 exports and then the matching fp16 variants."""
    model_paths = [
        det_model_path1,
        det_model_path2,
        pose_model_path1,
        pose_model_path2,
        pose_model_path3,
        pose_model_path4,
    ]
    for path in model_paths:
        add_steps_to_onnx(path)
    for path in model_paths:
        add_steps_to_onnx(path.replace(".onnx", "_fp16.onnx"))


# ==================================================================================================
if __name__ == "__main__":
    main()
@@ -1,18 +0,0 @@
# MMDeploy config: RTMDet person detector, static 320x320 input, ONNX Runtime backend (fp32).
_base_ = ["../_base_/base_static.py", "../../_base_/backends/onnxruntime.py"]

onnx_config = dict(
    input_shape=[320, 320],
)

codebase_config = dict(
    # For later TensorRT inference, the number of output boxes needs to be as stable as possible,
    # because a drop in the box count leads to a re-optimization which takes a lot of time,
    # therefore reduce the maximum number of output boxes to the smallest usable value and sort out
    # low confidence boxes outside the model.
    post_processing=dict(
        score_threshold=0.0,
        confidence_threshold=0.0,
        iou_threshold=0.5,
        max_output_boxes_per_class=10,
    ),
)
@@ -1,18 +0,0 @@
# MMDeploy config: RTMDet person detector, static 320x320 input, ONNX Runtime backend (fp16).
_base_ = ["../_base_/base_static.py", "../../_base_/backends/onnxruntime-fp16.py"]

onnx_config = dict(
    input_shape=[320, 320],
)

codebase_config = dict(
    # For later TensorRT inference, the number of output boxes needs to be as stable as possible,
    # because a drop in the box count leads to a re-optimization which takes a lot of time,
    # therefore reduce the maximum number of output boxes to the smallest usable value and sort out
    # low confidence boxes outside the model.
    post_processing=dict(
        score_threshold=0.0,
        confidence_threshold=0.0,
        iou_threshold=0.5,
        max_output_boxes_per_class=10,
    ),
)
@@ -1,19 +0,0 @@
# MMDeploy config: RTMPose SimCC model, 288x384 input, dynamic batch axis, ONNX Runtime (fp32).
_base_ = ["./pose-detection_static.py", "../_base_/backends/onnxruntime.py"]

onnx_config = dict(
    input_shape=[288, 384],
    output_names=["kpts", "scores"],
    # Only the batch dimension is dynamic; spatial dims stay fixed.
    dynamic_axes={
        "input": {
            0: "batch",
        },
        "kpts": {
            0: "batch",
        },
        "scores": {
            0: "batch",
        },
    },
)

codebase_config = dict(export_postprocess=True)  # export get_simcc_maximum
@@ -1,19 +0,0 @@
# MMDeploy config: RTMPose SimCC model, 288x384 input, dynamic batch axis, ONNX Runtime (fp16).
_base_ = ["./pose-detection_static.py", "../_base_/backends/onnxruntime-fp16.py"]

onnx_config = dict(
    input_shape=[288, 384],
    output_names=["kpts", "scores"],
    # Only the batch dimension is dynamic; spatial dims stay fixed.
    dynamic_axes={
        "input": {
            0: "batch",
        },
        "kpts": {
            0: "batch",
        },
        "scores": {
            0: "batch",
        },
    },
)

codebase_config = dict(export_postprocess=True)  # export get_simcc_maximum
@@ -1,8 +0,0 @@
# MMDeploy config: RTMPose SimCC model, static 288x384 input, ONNX Runtime (fp32).
_base_ = ["./pose-detection_static.py", "../_base_/backends/onnxruntime.py"]

onnx_config = dict(
    input_shape=[288, 384],
    output_names=["kpts", "scores"],
)

codebase_config = dict(export_postprocess=True)  # export get_simcc_maximum
@@ -1,8 +0,0 @@
# MMDeploy config: RTMPose SimCC model, static 288x384 input, ONNX Runtime (fp16).
_base_ = ["./pose-detection_static.py", "../_base_/backends/onnxruntime-fp16.py"]

onnx_config = dict(
    input_shape=[288, 384],
    output_names=["kpts", "scores"],
)

codebase_config = dict(export_postprocess=True)  # export get_simcc_maximum
-41
View File
@@ -1,41 +0,0 @@
# Export image for converting MMPose/MMDet models to ONNX via MMDeploy.
FROM openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy1.3.1
ARG DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
WORKDIR /
# Lightweight image viewer for quick visual checks
RUN apt-get update && apt-get install -y --no-install-recommends feh
# Fetch the MMDeploy sources and build the ONNX Runtime backend ops
RUN git clone https://github.com/open-mmlab/mmdeploy.git --depth=1
RUN cd mmdeploy/; python3 tools/scripts/build_ubuntu_x64_ort.py
# Install MMPose
ENV FORCE_CUDA="1"
ENV MMCV_WITH_OPS=1
RUN pip3 install --upgrade --no-cache-dir openmim
RUN mim install mmengine
RUN mim install "mmcv>=2,<2.2.0"
RUN mim install "mmdet>=3"
RUN mim install "mmpose>=1.1.0"
# Fix an error when importing mmpose
RUN pip3 install --upgrade --no-cache-dir "numpy<2" scipy
RUN git clone --depth=1 --branch=main https://github.com/open-mmlab/mmpose.git
# Make the built MMDeploy libs and its bundled ONNX Runtime visible in shells
RUN echo 'export PYTHONPATH=/mmdeploy/build/lib:$PYTHONPATH' >> ~/.bashrc
RUN echo 'export LD_LIBRARY_PATH=/mmdeploy/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$LD_LIBRARY_PATH' >> ~/.bashrc
# Show images
RUN apt-get update && apt-get install -y --no-install-recommends python3-tk
# Tool for fp16 conversion
RUN pip3 install --upgrade --no-cache-dir onnxconverter_common
# Fix an error when profiling
RUN pip3 install --upgrade --no-cache-dir "onnxruntime-gpu<1.17"
# TensorFlow + tf2onnx for the TF-based preprocessing graph export
RUN pip3 install --upgrade --no-cache-dir tensorflow
RUN pip3 install --upgrade --no-cache-dir tf2onnx
WORKDIR /mmdeploy/
CMD ["/bin/bash"]
-2
View File
@@ -1,2 +0,0 @@
*
!.gitignore
-338
View File
@@ -1,338 +0,0 @@
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import roi_align
# ==================================================================================================
# Output directory for the exported helper graphs.
base_path = "/RapidPoseTriangulation/extras/mmdeploy/exports/"
# Model input sizes as (height, width).
det_target_size = (320, 320)
pose_target_size = (384, 288)
# ==================================================================================================
class Letterbox(nn.Module):
    def __init__(self, target_size, fill_value=128):
        """Resize and pad image while keeping aspect ratio.

        target_size: (height, width) of the output image.
        fill_value: constant used for the padding borders.
        """
        super(Letterbox, self).__init__()
        self.target_size = target_size
        self.fill_value = fill_value

    def calc_params(self, ishape):
        # ishape is an NHWC shape; index 1 = height, index 2 = width.
        ih, iw = ishape[1], ishape[2]
        th, tw = self.target_size
        # NOTE(review): torch.min/torch.round are applied to Python scalars
        # here; this presumably relies on ONNX-export tracing semantics —
        # confirm against the torch version used for export.
        scale = torch.min(tw / iw, th / ih)
        nw = torch.round(iw * scale)
        nh = torch.round(ih * scale)
        # Split the leftover space into (near-)symmetric borders.
        pad_w = tw - nw
        pad_h = th - nh
        pad_left = pad_w // 2
        pad_top = pad_h // 2
        pad_right = pad_w - pad_left
        pad_bottom = pad_h - pad_top
        paddings = (pad_left, pad_right, pad_top, pad_bottom)
        return paddings, scale, (nw, nh)

    def forward(self, img):
        # img: NHWC tensor; returns an NHWC float32 tensor of target_size.
        paddings, _, (nw, nh) = self.calc_params(img.shape)
        # Resize the image (interpolate expects NCHW)
        img = img.to(torch.float32)
        img = img.permute(0, 3, 1, 2)
        img = F.interpolate(
            img,
            size=(nh, nw),
            mode="bilinear",
            align_corners=False,
        )
        img = img.permute(0, 2, 3, 1)
        # Round so values stay valid pixel intensities after interpolation
        img = img.round()
        # Pad the image (F.pad order is left, right, top, bottom)
        img = F.pad(
            img.permute(0, 3, 1, 2),
            pad=paddings,
            mode="constant",
            value=self.fill_value,
        )
        img = img.permute(0, 2, 3, 1)
        return img
# ==================================================================================================
class BoxCrop(nn.Module):
    def __init__(self, target_size):
        """Crop bounding box from image.

        target_size: (height, width) of the cropped output.
        """
        super(BoxCrop, self).__init__()
        self.target_size = target_size
        # Enlarge the box around its center before cropping
        self.padding_scale = 1.25

    def calc_params(self, bbox):
        # bbox: [1, 4] tensor with (start_x, start_y, end_x, end_y);
        # only the first box in the batch is used.
        start_x, start_y, end_x, end_y = bbox[0, 0], bbox[0, 1], bbox[0, 2], bbox[0, 3]
        target_h, target_w = self.target_size
        # Calculate original bounding box width, height and center
        bbox_w = end_x - start_x
        bbox_h = end_y - start_y
        center_x = (start_x + end_x) / 2.0
        center_y = (start_y + end_y) / 2.0
        # Calculate the aspect ratios
        bbox_aspect = bbox_w / bbox_h
        target_aspect = target_w / target_h
        # Adjust the scaled bounding box to match the target aspect ratio
        # (grow the smaller dimension so the box fully covers the original)
        if bbox_aspect > target_aspect:
            adjusted_h = bbox_w / target_aspect
            adjusted_w = bbox_w
        else:
            adjusted_w = bbox_h * target_aspect
            adjusted_h = bbox_h
        # Scale the bounding box by the padding_scale
        scaled_bbox_w = adjusted_w * self.padding_scale
        scaled_bbox_h = adjusted_h * self.padding_scale
        # Calculate scaled bounding box coordinates
        new_start_x = center_x - scaled_bbox_w / 2.0
        new_start_y = center_y - scaled_bbox_h / 2.0
        new_end_x = center_x + scaled_bbox_w / 2.0
        new_end_y = center_y + scaled_bbox_h / 2.0
        # Define the new box coordinates
        new_box = torch.stack((new_start_x, new_start_y, new_end_x, new_end_y), dim=0)
        new_box = new_box.unsqueeze(0)
        # Per-axis scale factors from scaled box size to target size
        scale = torch.stack(
            ((target_w / scaled_bbox_w), (target_h / scaled_bbox_h)), dim=0
        )
        return scale, new_box

    def forward(self, img, bbox):
        # img: NHWC tensor; bbox: [1, 4] box; returns the NHWC crop.
        _, bbox = self.calc_params(bbox)
        # roi_align rois are (batch_index, x1, y1, x2, y2)
        batch_indices = torch.zeros(bbox.shape[0], 1)
        rois = torch.cat([batch_indices, bbox], dim=1)
        # Resize and crop
        img = img.to(torch.float32)
        img = img.permute(0, 3, 1, 2)
        img = roi_align(
            img,
            rois,
            output_size=self.target_size,
            spatial_scale=1.0,
            sampling_ratio=0,
        )
        img = img.permute(0, 2, 3, 1)
        # Round so values stay valid pixel intensities after interpolation
        img = img.round()
        return img
# ==================================================================================================
class DetPreprocess(nn.Module):
    """Detector preprocessing: letterbox the input batch to the detector size."""

    def __init__(self, target_size, fill_value=114):
        super().__init__()
        self.letterbox = Letterbox(target_size, fill_value)

    def forward(self, img):
        # img: torch.Tensor of shape [batch, H, W, C], dtype=torch.uint8
        return self.letterbox(img)
# ==================================================================================================
class DetPostprocess(nn.Module):
    def __init__(self, target_size):
        # target_size: (height, width) the detector input was letterboxed to.
        super(DetPostprocess, self).__init__()
        self.target_size = target_size
        self.letterbox = Letterbox(target_size)

    def forward(self, img, boxes):
        # img: original NHWC image (only its shape is used);
        # boxes: detector boxes in letterboxed coordinates; returns boxes
        # mapped back into original-image coordinates.
        paddings, scale, _ = self.letterbox.calc_params(img.shape)
        boxes = boxes.float()
        # Remove the letterbox padding offsets (left / top)
        boxes[:, :, 0] -= paddings[0]
        boxes[:, :, 2] -= paddings[0]
        boxes[:, :, 1] -= paddings[2]
        boxes[:, :, 3] -= paddings[2]
        # Clamp lower bound to the image area
        zero = torch.tensor(0)
        boxes = torch.max(boxes, zero)
        # Clamp upper bound to the unpadded (resized) content area
        th, tw = self.target_size
        pad_w = paddings[0] + paddings[1]
        pad_h = paddings[2] + paddings[3]
        max_w = tw - pad_w - 1
        max_h = th - pad_h - 1
        b0 = boxes[:, :, 0]
        b1 = boxes[:, :, 1]
        b2 = boxes[:, :, 2]
        b3 = boxes[:, :, 3]
        # NOTE(review): torch.min with a non-tensor bound presumably relies on
        # ONNX-export tracing semantics — confirm with the export torch version.
        b0 = torch.min(b0, max_w)
        b1 = torch.min(b1, max_h)
        b2 = torch.min(b2, max_w)
        b3 = torch.min(b3, max_h)
        boxes[:, :, 0] = b0
        boxes[:, :, 1] = b1
        boxes[:, :, 2] = b2
        boxes[:, :, 3] = b3
        # Undo the resize back to original image coordinates
        boxes[:, :, 0:4] /= scale
        return boxes
# ==================================================================================================
class PosePreprocess(nn.Module):
    """Pose preprocessing: crop the detected person box out of the image."""

    def __init__(self, target_size, fill_value=114):
        # fill_value is kept for signature parity with DetPreprocess; it is not stored.
        super().__init__()
        self.boxcrop = BoxCrop(target_size)

    def forward(self, img, bbox):
        # img: torch.Tensor of shape [1, H, W, C], dtype=torch.uint8
        # bbox: torch.Tensor of shape [1, 4], dtype=torch.float32
        return self.boxcrop(img, bbox)
# ==================================================================================================
class PosePostprocess(nn.Module):
    def __init__(self, target_size):
        # target_size: (height, width) of the pose-model input crop.
        super(PosePostprocess, self).__init__()
        self.boxcrop = BoxCrop(target_size)
        self.target_size = target_size

    def forward(self, img, bbox, keypoints):
        # Map keypoints from crop coordinates back to original-image
        # coordinates, using the same crop parameters as PosePreprocess.
        scale, bbox = self.boxcrop.calc_params(bbox)
        kp = keypoints.float()
        # Undo the crop resize, then shift by the crop origin
        kp[:, :, 0:2] /= scale
        kp[:, :, 0] += bbox[0, 0]
        kp[:, :, 1] += bbox[0, 1]
        # Clamp into the original image area
        zero = torch.tensor(0)
        kp = torch.max(kp, zero)
        max_w = img.shape[2] - 1
        max_h = img.shape[1] - 1
        k0 = kp[:, :, 0]
        k1 = kp[:, :, 1]
        # NOTE(review): torch.min with a non-tensor bound presumably relies on
        # ONNX-export tracing semantics — confirm with the export torch version.
        k0 = torch.min(k0, max_w)
        k1 = torch.min(k1, max_h)
        kp[:, :, 0] = k0
        kp[:, :, 1] = k1
        return kp
# ==================================================================================================
def main():
    """Export the ONNX helper graphs for detector and pose pre/post-processing."""
    img_path = "/RapidPoseTriangulation/scripts/../data/h1/54138969-img_003201.jpg"
    image = cv2.imread(img_path, 3)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Shared dummy inputs used to trace each export graph. None of the
    # modules mutate their inputs, so the tensors can be reused.
    image_batch = torch.from_numpy(image).unsqueeze(0)
    bbox_sample = torch.tensor([[352, 339, 518, 594]]).to(torch.int32)

    # Detector preprocessing (letterbox) graph
    torch.onnx.export(
        DetPreprocess(target_size=det_target_size),
        image_batch,
        base_path + "det_preprocess.onnx",
        opset_version=11,
        input_names=["input_image"],
        output_names=["preprocessed_image"],
        dynamic_axes={
            "input_image": {0: "batch_size", 1: "height", 2: "width"},
            "preprocessed_image": {0: "batch_size"},
        },
    )

    # Detector postprocessing (box un-letterboxing) graph
    torch.onnx.export(
        DetPostprocess(target_size=det_target_size),
        (image_batch, torch.rand(1, 10, 5)),
        base_path + "det_postprocess.onnx",
        opset_version=11,
        input_names=["input_image", "boxes"],
        output_names=["output_boxes"],
        dynamic_axes={
            "input_image": {0: "batch_size", 1: "height", 2: "width"},
            "boxes": {0: "batch_size", 1: "num_boxes"},
            "output_boxes": {0: "batch_size", 1: "num_boxes"},
        },
    )

    # Pose preprocessing (box crop) graph
    torch.onnx.export(
        PosePreprocess(target_size=pose_target_size),
        (image_batch, bbox_sample),
        base_path + "pose_preprocess.onnx",
        opset_version=11,
        input_names=["input_image", "bbox"],
        output_names=["preprocessed_image"],
        dynamic_axes={
            "input_image": {0: "batch_size", 1: "height", 2: "width"},
            "preprocessed_image": {0: "batch_size"},
        },
    )

    # Pose postprocessing (keypoint back-projection) graph
    torch.onnx.export(
        PosePostprocess(target_size=pose_target_size),
        (image_batch, bbox_sample, torch.rand(1, 17, 2)),
        base_path + "pose_postprocess.onnx",
        opset_version=11,
        input_names=["input_image", "bbox", "keypoints"],
        output_names=["output_keypoints"],
        dynamic_axes={
            "input_image": {0: "batch_size", 1: "height", 2: "width"},
            "output_keypoints": {0: "batch_size"},
        },
    )


# ==================================================================================================
if __name__ == "__main__":
    main()
-276
View File
@@ -1,276 +0,0 @@
import cv2
import numpy as np
import tensorflow as tf
import tf2onnx
# ==================================================================================================
# Output directory for the exported helper graphs.
base_path = "/RapidPoseTriangulation/extras/mmdeploy/exports/"
# Detector input size as (height, width).
det_target_size = (320, 320)
# ==================================================================================================
class BayerToRGB(tf.keras.layers.Layer):
    """Convert Bayer image to RGB

    See: https://stanford.edu/class/ee367/reading/Demosaicing_ICASSP04.pdf
    See: https://github.com/cheind/pytorch-debayer/blob/master/debayer/modules.py#L231
    """

    def __init__(self):
        super().__init__()
        # Layout of the 2x2 mosaic cell; only RGGB is handled by self.index below.
        self.layout = "RGGB"
        self.max_val = 255.0
        # Four 5x5 interpolation kernels (Malvar et al.), stored as HWIO for
        # tf.nn.conv2d and normalized by the common factor 8.
        self.kernels = tf.constant(
            np.array(
                [
                    # G at R/B locations
                    [
                        [0, 0, -1, 0, 0],
                        [0, 0, 2, 0, 0],
                        [-1, 2, 4, 2, -1],
                        [0, 0, 2, 0, 0],
                        [0, 0, -1, 0, 0],
                    ],
                    # R/B at G in R/B rows and B/R columns
                    [
                        [0, 0, 0.5, 0, 0],
                        [0, -1, 0, -1, 0],
                        [-1, 4, 5, 4, -1],
                        [0, -1, 0, -1, 0],
                        [0, 0, 0.5, 0, 0],
                    ],
                    # R/B at G in B/R rows and R/B columns
                    [
                        [0, 0, 0.5, 0, 0],
                        [0, -1, 4, -1, 0],
                        [-1, 0, 5, 0, -1],
                        [0, -1, 4, -1, 0],
                        [0, 0, 0.5, 0, 0],
                    ],
                    # R/B at B/R in B/R rows and B/R columns
                    [
                        [0, 0, -1.5, 0, 0],
                        [0, 2, 0, 2, 0],
                        [-1.5, 0, 6, 0, -1.5],
                        [0, 2, 0, 2, 0],
                        [0, 0, -1.5, 0, 0],
                    ],
                ],
                dtype=np.float32,
            )
            .reshape(1, 4, 5, 5)
            .transpose(2, 3, 0, 1)
            / 8.0
        )
        # Index 4 selects the identity plane (the raw mosaic value itself).
        self.index = tf.constant(
            np.array(
                # Describes the kernel indices that calculate the corresponding RGB values for
                # the 2x2 layout (RGGB) sub-structure
                [
                    # Destination R
                    [
                        [4, 1],  # identity, R at G in R row B column
                        [2, 3],  # R at G in B row R column, R at B in B row R column
                    ],
                    # Destination G
                    [
                        [0, 4],
                        [4, 0],
                    ],
                    # Destination B
                    [
                        [3, 2],
                        [1, 4],
                    ],
                ]
            ).reshape(1, 3, 2, 2)
        )

    def call(self, img):
        # img: [batch, H, W, 1] Bayer mosaic (float); returns [batch, H, W, 3] RGB.
        H, W = tf.shape(img)[1], tf.shape(img)[2]
        # Pad the image (mirror the two outermost rows/columns)
        tpad = img[:, 0:2, :, :]
        bpad = img[:, H - 2 : H, :, :]
        ipad = tf.concat([tpad, img, bpad], axis=1)
        lpad = ipad[:, :, 0:2, :]
        rpad = ipad[:, :, W - 2 : W, :]
        ipad = tf.concat([lpad, ipad, rpad], axis=2)
        # Convolve with kernels
        planes = tf.nn.conv2d(ipad, self.kernels, strides=[1, 1, 1, 1], padding="VALID")
        # Concatenate identity kernel
        planes = tf.concat([planes, img], axis=-1)
        # Gather values: pick, per pixel and per RGB channel, the correct
        # interpolation plane according to the 2x2 mosaic position
        index_repeated = tf.tile(self.index, multiples=[1, 1, H // 2, W // 2])
        index_repeated = tf.transpose(index_repeated, perm=[0, 2, 3, 1])
        row_indices, col_indices = tf.meshgrid(tf.range(H), tf.range(W), indexing="ij")
        index_tensor = tf.stack([row_indices, col_indices], axis=-1)
        index_tensor = tf.expand_dims(index_tensor, axis=0)
        index_tensor = tf.expand_dims(index_tensor, axis=-2)
        index_tensor = tf.repeat(index_tensor, repeats=3, axis=-2)
        index_repeated = tf.expand_dims(index_repeated, axis=-1)
        indices = tf.concat([tf.cast(index_tensor, tf.int64), index_repeated], axis=-1)
        rgb = tf.gather_nd(planes, indices, batch_dims=1)
        if self.max_val == 255.0:
            # Make value range valid again
            rgb = tf.round(rgb)
        return rgb
# ==================================================================================================
def bayer_resize(img, size):
    """Resize a Bayer (RGGB) image by resizing its four color planes separately."""
    # Pull out the four mosaic planes of the 2x2 RGGB cell.
    plane_r = img[:, 0::2, 0::2, 0]
    plane_g1 = img[:, 0::2, 1::2, 0]
    plane_g2 = img[:, 1::2, 0::2, 0]
    plane_b = img[:, 1::2, 1::2, 0]
    stacked = tf.stack([plane_r, plane_g1, plane_g2, plane_b], axis=-1)
    # Each plane is half resolution, so resize to half the target size
    # (this also guarantees the final size is divisible by 2).
    half_size = (size[0] // 2, size[1] // 2)
    resized = tf.image.resize(stacked, size=half_size, method="bilinear")
    # Re-interleave the planes into a full-resolution Bayer mosaic.
    return tf.nn.depth_to_space(resized, block_size=2)
# ==================================================================================================
class Letterbox(tf.keras.layers.Layer):
    def __init__(self, target_size, fill_value=128):
        """Resize and pad image while keeping aspect ratio.

        Works on Bayer input: the image is resized in Bayer space and
        converted to RGB before padding.
        """
        super(Letterbox, self).__init__()
        self.b2rgb = BayerToRGB()
        self.target_size = target_size
        self.fill_value = fill_value

    def calc_params(self, ishape):
        # ishape: NHWC shape tensor; returns paddings, scale and new size.
        img_h, img_w = ishape[1], ishape[2]
        target_h, target_w = self.target_size
        scale = tf.minimum(target_w / img_w, target_h / img_h)
        new_w = tf.round(tf.cast(img_w, scale.dtype) * scale)
        new_h = tf.round(tf.cast(img_h, scale.dtype) * scale)
        new_w = tf.cast(new_w, tf.int32)
        new_h = tf.cast(new_h, tf.int32)
        # Force even sizes so the RGGB 2x2 mosaic stays aligned
        new_w = new_w - (new_w % 2)
        new_h = new_h - (new_h % 2)
        pad_w = target_w - new_w
        pad_h = target_h - new_h
        pad_left = tf.cast(tf.floor(tf.cast(pad_w, tf.float32) / 2.0), tf.int32)
        pad_top = tf.cast(tf.floor(tf.cast(pad_h, tf.float32) / 2.0), tf.int32)
        pad_right = pad_w - pad_left
        pad_bottom = pad_h - pad_top
        paddings = [pad_top, pad_bottom, pad_left, pad_right]
        return paddings, scale, (new_w, new_h)

    def call(self, img):
        # img: [batch, H, W, 1] Bayer image; returns a padded RGB image.
        paddings, _, (nw, nh) = self.calc_params(tf.shape(img))
        # Resize the image and convert to RGB
        img = bayer_resize(img, (nh, nw))
        img = self.b2rgb(img)
        # Pad the image
        pad_top, pad_bottom, pad_left, pad_right = paddings
        img = tf.pad(
            img,
            paddings=[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
            mode="CONSTANT",
            constant_values=self.fill_value,
        )
        return img
# ==================================================================================================
class DetPreprocess(tf.keras.layers.Layer):
    """Detector preprocessing for Bayer input: cast, resize/debayer, letterbox."""

    def __init__(self, target_size, fill_value=114):
        super().__init__()
        self.letterbox = Letterbox(target_size, fill_value)

    def call(self, img):
        """img: tf.Tensor of shape [batch, H, W, C], dtype=tf.uint8"""
        # Cast to float32 since TensorRT does not support uint8 layers
        return self.letterbox(tf.cast(img, tf.float32))
# ==================================================================================================
def rgb2bayer(img):
    """Mosaic an RGB image (H, W, 3) into a single-channel RGGB Bayer image (H, W)."""
    height, width = img.shape[0], img.shape[1]
    out = np.zeros((height, width), dtype=img.dtype)
    # RGGB cell: R at even/even, G at even/odd and odd/even, B at odd/odd.
    out[::2, ::2] = img[::2, ::2, 0]
    out[::2, 1::2] = img[::2, 1::2, 1]
    out[1::2, ::2] = img[1::2, ::2, 1]
    out[1::2, 1::2] = img[1::2, 1::2, 2]
    return out
# ==================================================================================================
def main():
    """Build the detector preprocessing model, run it on a sample Bayer image,
    save the result for inspection, and export the graph to ONNX for TensorRT."""
    img_path = "/RapidPoseTriangulation/scripts/../data/h1/54138969-img_003201.jpg"
    # Load the sample image and simulate a raw Bayer sensor frame from it.
    bgr = cv2.imread(img_path, 3)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    bayer = np.asarray(np.expand_dims(rgb2bayer(rgb), axis=-1), dtype=np.uint8)

    # Wrap the preprocessing layer in a Sequential model for export.
    preprocess_model = tf.keras.Sequential()
    preprocess_model.add(DetPreprocess(target_size=det_target_size))

    # Run one forward pass to build the model and sanity-check the output shape.
    batch = tf.convert_to_tensor(np.expand_dims(bayer, axis=0), dtype=tf.uint8)
    preprocessed = preprocess_model(batch)
    print("\n", preprocessed.shape, "\n")

    # Save the preprocessed sample image for visual inspection.
    sample = np.squeeze(preprocessed.numpy(), axis=0)
    sample = cv2.cvtColor(np.asarray(sample, dtype=np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imwrite(base_path + "det_preprocess.jpg", sample)

    # Export to ONNX with a dynamic batch/height/width, single-channel input.
    input_signature = [tf.TensorSpec([None, None, None, 1], tf.uint8, name="x")]
    _, _ = tf2onnx.convert.from_keras(
        preprocess_model,
        input_signature,
        opset=11,
        output_path=base_path + "det_preprocess.onnx",
        target=["tensorrt"],
    )
# ==================================================================================================
# Run the export when executed as a script.
if __name__ == "__main__":
    main()
-9
View File
@@ -1,9 +0,0 @@
#! /bin/bash
# Start an interactive rpt_mmdeploy container with GPU access, host networking,
# X11 forwarding, and the repository mounted at /RapidPoseTriangulation/.

# Allow container GUIs to reach the host X server.
# NOTE(review): `xhost +` disables X access control for every host — consider
# the narrower `xhost +local:docker`.
xhost +
docker run --privileged --rm --network host -it \
    --gpus all --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 \
    --volume "$(pwd)"/:/RapidPoseTriangulation/ \
    --volume /tmp/.X11-unix:/tmp/.X11-unix \
    --env DISPLAY --env QT_X11_NO_MITSHM=1 \
    rpt_mmdeploy
Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

-23
View File
@@ -1,23 +0,0 @@
# Fine-tuning MMPose models
See: <https://mmpose.readthedocs.io/en/latest/user_guides/train_and_test.html>
<br>
```bash
docker build --progress=plain -f extras/mmpose/dockerfile -t rpt_mmpose .
./extras/mmpose/run_container.sh
```
```bash
cd /mmpose/
export CUDA_VISIBLE_DEVICES=0
python3 ./tools/train.py \
/RapidPoseTriangulation/extras/mmpose/configs/rtmpose-l_8xb32-270e_coco-wholebody-384x288.py \
--amp \
--cfg-options \
load_from=https://download.openmmlab.com/mmpose/v1/projects/rtmposev1/rtmpose-l_simcc-coco-wholebody_pt-aic-coco_270e-384x288-eaeb96c8_20230125.pth \
base_lr=0.00004
```
@@ -1,235 +0,0 @@
# RTMPose fine-tuning config for COCO-WholeBody (133 keypoints).
_base_ = ['mmpose::_base_/default_runtime.py']

# Run validation after every epoch.
val_interval = 1
max_epochs = 3

# common setting
num_keypoints = 133  # COCO-WholeBody joint count
input_size = (288, 384)  # model input size — presumably (width, height); verify

# runtime
stage2_num_epochs = 30  # NOTE(review): larger than max_epochs — see PipelineSwitchHook
base_lr = 4e-3
train_batch_size = 32
val_batch_size = 32

train_cfg = dict(max_epochs=max_epochs, val_interval=val_interval)
randomness = dict(seed=21)  # fixed seed for reproducibility
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    clip_grad=dict(max_norm=35, norm_type=2),
    # No weight decay on norm layers and biases.
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

# learning rate
param_scheduler = [
    # Linear warm-up over the first 1000 iterations.
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    # Cosine decay over the second half of training.
    dict(
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)
# codec settings — SimCC coordinate-classification label encoding
codec = dict(
    type='SimCCLabel',
    input_size=input_size,
    sigma=(6., 6.93),
    simcc_split_ratio=2.0,
    normalize=False,
    use_dark=False)

# model settings
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    # CSPNeXt-L backbone initialized from the UDP-pretrained checkpoint.
    backbone=dict(
        _scope_='mmdet',
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=1.,
        widen_factor=1.,
        out_indices=(4, ),
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU'),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'rtmposev1/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth'  # noqa
        )),
    head=dict(
        type='RTMCCHead',
        in_channels=1024,
        out_channels=num_keypoints,
        input_size=codec['input_size'],
        # Backbone stride is 32, hence the feature-map size derivation.
        in_featuremap_size=tuple([s // 32 for s in codec['input_size']]),
        simcc_split_ratio=codec['simcc_split_ratio'],
        final_layer_kernel_size=7,
        gau_cfg=dict(
            hidden_dims=256,
            s=128,
            expansion_factor=2,
            dropout_rate=0.,
            drop_path=0.,
            act_fn='SiLU',
            use_rel_bias=False,
            pos_enc=False),
        loss=dict(
            type='KLDiscretLoss',
            use_target_weight=True,
            beta=10.,
            label_softmax=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, ))
# base dataset settings
dataset_type = 'CocoWholeBodyDataset'
data_mode = 'topdown'
data_root = 'data/coco/'
backend_args = dict(backend='local')

# pipelines
# Stage-1 training pipeline: strong geometric + photometric augmentation.
train_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            # Always drop one rectangular region (p=1.0) in stage 1.
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=1.0),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
# Validation pipeline: deterministic, no augmentation.
val_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]
# Stage-2 training pipeline: weaker augmentation for the final epochs.
train_pipeline_stage2 = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform',
        shift_factor=0.,
        scale_factor=[0.75, 1.25],
        rotate_factor=60),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=0.5),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
    batch_size=train_batch_size,
    num_workers=10,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/coco_wholebody_train_v1.0.json',
        data_prefix=dict(img='train2017/'),
        pipeline=train_pipeline,
    ))
val_dataloader = dict(
    batch_size=val_batch_size,
    num_workers=10,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/coco_wholebody_val_v1.0.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        # bbox_file='data/coco/person_detection_results/'
        # 'COCO_val2017_detections_AP_H_56_person.json',
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader

# hooks
default_hooks = dict(
    checkpoint=dict(
        save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1))
custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    # NOTE(review): with max_epochs=3 and stage2_num_epochs=30 the switch epoch
    # is negative, so the stage-2 pipeline applies from the start — confirm intended.
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]

# evaluators
val_evaluator = dict(
    type='CocoWholeBodyMetric',
    ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json')
test_evaluator = val_evaluator
-9
View File
@@ -1,9 +0,0 @@
# Image for fine-tuning MMPose models, built on top of the rpt_mmdeploy base image.
FROM rpt_mmdeploy
# Editor for in-container tweaks.
RUN apt-get update && apt-get install -y --no-install-recommends nano
# Pin albumentations below 1.4 — presumably newer releases break the pinned
# mmpose pipeline configs; verify before lifting the pin.
RUN pip3 install --upgrade --no-cache-dir "albumentations<1.4"
# NOTE(review): patches mmengine's training loop to run a validation pass;
# the inserted line number (94) and its indentation are tied to this exact
# mmengine version — verify after any mmengine upgrade.
RUN sed -i '94i\ self.runner.val_loop.run()' /usr/local/lib/python3.8/dist-packages/mmengine/runner/loops.py
WORKDIR /mmpose/
CMD ["/bin/bash"]
-11
View File
@@ -1,11 +0,0 @@
#! /bin/bash
# Start an interactive rpt_mmpose container with GPU access, X11 forwarding,
# the repository mounted at /RapidPoseTriangulation/, and the COCO 2017
# dataset mounted under /mmpose/data/coco/.

# Allow container GUIs to reach the host X server.
# NOTE(review): `xhost +` disables X access control for every host — consider
# the narrower `xhost +local:docker`.
xhost +
docker run --privileged --rm --network host -it \
    --gpus all --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 \
    --volume "$(pwd)"/:/RapidPoseTriangulation/ \
    --volume "$(pwd)"/../datasets/coco2017/annotations/:/mmpose/data/coco/annotations/ \
    --volume "$(pwd)"/../datasets/coco2017/images/:/mmpose/data/coco/ \
    --volume /tmp/.X11-unix:/tmp/.X11-unix \
    --env DISPLAY --env QT_X11_NO_MITSHM=1 \
    rpt_mmpose
-24
View File
@@ -1,24 +0,0 @@
# ROS-Wrapper
Run pose estimator with ros topics as inputs and publish detected poses.
<br>
- Build container:
```bash
docker build --progress=plain -t rapidposetriangulation_ros2d -f extras/ros/dockerfile_2d .
docker build --progress=plain -t rapidposetriangulation_ros3d -f extras/ros/dockerfile_3d .
```
- Update or remove the `ROS_DOMAIN_ID` in the `docker-compose.yml` files to fit your environment
- Run and test:
```bash
xhost + && export CAMID="camera01" && docker compose -f extras/ros/docker-compose-2d.yml up
xhost + && docker compose -f extras/ros/docker-compose-3d.yml up
docker exec -it ros-test_node-1 bash
export ROS_DOMAIN_ID=18
```
-71
View File
@@ -1,71 +0,0 @@
# Compose stack for the 2-D ROS demo. All services share the host network and
# IPC namespace and talk over ROS_DOMAIN_ID=18; $CAMID selects the camera topic.
services:
  # Idle container for manual testing (`docker exec` into it).
  test_node:
    image: rapidposetriangulation_ros2d
    network_mode: "host"
    ipc: "host"
    runtime: nvidia
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'sleep infinity'
  # 2-D pose estimator node.
  estimator:
    image: rapidposetriangulation_ros2d
    network_mode: "host"
    ipc: "host"
    runtime: nvidia
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2d_wrapper_cpp rpt2d_wrapper'
  # Draws detected 2-D poses onto the camera image (starts after a short delay).
  pose_visualizer:
    image: rapidposetriangulation_ros2d
    network_mode: "host"
    ipc: "host"
    runtime: nvidia
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'sleep 2 && export ROS_DOMAIN_ID=18 && ros2 run pose2d_visualizer pose2d_visualizer $CAMID'
  # Displays the annotated image topic in a window.
  pose_viewer:
    image: rapidposetriangulation_ros2d
    network_mode: "host"
    ipc: "host"
    runtime: nvidia
    privileged: true
    volumes:
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - CAMID
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'sleep 2 && export ROS_DOMAIN_ID=18 && ros2 run image_view image_view --ros-args --remap image:=/$CAMID/img_with_pose -p autosize:=True -p window_name:=MyPoseImage'
-72
View File
@@ -1,72 +0,0 @@
# Compose stack for the 3-D ROS demo: the triangulator node plus RViz marker
# publishers. All services share host network/IPC and use ROS_DOMAIN_ID=18.
services:
  # Idle container for manual testing (`docker exec` into it).
  test_node:
    image: rapidposetriangulation_ros3d
    network_mode: "host"
    ipc: "host"
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'sleep infinity'
  # 3-D pose triangulation node.
  triangulator:
    image: rapidposetriangulation_ros3d
    network_mode: "host"
    ipc: "host"
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - ../../skelda/:/skelda/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - DISPLAY
      - QT_X11_NO_MITSHM=1
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt3d_wrapper_cpp rpt3d_wrapper'
  # Publishes skeleton line markers for RViz.
  skeleton_markers:
    image: rapidposetriangulation_ros3d
    network_mode: "host"
    ipc: "host"
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run marker_publishers skeleton_markers'
  # Publishes the room-outline cube marker for RViz.
  cube_markers:
    image: rapidposetriangulation_ros3d
    network_mode: "host"
    ipc: "host"
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run marker_publishers cube_markers'
  # Publishes TF frames for the detected skeleton joints.
  skeleton_tfs:
    image: rapidposetriangulation_ros3d
    network_mode: "host"
    ipc: "host"
    privileged: true
    volumes:
      - ../../:/RapidPoseTriangulation/
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev/shm:/dev/shm
    environment:
      - "PYTHONUNBUFFERED=1"
    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run marker_publishers skeleton_tfs'
-67
View File
@@ -1,67 +0,0 @@
# ROS 2 (Humble) image for the 2-D wrapper, built on the main project image.
FROM rapidposetriangulation
WORKDIR /
# Install ROS2
# https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html
RUN apt-get update && apt-get install -y --no-install-recommends locales
RUN locale-gen en_US en_US.UTF-8 && update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
RUN apt-get update && apt-get install -y --no-install-recommends software-properties-common
RUN add-apt-repository universe
RUN apt-get update && apt-get install -y --no-install-recommends curl
RUN curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg
RUN echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" > /etc/apt/sources.list.d/ros2.list
RUN apt-get update && apt-get install -y --no-install-recommends ros-humble-ros-base python3-argcomplete
RUN apt-get update && apt-get install -y --no-install-recommends ros-dev-tools
RUN echo "source /opt/ros/humble/setup.bash" >> ~/.bashrc
# Fix ros package building error
RUN pip3 install --no-cache-dir "setuptools<=58.2.0"
# Create ROS2 workspace for basic packages
RUN mkdir -p /project/base/src/
RUN cd /project/base/; colcon build
RUN echo "source /project/base/install/setup.bash" >> ~/.bashrc
# Install opencv and cv_bridge
RUN apt-get update && apt-get install -y --no-install-recommends libboost-dev
RUN apt-get update && apt-get install -y --no-install-recommends libboost-python-dev
RUN apt-get update && apt-get install -y --no-install-recommends libopencv-dev
RUN cd /project/base/src/; git clone --branch humble --depth=1 https://github.com/ros-perception/vision_opencv.git
RUN /bin/bash -i -c 'cd /project/base/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release'
# Install ROS2 image viewer
RUN cd /project/base/src/; git clone --branch=humble --depth=1 https://github.com/ros-perception/image_pipeline.git
RUN cd /project/base/src/; git clone --branch=humble --depth=1 https://github.com/ros-perception/image_common.git
RUN /bin/bash -i -c 'cd /project/base/; colcon build --symlink-install --packages-select camera_calibration_parsers image_transport image_view --cmake-args -DCMAKE_BUILD_TYPE=Release'
# Fix module not found error when displaying images
RUN apt-get update && apt-get install -y --no-install-recommends libcanberra-gtk-module libcanberra-gtk3-module
# Create ROS2 workspace for project packages
RUN mkdir -p /project/dev_ws/src/
RUN cd /project/dev_ws/; colcon build
RUN echo "source /project/dev_ws/install/setup.bash" >> ~/.bashrc
# Copy modules
COPY ./extras/include/ /RapidPoseTriangulation/extras/include/
COPY ./scripts/ /RapidPoseTriangulation/scripts/
COPY ./extras/ros/rpt_msgs/ /RapidPoseTriangulation/extras/ros/rpt_msgs/
COPY ./extras/ros/pose2d_visualizer/ /RapidPoseTriangulation/extras/ros/pose2d_visualizer/
COPY ./extras/ros/rpt2d_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt2d_wrapper_cpp/
# Link and build as ros package (symlinks let in-container edits take effect)
RUN ln -s /RapidPoseTriangulation/extras/ros/rpt_msgs/ /project/dev_ws/src/
RUN ln -s /RapidPoseTriangulation/extras/ros/pose2d_visualizer/ /project/dev_ws/src/
RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2d_wrapper_cpp/ /project/dev_ws/src/
RUN /bin/bash -i -c 'cd /project/dev_ws/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release'
# Update ros packages -> autocompletion and check
RUN /bin/bash -i -c 'ros2 pkg list'
# Clear cache to save space, only has an effect if image is squashed
RUN apt-get autoremove -y \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /RapidPoseTriangulation/
CMD ["/bin/bash"]
-46
View File
@@ -1,46 +0,0 @@
# ROS 2 (Humble) image for the 3-D triangulator wrapper and marker publishers.
FROM ros:humble-ros-base-jammy
ARG DEBIAN_FRONTEND=noninteractive
# Set the working directory to /project
WORKDIR /project/
# Update and install basic tools
RUN apt-get update && apt-get upgrade -y
RUN apt-get update && apt-get install -y --no-install-recommends git nano wget
RUN apt-get update && apt-get install -y --no-install-recommends python3-pip
RUN pip3 install --upgrade pip
# Fix ros package building error
RUN pip3 install --no-cache-dir "setuptools<=58.2.0"
# Add ROS2 sourcing by default
RUN echo "source /opt/ros/humble/setup.bash" >> ~/.bashrc
# Create ROS2 workspace for project packages
RUN mkdir -p /project/dev_ws/src/
RUN cd /project/dev_ws/; colcon build
RUN echo "source /project/dev_ws/install/setup.bash" >> ~/.bashrc
# Copy modules
COPY ./extras/include/ /RapidPoseTriangulation/extras/include/
COPY ./rpt/ /RapidPoseTriangulation/rpt/
COPY ./extras/ros/marker_publishers/ /RapidPoseTriangulation/extras/ros/marker_publishers/
COPY ./extras/ros/rpt_msgs/ /RapidPoseTriangulation/extras/ros/rpt_msgs/
COPY ./extras/ros/rpt3d_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt3d_wrapper_cpp/
# Link and build as ros package (symlinks let in-container edits take effect)
RUN ln -s /RapidPoseTriangulation/extras/ros/marker_publishers/ /project/dev_ws/src/
RUN ln -s /RapidPoseTriangulation/extras/ros/rpt_msgs/ /project/dev_ws/src/
RUN ln -s /RapidPoseTriangulation/extras/ros/rpt3d_wrapper_cpp/ /project/dev_ws/src/
RUN /bin/bash -i -c 'cd /project/dev_ws/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release'
# Update ros packages -> autocompletion and check
RUN /bin/bash -i -c 'ros2 pkg list'
# Clear cache to save space, only has an effect if image is squashed
RUN apt-get autoremove -y \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /RapidPoseTriangulation/
CMD ["/bin/bash"]
@@ -1,63 +0,0 @@
cmake_minimum_required(VERSION 3.5)
project(marker_publishers)

# Default to C99
if(NOT CMAKE_C_STANDARD)
  set(CMAKE_C_STANDARD 99)
endif()

# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 14)
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# Find dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(rpt_msgs REQUIRED)
find_package(geometry_msgs REQUIRED)
find_package(tf2_ros REQUIRED)
find_package(visualization_msgs REQUIRED)

# Declare one publisher node executable from src/<name>.cpp with the shared
# include directories and install rule. Extra arguments are the ament
# dependencies of that node.
function(marker_publishers_add_node name)
  add_executable(${name} src/${name}.cpp)
  ament_target_dependencies(${name} ${ARGN})
  target_include_directories(${name} PUBLIC
    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
    $<INSTALL_INTERFACE:include>)
  install(TARGETS ${name}
    DESTINATION lib/${PROJECT_NAME})
endfunction()

marker_publishers_add_node(cube_markers rclcpp geometry_msgs visualization_msgs)
marker_publishers_add_node(skeleton_markers rclcpp rpt_msgs geometry_msgs visualization_msgs)
marker_publishers_add_node(skeleton_tfs rclcpp rpt_msgs geometry_msgs tf2_ros)

if(BUILD_TESTING)
  find_package(ament_lint_auto REQUIRED)
  # Copyright/cpplint checks can be skipped via ament_cmake_copyright_FOUND /
  # ament_cmake_cpplint_FOUND if needed (no license headers, not a git repo).
  ament_lint_auto_find_test_dependencies()
endif()

ament_package()
-24
View File
@@ -1,24 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<!-- ROS 2 package manifest for the marker_publishers RViz visualization nodes. -->
<package format="3">
  <name>marker_publishers</name>
  <version>0.0.0</version>
  <!-- NOTE(review): description/maintainer/license are template placeholders;
       fill them in before any release. -->
  <description>TODO: Package description</description>
  <maintainer email="root@todo.todo">root</maintainer>
  <license>TODO: License declaration</license>

  <buildtool_depend>ament_cmake</buildtool_depend>

  <depend>rclcpp</depend>
  <depend>rpt_msgs</depend>
  <depend>geometry_msgs</depend>
  <depend>tf2_ros</depend>
  <depend>visualization_msgs</depend>

  <test_depend>ament_lint_auto</test_depend>
  <test_depend>ament_lint_common</test_depend>

  <export>
    <build_type>ament_cmake</build_type>
  </export>
</package>
@@ -1,122 +0,0 @@
#include <chrono>
#include <vector>
#include <rclcpp/rclcpp.hpp>
#include <geometry_msgs/msg/point.hpp>
#include <visualization_msgs/msg/marker.hpp>
#include <visualization_msgs/msg/marker_array.hpp>
// =================================================================================================
// Topic the room wireframe is published on.
const std::string output_topic = "/markers_cube";

// Room box: roomparams[0] = full extent per axis, roomparams[1] = centre
// offset; corners are placed at 0.5 * extent * (+/-1) + offset.
// NOTE(review): units look like metres — confirm against the camera calibration.
static const std::array<std::array<float, 3>, 2> roomparams = {{
    {4.0, 5.0, 2.2},
    {1.0, 0.0, 1.1},
}};
// =================================================================================================
// =================================================================================================
// Publishes a fixed wireframe cube (the room outline) once per second.
class CubePublisher : public rclcpp::Node
{
public:
    CubePublisher() : Node("cube_publisher")
    {
        publisher_ = this->create_publisher<visualization_msgs::msg::MarkerArray>(output_topic, 1);
        timer_ = this->create_wall_timer(
            std::chrono::seconds(1), std::bind(&CubePublisher::timer_callback, this));
        // The cube never changes, so build the marker array once up front.
        cube_edges = generate_cube_edges();
    }

private:
    visualization_msgs::msg::MarkerArray cube_edges;  // cached wireframe markers
    rclcpp::Publisher<visualization_msgs::msg::MarkerArray>::SharedPtr publisher_;
    rclcpp::TimerBase::SharedPtr timer_;

    // Re-publish the cached marker array on every timer tick.
    void timer_callback()
    {
        publisher_->publish(cube_edges);
    }

    // Build the 12-edge wireframe from roomparams (defined below the class).
    visualization_msgs::msg::MarkerArray generate_cube_edges();
};
// =================================================================================================
visualization_msgs::msg::MarkerArray CubePublisher::generate_cube_edges()
{
    visualization_msgs::msg::MarkerArray result;

    // One LINE_LIST marker holds all twelve edges of the room box.
    visualization_msgs::msg::Marker lines;
    lines.header.frame_id = "world";
    lines.type = visualization_msgs::msg::Marker::LINE_LIST;
    lines.action = visualization_msgs::msg::Marker::ADD;
    lines.scale.x = 0.05;
    lines.id = 0;
    lines.color.a = 1.0;
    lines.color.r = 1.0;

    // Unit-cube corners (+/-1 per axis), mapped into world coordinates via
    // half-extent scaling plus the centre offset from roomparams.
    std::array<std::array<double, 3>, 8> corners = {{
        {-1, -1, -1},
        {1, -1, -1},
        {1, 1, -1},
        {-1, 1, -1},
        {-1, -1, 1},
        {1, -1, 1},
        {1, 1, 1},
        {-1, 1, 1},
    }};
    for (auto &c : corners)
    {
        for (size_t axis = 0; axis < c.size(); ++axis)
        {
            c[axis] = 0.5 * roomparams[0][axis] * c[axis] + roomparams[1][axis];
        }
    }

    // Cube edges as pairs of corner indices: bottom face, top face, verticals.
    static const std::array<std::pair<int, int>, 12> edges = {{
        {0, 1}, {1, 2}, {2, 3}, {3, 0},
        {4, 5}, {5, 6}, {6, 7}, {7, 4},
        {0, 4}, {1, 5}, {2, 6}, {3, 7},
    }};
    for (const auto &edge : edges)
    {
        geometry_msgs::msg::Point start, stop;
        start.x = corners[edge.first][0];
        start.y = corners[edge.first][1];
        start.z = corners[edge.first][2];
        stop.x = corners[edge.second][0];
        stop.y = corners[edge.second][1];
        stop.z = corners[edge.second][2];
        lines.points.push_back(start);
        lines.points.push_back(stop);
    }

    result.markers.push_back(lines);
    return result;
}
// =================================================================================================
// =================================================================================================
int main(int argc, char *argv[])
{
    // Spin the cube publisher node until shutdown (e.g. Ctrl-C).
    rclcpp::init(argc, argv);
    auto node = std::make_shared<CubePublisher>();
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}
@@ -1,255 +0,0 @@
#include <cstdint>
#include <string>
#include <vector>
#include <rclcpp/rclcpp.hpp>
#include <geometry_msgs/msg/point.hpp>
#include <visualization_msgs/msg/marker.hpp>
#include <visualization_msgs/msg/marker_array.hpp>
#include <std_msgs/msg/color_rgba.hpp>
#include "rpt_msgs/msg/poses.hpp"
// =================================================================================================
// Subscribe to triangulated 3-D human poses; publish skeleton line markers.
const std::string input_topic = "/poses/humans3d";
const std::string output_topic = "/markers_skeleton";
// =================================================================================================
// =================================================================================================
// Convert a hue in [0, 1) at full saturation/value into an 8-bit RGB triple.
// Out-of-range sector values (e.g. from a negative hue) yield black.
std::array<uint8_t, 3> hue_to_rgb(double hue)
{
    // Split the hue circle into six sectors; inside a sector one channel
    // rises with `frac` while another falls with `inv`.
    const int sector = static_cast<int>(hue * 6);
    const double frac = hue * 6 - sector;
    const double inv = 1 - frac;

    double rgb[3] = {0.0, 0.0, 0.0};
    switch (sector % 6)
    {
    case 0:
        rgb[0] = 1;
        rgb[1] = frac;
        break;
    case 1:
        rgb[0] = inv;
        rgb[1] = 1;
        break;
    case 2:
        rgb[1] = 1;
        rgb[2] = frac;
        break;
    case 3:
        rgb[1] = inv;
        rgb[2] = 1;
        break;
    case 4:
        rgb[0] = frac;
        rgb[2] = 1;
        break;
    case 5:
        rgb[0] = 1;
        rgb[2] = inv;
        break;
    default:
        // Negative sector % 6: keep black, matching the original fallback.
        break;
    }

    // Scale to 8-bit channels (truncating cast, as before).
    return {static_cast<uint8_t>(rgb[0] * 255),
            static_cast<uint8_t>(rgb[1] * 255),
            static_cast<uint8_t>(rgb[2] * 255)};
}
// =================================================================================================
// =================================================================================================
// Subscribes to 3-D poses and republishes them as RViz line-list markers.
class SkeletonPublisher : public rclcpp::Node
{
public:
    SkeletonPublisher() : Node("skeleton_publisher")
    {
        publisher_ = this->create_publisher<visualization_msgs::msg::MarkerArray>(output_topic, 1);
        subscriber_ = this->create_subscription<rpt_msgs::msg::Poses>(
            input_topic, 1,
            std::bind(&SkeletonPublisher::listener_callback, this, std::placeholders::_1));
        // Bone list: each pair of joint names becomes one drawn line segment.
        connections = {
            {"shoulder_middle", "head"},
            {"head", "nose"},
            {"nose", "eye_left"},
            {"nose", "eye_right"},
            {"eye_left", "ear_left"},
            {"eye_right", "ear_right"},
            {"shoulder_left", "shoulder_right"},
            {"hip_middle", "shoulder_left"},
            {"hip_middle", "shoulder_right"},
            {"shoulder_left", "elbow_left"},
            {"elbow_left", "wrist_left"},
            {"hip_middle", "hip_left"},
            {"hip_left", "knee_left"},
            {"knee_left", "ankle_left"},
            {"shoulder_right", "elbow_right"},
            {"elbow_right", "wrist_right"},
            {"hip_middle", "hip_right"},
            {"hip_right", "knee_right"},
            {"knee_right", "ankle_right"},
        };
    }

private:
    std::vector<std::array<std::string, 2>> connections;  // bone endpoints by joint name
    rclcpp::Publisher<visualization_msgs::msg::MarkerArray>::SharedPtr publisher_;
    rclcpp::Subscription<rpt_msgs::msg::Poses>::SharedPtr subscriber_;

    // Decode the incoming message and publish the generated marker array.
    void listener_callback(const rpt_msgs::msg::Poses::SharedPtr msg);
    // Build line markers; poses are [body][joint][x, y, z, score], where a
    // non-positive score at index 3 marks the joint as invalid.
    visualization_msgs::msg::MarkerArray generate_msg(
        const std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::vector<std::string> &joint_names);
};
// =================================================================================================
void SkeletonPublisher::listener_callback(const rpt_msgs::msg::Poses::SharedPtr msg)
{
    // Rebuild the [bodies][joints][components] structure from the flat buffer,
    // using the shape triple carried alongside it.
    const std::vector<int32_t> &shape = msg->bodies_shape;
    std::vector<std::vector<std::array<float, 4>>> bodies;
    bodies.reserve(shape[0]);
    size_t flat_pos = 0;
    for (int32_t b = 0; b < shape[0]; ++b)
    {
        std::vector<std::array<float, 4>> joints;
        joints.reserve(shape[1]);
        for (int32_t j = 0; j < shape[1]; ++j)
        {
            std::array<float, 4> joint;
            for (int32_t c = 0; c < shape[2]; ++c)
            {
                joint[c] = msg->bodies_flat[flat_pos++];
            }
            joints.push_back(std::move(joint));
        }
        bodies.push_back(std::move(joints));
    }
    // Convert to markers and publish immediately.
    publisher_->publish(generate_msg(bodies, msg->joint_names));
}
// =================================================================================================
// Build one LINE_LIST marker containing every bone of every body; per-vertex
// colors give each body its own hue.
visualization_msgs::msg::MarkerArray SkeletonPublisher::generate_msg(
    const std::vector<std::vector<std::array<float, 4>>> &poses,
    const std::vector<std::string> &joint_names)
{
    visualization_msgs::msg::MarkerArray marker_array;
    visualization_msgs::msg::Marker marker;
    marker.header.frame_id = "world";
    marker.type = visualization_msgs::msg::Marker::LINE_LIST;
    marker.action = visualization_msgs::msg::Marker::ADD;
    marker.scale.x = 0.02;
    marker.id = 0;
    size_t num_bodies = poses.size();
    for (size_t i = 0; i < num_bodies; ++i)
    {
        // Spread body hues evenly over the color circle.
        std_msgs::msg::ColorRGBA color;
        double hue = static_cast<double>(i) / num_bodies;
        auto rgb = hue_to_rgb(hue);
        color.r = rgb[0] / 255.0;
        color.g = rgb[1] / 255.0;
        color.b = rgb[2] / 255.0;
        color.a = 1.0;
        for (size_t j = 0; j < connections.size(); ++j)
        {
            std::string joint1 = connections[j][0];
            std::string joint2 = connections[j][1];
            // Resolve the bone's joint names to indices in this message.
            int sidx = -1;
            int eidx = -1;
            for (size_t k = 0; k < joint_names.size(); ++k)
            {
                if (joint_names[k] == joint1)
                {
                    sidx = k;
                }
                if (joint_names[k] == joint2)
                {
                    eidx = k;
                }
            }
            // Skip bones whose joints are absent from the message ...
            if (sidx == -1 || eidx == -1)
            {
                continue;
            }
            // ... or whose endpoints carry a non-positive score (index 3).
            if (poses[i][sidx][3] <= 0 || poses[i][eidx][3] <= 0)
            {
                continue;
            }
            geometry_msgs::msg::Point p1, p2;
            p1.x = poses[i][sidx][0];
            p1.y = poses[i][sidx][1];
            p1.z = poses[i][sidx][2];
            p2.x = poses[i][eidx][0];
            p2.y = poses[i][eidx][1];
            p2.z = poses[i][eidx][2];
            marker.points.push_back(p1);
            marker.points.push_back(p2);
            // LINE_LIST needs one color per vertex.
            marker.colors.push_back(color);
            marker.colors.push_back(color);
        }
    }
    if (num_bodies == 0)
    {
        // Create an invisible line to remove any old skeletons
        std_msgs::msg::ColorRGBA color;
        color.r = 0.0;
        color.g = 0.0;
        color.b = 0.0;
        color.a = 0.0;
        geometry_msgs::msg::Point p1, p2;
        p1.x = 0.0;
        p1.y = 0.0;
        p1.z = 0.0;
        p2.x = 0.0;
        p2.y = 0.0;
        p2.z = 0.001;
        marker.points.push_back(p1);
        marker.points.push_back(p2);
        marker.colors.push_back(color);
        marker.colors.push_back(color);
    }
    marker_array.markers.push_back(marker);
    return marker_array;
}
// =================================================================================================
// =================================================================================================
int main(int argc, char *argv[])
{
    // Spin the skeleton marker publisher until shutdown (e.g. Ctrl-C).
    rclcpp::init(argc, argv);
    auto node = std::make_shared<SkeletonPublisher>();
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}
@@ -1,335 +0,0 @@
#include <array>
#include <cstdint>
#include <map>
#include <string>
#include <vector>
#include <rclcpp/rclcpp.hpp>
#include <tf2_ros/transform_broadcaster.h>
#include <geometry_msgs/msg/transform_stamped.hpp>
#include "rpt_msgs/msg/poses.hpp"
// =================================================================================================
const std::string input_topic = "/poses/humans3d";
// =================================================================================================
// =================================================================================================
// Subscribes to 3D human poses and re-publishes each skeleton as a TF tree:
// "hip_middle" is the root (child of "world"), every other joint is a child
// of its anatomical parent as defined in pc_connections below.
class SkeletonTFPublisher : public rclcpp::Node
{
public:
    SkeletonTFPublisher() : Node("skeleton_tf_publisher")
    {
        broadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(this);
        subscriber_ = this->create_subscription<rpt_msgs::msg::Poses>(
            input_topic, 1,
            std::bind(&SkeletonTFPublisher::listener_callback, this, std::placeholders::_1));
        // {parent, child} joint-name pairs describing the skeleton hierarchy.
        pc_connections = {
            // main joints
            {"hip_middle", "shoulder_middle"},
            {"shoulder_middle", "head"},
            {"head", "nose"},
            {"head", "eye_left"},
            {"head", "eye_right"},
            {"head", "ear_left"},
            {"head", "ear_right"},
            {"shoulder_middle", "shoulder_left"},
            {"shoulder_middle", "shoulder_right"},
            {"shoulder_left", "elbow_left"},
            {"elbow_left", "wrist_left"},
            {"hip_middle", "hip_left"},
            {"hip_left", "knee_left"},
            {"knee_left", "ankle_left"},
            {"shoulder_right", "elbow_right"},
            {"elbow_right", "wrist_right"},
            {"hip_middle", "hip_right"},
            {"hip_right", "knee_right"},
            {"knee_right", "ankle_right"},
            // whole-body joints
            {"ankle_left", "foot_toe_big_left"},
            {"ankle_left", "foot_toe_small_left"},
            {"ankle_left", "foot_heel_left"},
            {"ankle_right", "foot_toe_big_right"},
            {"ankle_right", "foot_toe_small_right"},
            {"ankle_right", "foot_heel_right"},
            {"ear_right", "face_jaw_right_1"},
            {"ear_right", "face_jaw_right_2"},
            {"ear_right", "face_jaw_right_3"},
            {"ear_right", "face_jaw_right_4"},
            {"ear_right", "face_jaw_right_5"},
            {"ear_right", "face_jaw_right_6"},
            {"ear_right", "face_jaw_right_7"},
            {"ear_right", "face_jaw_right_8"},
            {"head", "face_jaw_middle"},
            {"ear_left", "face_jaw_left_1"},
            {"ear_left", "face_jaw_left_2"},
            {"ear_left", "face_jaw_left_3"},
            {"ear_left", "face_jaw_left_4"},
            {"ear_left", "face_jaw_left_5"},
            {"ear_left", "face_jaw_left_6"},
            {"ear_left", "face_jaw_left_7"},
            {"ear_left", "face_jaw_left_8"},
            {"eye_right", "face_eyebrow_right_1"},
            {"eye_right", "face_eyebrow_right_2"},
            {"eye_right", "face_eyebrow_right_3"},
            {"eye_right", "face_eyebrow_right_4"},
            {"eye_right", "face_eyebrow_right_5"},
            {"eye_left", "face_eyebrow_left_1"},
            {"eye_left", "face_eyebrow_left_2"},
            {"eye_left", "face_eyebrow_left_3"},
            {"eye_left", "face_eyebrow_left_4"},
            {"eye_left", "face_eyebrow_left_5"},
            {"nose", "face_nose_1"},
            {"nose", "face_nose_2"},
            {"nose", "face_nose_3"},
            {"nose", "face_nose_4"},
            {"nose", "face_nose_5"},
            {"nose", "face_nose_6"},
            {"nose", "face_nose_7"},
            {"nose", "face_nose_8"},
            {"nose", "face_nose_9"},
            {"eye_right", "face_eye_right_1"},
            {"eye_right", "face_eye_right_2"},
            {"eye_right", "face_eye_right_3"},
            {"eye_right", "face_eye_right_4"},
            {"eye_right", "face_eye_right_5"},
            {"eye_right", "face_eye_right_6"},
            {"eye_left", "face_eye_left_1"},
            {"eye_left", "face_eye_left_2"},
            {"eye_left", "face_eye_left_3"},
            {"eye_left", "face_eye_left_4"},
            {"eye_left", "face_eye_left_5"},
            {"eye_left", "face_eye_left_6"},
            {"head", "face_mouth_1"},
            {"head", "face_mouth_2"},
            {"head", "face_mouth_3"},
            {"head", "face_mouth_4"},
            {"head", "face_mouth_5"},
            {"head", "face_mouth_6"},
            {"head", "face_mouth_7"},
            {"head", "face_mouth_8"},
            {"head", "face_mouth_9"},
            {"head", "face_mouth_10"},
            {"head", "face_mouth_11"},
            {"head", "face_mouth_12"},
            {"head", "face_mouth_13"},
            {"head", "face_mouth_14"},
            {"head", "face_mouth_15"},
            {"head", "face_mouth_16"},
            {"head", "face_mouth_17"},
            {"head", "face_mouth_18"},
            {"head", "face_mouth_19"},
            {"head", "face_mouth_20"},
            {"wrist_left", "hand_wrist_left"},
            {"wrist_left", "hand_finger_thumb_left_1"},
            {"wrist_left", "hand_finger_thumb_left_2"},
            {"wrist_left", "hand_finger_thumb_left_3"},
            {"wrist_left", "hand_finger_thumb_left_4"},
            {"wrist_left", "hand_finger_index_left_1"},
            {"wrist_left", "hand_finger_index_left_2"},
            {"wrist_left", "hand_finger_index_left_3"},
            {"wrist_left", "hand_finger_index_left_4"},
            {"wrist_left", "hand_finger_middle_left_1"},
            {"wrist_left", "hand_finger_middle_left_2"},
            {"wrist_left", "hand_finger_middle_left_3"},
            {"wrist_left", "hand_finger_middle_left_4"},
            {"wrist_left", "hand_finger_ring_left_1"},
            {"wrist_left", "hand_finger_ring_left_2"},
            {"wrist_left", "hand_finger_ring_left_3"},
            {"wrist_left", "hand_finger_ring_left_4"},
            {"wrist_left", "hand_finger_pinky_left_1"},
            {"wrist_left", "hand_finger_pinky_left_2"},
            {"wrist_left", "hand_finger_pinky_left_3"},
            {"wrist_left", "hand_finger_pinky_left_4"},
            {"wrist_right", "hand_wrist_right"},
            {"wrist_right", "hand_finger_thumb_right_1"},
            {"wrist_right", "hand_finger_thumb_right_2"},
            {"wrist_right", "hand_finger_thumb_right_3"},
            {"wrist_right", "hand_finger_thumb_right_4"},
            {"wrist_right", "hand_finger_index_right_1"},
            {"wrist_right", "hand_finger_index_right_2"},
            {"wrist_right", "hand_finger_index_right_3"},
            {"wrist_right", "hand_finger_index_right_4"},
            {"wrist_right", "hand_finger_middle_right_1"},
            {"wrist_right", "hand_finger_middle_right_2"},
            {"wrist_right", "hand_finger_middle_right_3"},
            {"wrist_right", "hand_finger_middle_right_4"},
            {"wrist_right", "hand_finger_ring_right_1"},
            {"wrist_right", "hand_finger_ring_right_2"},
            {"wrist_right", "hand_finger_ring_right_3"},
            {"wrist_right", "hand_finger_ring_right_4"},
            {"wrist_right", "hand_finger_pinky_right_1"},
            {"wrist_right", "hand_finger_pinky_right_2"},
            {"wrist_right", "hand_finger_pinky_right_3"},
            {"wrist_right", "hand_finger_pinky_right_4"},
        };
        // Invert the parent->child pairs into a child->parent lookup map.
        for (auto &pair : pc_connections)
        {
            cp_map[pair[1]] = pair[0];
        }
    }
private:
    // {parent, child} pairs; defines both the TF tree and the reserve() size.
    std::vector<std::array<std::string, 2>> pc_connections;
    // child joint name -> parent joint name (derived from pc_connections).
    std::map<std::string, std::string> cp_map;
    std::shared_ptr<tf2_ros::TransformBroadcaster> broadcaster_;
    rclcpp::Subscription<rpt_msgs::msg::Poses>::SharedPtr subscriber_;
    void listener_callback(const rpt_msgs::msg::Poses::SharedPtr msg);
    // Builds TF messages for all bodies; each joint is [x, y, z, confidence].
    std::vector<geometry_msgs::msg::TransformStamped> generate_msg(
        const std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::vector<std::string> &joint_names);
};
// =================================================================================================
// Unflattens the incoming pose message into [body][joint][x,y,z,conf] and
// broadcasts the resulting TF frames (no-op when no bodies are present).
void SkeletonTFPublisher::listener_callback(const rpt_msgs::msg::Poses::SharedPtr msg)
{
    const std::vector<std::string> &joint_names = msg->joint_names;
    std::vector<int32_t> &bshape = msg->bodies_shape;
    // Rebuild the nested structure from the flattened float array.
    std::vector<std::vector<std::array<float, 4>>> poses;
    size_t flat_idx = 0;
    for (int32_t b = 0; b < bshape[0]; ++b)
    {
        std::vector<std::array<float, 4>> body;
        body.reserve(bshape[1]);
        for (int32_t j = 0; j < bshape[1]; ++j)
        {
            std::array<float, 4> joint;
            for (int32_t c = 0; c < bshape[2]; ++c)
            {
                joint[c] = msg->bodies_flat[flat_idx];
                ++flat_idx;
            }
            body.push_back(std::move(joint));
        }
        poses.push_back(std::move(body));
    }
    if (poses.empty())
    {
        return;
    }
    auto tf_msgs = generate_msg(poses, joint_names);
    if (!tf_msgs.empty())
    {
        broadcaster_->sendTransform(tf_msgs);
    }
}
// =================================================================================================
// Builds one TF subtree per body: "world" -> "s<i>-hip_middle" -> children,
// following cp_map. Joints with confidence <= 0, unknown parents, or invisible
// parents are skipped. Child translations are relative to their parent joint.
std::vector<geometry_msgs::msg::TransformStamped> SkeletonTFPublisher::generate_msg(
    const std::vector<std::vector<std::array<float, 4>>> &poses,
    const std::vector<std::string> &joint_names)
{
    std::vector<geometry_msgs::msg::TransformStamped> tf_msgs;
    tf_msgs.reserve(poses.size() * pc_connections.size());
    rclcpp::Time now = this->get_clock()->now();
    for (size_t i = 0; i < poses.size(); ++i)
    {
        const auto &body = poses[i];
        // Find index of "hip_middle" in joint_names
        int hip_mid_idx = -1;
        for (size_t k = 0; k < joint_names.size(); ++k)
        {
            if (joint_names[k] == "hip_middle")
            {
                hip_mid_idx = static_cast<int>(k);
                break;
            }
        }
        // BUG FIX: previously body[hip_mid_idx] was indexed unconditionally,
        // which is undefined behavior (index -1) when "hip_middle" is absent.
        if (hip_mid_idx < 0)
        {
            continue;
        }
        // Set "hip_middle" as child of "world"
        const auto &joint = body[hip_mid_idx];
        geometry_msgs::msg::TransformStamped tf_stamped;
        tf_stamped.header.stamp = now;
        tf_stamped.header.frame_id = "world";
        tf_stamped.child_frame_id = "s" + std::to_string(i) + "-hip_middle";
        tf_stamped.transform.translation.x = joint[0];
        tf_stamped.transform.translation.y = joint[1];
        tf_stamped.transform.translation.z = joint[2];
        // Identity rotation: only joint positions are known.
        tf_stamped.transform.rotation.x = 0.0;
        tf_stamped.transform.rotation.y = 0.0;
        tf_stamped.transform.rotation.z = 0.0;
        tf_stamped.transform.rotation.w = 1.0;
        tf_msgs.push_back(std::move(tf_stamped));
        // Add other joints
        for (size_t j = 0; j < body.size(); ++j)
        {
            // Skip "hip_middle"
            if (static_cast<int>(j) == hip_mid_idx)
            {
                continue;
            }
            const auto &joint_child = body[j];
            const std::string &child_name = joint_names[j];
            float conf = joint_child[3];
            if (conf <= 0.0f)
            {
                continue;
            }
            // Look up its parent
            auto it = cp_map.find(child_name);
            if (it == cp_map.end())
            {
                continue;
            }
            const std::string &parent_name = it->second;
            // Ensure the parent frame was actually published
            int parent_idx = -1;
            for (size_t k = 0; k < joint_names.size(); ++k)
            {
                if (joint_names[k] == parent_name)
                {
                    parent_idx = static_cast<int>(k);
                    break;
                }
            }
            // BUG FIX: guard against an unknown parent name before indexing
            // body[parent_idx] (previously UB when parent_idx stayed -1).
            if (parent_idx < 0)
            {
                continue;
            }
            if (parent_name != "hip_middle" && body[parent_idx][3] <= 0.0f)
            {
                // Parent not visible, skip this child
                continue;
            }
            const auto &joint_parent = body[parent_idx];
            // Publish child frame
            geometry_msgs::msg::TransformStamped tf_stamped;
            tf_stamped.header.stamp = now;
            tf_stamped.header.frame_id = "s" + std::to_string(i) + "-" + parent_name;
            tf_stamped.child_frame_id = "s" + std::to_string(i) + "-" + child_name;
            // Translation is expressed in the parent joint's frame.
            tf_stamped.transform.translation.x = joint_child[0] - joint_parent[0];
            tf_stamped.transform.translation.y = joint_child[1] - joint_parent[1];
            tf_stamped.transform.translation.z = joint_child[2] - joint_parent[2];
            tf_stamped.transform.rotation.x = 0.0;
            tf_stamped.transform.rotation.y = 0.0;
            tf_stamped.transform.rotation.z = 0.0;
            tf_stamped.transform.rotation.w = 1.0;
            tf_msgs.push_back(std::move(tf_stamped));
        }
    }
    return tf_msgs;
}
// =================================================================================================
// =================================================================================================
// Entry point: spin the skeleton TF publisher node until shutdown.
int main(int argc, char *argv[])
{
    rclcpp::init(argc, argv);
    auto node = std::make_shared<SkeletonTFPublisher>();
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}
-20
View File
@@ -1,20 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>pose2d_visualizer</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="root@todo.todo">root</maintainer>
<license>TODO: License declaration</license>
<exec_depend>rpt_msgs</exec_depend>
<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>
<export>
<build_type>ament_python</build_type>
</export>
</package>
@@ -1,158 +0,0 @@
import sys
import threading
import time
import cv2
from matplotlib import pyplot as plt
import numpy as np
import rclpy
from cv_bridge import CvBridge
from rclpy.qos import QoSHistoryPolicy, QoSProfile, QoSReliabilityPolicy
from sensor_msgs.msg import Image
from rpt_msgs.msg import Poses
from skelda import utils_view
# ==================================================================================================
# Shared cv_bridge converter for ROS Image <-> OpenCV conversions.
bridge = CvBridge()
# Node handle and output publisher; set by main() before spinning.
node = None
publisher_img = None
# Topic templates; "{}" is replaced with the camera id from argv[1].
img_input_topic = "/{}/pylon_ros2_camera_node/image_raw"
pose_input_topic = "/poses/{}"
img_output_topic = "/{}/img_with_pose"
# Latest (rgb_image, timestamp) tuple, guarded by `lock` (image and pose
# callbacks run concurrently).
last_input_image = None
lock = threading.Lock()
# ==================================================================================================
def bayer2rgb(bayer):
    """Demosaic a BayerBG frame into an RGB image."""
    return cv2.cvtColor(bayer, cv2.COLOR_BayerBG2RGB)
# ==================================================================================================
def callback_images(image_data):
    """Convert an incoming sensor_msgs Image to RGB and cache it with its stamp.

    Supports bayer_rggb8, mono8 and rgb8 encodings; raises ValueError otherwise.
    """
    global last_input_image, lock
    encoding = image_data.encoding
    if encoding == "bayer_rggb8":
        color_image = bayer2rgb(bridge.imgmsg_to_cv2(image_data, "bayer_rggb8"))
    elif encoding == "mono8":
        gray = bridge.imgmsg_to_cv2(image_data, "mono8")
        color_image = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    elif encoding == "rgb8":
        color_image = bridge.imgmsg_to_cv2(image_data, "rgb8")
    else:
        raise ValueError("Unknown image encoding:", image_data.encoding)
    # Header stamp in seconds (float).
    time_stamp = image_data.header.stamp.sec + image_data.header.stamp.nanosec / 1.0e9
    with lock:
        last_input_image = (color_image, time_stamp)
# ==================================================================================================
def callback_poses(pose_data):
    """Draw the received 2D poses onto the latest camera image and publish it.

    pose_data: rpt_msgs.msg.Poses with flattened bodies and joint names.
    Silently returns when no camera image has arrived yet.
    """
    global last_input_image, lock
    ptime = time.time()
    # BUG FIX: the None check and the copy previously happened outside the
    # lock, racing with callback_images() replacing last_input_image.
    with lock:
        if last_input_image is None:
            return
        img = np.copy(last_input_image[0])
    # Extract pose data
    joint_names = pose_data.joint_names
    bodies2D = np.array(pose_data.bodies_flat).reshape(pose_data.bodies_shape).tolist()
    # One distinct color per body (HSV wheel), scaled to 0-255.
    colors = plt.cm.hsv(np.linspace(0, 1, len(bodies2D), endpoint=False)).tolist()
    colors = [[int(c[0] * 255), int(c[1] * 255), int(c[2] * 255)] for c in colors]
    for i, body in enumerate(bodies2D):
        # reversed(): draw in BGR channel order.
        color = list(reversed(colors[i]))
        img = utils_view.draw_body_in_image(img, body, joint_names, color)
    # Publish image with poses
    publish(img)
    msg = "Visualization time: {:.3f}s"
    print(msg.format(time.time() - ptime))
# ==================================================================================================
def publish(img):
    """Publish the annotated RGB image on the output topic."""
    publisher_img.publish(bridge.cv2_to_imgmsg(img, "rgb8"))
# ==================================================================================================
def main():
    """Start the pose2d_visualizer node for the camera id given as argv[1]."""
    global node, publisher_img
    rclpy.init(args=sys.argv)
    cam_id = sys.argv[1]
    node = rclpy.create_node("pose2d_visualizer")
    # Reliable, keep-last-1 delivery for both images and poses.
    qos = QoSProfile(
        reliability=QoSReliabilityPolicy.RELIABLE,
        history=QoSHistoryPolicy.KEEP_LAST,
        depth=1,
    )
    # Subscriptions: raw camera images and the matching 2D poses.
    _ = node.create_subscription(
        Image, img_input_topic.format(cam_id), callback_images, qos
    )
    _ = node.create_subscription(
        Poses, pose_input_topic.format(cam_id), callback_poses, qos
    )
    # Publisher for the annotated image.
    publisher_img = node.create_publisher(
        Image, img_output_topic.format(cam_id), qos
    )
    node.get_logger().info("Finished initialization of pose visualizer")
    # Block until shutdown, then clean up.
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
# ==================================================================================================
if __name__ == "__main__":
    main()
-4
View File
@@ -1,4 +0,0 @@
[develop]
script_dir=$base/lib/pose2d_visualizer
[install]
install_scripts=$base/lib/pose2d_visualizer
-23
View File
@@ -1,23 +0,0 @@
from setuptools import setup

# ament_python package name; also used for the install locations below.
package_name = "pose2d_visualizer"

setup(
    name=package_name,
    version="0.0.0",
    packages=[package_name],
    # Register with the ament resource index and install the manifest.
    data_files=[
        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
        ("share/" + package_name, ["package.xml"]),
    ],
    install_requires=["setuptools"],
    zip_safe=True,
    maintainer="root",
    maintainer_email="root@todo.todo",
    description="TODO: Package description",
    license="TODO: License declaration",
    tests_require=["pytest"],
    # Exposes the node as `ros2 run pose2d_visualizer pose2d_visualizer`.
    entry_points={
        "console_scripts": ["pose2d_visualizer = pose2d_visualizer.pose2d_visualizer:main"],
    },
)
@@ -1,27 +0,0 @@
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from ament_copyright.main import main
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(
    reason="No copyright header has been placed in the generated source file."
)
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament_copyright linter over the package and fail on violations."""
    rc = main(argv=[".", "test"])
    assert rc == 0, "Found errors"
@@ -1,25 +0,0 @@
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from ament_flake8.main import main_with_errors
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run flake8 over the package; fail listing every style error found."""
    rc, errors = main_with_errors(argv=[])
    assert rc == 0, "Found %d code style errors / warnings:\n" % len(
        errors
    ) + "\n".join(errors)
@@ -1,23 +0,0 @@
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from ament_pep257.main import main
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament_pep257 docstring-style linter over the package."""
    rc = main(argv=[".", "test"])
    assert rc == 0, "Found code style errors / warnings"
@@ -1,69 +0,0 @@
# 3.13 is required for target_link_directories().
cmake_minimum_required(VERSION 3.13)
project(rpt2d_wrapper_cpp)

# Default to C99 / C++17 unless the user or toolchain already chose a standard.
if(NOT CMAKE_C_STANDARD)
  set(CMAKE_C_STANDARD 99)
endif()
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 17)
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# find dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(rpt_msgs REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(cv_bridge REQUIRED)
find_package(OpenCV REQUIRED)

add_executable(rpt2d_wrapper src/rpt2d_wrapper.cpp)
ament_target_dependencies(rpt2d_wrapper rclcpp sensor_msgs rpt_msgs cv_bridge)

# ONNX Runtime: target-scoped include/link directories instead of the former
# directory-scoped include_directories()/link_directories(), which leaked into
# every target in this directory. SYSTEM silences third-party-header warnings
# under -Wpedantic. Both the desktop and the Jetson install layouts are listed;
# whichever exists on the build machine wins at link time.
target_include_directories(rpt2d_wrapper SYSTEM PRIVATE
  /onnxruntime/include/
  /onnxruntime/include/onnxruntime/core/session/
  /onnxruntime/include/onnxruntime/core/providers/tensorrt/
  /usr/local/include/onnxruntime/)
target_link_directories(rpt2d_wrapper PRIVATE
  /onnxruntime/build/Linux/Release/
  /usr/local/lib/)

target_include_directories(rpt2d_wrapper PUBLIC
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
  $<INSTALL_INTERFACE:include>)

# NOTE: the keyword-less signature is kept deliberately — ament_target_dependencies()
# already linked this target with the plain signature, and CMake forbids mixing
# plain and PRIVATE/PUBLIC signatures on the same target.
target_link_libraries(rpt2d_wrapper
  ${OpenCV_LIBS}
  onnxruntime_providers_tensorrt
  onnxruntime_providers_shared
  onnxruntime_providers_cuda
  onnxruntime
)

# Bake the ONNX Runtime build dir into the runtime search path.
set_target_properties(rpt2d_wrapper PROPERTIES
  BUILD_WITH_INSTALL_RPATH TRUE
  INSTALL_RPATH "/onnxruntime/build/Linux/Release"
)

install(TARGETS rpt2d_wrapper
  DESTINATION lib/${PROJECT_NAME})

if(BUILD_TESTING)
  find_package(ament_lint_auto REQUIRED)
  ament_lint_auto_find_test_dependencies()
endif()

ament_package()
-25
View File
@@ -1,25 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>rpt2d_wrapper_cpp</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="root@todo.todo">root</maintainer>
<license>TODO: License declaration</license>
<buildtool_depend>ament_cmake</buildtool_depend>
<depend>rclcpp</depend>
<depend>rpt_msgs</depend>
<depend>sensor_msgs</depend>
<depend>cv_bridge</depend>
<depend>OpenCV</depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>
@@ -1,237 +0,0 @@
#include <atomic>
#include <chrono>
#include <iostream>
#include <string>
#include <vector>
// ROS2
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>
// OpenCV / cv_bridge
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
// JSON library
#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp"
using json = nlohmann::json;
#include "rpt_msgs/msg/poses.hpp"
#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp"
#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp"
// =================================================================================================
// Topic templates; "{}" is replaced with the camera id at startup.
static const std::string img_input_topic = "/{}/pylon_ros2_camera_node/image_raw";
static const std::string pose_out_topic = "/poses/{}";
// Person detections below this score are discarded.
static const float min_bbox_score = 0.4;
// Minimum bounding-box area as a fraction of the image (10% x 10%).
static const float min_bbox_area = 0.1 * 0.1;
// Run all person crops through the pose model as one batch.
static const bool batch_poses = true;
// Which whole-body keypoint groups to predict in addition to the main joints.
static const std::map<std::string, bool> whole_body = {
    {"foots", true},
    {"face", true},
    {"hands", true},
};
// =================================================================================================
// =================================================================================================
// ROS2 node wrapping the 2D pose estimator for a single camera: subscribes to
// the camera's raw images and publishes per-frame 2D poses.
class Rpt2DWrapperNode : public rclcpp::Node
{
public:
    // cam_id selects the camera; it is substituted into the topic templates
    // and into the node name ("rpt2d_wrapper_<cam_id>").
    Rpt2DWrapperNode(const std::string &cam_id)
        : Node("rpt2d_wrapper_" + cam_id)
    {
        this->is_busy = false;
        std::string img_topic = std::string(img_input_topic)
                                    .replace(img_input_topic.find("{}"), 2, cam_id);
        std::string pose_topic = std::string(pose_out_topic)
                                     .replace(pose_out_topic.find("{}"), 2, cam_id);
        // QoS: reliable, keep only the newest message.
        rclcpp::QoS qos_profile(1);
        qos_profile.reliable();
        qos_profile.keep_last(1);
        // Setup subscriber
        image_sub_ = this->create_subscription<sensor_msgs::msg::Image>(
            img_topic, qos_profile,
            std::bind(&Rpt2DWrapperNode::callback_images, this, std::placeholders::_1));
        // Setup publisher
        pose_pub_ = this->create_publisher<rpt_msgs::msg::Poses>(pose_topic, qos_profile);
        // Load model
        bool use_wb = utils_pipeline::use_whole_body(whole_body);
        this->kpt_model = std::make_unique<utils_2d_pose::PosePredictor>(
            use_wb, min_bbox_score, min_bbox_area, batch_poses);
        RCLCPP_INFO(this->get_logger(), "Finished initialization of pose estimator.");
    }
private:
    rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr image_sub_;
    rclcpp::Publisher<rpt_msgs::msg::Poses>::SharedPtr pose_pub_;
    // True while a frame is being processed; later frames are dropped.
    std::atomic<bool> is_busy;
    // Pose model pointer
    std::unique_ptr<utils_2d_pose::PosePredictor> kpt_model;
    const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body);
    void callback_images(const sensor_msgs::msg::Image::SharedPtr msg);
    // Runs the model on one frame; returns [person][joint][x, y, conf].
    std::vector<std::vector<std::array<float, 3>>> call_model(const cv::Mat &image);
};
// =================================================================================================
// Handles one camera frame: converts it to Bayer, runs the 2D pose model and
// publishes the flattened poses together with timing metadata. Frames that
// arrive while a previous one is still being processed are dropped.
void Rpt2DWrapperNode::callback_images(const sensor_msgs::msg::Image::SharedPtr msg)
{
    // Atomically claim the busy flag; exchange() closes the check-then-set
    // race that existed between overlapping callbacks.
    if (this->is_busy.exchange(true))
    {
        RCLCPP_WARN(this->get_logger(), "Skipping frame, still processing...");
        return;
    }
    auto ts_image = std::chrono::high_resolution_clock::now();
    // Load or convert image to Bayer format
    cv::Mat bayer_image;
    try
    {
        if (msg->encoding == "mono8" || msg->encoding == "bayer_rggb8")
        {
            cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, msg->encoding);
            bayer_image = cv_ptr->image;
        }
        else if (msg->encoding == "rgb8")
        {
            cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, "rgb8");
            cv::Mat color_image = cv_ptr->image;
            bayer_image = utils_pipeline::rgb2bayer(color_image);
        }
        else
        {
            throw std::runtime_error("Unknown image encoding: " + msg->encoding);
        }
    }
    catch (const std::exception &e)
    {
        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        // BUG FIX: the flag was previously left set on this early return,
        // permanently discarding all later frames after one bad image.
        this->is_busy = false;
        return;
    }
    // Call model
    const auto &valid_poses = this->call_model(bayer_image);
    // Calculate timings (all in seconds since epoch; z-* are deltas)
    double time_stamp = msg->header.stamp.sec + msg->header.stamp.nanosec / 1.0e9;
    auto ts_image_sec = std::chrono::duration<double>(ts_image.time_since_epoch()).count();
    auto ts_pose = std::chrono::high_resolution_clock::now();
    double ts_pose_sec = std::chrono::duration<double>(ts_pose.time_since_epoch()).count();
    double z_trigger_image = ts_image_sec - time_stamp;
    double z_trigger_pose = ts_pose_sec - time_stamp;
    double z_image_pose = ts_pose_sec - ts_image_sec;
    json jdata;
    jdata["timestamps"] = {
        {"trigger", time_stamp},
        {"image", ts_image_sec},
        {"pose2d", ts_pose_sec},
        {"z-trigger-image", z_trigger_image},
        {"z-image-pose2d", z_image_pose},
        {"z-trigger-pose2d", z_trigger_pose}};
    // Publish message: poses flattened to [persons * joints * 3] with shape info
    auto pose_msg = rpt_msgs::msg::Poses();
    pose_msg.header = msg->header;
    std::vector<int32_t> bshape = {(int)valid_poses.size(), (int)joint_names_2d.size(), 3};
    pose_msg.bodies_shape = bshape;
    pose_msg.bodies_flat.reserve(bshape[0] * bshape[1] * bshape[2]);
    for (int32_t i = 0; i < bshape[0]; i++)
    {
        for (int32_t j = 0; j < bshape[1]; j++)
        {
            for (int32_t k = 0; k < bshape[2]; k++)
            {
                pose_msg.bodies_flat.push_back(valid_poses[i][j][k]);
            }
        }
    }
    pose_msg.joint_names = joint_names_2d;
    pose_msg.extra_data = jdata.dump();
    pose_pub_->publish(pose_msg);
    // Print info
    double elapsed_time = std::chrono::duration<double>(
                              std::chrono::high_resolution_clock::now() - ts_image)
                              .count();
    std::cout << "Detected persons: " << valid_poses.size()
              << " - Prediction time: " << elapsed_time << "s" << std::endl;
    this->is_busy = false;
}
// =================================================================================================
// Runs the 2D pose model on one Bayer frame. Returns only persons with at
// least one confident joint, keypoints rounded to 3 decimal places.
std::vector<std::vector<std::array<float, 3>>> Rpt2DWrapperNode::call_model(const cv::Mat &image)
{
    // The model expects RGB input, so demosaic the frame first.
    cv::Mat rgb_image = utils_pipeline::bayer2rgb(image);
    std::vector<cv::Mat> batch = {rgb_image};
    // Predict and post-process the 2D keypoints.
    auto raw_poses = kpt_model->predict(batch);
    auto updated_poses = utils_pipeline::update_keypoints(
        raw_poses, joint_names_2d, whole_body);
    auto &frame_poses = updated_poses[0];
    std::vector<std::vector<std::array<float, 3>>> valid_poses;
    for (auto &person : frame_poses)
    {
        // Total confidence decides whether the person is kept at all.
        float total_conf = 0.0;
        for (const auto &kp : person)
        {
            total_conf += kp[2];
        }
        if (total_conf <= 0.0)
        {
            continue;
        }
        // Round x, y and confidence to 3 decimal places before emitting.
        for (auto &kp : person)
        {
            for (int c = 0; c < 3; ++c)
            {
                kp[c] = std::round(kp[c] * 1000.0) / 1000.0;
            }
        }
        valid_poses.push_back(person);
    }
    return valid_poses;
}
// =================================================================================================
// =================================================================================================
// Entry point: reads the camera id from the CAMID environment variable and
// spins the 2D wrapper node for that camera.
int main(int argc, char **argv)
{
    rclcpp::init(argc, argv);
    // BUG FIX: std::getenv returns nullptr when CAMID is unset; constructing
    // std::string from a null pointer is undefined behavior.
    const char *cam_env = std::getenv("CAMID");
    if (cam_env == nullptr || *cam_env == '\0')
    {
        std::cerr << "Environment variable CAMID must be set" << std::endl;
        rclcpp::shutdown();
        return 1;
    }
    const std::string cam_id = cam_env;
    auto node = std::make_shared<Rpt2DWrapperNode>(cam_id);
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}
@@ -1,64 +0,0 @@
cmake_minimum_required(VERSION 3.5)
project(rpt3d_wrapper_cpp)

# Default to C99 / C++20 unless the user or toolchain already chose a standard.
if(NOT CMAKE_C_STANDARD)
  set(CMAKE_C_STANDARD 99)
endif()
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 20)
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# -march=native is non-portable (binaries may crash on other CPUs), so make it
# opt-out instead of hardcoding it. Default ON preserves previous behavior.
option(RPT3D_NATIVE_ARCH "Tune rapid_pose_triangulation for the build machine (-march=native)" ON)

# Find dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(rpt_msgs REQUIRED)
find_package(std_msgs REQUIRED)

# Add RapidPoseTriangulation implementation
set(RAPID_POSE_TRIANGULATION_DIR "/RapidPoseTriangulation")
add_library(rapid_pose_triangulation
  ${RAPID_POSE_TRIANGULATION_DIR}/rpt/camera.cpp
  ${RAPID_POSE_TRIANGULATION_DIR}/rpt/interface.cpp
  ${RAPID_POSE_TRIANGULATION_DIR}/rpt/triangulator.cpp
)
target_include_directories(rapid_pose_triangulation PUBLIC
  ${RAPID_POSE_TRIANGULATION_DIR}/extras/include
  ${RAPID_POSE_TRIANGULATION_DIR}/rpt
)
# Build flags are not usage requirements: scope them PRIVATE instead of the
# former PUBLIC, which leaked -O3/-Werror into every consumer.
target_compile_options(rapid_pose_triangulation PRIVATE
  -O3 -Wall -Werror
  $<$<BOOL:${RPT3D_NATIVE_ARCH}>:-march=native>
)
# Portable replacement for hand-written -fPIC.
set_target_properties(rapid_pose_triangulation PROPERTIES
  POSITION_INDEPENDENT_CODE ON
)

# Build the executable
add_executable(rpt3d_wrapper src/rpt3d_wrapper.cpp)
ament_target_dependencies(rpt3d_wrapper rclcpp std_msgs rpt_msgs)
target_include_directories(rpt3d_wrapper PUBLIC
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
  $<INSTALL_INTERFACE:include>)
# Keyword-less signature kept: ament_target_dependencies() already used the
# plain signature on this target, and the two signatures must not be mixed.
target_link_libraries(rpt3d_wrapper
  rapid_pose_triangulation
)

install(TARGETS rpt3d_wrapper
  DESTINATION lib/${PROJECT_NAME})

if(BUILD_TESTING)
  find_package(ament_lint_auto REQUIRED)
  ament_lint_auto_find_test_dependencies()
endif()

ament_package()
-24
View File
@@ -1,24 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>rpt3d_wrapper_cpp</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="root@todo.todo">root</maintainer>
<license>TODO: License declaration</license>
<buildtool_depend>ament_cmake</buildtool_depend>
<depend>rclcpp</depend>
<depend>rpt_msgs</depend>
<depend>std_msgs</depend>
<depend>OpenCV</depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>
@@ -1,324 +0,0 @@
#include <atomic>
#include <chrono>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>
// ROS2
#include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/string.hpp>
// JSON library
#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp"
using json = nlohmann::json;
#include "rpt_msgs/msg/poses.hpp"
#include "/RapidPoseTriangulation/rpt/camera.hpp"
#include "/RapidPoseTriangulation/rpt/interface.hpp"
#include "/RapidPoseTriangulation/rpt/tracker.hpp"
// =================================================================================================
// Cameras contributing 2D poses; index order must match the calibration topics.
static const std::vector<std::string> cam_ids = {
    "camera01",
    "camera02",
    "camera03",
    "camera04",
    "camera05",
    "camera06",
    "camera07",
    "camera08",
    "camera09",
    "camera10",
};
// Topic templates; "{}" is replaced with a camera id.
static const std::string pose_in_topic = "/poses/{}";
static const std::string cam_info_topic = "/{}/calibration";
static const std::string pose_out_topic = "/poses/humans3d";
// Triangulation: minimum cross-view match score and minimum #views per person.
static const float min_match_score = 0.94;
static const size_t min_group_size = 4;
static const bool use_tracking = true;
// Max person speed in m/s (2 m/s walking with a 1.5x safety factor).
static const float max_movement_speed = 2.0 * 1.5;
static const float cam_fps = 50;
// Max distance a track may move between two frames and still be matched.
static const float max_track_distance = 0.3 + max_movement_speed / cam_fps;
// Room bounds: {size x/y/z} and {center offset x/y/z} in meters
// — presumably; TODO confirm against the Triangulator API.
static const std::array<std::array<float, 3>, 2> roomparams = {{
    {4.0, 5.0, 2.2},
    {1.0, 0.0, 1.1},
}};
// =================================================================================================
// =================================================================================================
// ROS2 node that fuses per-camera 2D poses into 3D poses: subscribes to every
// camera's pose and calibration topic, triangulates, tracks, and publishes.
class Rpt3DWrapperNode : public rclcpp::Node
{
public:
    Rpt3DWrapperNode()
        : Node("rpt3d_wrapper")
    {
        // One slot per camera for calibration, poses, timestamps and a
        // "has data" flag; filled by the per-camera callbacks below.
        this->all_cameras.resize(cam_ids.size());
        this->all_poses.resize(cam_ids.size());
        this->all_timestamps.resize(cam_ids.size());
        this->joint_names = {};
        this->all_poses_set.resize(cam_ids.size(), false);
        // Load 3D models
        tri_model = std::make_unique<Triangulator>(
            min_match_score, min_group_size);
        pose_tracker = std::make_unique<PoseTracker>(
            max_movement_speed, max_track_distance);
        // QoS: reliable, keep only the newest message.
        rclcpp::QoS qos_profile(1);
        qos_profile.reliable();
        qos_profile.keep_last(1);
        // Reentrant group so the per-camera callbacks may run in parallel.
        auto my_callback_group = create_callback_group(rclcpp::CallbackGroupType::Reentrant);
        rclcpp::SubscriptionOptions options;
        options.callback_group = my_callback_group;
        // Setup subscribers: one pose and one calibration subscription per
        // camera, each lambda capturing its camera index.
        for (size_t i = 0; i < cam_ids.size(); i++)
        {
            std::string cam_id = cam_ids[i];
            std::string topic_pose = pose_in_topic;
            std::string topic_cam = cam_info_topic;
            topic_pose.replace(topic_pose.find("{}"), 2, cam_id);
            topic_cam.replace(topic_cam.find("{}"), 2, cam_id);
            auto sub_pose = this->create_subscription<rpt_msgs::msg::Poses>(
                topic_pose, qos_profile,
                [this, i](const rpt_msgs::msg::Poses::SharedPtr msg)
                {
                    this->callback_poses(msg, i);
                },
                options);
            sub_pose_list_.push_back(sub_pose);
            auto sub_cam = this->create_subscription<std_msgs::msg::String>(
                topic_cam, qos_profile,
                [this, i](const std_msgs::msg::String::SharedPtr msg)
                {
                    this->callback_cam_info(msg, i);
                },
                options);
            sub_cam_list_.push_back(sub_cam);
        }
        // Setup publisher
        pose_pub_ = this->create_publisher<rpt_msgs::msg::Poses>(pose_out_topic, qos_profile);
        RCLCPP_INFO(this->get_logger(), "Finished initialization of pose triangulator.");
    }
private:
    std::vector<rclcpp::Subscription<rpt_msgs::msg::Poses>::SharedPtr> sub_pose_list_;
    std::vector<rclcpp::Subscription<std_msgs::msg::String>::SharedPtr> sub_cam_list_;
    rclcpp::Publisher<rpt_msgs::msg::Poses>::SharedPtr pose_pub_;
    std::unique_ptr<Triangulator> tri_model;
    std::unique_ptr<PoseTracker> pose_tracker;
    // Per-camera state, indexed by position in cam_ids; guarded by the
    // mutexes below since callbacks run concurrently (Reentrant group).
    std::vector<Camera> all_cameras;
    std::mutex cams_mutex, pose_mutex, model_mutex;
    std::vector<std::vector<std::vector<std::array<float, 3>>>> all_poses;
    std::vector<double> all_timestamps;
    std::vector<std::string> joint_names;
    std::vector<bool> all_poses_set;
    void callback_poses(const rpt_msgs::msg::Poses::SharedPtr msg, size_t cam_idx);
    void callback_cam_info(const std_msgs::msg::String::SharedPtr msg, size_t cam_idx);
    void call_model();
};
// =================================================================================================
/**
 * Parse a JSON-encoded camera calibration message and store it in the
 * per-camera slot cam_idx.
 *
 * The JSON object must provide: name, K (3x3), DC, R (3x3), T (3x1),
 * width, height, type. json::parse and the typed get<>() calls throw on
 * malformed input, so the mutex is taken via lock_guard only around the
 * final store (exception-safe: the original manual lock()/unlock() would
 * have left the mutex locked if the assignment threw).
 */
void Rpt3DWrapperNode::callback_cam_info(const std_msgs::msg::String::SharedPtr msg, size_t cam_idx)
{
    json cam = json::parse(msg->data);
    Camera camera;
    camera.name = cam["name"].get<std::string>();
    camera.K = cam["K"].get<std::array<std::array<float, 3>, 3>>();
    camera.DC = cam["DC"].get<std::vector<float>>();
    camera.R = cam["R"].get<std::array<std::array<float, 3>, 3>>();
    camera.T = cam["T"].get<std::array<std::array<float, 1>, 3>>();
    camera.width = cam["width"].get<int>();
    camera.height = cam["height"].get<int>();
    camera.type = cam["type"].get<std::string>();
    std::lock_guard<std::mutex> lock(cams_mutex);
    all_cameras[cam_idx] = std::move(camera);
}
// =================================================================================================
/**
 * Receive the flattened 2D poses of one camera view, unflatten them into
 * [persons][joints][3], store them in the per-camera slot, and trigger a
 * triangulation attempt.
 *
 * @param msg Poses message with bodies_flat (row-major) and bodies_shape
 *            [persons, joints, coords].
 * @param cam_idx Index of the camera this message belongs to.
 */
void Rpt3DWrapperNode::callback_poses(const rpt_msgs::msg::Poses::SharedPtr msg, size_t cam_idx)
{
    std::vector<std::vector<std::array<float, 3>>> poses;
    // NOTE(review): joint_names is read/written here without holding a mutex
    // while other callbacks may run in parallel — confirm this is benign
    // (it is only assigned once, on the first message).
    if (this->joint_names.empty())
    {
        this->joint_names = msg->joint_names;
    }
    // Unflatten poses
    size_t idx = 0;
    std::vector<int32_t> &bshape = msg->bodies_shape;
    poses.reserve(bshape[0]);
    for (int32_t i = 0; i < bshape[0]; ++i)
    {
        std::vector<std::array<float, 3>> body;
        body.reserve(bshape[1]);
        for (int32_t j = 0; j < bshape[1]; ++j)
        {
            std::array<float, 3> joint {};
            for (int32_t k = 0; k < bshape[2]; ++k)
            {
                // Guard the fixed-size joint array: a malformed message with
                // bodies_shape[2] > 3 would otherwise write out of bounds.
                if (k < 3)
                {
                    joint[k] = msg->bodies_flat[idx];
                }
                ++idx;
            }
            body.push_back(std::move(joint));
        }
        poses.push_back(std::move(body));
    }
    // If no pose was detected, create an empty placeholder
    if (poses.empty())
    {
        std::vector<std::array<float, 3>> body(joint_names.size(), {0, 0, 0});
        poses.push_back(std::move(body));
    }
    {
        // lock_guard instead of manual lock()/unlock(): exception-safe release.
        std::lock_guard<std::mutex> lock(pose_mutex);
        this->all_poses[cam_idx] = std::move(poses);
        this->all_poses_set[cam_idx] = true;
        this->all_timestamps[cam_idx] = msg->header.stamp.sec + msg->header.stamp.nanosec / 1.0e9;
    }
    // Trigger model callback; model_mutex serializes concurrent triggers.
    {
        std::lock_guard<std::mutex> lock(model_mutex);
        call_model();
    }
}
// =================================================================================================
void Rpt3DWrapperNode::call_model()
{
auto ts_msg = std::chrono::high_resolution_clock::now();
// Check if all cameras are set
cams_mutex.lock();
for (size_t i = 0; i < cam_ids.size(); i++)
{
if (all_cameras[i].name.empty())
{
RCLCPP_WARN(this->get_logger(), "Skipping frame, still waiting for cameras...");
cams_mutex.unlock();
return;
}
}
cams_mutex.unlock();
// If not all poses are set, return and wait for the others
pose_mutex.lock();
for (size_t i = 0; i < cam_ids.size(); i++)
{
if (!this->all_poses_set[i])
{
pose_mutex.unlock();
return;
}
}
pose_mutex.unlock();
// Call model, and meanwhile lock updates of the inputs
// Since the prediction is very fast, parallel callback threads only need to wait a short time
cams_mutex.lock();
pose_mutex.lock();
const auto poses_3d = tri_model->triangulate_poses(
all_poses, all_cameras, roomparams, joint_names);
double min_ts = *std::min_element(all_timestamps.begin(), all_timestamps.end());
this->all_poses_set = std::vector<bool>(cam_ids.size(), false);
std::vector<std::vector<std::array<float, 4>>> valid_poses;
std::vector<size_t> track_ids;
if (use_tracking)
{
auto pose_tracks = pose_tracker->track_poses(poses_3d, joint_names, min_ts);
std::vector<std::vector<std::array<float, 4>>> poses_3d_refined;
for (size_t j = 0; j < pose_tracks.size(); j++)
{
auto &pose = std::get<1>(pose_tracks[j]);
poses_3d_refined.push_back(pose);
auto &track_id = std::get<0>(pose_tracks[j]);
track_ids.push_back(track_id);
}
valid_poses = std::move(poses_3d_refined);
}
else
{
valid_poses = std::move(poses_3d);
track_ids = {};
}
pose_mutex.unlock();
cams_mutex.unlock();
// Calculate timings
auto ts_pose = std::chrono::high_resolution_clock::now();
double ts_pose_sec = std::chrono::duration<double>(ts_pose.time_since_epoch()).count();
double z_trigger_pose3d = ts_pose_sec - min_ts;
json jdata;
jdata["timestamps"] = {
{"trigger", min_ts},
{"pose3d", ts_pose_sec},
{"z-trigger-pose3d", z_trigger_pose3d}};
// Publish message
auto pose_msg = rpt_msgs::msg::Poses();
pose_msg.header.stamp.sec = static_cast<int>(min_ts);
pose_msg.header.stamp.nanosec = (min_ts - pose_msg.header.stamp.sec) * 1.0e9;
pose_msg.header.frame_id = "world";
std::vector<int32_t> pshape = {(int)valid_poses.size(), (int)joint_names.size(), 4};
pose_msg.bodies_shape = pshape;
pose_msg.bodies_flat.reserve(pshape[0] * pshape[1] * pshape[2]);
for (int32_t i = 0; i < pshape[0]; i++)
{
for (int32_t j = 0; j < pshape[1]; j++)
{
for (int32_t k = 0; k < pshape[2]; k++)
{
pose_msg.bodies_flat.push_back(valid_poses[i][j][k]);
}
}
}
pose_msg.joint_names = joint_names;
jdata["track_ids"] = track_ids;
pose_msg.extra_data = jdata.dump();
pose_pub_->publish(pose_msg);
// Print info
double elapsed_time = std::chrono::duration<double>(
std::chrono::high_resolution_clock::now() - ts_msg)
.count();
std::cout << "Detected persons: " << valid_poses.size()
<< " - Prediction time: " << elapsed_time << "s" << std::endl;
}
// =================================================================================================
// =================================================================================================
int main(int argc, char **argv)
{
rclcpp::init(argc, argv);
auto node = std::make_shared<Rpt3DWrapperNode>();
rclcpp::executors::MultiThreadedExecutor exec;
exec.add_node(node);
exec.spin();
rclcpp::shutdown();
return 0;
}
-33
View File
@@ -1,33 +0,0 @@
cmake_minimum_required(VERSION 3.8)
project(rpt_msgs)
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
# find dependencies
find_package(ament_cmake REQUIRED)
find_package(std_msgs REQUIRED)
find_package(rosidl_default_generators REQUIRED)
# uncomment the following section in order to fill in
# further dependencies manually.
# find_package(<dependency> REQUIRED)
rosidl_generate_interfaces(${PROJECT_NAME}
"msg/Poses.msg"
DEPENDENCIES std_msgs
)
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
# the following line skips the linter which checks for copyrights
# comment the line when a copyright and license is added to all source files
set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# comment the line when this package is in a git repo and when
# a copyright and license is added to all source files
set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies()
endif()
ament_package()
-7
View File
@@ -1,7 +0,0 @@
std_msgs/Header header
float32[] bodies_flat
int32[] bodies_shape
string[] joint_names
string extra_data
-23
View File
@@ -1,23 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>rpt_msgs</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="root@todo.todo">root</maintainer>
<license>TODO: License declaration</license>
<depend>std_msgs</depend>
<buildtool_depend>rosidl_default_generators</buildtool_depend>
<exec_depend>rosidl_default_runtime</exec_depend>
<member_of_group>rosidl_interface_packages</member_of_group>
<buildtool_depend>ament_cmake</buildtool_depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>
+23 -1
View File
@@ -1,3 +1,25 @@
[build-system]
requires = [
"nanobind>=2.11",
"scikit-build-core>=0.10",
]
build-backend = "scikit_build_core.build"
[project]
name = "rapid-pose-triangulation"
version = "0.1.0"
description = "Rapid Pose Triangulation library with nanobind Python bindings"
readme = "README.md"
requires-python = ">=3.10"
dependencies = ["numpy>=2.0"]
[dependency-groups]
dev = ["pytest>=8.3"]
[tool.scikit-build]
minimum-version = "build-system.requires"
wheel.packages = ["src/rpt"]
[tool.isort]
profile = "black"
known_first_party = "skelda,draw_utils,test_triangulate,utils_2d_pose,triangulate_poses"
known_first_party = "rpt"
+24
View File
@@ -0,0 +1,24 @@
# RPT Core Library
set(RPT_SOURCES
camera.cpp
interface.cpp
triangulator.cpp
)
add_library(rpt_core STATIC ${RPT_SOURCES})
target_include_directories(rpt_core PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
)
target_compile_options(rpt_core PRIVATE
-fPIC
-march=native
-Wall
)
# Release mode optimizations
target_compile_options(rpt_core PRIVATE
$<$<CONFIG:Release>:-O3;-flto=auto>
)
+116 -78
View File
@@ -2,10 +2,60 @@
#include <cmath>
#include <iomanip>
#include <sstream>
#include <stdexcept>
#include <utility>
#include <vector>
#include "camera.hpp"
namespace
{
std::array<std::array<float, 3>, 3> transpose3x3(const std::array<std::array<float, 3>, 3> &M)
{
return {{{M[0][0], M[1][0], M[2][0]},
{M[0][1], M[1][1], M[2][1]},
{M[0][2], M[1][2], M[2][2]}}};
}
std::array<std::array<float, 3>, 3> invert3x3(const std::array<std::array<float, 3>, 3> &M)
{
std::array<std::array<float, 3>, 3> adj = {
{{
M[1][1] * M[2][2] - M[1][2] * M[2][1],
M[0][2] * M[2][1] - M[0][1] * M[2][2],
M[0][1] * M[1][2] - M[0][2] * M[1][1],
},
{
M[1][2] * M[2][0] - M[1][0] * M[2][2],
M[0][0] * M[2][2] - M[0][2] * M[2][0],
M[0][2] * M[1][0] - M[0][0] * M[1][2],
},
{
M[1][0] * M[2][1] - M[1][1] * M[2][0],
M[0][1] * M[2][0] - M[0][0] * M[2][1],
M[0][0] * M[1][1] - M[0][1] * M[1][0],
}}};
float det = M[0][0] * adj[0][0] + M[0][1] * adj[1][0] + M[0][2] * adj[2][0];
if (std::fabs(det) < 1e-6f)
{
return {{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}};
}
float idet = 1.0f / det;
return {{
{{adj[0][0] * idet, adj[0][1] * idet, adj[0][2] * idet}},
{{adj[1][0] * idet, adj[1][1] * idet, adj[1][2] * idet}},
{{adj[2][0] * idet, adj[2][1] * idet, adj[2][2] * idet}},
}};
}
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_fisheye(
const Camera &cam, float balance, std::pair<int, int> new_size);
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_pinhole(
const Camera &cam, float alpha, std::pair<int, int> new_size);
} // namespace
// =================================================================================================
// =================================================================================================
@@ -62,7 +112,7 @@ std::string Camera::to_string() const
out << "'width': " << width << ", ";
out << "'height': " << height << ", ";
out << "'type': " << type;
out << "'model': '" << camera_model_name(model) << "'";
out << "}";
return out.str();
@@ -70,6 +120,33 @@ std::string Camera::to_string() const
// =================================================================================================
const char *camera_model_name(CameraModel model)
{
switch (model)
{
case CameraModel::Pinhole:
return "pinhole";
case CameraModel::Fisheye:
return "fisheye";
}
return "unknown";
}
CameraModel parse_camera_model(const std::string &value)
{
if (value == "pinhole")
{
return CameraModel::Pinhole;
}
if (value == "fisheye")
{
return CameraModel::Fisheye;
}
throw std::invalid_argument("Unsupported camera model: " + value);
}
// =================================================================================================
std::ostream &operator<<(std::ostream &out, const Camera &cam)
{
out << cam.to_string();
@@ -79,93 +156,51 @@ std::ostream &operator<<(std::ostream &out, const Camera &cam)
// =================================================================================================
// =================================================================================================
CameraInternal::CameraInternal(const Camera &cam)
Camera make_camera(
std::string name,
std::array<std::array<float, 3>, 3> K,
std::array<float, 5> DC,
std::array<std::array<float, 3>, 3> R,
std::array<std::array<float, 1>, 3> T,
int width,
int height,
CameraModel model)
{
this->cam = cam;
this->invR = transpose3x3(cam.R);
Camera cam {
.name = std::move(name),
.K = K,
.DC = DC,
.R = R,
.T = T,
.width = width,
.height = height,
.model = model,
};
cam.invR = transpose3x3(cam.R);
// Camera center:
// C = -(Rᵀ * t) = -(Rᵀ * (R * (T * -1))) = -(Rᵀ * (R * -T)) = -(Rᵀ * -R * T) = -(-T) = T
this->center = {cam.T[0][0], cam.T[1][0], cam.T[2][0]};
cam.center = {cam.T[0][0], cam.T[1][0], cam.T[2][0]};
// Undistort camera matrix
// As with the undistortion, the own implementation avoids some overhead compared to OpenCV
if (cam.type == "fisheye")
if (cam.model == CameraModel::Fisheye)
{
newK = calc_optimal_camera_matrix_fisheye(1.0, {cam.width, cam.height});
cam.newK = calc_optimal_camera_matrix_fisheye(cam, 1.0f, {cam.width, cam.height});
}
else
{
newK = calc_optimal_camera_matrix_pinhole(1.0, {cam.width, cam.height});
cam.newK = calc_optimal_camera_matrix_pinhole(cam, 1.0f, {cam.width, cam.height});
}
this->invK = invert3x3(newK);
cam.invK = invert3x3(cam.newK);
return cam;
}
// =================================================================================================
std::array<std::array<float, 3>, 3> CameraInternal::transpose3x3(
const std::array<std::array<float, 3>, 3> &M)
{
return {{{M[0][0], M[1][0], M[2][0]},
{M[0][1], M[1][1], M[2][1]},
{M[0][2], M[1][2], M[2][2]}}};
}
// =================================================================================================
std::array<std::array<float, 3>, 3> CameraInternal::invert3x3(
const std::array<std::array<float, 3>, 3> &M)
{
// Compute the inverse using the adjugate method
// See: https://scicomp.stackexchange.com/a/29206
std::array<std::array<float, 3>, 3> adj = {
{{
M[1][1] * M[2][2] - M[1][2] * M[2][1],
M[0][2] * M[2][1] - M[0][1] * M[2][2],
M[0][1] * M[1][2] - M[0][2] * M[1][1],
},
{
M[1][2] * M[2][0] - M[1][0] * M[2][2],
M[0][0] * M[2][2] - M[0][2] * M[2][0],
M[0][2] * M[1][0] - M[0][0] * M[1][2],
},
{
M[1][0] * M[2][1] - M[1][1] * M[2][0],
M[0][1] * M[2][0] - M[0][0] * M[2][1],
M[0][0] * M[1][1] - M[0][1] * M[1][0],
}}};
float det = M[0][0] * adj[0][0] + M[0][1] * adj[1][0] + M[0][2] * adj[2][0];
if (std::fabs(det) < 1e-6f)
{
return {{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}};
}
float idet = 1.0f / det;
std::array<std::array<float, 3>, 3> inv = {
{{
adj[0][0] * idet,
adj[0][1] * idet,
adj[0][2] * idet,
},
{
adj[1][0] * idet,
adj[1][1] * idet,
adj[1][2] * idet,
},
{
adj[2][0] * idet,
adj[2][1] * idet,
adj[2][2] * idet,
}}};
return inv;
}
// =================================================================================================
void CameraInternal::undistort_point_pinhole(std::array<float, 3> &p, const std::vector<float> &k)
void undistort_point_pinhole(std::array<float, 3> &p, const std::array<float, 5> &k)
{
// Following: cv::cvUndistortPointsInternal
// Uses only the distortion coefficients: [k1, k2, p1, p2, k3]
@@ -201,14 +236,14 @@ void CameraInternal::undistort_point_pinhole(std::array<float, 3> &p, const std:
// =================================================================================================
void CameraInternal::undistort_point_fisheye(std::array<float, 3> &p, const std::vector<float> &k)
void undistort_point_fisheye(std::array<float, 3> &p, const std::array<float, 5> &k)
{
// Following: cv::fisheye::undistortPoints
// Uses only the distortion coefficients: [k1, k2, k3, k4]
// https://github.com/opencv/opencv/blob/4.x/modules/calib3d/src/fisheye.cpp#L429
float theta_d = std::sqrt(p[0] * p[0] + p[1] * p[1]);
float pi_half = std::numbers::pi * 0.5;
float pi_half = M_PI * 0.5;
theta_d = std::min(std::max(-pi_half, theta_d), pi_half);
if (theta_d < 1e-6)
@@ -249,8 +284,10 @@ void CameraInternal::undistort_point_fisheye(std::array<float, 3> &p, const std:
// =================================================================================================
std::array<std::array<float, 3>, 3> CameraInternal::calc_optimal_camera_matrix_fisheye(
float balance, std::pair<int, int> new_size)
namespace
{
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_fisheye(
const Camera &cam, float balance, std::pair<int, int> new_size)
{
// Following: cv::fisheye::estimateNewCameraMatrixForUndistortRectify
// https://github.com/opencv/opencv/blob/4.x/modules/calib3d/src/fisheye.cpp#L630
@@ -354,8 +391,8 @@ std::array<std::array<float, 3>, 3> CameraInternal::calc_optimal_camera_matrix_f
// =================================================================================================
std::array<std::array<float, 3>, 3> CameraInternal::calc_optimal_camera_matrix_pinhole(
float alpha, std::pair<int, int> new_size)
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_pinhole(
const Camera &cam, float alpha, std::pair<int, int> new_size)
{
// Following: cv::getOptimalNewCameraMatrix
// https://github.com/opencv/opencv/blob/4.x/modules/calib3d/src/calibration_base.cpp#L1565
@@ -478,3 +515,4 @@ std::array<std::array<float, 3>, 3> CameraInternal::calc_optimal_camera_matrix_p
{0.0, 0.0, 1.0}}};
return newK;
}
} // namespace
+30 -31
View File
@@ -7,46 +7,45 @@
// =================================================================================================
enum class CameraModel
{
Pinhole,
Fisheye,
};
const char *camera_model_name(CameraModel model);
CameraModel parse_camera_model(const std::string &value);
// =================================================================================================
struct Camera
{
std::string name;
std::array<std::array<float, 3>, 3> K;
std::vector<float> DC;
std::array<float, 5> DC = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
std::array<std::array<float, 3>, 3> R;
std::array<std::array<float, 1>, 3> T;
int width;
int height;
std::string type;
int width = 0;
int height = 0;
CameraModel model = CameraModel::Pinhole;
std::array<std::array<float, 3>, 3> invR {};
std::array<float, 3> center {};
std::array<std::array<float, 3>, 3> newK {};
std::array<std::array<float, 3>, 3> invK {};
friend std::ostream &operator<<(std::ostream &out, Camera const &camera);
std::string to_string() const;
};
// =================================================================================================
Camera make_camera(
std::string name,
std::array<std::array<float, 3>, 3> K,
std::array<float, 5> DC,
std::array<std::array<float, 3>, 3> R,
std::array<std::array<float, 1>, 3> T,
int width,
int height,
CameraModel model);
class CameraInternal
{
public:
CameraInternal(const Camera &cam);
Camera cam;
std::array<std::array<float, 3>, 3> invR;
std::array<float, 3> center;
std::array<std::array<float, 3>, 3> newK;
std::array<std::array<float, 3>, 3> invK;
static std::array<std::array<float, 3>, 3> transpose3x3(
const std::array<std::array<float, 3>, 3> &M);
static std::array<std::array<float, 3>, 3> invert3x3(
const std::array<std::array<float, 3>, 3> &M);
static void undistort_point_pinhole(std::array<float, 3> &p, const std::vector<float> &k);
static void undistort_point_fisheye(std::array<float, 3> &p, const std::vector<float> &k);
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_fisheye(
float balance, std::pair<int, int> new_size);
std::array<std::array<float, 3>, 3> calc_optimal_camera_matrix_pinhole(
float alpha, std::pair<int, int> new_size);
};
void undistort_point_pinhole(std::array<float, 3> &point, const std::array<float, 5> &distortion);
void undistort_point_fisheye(std::array<float, 3> &point, const std::array<float, 5> &distortion);
+156 -17
View File
@@ -1,35 +1,174 @@
#include "triangulator.hpp"
#include <algorithm>
#include <stdexcept>
#include <utility>
#include "interface.hpp"
// =================================================================================================
namespace
{
size_t pose2d_offset(
size_t view,
size_t person,
size_t joint,
size_t coord,
size_t max_persons,
size_t num_joints)
{
return ((((view * max_persons) + person) * num_joints) + joint) * 3 + coord;
}
size_t pose3d_offset(size_t person, size_t joint, size_t coord, size_t num_joints)
{
return (((person * num_joints) + joint) * 4) + coord;
}
} // namespace
// =================================================================================================
// =================================================================================================
Triangulator::Triangulator(float min_match_score, size_t min_group_size)
float &PoseBatch2D::at(size_t view, size_t person, size_t joint, size_t coord)
{
this->triangulator = new TriangulatorInternal(min_match_score, min_group_size);
return data[pose2d_offset(view, person, joint, coord, max_persons, num_joints)];
}
const float &PoseBatch2DView::at(size_t view, size_t person, size_t joint, size_t coord) const
{
return data[pose2d_offset(view, person, joint, coord, max_persons, num_joints)];
}
const float &PoseBatch3DView::at(size_t person, size_t joint, size_t coord) const
{
return data[pose3d_offset(person, joint, coord, num_joints)];
}
int64_t TrackedPoseBatch3DView::track_id(size_t person) const
{
return track_ids[person];
}
const float &TrackedPoseBatch3DView::at(size_t person, size_t joint, size_t coord) const
{
return data[pose3d_offset(person, joint, coord, num_joints)];
}
const float &PoseBatch2D::at(size_t view, size_t person, size_t joint, size_t coord) const
{
return data[pose2d_offset(view, person, joint, coord, max_persons, num_joints)];
}
PoseBatch2DView PoseBatch2D::view() const
{
return PoseBatch2DView {data.data(), person_counts.data(), num_views, max_persons, num_joints};
}
PoseBatch2D PoseBatch2D::from_nested(const RaggedPoses2D &poses_2d)
{
PoseBatch2D batch;
batch.num_views = poses_2d.size();
for (const auto &view : poses_2d)
{
batch.max_persons = std::max(batch.max_persons, view.size());
if (!view.empty())
{
if (batch.num_joints == 0)
{
batch.num_joints = view[0].size();
}
else if (batch.num_joints != view[0].size())
{
throw std::invalid_argument("All views must use the same joint count.");
}
for (const auto &person : view)
{
if (person.size() != batch.num_joints)
{
throw std::invalid_argument("All persons must use the same joint count.");
}
}
}
}
batch.person_counts.resize(batch.num_views);
batch.data.assign(batch.num_views * batch.max_persons * batch.num_joints * 3, 0.0f);
for (size_t view = 0; view < batch.num_views; ++view)
{
batch.person_counts[view] = static_cast<uint32_t>(poses_2d[view].size());
for (size_t person = 0; person < poses_2d[view].size(); ++person)
{
for (size_t joint = 0; joint < batch.num_joints; ++joint)
{
for (size_t coord = 0; coord < 3; ++coord)
{
batch.at(view, person, joint, coord) = poses_2d[view][person][joint][coord];
}
}
}
}
return batch;
}
// =================================================================================================
std::vector<std::vector<std::array<float, 4>>> Triangulator::triangulate_poses(
const std::vector<std::vector<std::vector<std::array<float, 3>>>> &poses_2d,
const std::vector<Camera> &cameras,
const std::array<std::array<float, 3>, 2> &roomparams,
const std::vector<std::string> &joint_names)
float &PoseBatch3D::at(size_t person, size_t joint, size_t coord)
{
return this->triangulator->triangulate_poses(poses_2d, cameras, roomparams, joint_names);
return data[pose3d_offset(person, joint, coord, num_joints)];
}
// =================================================================================================
void Triangulator::reset()
const float &PoseBatch3D::at(size_t person, size_t joint, size_t coord) const
{
this->triangulator->reset();
return data[pose3d_offset(person, joint, coord, num_joints)];
}
// =================================================================================================
void Triangulator::print_stats()
PoseBatch3DView PoseBatch3D::view() const
{
this->triangulator->print_stats();
return PoseBatch3DView {data.data(), num_persons, num_joints};
}
NestedPoses3D PoseBatch3D::to_nested() const
{
NestedPoses3D poses_3d(num_persons);
for (size_t person = 0; person < num_persons; ++person)
{
poses_3d[person].resize(num_joints);
for (size_t joint = 0; joint < num_joints; ++joint)
{
for (size_t coord = 0; coord < 4; ++coord)
{
poses_3d[person][joint][coord] = at(person, joint, coord);
}
}
}
return poses_3d;
}
PoseBatch3D PoseBatch3D::from_nested(const NestedPoses3D &poses_3d)
{
PoseBatch3D batch;
batch.num_persons = poses_3d.size();
if (!poses_3d.empty())
{
batch.num_joints = poses_3d[0].size();
}
batch.data.resize(batch.num_persons * batch.num_joints * 4);
for (size_t person = 0; person < batch.num_persons; ++person)
{
if (poses_3d[person].size() != batch.num_joints)
{
throw std::invalid_argument("All 3D poses must use the same joint count.");
}
for (size_t joint = 0; joint < batch.num_joints; ++joint)
{
for (size_t coord = 0; coord < 4; ++coord)
{
batch.at(person, joint, coord) = poses_3d[person][joint][coord];
}
}
}
return batch;
}
+259 -39
View File
@@ -1,5 +1,7 @@
#pragma once
#include <array>
#include <cstdint>
#include <string>
#include <vector>
@@ -7,48 +9,266 @@
// =================================================================================================
// Forward declaration of the class, that swig does not try to parse all its dependencies.
class TriangulatorInternal;
using RaggedPoses2D = std::vector<std::vector<std::vector<std::array<float, 3>>>>;
using NestedPoses3D = std::vector<std::vector<std::array<float, 4>>>;
// =================================================================================================
class Triangulator
struct PoseBatch2DView
{
public:
/**
* Triangulator to predict poses from multiple views.
*
*
* @param min_match_score Minimum score to consider a triangulated joint as valid.
* @param min_group_size Minimum number of camera pairs that need to see a person.
*/
Triangulator(
float min_match_score = 0.95,
size_t min_group_size = 1);
const float *data = nullptr;
const uint32_t *person_counts = nullptr;
size_t num_views = 0;
size_t max_persons = 0;
size_t num_joints = 0;
/**
* Calculate a triangulation.
*
*
* @param poses_2d List of shape [views, persons, joints, 3], containing the 2D poses.
* @param cameras List of cameras.
* @param roomparams Room parameters (room size, room center).
* @param joint_names List of 2D joint names.
*
* @return List of shape [persons, joints, 4], containing the 3D poses.
*/
std::vector<std::vector<std::array<float, 4>>> triangulate_poses(
const std::vector<std::vector<std::vector<std::array<float, 3>>>> &poses_2d,
const std::vector<Camera> &cameras,
const std::array<std::array<float, 3>, 2> &roomparams,
const std::vector<std::string> &joint_names);
/** Reset the triangulator. */
void reset();
/** Print triangulation statistics. */
void print_stats();
private:
TriangulatorInternal *triangulator;
const float &at(size_t view, size_t person, size_t joint, size_t coord) const;
};
struct PoseBatch3DView
{
const float *data = nullptr;
size_t num_persons = 0;
size_t num_joints = 0;
const float &at(size_t person, size_t joint, size_t coord) const;
};
struct TrackedPoseBatch3DView
{
const int64_t *track_ids = nullptr;
const float *data = nullptr;
size_t num_persons = 0;
size_t num_joints = 0;
int64_t track_id(size_t person) const;
const float &at(size_t person, size_t joint, size_t coord) const;
};
struct PoseBatch2D
{
std::vector<float> data;
std::vector<uint32_t> person_counts;
size_t num_views = 0;
size_t max_persons = 0;
size_t num_joints = 0;
float &at(size_t view, size_t person, size_t joint, size_t coord);
const float &at(size_t view, size_t person, size_t joint, size_t coord) const;
PoseBatch2DView view() const;
static PoseBatch2D from_nested(const RaggedPoses2D &poses_2d);
};
struct PoseBatch3D
{
std::vector<float> data;
size_t num_persons = 0;
size_t num_joints = 0;
float &at(size_t person, size_t joint, size_t coord);
const float &at(size_t person, size_t joint, size_t coord) const;
PoseBatch3DView view() const;
NestedPoses3D to_nested() const;
static PoseBatch3D from_nested(const NestedPoses3D &poses_3d);
};
// =================================================================================================
struct PairCandidate
{
int view1 = -1;
int view2 = -1;
int person1 = -1;
int person2 = -1;
int global_person1 = -1;
int global_person2 = -1;
};
struct PreviousPoseMatch
{
int previous_pose_index = -1;
int64_t previous_track_id = -1;
float score_view1 = 0.0f;
float score_view2 = 0.0f;
bool matched_view1 = false;
bool matched_view2 = false;
bool kept = true;
std::string decision = "unfiltered";
};
struct PreviousPoseFilterDebug
{
bool used_previous_poses = false;
std::vector<PreviousPoseMatch> matches;
std::vector<int> kept_pair_indices;
std::vector<PairCandidate> kept_pairs;
};
struct CoreProposalDebug
{
int pair_index = -1;
PairCandidate pair;
std::vector<std::array<float, 4>> pose_3d;
float score = 0.0f;
bool kept = false;
std::string drop_reason = "uninitialized";
};
struct ProposalGroupDebug
{
std::array<float, 3> center = {0.0f, 0.0f, 0.0f};
std::vector<std::array<float, 4>> pose_3d;
std::vector<int> proposal_indices;
};
struct GroupingDebug
{
std::vector<ProposalGroupDebug> initial_groups;
std::vector<int> duplicate_pair_drops;
std::vector<ProposalGroupDebug> groups;
};
struct FullProposalDebug
{
int source_core_proposal_index = -1;
PairCandidate pair;
std::vector<std::array<float, 4>> pose_3d;
};
struct MergeDebug
{
std::vector<std::vector<std::array<float, 4>>> merged_poses;
std::vector<std::vector<int>> group_proposal_indices;
};
enum class AssociationStatus
{
Matched,
New,
Ambiguous,
};
struct AssociationReport
{
std::vector<int> pose_previous_indices;
std::vector<int64_t> pose_previous_track_ids;
std::vector<AssociationStatus> pose_status;
std::vector<std::vector<int>> pose_candidate_previous_indices;
std::vector<std::vector<int64_t>> pose_candidate_previous_track_ids;
std::vector<int> unmatched_previous_indices;
std::vector<int64_t> unmatched_previous_track_ids;
std::vector<int> new_pose_indices;
std::vector<int> ambiguous_pose_indices;
};
struct FinalPoseAssociationDebug
{
int final_pose_index = -1;
std::vector<int> source_core_proposal_indices;
std::vector<int> source_pair_indices;
std::vector<int> candidate_previous_indices;
std::vector<int64_t> candidate_previous_track_ids;
int resolved_previous_index = -1;
int64_t resolved_previous_track_id = -1;
AssociationStatus status = AssociationStatus::New;
};
struct TriangulationTrace
{
std::vector<PairCandidate> pairs;
PreviousPoseFilterDebug previous_filter;
std::vector<CoreProposalDebug> core_proposals;
GroupingDebug grouping;
std::vector<FullProposalDebug> full_proposals;
MergeDebug merge;
AssociationReport association;
std::vector<FinalPoseAssociationDebug> final_pose_associations;
PoseBatch3D final_poses;
};
struct TriangulationResult
{
PoseBatch3D poses;
AssociationReport association;
};
// =================================================================================================
struct TriangulationOptions
{
float min_match_score = 0.95f;
size_t min_group_size = 1;
};
struct TriangulationConfig
{
std::vector<Camera> cameras;
std::array<std::array<float, 3>, 2> roomparams {{{0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f}}};
std::vector<std::string> joint_names;
TriangulationOptions options;
};
// =================================================================================================
// Build the cross-view 2D pose pair candidates that seed triangulation.
std::vector<PairCandidate> build_pair_candidates(const PoseBatch2DView &poses_2d);
// Debug view of the previous-pose filtering stage: reports what is kept/rejected when
// pair candidates are filtered against the previous frame's tracked 3D poses.
PreviousPoseFilterDebug filter_pairs_with_previous_poses(
    const PoseBatch2DView &poses_2d,
    const TriangulationConfig &config,
    const TrackedPoseBatch3DView &previous_poses_3d,
    const TriangulationOptions *options_override = nullptr);
// Run the full triangulation while recording every intermediate stage (see TriangulationTrace).
// previous_poses_3d may be nullptr — presumably temporal association is then skipped; confirm.
TriangulationTrace triangulate_debug(
    const PoseBatch2DView &poses_2d,
    const TriangulationConfig &config,
    const TrackedPoseBatch3DView *previous_poses_3d = nullptr,
    const TriangulationOptions *options_override = nullptr);
// =================================================================================================
/**
* Calculate a triangulation using a padded pose tensor.
*
* @param poses_2d Padded poses of shape [views, max_persons, joints, 3].
* @param config Triangulation configuration (cameras, room parameters, joint names, options).
* @param options_override Optional per-call options override. Defaults to config.options.
*
* @return Pose tensor of shape [persons, joints, 4].
*/
PoseBatch3D triangulate_poses(
    const PoseBatch2DView &poses_2d,
    const TriangulationConfig &config,
    const TriangulationOptions *options_override = nullptr);
/**
 * Triangulate and additionally associate the result with the previous frame's tracked poses.
 *
 * @param poses_2d Padded poses of shape [views, max_persons, joints, 3].
 * @param config Triangulation configuration (cameras, room parameters, joint names, options).
 * @param previous_poses_3d Tracked poses of the previous frame used for association.
 * @param options_override Optional per-call options override. Defaults to config.options.
 *
 * @return Triangulated poses plus an AssociationReport linking them to previous tracks.
 */
TriangulationResult triangulate_with_report(
    const PoseBatch2DView &poses_2d,
    const TriangulationConfig &config,
    const TrackedPoseBatch3DView &previous_poses_3d,
    const TriangulationOptions *options_override = nullptr);
// Convenience overload for an owning PoseBatch2D: adapt it to a view and forward.
inline PoseBatch3D triangulate_poses(
    const PoseBatch2D &poses_2d,
    const TriangulationConfig &config,
    const TriangulationOptions *options_override = nullptr)
{
    const auto &batch_view = poses_2d.view();
    return triangulate_poses(batch_view, config, options_override);
}
// Convenience overload for an owning PoseBatch2D; runs the debug triangulation
// without any previous-frame poses.
inline TriangulationTrace triangulate_debug(
    const PoseBatch2D &poses_2d,
    const TriangulationConfig &config,
    const TriangulationOptions *options_override = nullptr)
{
    const auto &batch_view = poses_2d.view();
    return triangulate_debug(batch_view, config, /*previous_poses_3d=*/nullptr, options_override);
}
// Convenience overload for an owning PoseBatch2D: adapt it to a view and forward.
inline TriangulationResult triangulate_with_report(
    const PoseBatch2D &poses_2d,
    const TriangulationConfig &config,
    const TrackedPoseBatch3DView &previous_poses_3d,
    const TriangulationOptions *options_override = nullptr)
{
    const auto &batch_view = poses_2d.view();
    return triangulate_with_report(batch_view, config, previous_poses_3d, options_override);
}
-325
View File
@@ -1,325 +0,0 @@
#pragma once
#include <array>
#include <string>
#include <vector>
#include <algorithm>
#include <limits>
#include <cmath>
#include <iostream>
// =================================================================================================
// A single tracked person: per-frame pose history plus timestamps and a stable id.
struct Track
{
    std::vector<std::vector<std::array<float, 4>>> core_poses; // per frame: core joints, [x, y, z, conf]
    std::vector<std::vector<std::array<float, 4>>> full_poses; // per frame: all joints, [x, y, z, conf]
    std::vector<double> timestamps;                            // one timestamp per stored frame
    size_t id;                                                 // track id assigned at creation
};
// =================================================================================================
// Associates per-frame 3D poses with persistent tracks and refines them by limiting
// physically implausible joint movement (see refine_pose).
class PoseTracker
{
public:
    // max_movement_speed: maximum plausible joint speed (distance units per time unit).
    // max_distance: maximum mean core-joint distance to match a pose to an existing track.
    PoseTracker(float max_movement_speed, float max_distance);
    // Match poses_3d against the current tracks, update them, and return one
    // (track id, refined pose) tuple per live track.
    // joint_names gives the joint order of poses_3d; timestamp is the frame time.
    std::vector<std::tuple<size_t, std::vector<std::array<float, 4>>>> track_poses(
        const std::vector<std::vector<std::array<float, 4>>> &poses_3d,
        const std::vector<std::string> &joint_names,
        const double timestamp);
    // Drop all tracks and timestamps (e.g. on a scene change).
    void reset();
private:
    float max_distance;       // matching threshold (compared via squared distance)
    float max_movement_speed; // movement-clipping threshold used by refine_pose
    size_t history_size = 3;  // number of recent frames kept per track
    std::vector<double> timestamps; // timestamps of the last history_size frames
    std::vector<Track> pose_tracks; // currently alive tracks
    // Subset of joints used for matching poses to tracks.
    const std::vector<std::string> core_joints = {
        "shoulder_left",
        "shoulder_right",
        "hip_left",
        "hip_right",
        "elbow_left",
        "elbow_right",
        "knee_left",
        "knee_right",
        "wrist_left",
        "wrist_right",
        "ankle_left",
        "ankle_right",
    };
    // Return (best track index or -1, best mean squared joint distance) for a core pose.
    std::tuple<int, float> match_to_track(const std::vector<std::array<float, 4>> &core_pose_3d);
    // Return the newest full pose of the track with movement-speed clipping applied.
    std::vector<std::array<float, 4>> refine_pose(const Track &track);
};
// =================================================================================================
// =================================================================================================
// Construct a tracker.
//
// @param max_movement_speed Maximum plausible joint movement speed (distance per time unit),
//                           used by refine_pose to clip jumps.
// @param max_distance Maximum mean core-joint distance for matching a pose to a track.
PoseTracker::PoseTracker(float max_movement_speed, float max_distance)
    // Idiom fix: initialize in the member initializer list instead of assigning in the
    // body; order follows the member declaration order (max_distance is declared first).
    : max_distance(max_distance),
      max_movement_speed(max_movement_speed)
{
}
// =================================================================================================
// Forget all tracking state so the next processed frame starts from scratch.
void PoseTracker::reset()
{
    timestamps.clear();
    pose_tracks.clear();
}
// =================================================================================================
// Match the given 3D poses against the existing tracks, update the track histories,
// prune entries that fell out of the kept frame window, and return one
// (track id, refined pose) tuple per track considered alive.
//
// @param poses_3d Poses of the current frame, [persons][joints][x, y, z, conf].
// @param joint_names Joint order of poses_3d; assumed to contain every core joint.
// @param timestamp Time of the current frame (same unit as used for max_movement_speed).
std::vector<std::tuple<size_t, std::vector<std::array<float, 4>>>> PoseTracker::track_poses(
    const std::vector<std::vector<std::array<float, 4>>> &poses_3d,
    const std::vector<std::string> &joint_names,
    const double timestamp)
{
    // Extract core joints
    // NOTE(review): if a core joint name is missing from joint_names, std::find returns
    // end() and the stored index is out of range for poses_3d below — this assumes
    // callers always pass a layout containing all core joints; confirm before reuse.
    std::vector<size_t> core_joint_idx;
    for (const auto &joint : core_joints)
    {
        auto it = std::find(joint_names.begin(), joint_names.end(), joint);
        core_joint_idx.push_back(std::distance(joint_names.begin(), it));
    }
    std::vector<std::vector<std::array<float, 4>>> core_poses;
    core_poses.resize(poses_3d.size());
    for (size_t i = 0; i < poses_3d.size(); ++i)
    {
        core_poses[i].resize(core_joint_idx.size());
        for (size_t j = 0; j < core_joint_idx.size(); ++j)
        {
            for (size_t k = 0; k < 4; ++k)
            {
                core_poses[i][j][k] = poses_3d[i][core_joint_idx[j]][k];
            }
        }
    }
    // Match core poses to tracks
    std::vector<std::tuple<size_t, int, float>> matches;
    for (size_t i = 0; i < core_poses.size(); ++i)
    {
        auto [track_idx, distance_sq] = match_to_track(core_poses[i]);
        matches.emplace_back(i, track_idx, distance_sq);
    }
    // Process matches best-first so the closest pose wins a contested track
    std::sort(matches.begin(), matches.end(),
              [](const auto &a, const auto &b)
              { return std::get<2>(a) < std::get<2>(b); });
    // If track is matched multiple times, only add the best and create new tracks for the rest
    std::vector<bool> used(pose_tracks.size(), false);
    for (size_t i = 0; i < matches.size(); ++i)
    {
        auto [pose_idx, track_idx, distance_sq] = matches[i];
        if (track_idx != -1 && !used[track_idx])
        {
            used[track_idx] = true;
        }
        else
        {
            // Track already taken (or no match at all): force a new track below
            std::get<1>(matches[i]) = -1;
        }
    }
    // Update tracks
    for (size_t i = 0; i < matches.size(); ++i)
    {
        auto [pose_idx, track_idx, distance_sq] = matches[i];
        if (track_idx == -1)
        {
            // Create a new track
            // NOTE(review): the id is the current track count, so ids may repeat after
            // tracks are pruned below — confirm consumers tolerate reused ids.
            Track new_track;
            new_track.core_poses.push_back(core_poses[pose_idx]);
            new_track.full_poses.push_back(poses_3d[pose_idx]);
            new_track.timestamps.push_back(timestamp);
            new_track.id = pose_tracks.size();
            pose_tracks.push_back(new_track);
        }
        else
        {
            // Update existing track
            auto &track = pose_tracks[track_idx];
            track.core_poses.push_back(core_poses[pose_idx]);
            track.full_poses.push_back(poses_3d[pose_idx]);
            track.timestamps.push_back(timestamp);
        }
    }
    // Remove old track entries
    timestamps.push_back(timestamp);
    if (timestamps.size() > history_size)
    {
        timestamps.erase(timestamps.begin());
    }
    // Oldest timestamp still inside the kept window; anything older is pruned
    double max_age = timestamps.front();
    for (size_t i = 0; i < pose_tracks.size();)
    {
        auto &track = pose_tracks[i];
        for (size_t j = 0; j < track.timestamps.size();)
        {
            double ts = track.timestamps[j];
            if (ts < max_age)
            {
                track.core_poses.erase(track.core_poses.begin() + j);
                track.full_poses.erase(track.full_poses.begin() + j);
                track.timestamps.erase(track.timestamps.begin() + j);
            }
            else
            {
                j++;
            }
        }
        if (track.timestamps.size() == 0)
        {
            // Track has no recent observations left, drop it entirely
            pose_tracks.erase(pose_tracks.begin() + i);
        }
        else
        {
            ++i;
        }
    }
    // Refine poses
    std::vector<std::tuple<size_t, std::vector<std::array<float, 4>>>> tracked_poses;
    for (size_t i = 0; i < pose_tracks.size(); ++i)
    {
        auto &track = pose_tracks[i];
        // Create a refined pose for current tracks, or old tracks with a bit history,
        // to avoid continuing tracks of flickering persons
        if (track.core_poses.size() >= std::ceil(history_size / 2.0) ||
            track.timestamps.back() == timestamps.back())
        {
            std::vector<std::array<float, 4>> refined_pose = refine_pose(track);
            tracked_poses.emplace_back(track.id, refined_pose);
        }
    }
    return tracked_poses;
}
// =================================================================================================
std::tuple<int, float> PoseTracker::match_to_track(
const std::vector<std::array<float, 4>> &core_pose_3d)
{
int best_track = -1;
float best_distance_sq = max_distance * max_distance;
for (size_t i = 0; i < pose_tracks.size(); ++i)
{
const auto &track = pose_tracks[i];
if (track.core_poses.size() == 0)
{
continue;
}
// Calculate distance to the last pose in the track
const auto &last_pose = track.core_poses.back();
float distance_sq = 0.0;
for (size_t j = 0; j < core_pose_3d.size(); ++j)
{
float dx = core_pose_3d[j][0] - last_pose[j][0];
float dy = core_pose_3d[j][1] - last_pose[j][1];
float dz = core_pose_3d[j][2] - last_pose[j][2];
distance_sq += dx * dx + dy * dy + dz * dz;
}
distance_sq /= core_pose_3d.size();
if (distance_sq < best_distance_sq)
{
best_distance_sq = distance_sq;
best_track = static_cast<int>(i);
}
}
return {best_track, best_distance_sq};
}
// =================================================================================================
// Restrict the newest full pose of a track to physically plausible movement, by clipping
// each joint to the maximum movement distance from one of the track's history poses.
//
// While advanced sensor filtering techniques, like using a Kalman-Filter, might improve the
// average accuracy of the pose, they introduce update delays on fast movement changes. For
// example, if a person stands still for a while and then suddenly moves, the first few frames
// will likely be treated as outliers, since the filter will not be able to adapt fast enough.
// This behaviour is not desired in a real-time critical applications, where the pose needs to
// be updated to the real physical position of the person as fast as possible. Therefore, only
// the movement speed is limited here.
std::vector<std::array<float, 4>> PoseTracker::refine_pose(const Track &track)
{
    // Not enough history to judge movement yet
    if (track.core_poses.size() < 2)
    {
        return track.full_poses.back();
    }
    // Calculate maximum possible (squared) movement distance from each history pose
    std::vector<float> max_movement_dists_sq;
    max_movement_dists_sq.resize(track.core_poses.size() - 1);
    double last_timestamp = track.timestamps.back();
    for (size_t i = 0; i < track.core_poses.size() - 1; ++i)
    {
        double ts = track.timestamps[i];
        float max_movement = max_movement_speed * (last_timestamp - ts);
        max_movement_dists_sq[i] = max_movement * max_movement;
    }
    // Clip joint if required
    std::vector<std::array<float, 4>> refined_pose = track.full_poses.back();
    for (size_t i = 0; i < refined_pose.size(); ++i)
    {
        float min_dist_sq = std::numeric_limits<float>::infinity();
        size_t closest_idx = 0;
        bool clip = true;
        for (size_t j = 0; j < max_movement_dists_sq.size(); ++j)
        {
            float dx = refined_pose[i][0] - track.full_poses[j][i][0];
            float dy = refined_pose[i][1] - track.full_poses[j][i][1];
            float dz = refined_pose[i][2] - track.full_poses[j][i][2];
            float dist_sq = dx * dx + dy * dy + dz * dz;
            if (dist_sq < min_dist_sq)
            {
                min_dist_sq = dist_sq;
                closest_idx = j;
            }
            if (dist_sq <= max_movement_dists_sq[j])
            {
                // Within the allowed radius of at least one history pose: keep as-is
                clip = false;
                break;
            }
        }
        if (clip)
        {
            // Pull the joint back onto the allowed-movement sphere around the closest
            // history position.
            // BUG FIX: the clip factor must be the ratio of distances, i.e.
            // max_movement / dist == sqrt(max_dist_sq / dist_sq). The previous code used
            // the ratio of *squared* distances, which shrank the allowed movement to
            // max_dist_sq / dist — the larger the jump, the harder it was over-clipped.
            float scale = std::sqrt(max_movement_dists_sq[closest_idx] / min_dist_sq);
            float dx = refined_pose[i][0] - track.full_poses[closest_idx][i][0];
            float dy = refined_pose[i][1] - track.full_poses[closest_idx][i][1];
            float dz = refined_pose[i][2] - track.full_poses[closest_idx][i][2];
            refined_pose[i][0] = track.full_poses[closest_idx][i][0] + dx * scale;
            refined_pose[i][1] = track.full_poses[closest_idx][i][1] + dy * scale;
            refined_pose[i][2] = track.full_poses[closest_idx][i][2] + dz * scale;
            // Set confidence to a low value if the joint is clipped
            refined_pose[i][3] = 0.1;
        }
    }
    return refined_pose;
}
+1048 -690
View File
File diff suppressed because it is too large Load Diff
-130
View File
@@ -1,130 +0,0 @@
#pragma once
#include <array>
#include <string>
#include <vector>
#include "camera.hpp"
// =================================================================================================
// Internal implementation of the multi-view pose triangulator: pairs 2D detections
// across cameras, scores and triangulates candidate poses, groups and merges them,
// and post-processes the resulting 3D skeletons.
class TriangulatorInternal
{
public:
    // min_match_score: minimum 2D/2D match score for a valid triangulation.
    // min_group_size: minimum number of camera pairs that must detect the same person.
    TriangulatorInternal(float min_match_score, size_t min_group_size);
    // Triangulate padded 2D poses [views][persons][joints][x, y, score] into
    // 3D poses [persons][joints][x, y, z, conf].
    std::vector<std::vector<std::array<float, 4>>> triangulate_poses(
        const std::vector<std::vector<std::vector<std::array<float, 3>>>> &poses_2d,
        const std::vector<Camera> &cameras,
        const std::array<std::array<float, 3>, 2> &roomparams,
        const std::vector<std::string> &joint_names);
    // Drop temporal state (last_poses_3d), e.g. on a scene change.
    void reset();
    // Print accumulated per-stage timing statistics.
    void print_stats();
private:
    float min_match_score;
    // BUG FIX: declared as float although the constructor takes a size_t count;
    // it is a "number of camera pairs" threshold, so store it as size_t.
    size_t min_group_size;
    size_t num_cams = 0; // set per call; initialized to avoid an indeterminate read
    // Joints used for the core matching/triangulation stage.
    const std::vector<std::string> core_joints = {
        "shoulder_left",
        "shoulder_right",
        "hip_left",
        "hip_right",
        "elbow_left",
        "elbow_right",
        "knee_left",
        "knee_right",
        "wrist_left",
        "wrist_right",
        "ankle_left",
        "ankle_right",
    };
    // Limb connections between core joints, used for plausibility scoring.
    const std::vector<std::pair<std::string, std::string>> core_limbs = {
        {"knee_left", "ankle_left"},
        {"hip_left", "knee_left"},
        {"hip_right", "knee_right"},
        {"knee_right", "ankle_right"},
        {"elbow_left", "wrist_left"},
        {"elbow_right", "wrist_right"},
        {"shoulder_left", "elbow_left"},
        {"shoulder_right", "elbow_right"},
    };
    // 3D poses of the previous call, kept for temporal filtering.
    std::vector<std::vector<std::array<float, 4>>> last_poses_3d;
    // Remove lens distortion from 2D poses in place for one camera.
    void undistort_poses(
        std::vector<std::vector<std::array<float, 3>>> &poses_2d, CameraInternal &icam);
    // Project 3D poses into a camera; optionally also return per-joint distances.
    std::tuple<std::vector<std::vector<std::array<float, 3>>>, std::vector<std::vector<float>>>
    project_poses(
        const std::vector<std::vector<std::array<float, 4>>> &poses_3d,
        const CameraInternal &icam,
        bool calc_dists);
    // Score the agreement between a 2D pose and a reprojected pose.
    float calc_pose_score(
        const std::vector<std::array<float, 3>> &pose1,
        const std::vector<std::array<float, 3>> &pose2,
        const std::vector<float> &dist1,
        const CameraInternal &icam);
    std::vector<float> score_projection(
        const std::vector<std::array<float, 3>> &pose,
        const std::vector<std::array<float, 3>> &repro,
        const std::vector<float> &dists,
        const std::vector<bool> &mask,
        float iscale);
    // Triangulate a 2-view pair and return the pose with its match score.
    std::pair<std::vector<std::array<float, 4>>, float> triangulate_and_score(
        const std::vector<std::array<float, 3>> &pose1,
        const std::vector<std::array<float, 3>> &pose2,
        const CameraInternal &cam1,
        const CameraInternal &cam2,
        const std::array<std::array<float, 3>, 2> &roomparams,
        const std::vector<std::array<size_t, 2>> &core_limbs_idx);
    // Group pairwise candidates that belong to the same person.
    std::vector<std::tuple<
        std::array<float, 3>, std::vector<std::array<float, 4>>, std::vector<int>>>
    calc_grouping(
        const std::vector<std::pair<
            std::tuple<int, int, int, int>, std::pair<int, int>>> &all_pairs,
        const std::vector<std::pair<std::vector<std::array<float, 4>>, float>> &all_scored_poses,
        float min_score);
    // Merge a group of candidate poses into a single pose.
    std::vector<std::array<float, 4>> merge_group(
        const std::vector<std::vector<std::array<float, 4>>> &poses_3d,
        float min_score);
    // Post-processing helpers operating on the merged poses.
    void add_extra_joints(
        std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::vector<std::string> &joint_names);
    void filter_poses(
        std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::array<std::array<float, 3>, 2> &roomparams,
        const std::vector<size_t> &core_joint_idx,
        const std::vector<std::array<size_t, 2>> &core_limbs_idx);
    void add_missing_joints(
        std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::vector<std::string> &joint_names);
    void replace_far_joints(
        std::vector<std::vector<std::array<float, 4>>> &poses,
        const std::vector<std::string> &joint_names);
    // Statistics (accumulated wall-clock seconds per pipeline stage)
    double num_calls = 0;
    double total_time = 0;
    double init_time = 0;
    double undistort_time = 0;
    double project_time = 0;
    double match_time = 0;
    double pairs_time = 0;
    double pair_scoring_time = 0;
    double grouping_time = 0;
    double full_time = 0;
    double merge_time = 0;
    double post_time = 0;
    double convert_time = 0;
};
-1
View File
@@ -5,7 +5,6 @@ docker run --privileged --rm --network host -it \
--runtime nvidia --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 \
--volume "$(pwd)"/:/RapidPoseTriangulation/ \
--volume "$(pwd)"/../datasets/:/datasets/ \
--volume "$(pwd)"/skelda/:/skelda/ \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--env DISPLAY --env QT_X11_NO_MITSHM=1 \
rapidposetriangulation
-325
View File
@@ -1,325 +0,0 @@
#include <chrono>
#include <cmath>
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
// OpenCV
#include <opencv2/opencv.hpp>
// JSON library
#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp"
using json = nlohmann::json;
#include "/RapidPoseTriangulation/rpt/camera.hpp"
#include "/RapidPoseTriangulation/rpt/interface.hpp"
#include "/RapidPoseTriangulation/rpt/tracker.hpp"
#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp"
#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp"
// =================================================================================================
static const std::string path_data = "/tmp/rpt/all.json";
static const std::string path_cfg = "/tmp/rpt/config.json";
// =================================================================================================
// Load and preprocess the per-view images of one dataset item.
//
// Reads every path in item["imgpaths"] as RGB. For human36m, off-size images are rescaled
// to 1000x1000 and the matching camera intrinsics in item["cameras"] are scaled to fit.
// Finally all images are converted to a Bayer pattern to simulate raw camera input.
std::vector<cv::Mat> load_images(json &item)
{
    // Load images
    std::vector<cv::Mat> images;
    for (size_t j = 0; j < item["imgpaths"].size(); j++)
    {
        auto ipath = item["imgpaths"][j].get<std::string>();
        cv::Mat image = cv::imread(ipath, cv::IMREAD_COLOR);
        cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
        images.push_back(image);
    }
    if (item["dataset_name"] == "human36m")
    {
        // Since the images don't have the same shape, rescale some of them
        for (size_t i = 0; i < images.size(); i++)
        {
            cv::Mat &img = images[i];
            cv::Size ishape = img.size();
            if (ishape != cv::Size(1000, 1000))
            {
                // BUG FIX: take a reference into the json document. The previous
                // `auto cam = item["cameras"][i];` copied the camera object, so the
                // rescaled intrinsics were silently discarded and later triangulation
                // used the unscaled K for resized images.
                auto &cam = item["cameras"][i];
                cam["K"][1][1] = cam["K"][1][1].get<float>() * (1000.0 / ishape.height);
                cam["K"][1][2] = cam["K"][1][2].get<float>() * (1000.0 / ishape.height);
                cam["K"][0][0] = cam["K"][0][0].get<float>() * (1000.0 / ishape.width);
                cam["K"][0][2] = cam["K"][0][2].get<float>() * (1000.0 / ishape.width);
                // Resize in place; img is a reference into images, so no write-back needed
                cv::resize(img, img, cv::Size(1000, 1000));
            }
        }
    }
    // Convert image format to Bayer encoding to simulate real camera input
    // This also resulted in notably better MPJPE results in most cases, presumably since the
    // demosaicing algorithm from OpenCV is better than the default one from the cameras
    for (size_t i = 0; i < images.size(); i++)
    {
        cv::Mat &img = images[i];
        cv::Mat bayer_image = utils_pipeline::rgb2bayer(img);
        images[i] = std::move(bayer_image);
    }
    return images;
}
// =================================================================================================
// Read the entire file at `path` into a string.
// Throws std::runtime_error when the file cannot be opened.
std::string read_file(const std::string &path)
{
    std::ifstream input(path);
    if (!input.is_open())
    {
        throw std::runtime_error("Unable to open file: " + path);
    }
    std::ostringstream contents;
    contents << input.rdbuf();
    return contents.str();
}
// Write `content` to `path` in binary mode, replacing any existing file.
// Throws std::runtime_error when the file cannot be opened or the write fails.
void write_file(const std::string &path, const std::string &content)
{
    std::ofstream output(path, std::ios::out | std::ios::binary);
    if (!output.is_open())
    {
        throw std::runtime_error("Unable to open file for writing: " + path);
    }
    // Write the raw bytes so embedded NUL characters survive unchanged
    output.write(content.data(), static_cast<std::streamsize>(content.size()));
    if (!output)
    {
        throw std::runtime_error("Error occurred while writing to file: " + path);
    }
    output.close();
}
// =================================================================================================
// Run the full evaluation pipeline: load dataset and config from /tmp/rpt/, predict 2D
// poses per view, triangulate to 3D, optionally track/refine over time, print timing
// statistics, and dump all results to /tmp/rpt/results.json.
int main(int argc, char **argv)
{
    // Load the files
    auto dataset = json::parse(read_file(path_data));
    auto config = json::parse(read_file(path_cfg));
    // Load the configuration
    const std::map<std::string, bool> whole_body = config["whole_body"];
    const float min_bbox_score = config["min_bbox_score"];
    const float min_bbox_area = config["min_bbox_area"];
    const bool batch_poses = config["batch_poses"];
    const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body);
    const float min_match_score = config["min_match_score"];
    const size_t min_group_size = config["min_group_size"];
    const int take_interval = config["take_interval"];
    const float ifps = config["fps"];
    const float max_movement_speed = config["max_movement_speed"];
    const float max_track_distance = config["max_track_distance"];
    // Load 2D model
    bool use_wb = utils_pipeline::use_whole_body(whole_body);
    std::unique_ptr<utils_2d_pose::PosePredictor> kpt_model =
        std::make_unique<utils_2d_pose::PosePredictor>(
            use_wb, min_bbox_score, min_bbox_area, batch_poses);
    // Load 3D models
    std::unique_ptr<Triangulator> tri_model = std::make_unique<Triangulator>(
        min_match_score, min_group_size);
    std::unique_ptr<PoseTracker> pose_tracker = std::make_unique<PoseTracker>(
        max_movement_speed, max_track_distance);
    // Timers
    // NOTE(review): the code below assumes a non-empty dataset (see the warmup
    // computation further down) — confirm inputs are never empty.
    size_t time_count = dataset.size();
    std::vector<double> times_image;
    std::vector<double> times_debayer;
    std::vector<double> times_pose2d;
    std::vector<double> times_pose3d;
    std::vector<double> times_tracks;
    times_image.reserve(time_count);
    times_debayer.reserve(time_count);
    times_pose2d.reserve(time_count);
    times_pose3d.reserve(time_count);
    times_tracks.reserve(time_count);
    // Progress bar setup: one '#' is printed every print_steps items
    size_t print_steps = (size_t)std::floor((float)time_count / 100.0f);
    print_steps = std::max((size_t)1, print_steps);
    std::cout << "Running predictions: |";
    size_t bar_width = (size_t)std::ceil((float)time_count / (float)print_steps);
    for (size_t i = 0; i < bar_width; i++)
    {
        std::cout << "-";
    }
    std::cout << "|" << std::endl;
    // Calculate 2D poses [items, views, persons, joints, 3]
    std::vector<std::vector<std::vector<std::vector<std::array<float, 3>>>>> all_poses_2d;
    std::cout << "Calculating 2D poses: |";
    for (size_t i = 0; i < dataset.size(); i++)
    {
        if (i % print_steps == 0)
        {
            std::cout << "#" << std::flush;
        }
        std::chrono::duration<float> elapsed;
        auto &item = dataset[i];
        // Load images
        auto stime = std::chrono::high_resolution_clock::now();
        std::vector<cv::Mat> images = load_images(item);
        elapsed = std::chrono::high_resolution_clock::now() - stime;
        times_image.push_back(elapsed.count());
        // Demosaic images
        // NOTE(review): the inner loop variable shadows the outer `i`; behavior is
        // correct but the shadowing is easy to misread.
        stime = std::chrono::high_resolution_clock::now();
        for (size_t i = 0; i < images.size(); i++)
        {
            cv::Mat &img = images[i];
            cv::Mat rgb = utils_pipeline::bayer2rgb(img);
            images[i] = std::move(rgb);
        }
        elapsed = std::chrono::high_resolution_clock::now() - stime;
        times_debayer.push_back(elapsed.count());
        // Predict 2D poses
        stime = std::chrono::high_resolution_clock::now();
        auto poses_2d_all = kpt_model->predict(images);
        auto poses_2d_upd = utils_pipeline::update_keypoints(
            poses_2d_all, joint_names_2d, whole_body);
        elapsed = std::chrono::high_resolution_clock::now() - stime;
        times_pose2d.push_back(elapsed.count());
        all_poses_2d.push_back(std::move(poses_2d_upd));
    }
    std::cout << "|" << std::endl;
    // Calculate 3D poses [items, persons, joints, 4]
    std::vector<std::vector<std::vector<std::array<float, 4>>>> all_poses_3d;
    std::vector<std::string> all_ids;
    std::string old_scene = "";
    int old_id = -1;
    std::cout << "Calculating 3D poses: |";
    for (size_t i = 0; i < dataset.size(); i++)
    {
        if (i % print_steps == 0)
        {
            std::cout << "#" << std::flush;
        }
        std::chrono::duration<float> elapsed;
        auto &item = dataset[i];
        auto &poses_2d = all_poses_2d[i];
        // Reset temporal state whenever the scene changes or frames are not consecutive
        if (old_scene != item["scene"] || old_id + take_interval != item["index"])
        {
            // Reset last poses if scene changes
            tri_model->reset();
            pose_tracker->reset();
            old_scene = item["scene"];
        }
        auto stime = std::chrono::high_resolution_clock::now();
        // Rebuild the camera rig for this item from the dataset json
        std::vector<Camera> cameras;
        for (size_t j = 0; j < item["cameras"].size(); j++)
        {
            auto &cam = item["cameras"][j];
            Camera camera;
            camera.name = cam["name"].get<std::string>();
            camera.K = cam["K"].get<std::array<std::array<float, 3>, 3>>();
            camera.DC = cam["DC"].get<std::vector<float>>();
            camera.R = cam["R"].get<std::array<std::array<float, 3>, 3>>();
            camera.T = cam["T"].get<std::array<std::array<float, 1>, 3>>();
            camera.width = cam["width"].get<int>();
            camera.height = cam["height"].get<int>();
            camera.type = cam["type"].get<std::string>();
            cameras.push_back(camera);
        }
        // Room bounds as {size, center}
        std::array<std::array<float, 3>, 2> roomparams = {
            item["room_size"].get<std::array<float, 3>>(),
            item["room_center"].get<std::array<float, 3>>()};
        auto poses_3d = tri_model->triangulate_poses(poses_2d, cameras, roomparams, joint_names_2d);
        elapsed = std::chrono::high_resolution_clock::now() - stime;
        times_pose3d.push_back(elapsed.count());
        if (ifps <= 0)
        {
            // Disable pose tracking if frame rate is too low
            times_tracks.push_back(0.0);
        }
        else
        {
            // Track/refine poses using the frame timestamp derived from index and fps
            stime = std::chrono::high_resolution_clock::now();
            double ts = ((int)item["index"]) / ifps;
            auto pose_tracks = pose_tracker->track_poses(poses_3d, joint_names_2d, ts);
            std::vector<std::vector<std::array<float, 4>>> poses_3d_refined;
            for (size_t j = 0; j < pose_tracks.size(); j++)
            {
                auto &pose = std::get<1>(pose_tracks[j]);
                poses_3d_refined.push_back(pose);
            }
            poses_3d = poses_3d_refined;
            elapsed = std::chrono::high_resolution_clock::now() - stime;
            times_tracks.push_back(elapsed.count());
        }
        all_poses_3d.push_back(std::move(poses_3d));
        all_ids.push_back(item["id"].get<std::string>());
        old_id = item["index"];
    }
    std::cout << "|" << std::endl;
    // Print timing stats
    std::cout << "\nMetrics:" << std::endl;
    // Skip the first frames as warm-up so model initialization does not skew averages
    size_t warmup = std::min((size_t)10, time_count - 1);
    double time_image = 0.0;
    double time_debayer = 0.0;
    double time_pose2d = 0.0;
    double time_pose3d = 0.0;
    double time_tracks = 0.0;
    for (size_t i = warmup; i < time_count; i++)
    {
        time_image += times_image[i];
        time_debayer += times_debayer[i];
        time_pose2d += times_pose2d[i];
        time_pose3d += times_pose3d[i];
        time_tracks += times_tracks[i];
    }
    double avg_time_image = time_image / (time_count - warmup);
    double avg_time_debayer = time_debayer / (time_count - warmup);
    double avg_time_pose2d = time_pose2d / (time_count - warmup);
    double avg_time_pose3d = time_pose3d / (time_count - warmup);
    double avg_time_tracks = time_tracks / (time_count - warmup);
    // Throughput excludes image loading, which would not happen with live cameras
    double fps = 1.0 / (avg_time_debayer + avg_time_pose2d + avg_time_pose3d + avg_time_tracks);
    std::cout << "{\n"
              << "  \"img_loading\": " << avg_time_image << ",\n"
              << "  \"demosaicing\": " << avg_time_debayer << ",\n"
              << "  \"avg_time_2d\": " << avg_time_pose2d << ",\n"
              << "  \"avg_time_3d\": " << avg_time_pose3d << ",\n"
              << "  \"time_tracks\": " << avg_time_tracks << ",\n"
              << "  \"fps\": " << fps << "\n"
              << "}" << std::endl;
    tri_model->print_stats();
    // Store the results as json
    json all_results;
    all_results["all_ids"] = all_ids;
    all_results["all_poses_2d"] = all_poses_2d;
    all_results["all_poses_3d"] = all_poses_3d;
    all_results["joint_names_2d"] = joint_names_2d;
    all_results["joint_names_3d"] = joint_names_2d;
    // Save the results
    std::string path_results = "/tmp/rpt/results.json";
    write_file(path_results, all_results.dump(0));
    return 0;
}
-513
View File
@@ -1,513 +0,0 @@
import json
import os
import numpy as np
import utils_pipeline
from skelda import evals
from skelda.writers import json_writer
# ==================================================================================================
# Which optional whole-body keypoint groups to predict in addition to the main body joints
whole_body = {
    "foots": False,
    "face": False,
    "hands": False,
}
# Dataset to evaluate; exactly one of the keys of `datasets` below
dataset_use = "human36m"
# dataset_use = "panoptic"
# dataset_use = "mvor"
# dataset_use = "shelf"
# dataset_use = "campus"
# dataset_use = "ikeaasm"
# dataset_use = "chi3d"
# dataset_use = "tsinghua"
# dataset_use = "human36m_wb"
# dataset_use = "egohumans_tagging"
# dataset_use = "egohumans_legoassemble"
# dataset_use = "egohumans_fencing"
# dataset_use = "egohumans_basketball"
# dataset_use = "egohumans_volleyball"
# dataset_use = "egohumans_badminton"
# dataset_use = "egohumans_tennis"
# Describes the minimum area as fraction of the image size for a 2D bounding box to be considered
# If the persons are small in the image, use a lower value
default_min_bbox_area = 0.1 * 0.1
# Describes how confident a 2D bounding box needs to be to be considered
# If the persons are small in the image, or poorly recognizable, use a lower value
default_min_bbox_score = 0.3
# Describes how good two 2D poses need to match each other to create a valid triangulation
# If the quality of the 2D detections is poor, use a lower value
default_min_match_score = 0.94
# Describes the minimum number of camera pairs that need to detect the same person
# If the number of cameras is high, and the views are not occluded, use a higher value
default_min_group_size = 1
# Batch poses per image for faster processing
# If most of the time only one person is in a image, disable it, because it is slightly slower then
default_batch_poses = True
# Approach speed of EN ISO 13855 with 2000 mm/sec for hand speed
# and an additional factor to compensate for noise-based jumps
default_max_movement_speed = 2.0 * 1.5
# The size of an A4 sheet of paper which is assumed to fit between two different persons
# and additionally the distance a person can move between two frames (here at 10 fps)
default_max_track_distance = 0.3 + default_max_movement_speed / 10
# Per-dataset evaluation settings. Keys missing here presumably fall back to the
# default_* values defined above — confirm in the code that assembles the config.
datasets = {
    "human36m": {
        "path": "/datasets/human36m/skelda/pose_test.json",
        "take_interval": 5,
        "fps": 50,
        "min_match_score": 0.95,
        "min_group_size": 1,
        "min_bbox_score": 0.4,
        "min_bbox_area": 0.1 * 0.1,
        "batch_poses": False,
        "max_movement_speed": 2.0 * 1.5,
        "max_track_distance": 0.3 + default_max_movement_speed / (50 / 5),
    },
    "panoptic": {
        "path": "/datasets/panoptic/skelda/test.json",
        # Camera subsets for ablations; the first entry is the default 5-camera setup
        "cams": ["00_03", "00_06", "00_12", "00_13", "00_23"],
        # "cams": ["00_03", "00_06", "00_12"],
        # "cams": ["00_03", "00_06", "00_12", "00_13", "00_23", "00_15", "00_10"],
        # "cams": ["00_03", "00_06", "00_12", "00_13", "00_23", "00_15", "00_10", "00_21", "00_09", "00_01"],
        # "cams": [],
        "take_interval": 3,
        "fps": 30,
        "min_match_score": 0.95,
        "use_scenes": ["160906_pizza1", "160422_haggling1", "160906_ian5"],
        "min_group_size": 1,
        # "min_group_size": 1,
        # "min_group_size": 1,
        # "min_group_size": 2,
        # "min_group_size": 11,
        "min_bbox_area": 0.05 * 0.05,
        "max_track_distance": 0.3 + default_max_movement_speed / (30 / 3),
    },
    "mvor": {
        "path": "/datasets/mvor/skelda/all.json",
        "take_interval": 1,
        "fps": -1,
        "min_match_score": 0.81,
        "min_bbox_score": 0.25,
    },
    "campus": {
        "path": "/datasets/campus/skelda/test.json",
        "fps": 25,
        "take_interval": 1,
        "min_match_score": 0.91,
        "min_bbox_score": 0.5,
        "max_track_distance": 0.3 + default_max_movement_speed / 25,
    },
    "shelf": {
        "path": "/datasets/shelf/skelda/test.json",
        "take_interval": 1,
        "fps": 25,
        "min_match_score": 0.95,
        "min_group_size": 3,
        "max_track_distance": 0.3 + default_max_movement_speed / 25,
    },
    "ikeaasm": {
        "path": "/datasets/ikeaasm/skelda/test.json",
        "take_interval": 2,
        "fps": -1,
        "min_match_score": 0.81,
        "min_bbox_score": 0.20,
    },
    "chi3d": {
        "path": "/datasets/chi3d/skelda/all.json",
        "take_interval": 5,
        "fps": 50,
        "min_match_score": 0.92,
        "min_bbox_area": 0.2 * 0.2,
        "max_track_distance": 0.3 + default_max_movement_speed / (50 / 5),
    },
    "tsinghua": {
        "path": "/datasets/tsinghua/skelda/test.json",
        "take_interval": 3,
        "fps": 30,
        "min_match_score": 0.95,
        "min_group_size": 2,
        "max_track_distance": 0.3 + default_max_movement_speed / (30 / 3),
    },
    "human36m_wb": {
        "path": "/datasets/human36m/skelda/wb/test.json",
        "take_interval": 100,
        "fps": -1,
        "min_bbox_score": 0.4,
        "batch_poses": False,
    },
    "egohumans_tagging": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "tagging",
        "min_match_score": 0.89,
        "min_group_size": 1,
        "min_bbox_score": 0.2,
        "min_bbox_area": 0.05 * 0.05,
        "max_movement_speed": 4.0 * 1.5,
        "max_track_distance": 0.3 + (4.0 * 1.5) / (20 / 2),
    },
    "egohumans_legoassemble": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "legoassemble",
        "min_group_size": 2,
        "max_track_distance": 0.3 + default_max_movement_speed / (20 / 2),
    },
    "egohumans_fencing": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "fencing",
        "min_group_size": 7,
        "min_bbox_score": 0.5,
        "min_bbox_area": 0.05 * 0.05,
        "max_track_distance": 0.3 + default_max_movement_speed / (20 / 2),
    },
    "egohumans_basketball": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "basketball",
        "min_group_size": 4,
        "min_bbox_score": 0.25,
        "min_bbox_area": 0.025 * 0.025,
        "max_movement_speed": 4.0 * 1.5,
        "max_track_distance": 0.3 + (4.0 * 1.5) / (20 / 2),
    },
    "egohumans_volleyball": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "volleyball",
        "min_match_score": 0.95,
        "min_group_size": 7,
        "min_bbox_score": 0.20,
        "min_bbox_area": 0.05 * 0.05,
        "max_movement_speed": 4.0 * 1.5,
        "max_track_distance": 0.3 + (4.0 * 1.5) / (20 / 2),
    },
    "egohumans_badminton": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "badminton",
        "min_group_size": 7,
        "min_bbox_score": 0.25,
        "min_bbox_area": 0.05 * 0.05,
        "max_movement_speed": 4.0 * 1.5,
        "max_track_distance": 0.3 + (4.0 * 1.5) / (20 / 2),
    },
    "egohumans_tennis": {
        "path": "/datasets/egohumans/skelda/all.json",
        "take_interval": 2,
        "fps": 20,
        "subset": "tennis",
        "min_group_size": 11,
        "min_bbox_area": 0.025 * 0.025,
        "max_movement_speed": 4.0 * 1.5,
        "max_track_distance": 0.3 + (4.0 * 1.5) / (20 / 2),
    },
}
# Joint layouts of the 2D predictor and the 3D output (identical here)
joint_names_2d = utils_pipeline.get_joint_names(whole_body)
joint_names_3d = list(joint_names_2d)
# Joints used for the evaluation metrics
eval_joints = [
    "head",
    "shoulder_left",
    "shoulder_right",
    "elbow_left",
    "elbow_right",
    "wrist_left",
    "wrist_right",
    "hip_left",
    "hip_right",
    "knee_left",
    "knee_right",
    "ankle_left",
    "ankle_right",
]
# Some datasets label the nose instead of a generic head keypoint
if dataset_use == "human36m":
    eval_joints[eval_joints.index("head")] = "nose"
if dataset_use == "panoptic":
    eval_joints[eval_joints.index("head")] = "nose"
if dataset_use == "human36m_wb":
    if utils_pipeline.use_whole_body(whole_body):
        eval_joints = list(joint_names_2d)
    else:
        eval_joints[eval_joints.index("head")] = "nose"
# Optional directory for rendered test output (empty string disables it)
# output_dir = "/RapidPoseTriangulation/data/testoutput/"
output_dir = ""
# Optional path to export the predictions (empty string disables it)
# pred_export_path = f"/datasets/predictions/{dataset_use}/RapidPoseTriangulation.json"
pred_export_path = ""
# ==================================================================================================
def load_labels(dataset: dict):
    """Load labels by dataset description and apply dataset-specific filtering.

    ``dataset`` maps the dataset name to its parameter dict and may carry a
    top-level ``"take_interval"`` used to subsample the loaded frames.
    """
    if "panoptic" in dataset:
        params = dataset["panoptic"]
        labels = utils_pipeline.load_json(params["path"])
        labels = [lb for i, lb in enumerate(labels) if i % 1500 < 90]
        # Filter by maximum number of persons
        labels = [lb for lb in labels if len(lb["bodies3D"]) <= 10]
        # Optionally keep only selected scenes
        if "use_scenes" in params:
            use_scenes = params["use_scenes"]
            labels = [lb for lb in labels if lb["scene"] in use_scenes]
        # Optionally drop cameras that are not part of the configured subset
        if "cameras_depth" not in labels[0] and len(params["cams"]) > 0:
            for label in labels:
                for i, cam in reversed(list(enumerate(label["cameras"]))):
                    if cam["name"] not in params["cams"]:
                        label["cameras"].pop(i)
                        label["imgpaths"].pop(i)
    elif "human36m" in dataset:
        labels = utils_pipeline.load_json(dataset["human36m"]["path"])
        labels = [lb for lb in labels if lb["subject"] == "S9"]
        labels = [lb for i, lb in enumerate(labels) if i % 4000 < 150]
    elif "mvor" in dataset:
        labels = utils_pipeline.load_json(dataset["mvor"]["path"])
        # Expose the color streams under the explicitly named keys
        for label in labels:
            label["cameras_color"] = label["cameras"]
            label["imgpaths_color"] = label["imgpaths"]
    elif "ikeaasm" in dataset:
        labels = utils_pipeline.load_json(dataset["ikeaasm"]["path"])
        # Keep only samples that share the first sample's camera setup
        reference_cams = str(labels[0]["cameras"])
        labels = [lb for lb in labels if str(lb["cameras"]) == reference_cams]
    elif "shelf" in dataset:
        labels = utils_pipeline.load_json(dataset["shelf"]["path"])
        labels = [lb for lb in labels if "test" in lb["splits"]]
    elif "campus" in dataset:
        labels = utils_pipeline.load_json(dataset["campus"]["path"])
        labels = [lb for lb in labels if "test" in lb["splits"]]
    elif "tsinghua" in dataset:
        labels = utils_pipeline.load_json(dataset["tsinghua"]["path"])
        labels = [lb for lb in labels if "test" in lb["splits"]]
        labels = [lb for lb in labels if lb["seq"] == "seq_1"]
        labels = [lb for i, lb in enumerate(labels) if i % 300 < 90]
        # Assign sequential per-frame person ids
        for label in labels:
            label["bodyids"] = list(range(len(label["bodies3D"])))
    elif "chi3d" in dataset:
        labels = utils_pipeline.load_json(dataset["chi3d"]["path"])
        labels = [lb for lb in labels if lb["setup"] == "s03"]
        labels = [lb for i, lb in enumerate(labels) if i % 2000 < 150]
    elif "human36m_wb" in dataset:
        labels = utils_pipeline.load_json(dataset["human36m_wb"]["path"])
    elif any(("egohumans" in key for key in dataset)):
        params = dataset[dataset_use]
        labels = utils_pipeline.load_json(params["path"])
        labels = [lb for lb in labels if "test" in lb["splits"]]
        labels = [lb for lb in labels if params["subset"] in lb["seq"]]
        if params["subset"] in ["volleyball", "tennis"]:
            labels = [lb for i, lb in enumerate(labels) if i % 150 < 60]
    else:
        raise ValueError("Dataset not available")

    # Optionally drop samples to speed up train/eval
    take_interval = dataset.get("take_interval", 1)
    if take_interval > 1:
        labels = [lb for i, lb in enumerate(labels) if i % take_interval == 0]

    # Fill defaults for optional keys
    for label in labels:
        label.setdefault("scene", "default")
        for cam in label["cameras"]:
            cam.setdefault("type", "pinhole")
    return labels
# ==================================================================================================
def main():
    """Evaluate the triangulation pipeline on the selected dataset.

    Steps: load and filter labels, export them together with a config file to
    a temporary exchange directory, run the C++ triangulation binary on that
    export, read its results back, optionally export predictions, and run the
    MPJPE/PCP evaluations.
    """
    global joint_names_3d, eval_joints
    print("Loading dataset ...")
    labels = load_labels(
        {
            dataset_use: datasets[dataset_use],
            "take_interval": datasets[dataset_use]["take_interval"],
        }
    )
    # Print a dataset sample for debugging
    print("Amount of samples:", len(labels))
    print(labels[0])
    # Save dataset to the exchange directory read by the C++ binary
    tmp_export_dir = "/tmp/rpt/"
    # Remove split metadata before exporting the samples
    for label in labels:
        if "splits" in label:
            label.pop("splits")
    json_writer.save_dataset(labels, tmp_export_dir)
    # Load dataset specific parameters (fall back to the module-level defaults)
    min_match_score = datasets[dataset_use].get(
        "min_match_score", default_min_match_score
    )
    min_group_size = datasets[dataset_use].get("min_group_size", default_min_group_size)
    min_bbox_score = datasets[dataset_use].get("min_bbox_score", default_min_bbox_score)
    min_bbox_area = datasets[dataset_use].get("min_bbox_area", default_min_bbox_area)
    batch_poses = datasets[dataset_use].get("batch_poses", default_batch_poses)
    max_movement_speed = datasets[dataset_use].get(
        "max_movement_speed", default_max_movement_speed
    )
    max_track_distance = datasets[dataset_use].get(
        "max_track_distance", default_max_track_distance
    )
    # Save config
    config_path = tmp_export_dir + "config.json"
    config = {
        "min_match_score": min_match_score,
        "min_group_size": min_group_size,
        "min_bbox_score": min_bbox_score,
        "min_bbox_area": min_bbox_area,
        "batch_poses": batch_poses,
        "max_movement_speed": max_movement_speed,
        "max_track_distance": max_track_distance,
        "whole_body": whole_body,
        "take_interval": datasets[dataset_use]["take_interval"],
        "fps": datasets[dataset_use]["fps"],
    }
    utils_pipeline.save_json(config, config_path)
    # Call the CPP binary
    os.system("/RapidPoseTriangulation/scripts/test_skelda_dataset.bin")
    # Load the results
    print("Loading exports ...")
    res_path = tmp_export_dir + "results.json"
    results = utils_pipeline.load_json(res_path)
    all_poses_3d = results["all_poses_3d"]
    all_poses_2d = results["all_poses_2d"]
    all_ids = results["all_ids"]
    # Overwrite the module-level joint list with what the binary reports
    joint_names_3d = results["joint_names_3d"]
    # # Visualize labels and predictions
    # from skelda import utils_view
    # for i in range(0, len(labels), 1):
    #     posesL = []
    #     posesR = []
    #     jnames = []
    #     for j in labels[i]["joints"]:
    #         if "->" in j:
    #             jnames.append(j.split("->")[-1])
    #         else:
    #             jnames.append(j)
    #     for j in range(len(labels[i]["bodies3D"])):
    #         pose = []
    #         for k in range(len(eval_joints)):
    #             n = eval_joints[k]
    #             pose.append(labels[i]["bodies3D"][j][jnames.index(n)])
    #         posesL.append(pose)
    #     for j in range(len(all_poses_3d[i])):
    #         pose = []
    #         for k in range(len(eval_joints)):
    #             n = eval_joints[k]
    #             pose.append(all_poses_3d[i][j][joint_names_3d.index(n)])
    #         posesR.append(pose)
    #     poses_3d = posesL + posesR
    #     sample = labels[i]
    #     sample["bodies3D"] = np.array(poses_3d).round(3).tolist()
    #     sample["joints"] = eval_joints
    #     sample["num_persons"] = len(poses_3d)
    #     print(sample)
    #     utils_view.draw_sample_3d(sample)
    #     utils_view.draw_many_images(
    #         sample["imgpaths"],
    #         sample["cameras"],
    #         [],
    #         all_poses_2d[i],
    #         joint_names_3d,
    #         "2D detections",
    #     )
    #     utils_view.show_plots()
    if pred_export_path != "":
        # Export predictions
        print("\nExporting predictions ...")
        all_poses_3d = [np.array(poses).round(3).tolist() for poses in all_poses_3d]
        data = {
            "poses3D": all_poses_3d,
            "ids": all_ids,
            "joint_names": joint_names_3d,
        }
        os.makedirs(os.path.dirname(pred_export_path), exist_ok=True)
        with open(pred_export_path, "w") as file:
            json.dump(data, file, indent=0)
    # Run evaluation
    _ = evals.mpjpe.run_eval(
        labels,
        all_poses_3d,
        all_ids,
        joint_names_net=joint_names_3d,
        joint_names_use=eval_joints,
        save_error_imgs=output_dir,
        debug_2D_preds=all_poses_2d,
    )
    _ = evals.pcp.run_eval(
        labels,
        all_poses_3d,
        all_ids,
        joint_names_net=joint_names_3d,
        joint_names_use=eval_joints,
        replace_head_with_nose=True,
    )
    if dataset_use == "shelf":
        # Also run old-style evaluation for shelf dataset
        odir = os.path.join(output_dir, "pcp/") if output_dir != "" else ""
        _ = evals.campus_shelf.run_eval(
            labels,
            all_poses_3d,
            all_ids,
            joint_names_net=joint_names_3d,
            save_error_imgs=odir,
            debug_2D_preds=all_poses_2d,
        )


# ==================================================================================================
if __name__ == "__main__":
    main()
-144
View File
@@ -1,144 +0,0 @@
import copy
import json
import os
import numpy as np
import utils_pipeline
from skelda import utils_pose, utils_view
from skelda.writers import json_writer
# ==================================================================================================
# Base directory of this script and the test-image data directory next to it
filepath = os.path.dirname(os.path.realpath(__file__)) + "/"
test_img_dir = filepath + "../data/"

# Optional whole-body keypoint groups (all disabled: base skeleton only)
whole_body = {
    "foots": False,
    "face": False,
    "hands": False,
}

# Triangulation settings passed to the C++ binary.
# NOTE(review): fps=-1 and zero speed/distance limits presumably disable
# tracking for these independent single-frame samples — confirm in the binary.
config = {
    "min_match_score": 0.94,
    "min_group_size": 1,
    "min_bbox_score": 0.3,
    "min_bbox_area": 0.1 * 0.1,
    "batch_poses": True,
    "whole_body": whole_body,
    "take_interval": 1,
    "fps": -1,
    "max_movement_speed": 0,
    "max_track_distance": 0,
}

# 2D joint order of the detector; the 3D output initially mirrors it
joint_names_2d = utils_pipeline.get_joint_names(whole_body)
joint_names_3d = list(joint_names_2d)
# ==================================================================================================
def update_sample(sample, new_dir=""):
    """Return a deep copy of ``sample`` with image paths rebased to ``new_dir``
    and placeholder values filled in for missing metadata keys."""
    updated = copy.deepcopy(sample)
    # Rebase each image path onto the new directory, keeping only the filename.
    updated["imgpaths"] = [
        os.path.join(new_dir, os.path.basename(path)) for path in updated["imgpaths"]
    ]
    # Fill placeholders for optional metadata.
    updated.setdefault("scene", "default")
    updated.setdefault("id", "0")
    updated.setdefault("index", 0)
    for cam in updated["cameras"]:
        cam.setdefault("type", "pinhole")
    return updated
# ==================================================================================================
def main():
    """Run the triangulation binary on every sample folder and plot results.

    For each two-character sample directory under the data folder, exports the
    sample plus the module-level config to /tmp/rpt/, runs the C++ binary on
    it, and saves 2D-detection, 3D, and reprojection figures into the folder.
    """
    for dirname in sorted(os.listdir(test_img_dir)):
        dirpath = os.path.join(test_img_dir, dirname)
        if not os.path.isdir(dirpath):
            continue
        # Only process the two-character sample folders (p*, h*, e*, q*)
        if (dirname[0] not in ["p", "h", "e", "q"]) or len(dirname) != 2:
            continue
        # Load sample infos
        print("\n" + dirpath)
        with open(os.path.join(dirpath, "sample.json"), "r", encoding="utf-8") as file:
            sample = json.load(file)
        sample = update_sample(sample, dirpath)
        if len(sample["imgpaths"]) == 1:
            # At least two images are required
            continue
        # Save dataset
        labels = [sample]
        tmp_export_dir = "/tmp/rpt/"
        # Remove split metadata before exporting the samples
        for label in labels:
            if "splits" in label:
                label.pop("splits")
        json_writer.save_dataset(labels, tmp_export_dir)
        # Save config
        config_path = tmp_export_dir + "config.json"
        utils_pipeline.save_json(config, config_path)
        # Call the CPP binary
        os.system("/RapidPoseTriangulation/scripts/test_skelda_dataset.bin")
        # Load the results (single sample => take the first entry)
        print("Loading exports ...")
        res_path = tmp_export_dir + "results.json"
        results = utils_pipeline.load_json(res_path)
        poses_3d = results["all_poses_3d"][0]
        poses_2d = results["all_poses_2d"][0]
        # NOTE: local name shadows the module-level joint_names_3d
        joint_names_3d = results["joint_names_3d"]
        # Visualize the 2D results
        fig1 = utils_view.draw_many_images(
            sample["imgpaths"], [], [], poses_2d, joint_names_2d, "2D detections"
        )
        fig1.savefig(os.path.join(dirpath, "2d-k.png"), dpi=fig1.dpi)
        # Visualize the 3D results
        print("Detected 3D poses:")
        poses_3d = np.array(poses_3d)
        print(poses_3d.round(3))
        if len(poses_3d) == 0:
            utils_view.show_plots()
            continue
        camparams = sample["cameras"]
        roomparams = {
            "room_size": sample["room_size"],
            "room_center": sample["room_center"],
        }
        # Re-project the triangulated 3D poses into every camera view
        poses_2d_proj = []
        for cam in camparams:
            poses_2d_cam, _ = utils_pose.project_poses(poses_3d, cam)
            poses_2d_proj.append(poses_2d_cam)
        fig2 = utils_view.draw_poses3d(poses_3d, joint_names_3d, roomparams, camparams)
        fig3 = utils_view.draw_many_images(
            sample["imgpaths"], [], [], poses_2d_proj, joint_names_3d, "2D projections"
        )
        fig2.savefig(os.path.join(dirpath, "3d-p.png"), dpi=fig2.dpi)
        fig3.savefig(os.path.join(dirpath, "2d-p.png"), dpi=fig3.dpi)
        utils_view.show_plots()


# ==================================================================================================
if __name__ == "__main__":
    main()
File diff suppressed because it is too large Load Diff
-316
View File
@@ -1,316 +0,0 @@
#pragma once
#include <algorithm>
#include <array>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
// =================================================================================================
namespace utils_pipeline
{
bool use_whole_body(const std::map<std::string, bool> &whole_body)
{
for (const auto &pair : whole_body)
{
if (pair.second)
{
return true;
}
}
return false;
}
// =============================================================================================
    // Build the ordered 2D joint-name list: 17 base body joints, then the
    // optional whole-body sections (feet, face, hands) in this fixed order,
    // and always the three synthetic joints (hip_middle, shoulder_middle,
    // head) at the end. Must stay in sync with the Python get_joint_names.
    std::vector<std::string> get_joint_names(const std::map<std::string, bool> &whole_body)
    {
        // 17 base body joints
        std::vector<std::string> joint_names_2d = {
            "nose",
            "eye_left",
            "eye_right",
            "ear_left",
            "ear_right",
            "shoulder_left",
            "shoulder_right",
            "elbow_left",
            "elbow_right",
            "wrist_left",
            "wrist_right",
            "hip_left",
            "hip_right",
            "knee_left",
            "knee_right",
            "ankle_left",
            "ankle_right",
        };
        // Optional: 6 foot keypoints
        if (whole_body.at("foots"))
        {
            joint_names_2d.insert(
                joint_names_2d.end(),
                {
                    "foot_toe_big_left",
                    "foot_toe_small_left",
                    "foot_heel_left",
                    "foot_toe_big_right",
                    "foot_toe_small_right",
                    "foot_heel_right",
                });
        }
        // Optional: 68 face keypoints
        if (whole_body.at("face"))
        {
            joint_names_2d.insert(
                joint_names_2d.end(),
                {
                    "face_jaw_right_1",
                    "face_jaw_right_2",
                    "face_jaw_right_3",
                    "face_jaw_right_4",
                    "face_jaw_right_5",
                    "face_jaw_right_6",
                    "face_jaw_right_7",
                    "face_jaw_right_8",
                    "face_jaw_middle",
                    "face_jaw_left_1",
                    "face_jaw_left_2",
                    "face_jaw_left_3",
                    "face_jaw_left_4",
                    "face_jaw_left_5",
                    "face_jaw_left_6",
                    "face_jaw_left_7",
                    "face_jaw_left_8",
                    "face_eyebrow_right_1",
                    "face_eyebrow_right_2",
                    "face_eyebrow_right_3",
                    "face_eyebrow_right_4",
                    "face_eyebrow_right_5",
                    "face_eyebrow_left_1",
                    "face_eyebrow_left_2",
                    "face_eyebrow_left_3",
                    "face_eyebrow_left_4",
                    "face_eyebrow_left_5",
                    "face_nose_1",
                    "face_nose_2",
                    "face_nose_3",
                    "face_nose_4",
                    "face_nose_5",
                    "face_nose_6",
                    "face_nose_7",
                    "face_nose_8",
                    "face_nose_9",
                    "face_eye_right_1",
                    "face_eye_right_2",
                    "face_eye_right_3",
                    "face_eye_right_4",
                    "face_eye_right_5",
                    "face_eye_right_6",
                    "face_eye_left_1",
                    "face_eye_left_2",
                    "face_eye_left_3",
                    "face_eye_left_4",
                    "face_eye_left_5",
                    "face_eye_left_6",
                    "face_mouth_1",
                    "face_mouth_2",
                    "face_mouth_3",
                    "face_mouth_4",
                    "face_mouth_5",
                    "face_mouth_6",
                    "face_mouth_7",
                    "face_mouth_8",
                    "face_mouth_9",
                    "face_mouth_10",
                    "face_mouth_11",
                    "face_mouth_12",
                    "face_mouth_13",
                    "face_mouth_14",
                    "face_mouth_15",
                    "face_mouth_16",
                    "face_mouth_17",
                    "face_mouth_18",
                    "face_mouth_19",
                    "face_mouth_20",
                });
        }
        // Optional: 42 hand keypoints (21 per hand)
        if (whole_body.at("hands"))
        {
            joint_names_2d.insert(
                joint_names_2d.end(),
                {
                    "hand_wrist_left",
                    "hand_finger_thumb_left_1",
                    "hand_finger_thumb_left_2",
                    "hand_finger_thumb_left_3",
                    "hand_finger_thumb_left_4",
                    "hand_finger_index_left_1",
                    "hand_finger_index_left_2",
                    "hand_finger_index_left_3",
                    "hand_finger_index_left_4",
                    "hand_finger_middle_left_1",
                    "hand_finger_middle_left_2",
                    "hand_finger_middle_left_3",
                    "hand_finger_middle_left_4",
                    "hand_finger_ring_left_1",
                    "hand_finger_ring_left_2",
                    "hand_finger_ring_left_3",
                    "hand_finger_ring_left_4",
                    "hand_finger_pinky_left_1",
                    "hand_finger_pinky_left_2",
                    "hand_finger_pinky_left_3",
                    "hand_finger_pinky_left_4",
                    "hand_wrist_right",
                    "hand_finger_thumb_right_1",
                    "hand_finger_thumb_right_2",
                    "hand_finger_thumb_right_3",
                    "hand_finger_thumb_right_4",
                    "hand_finger_index_right_1",
                    "hand_finger_index_right_2",
                    "hand_finger_index_right_3",
                    "hand_finger_index_right_4",
                    "hand_finger_middle_right_1",
                    "hand_finger_middle_right_2",
                    "hand_finger_middle_right_3",
                    "hand_finger_middle_right_4",
                    "hand_finger_ring_right_1",
                    "hand_finger_ring_right_2",
                    "hand_finger_ring_right_3",
                    "hand_finger_ring_right_4",
                    "hand_finger_pinky_right_1",
                    "hand_finger_pinky_right_2",
                    "hand_finger_pinky_right_3",
                    "hand_finger_pinky_right_4",
                });
        }
        // Synthetic joints appended by update_keypoints
        joint_names_2d.insert(
            joint_names_2d.end(),
            {
                "hip_middle",
                "shoulder_middle",
                "head",
            });
        return joint_names_2d;
    }
// =============================================================================================
    // Demosaic a single-channel Bayer image into a 3-channel RGB image using
    // OpenCV's COLOR_BayerBG2RGB conversion. NOTE(review): this must be the
    // inverse sampling pattern of rgb2bayer below — confirm the Bayer layout
    // matches (OpenCV's Bayer code names refer to the 2x2 tile offset).
    cv::Mat bayer2rgb(const cv::Mat &bayer)
    {
        cv::Mat rgb;
        cv::cvtColor(bayer, rgb, cv::COLOR_BayerBG2RGB);
        return rgb;
    }
cv::Mat rgb2bayer(const cv::Mat &img)
{
CV_Assert(img.type() == CV_8UC3);
cv::Mat bayer(img.rows, img.cols, CV_8UC1);
for (int r = 0; r < img.rows; ++r)
{
const uchar *imgData = img.ptr<uchar>(r);
uchar *bayerRowPtr = bayer.ptr<uchar>(r);
for (int c = 0; c < img.cols; ++c)
{
int pixelIndex = 3 * c;
// Use faster bit operation instead of modulo+if
// Even row, even col => R = 0
// Even row, odd col => G = 1
// Odd row, even col => G = 1
// Odd row, odd col => B = 2
int row_mod = r & 1;
int col_mod = c & 1;
int component = row_mod + col_mod;
bayerRowPtr[c] = imgData[pixelIndex + component];
}
}
return bayer;
}
// =============================================================================================
inline int find_index(const std::vector<std::string> &vec, const std::string &key)
{
auto it = std::find(vec.begin(), vec.end(), key);
if (it == vec.end())
{
throw std::runtime_error("Cannot find \"" + key + "\" in joint_names.");
}
return static_cast<int>(std::distance(vec.begin(), it));
}
std::vector<std::vector<std::vector<std::array<float, 3>>>> update_keypoints(
const std::vector<std::vector<std::vector<std::array<float, 3>>>> &poses_2d,
const std::vector<std::string> &joint_names,
const std::map<std::string, bool> &whole_body)
{
std::vector<std::vector<std::vector<std::array<float, 3>>>> new_views;
new_views.reserve(poses_2d.size());
for (const auto &view : poses_2d)
{
// "view" is a list of bodies => each body is Nx3
std::vector<std::vector<std::array<float, 3>>> new_bodies;
new_bodies.reserve(view.size());
for (const auto &body : view)
{
// 1) Copy first 17 keypoints
std::vector<std::array<float, 3>> new_body;
new_body.insert(new_body.end(), body.begin(), body.begin() + 17);
// 2) Optionally append extra keypoints
if (whole_body.at("foots"))
{
new_body.insert(new_body.end(), body.begin() + 17, body.begin() + 23);
}
if (whole_body.at("face"))
{
new_body.insert(new_body.end(), body.begin() + 23, body.begin() + 91);
}
if (whole_body.at("hands"))
{
new_body.insert(new_body.end(), body.begin() + 91, body.end());
}
// 3) Compute mid_hip
int hlid = find_index(joint_names, "hip_left");
int hrid = find_index(joint_names, "hip_right");
float mid_hip_x = 0.5 * (new_body[hlid][0] + new_body[hrid][0]);
float mid_hip_y = 0.5 * (new_body[hlid][1] + new_body[hrid][1]);
float mid_hip_c = std::min(new_body[hlid][2], new_body[hrid][2]);
new_body.push_back({mid_hip_x, mid_hip_y, mid_hip_c});
// 4) Compute mid_shoulder
int slid = find_index(joint_names, "shoulder_left");
int srid = find_index(joint_names, "shoulder_right");
float mid_shoulder_x = 0.5 * (new_body[slid][0] + new_body[srid][0]);
float mid_shoulder_y = 0.5 * (new_body[slid][1] + new_body[srid][1]);
float mid_shoulder_c = std::min(new_body[slid][2], new_body[srid][2]);
new_body.push_back({mid_shoulder_x, mid_shoulder_y, mid_shoulder_c});
// 5) Compute head
int elid = find_index(joint_names, "ear_left");
int erid = find_index(joint_names, "ear_right");
float head_x = 0.5 * (new_body[elid][0] + new_body[erid][0]);
float head_y = 0.5 * (new_body[elid][1] + new_body[erid][1]);
float head_c = std::min(new_body[elid][2], new_body[erid][2]);
new_body.push_back({head_x, head_y, head_c});
// Add this updated body into new_bodies
new_bodies.push_back(new_body);
}
// Add all updated bodies for this view
new_views.push_back(new_bodies);
}
return new_views;
}
}
-189
View File
@@ -1,189 +0,0 @@
import json
# ==================================================================================================
def load_json(path: str):
    """Read a UTF-8 JSON file and return the parsed data."""
    with open(path, "r", encoding="utf-8") as fh:
        return json.load(fh)
def save_json(data: dict, path: str):
    """Write ``data`` to ``path`` as UTF-8 JSON with newline-separated entries."""
    with open(path, "w+", encoding="utf-8") as fh:
        json.dump(data, fh, indent=0)
# ==================================================================================================
def use_whole_body(whole_body: dict) -> bool:
    """Return True if any optional whole-body part group is enabled."""
    return any(whole_body.values())
# ==================================================================================================
def get_joint_names(whole_body: dict):
    """Return the ordered 2D joint-name list.

    Starts with the 17 base body joints, appends the optional whole-body
    sections ("foots", "face", "hands") when enabled, and always ends with the
    three synthetic joints hip_middle / shoulder_middle / head. Must stay in
    sync with the C++ get_joint_names.
    """
    # 17 base body joints
    joint_names_2d = [
        "nose",
        "eye_left",
        "eye_right",
        "ear_left",
        "ear_right",
        "shoulder_left",
        "shoulder_right",
        "elbow_left",
        "elbow_right",
        "wrist_left",
        "wrist_right",
        "hip_left",
        "hip_right",
        "knee_left",
        "knee_right",
        "ankle_left",
        "ankle_right",
    ]
    # Optional: 6 foot keypoints
    if whole_body["foots"]:
        joint_names_2d.extend(
            [
                "foot_toe_big_left",
                "foot_toe_small_left",
                "foot_heel_left",
                "foot_toe_big_right",
                "foot_toe_small_right",
                "foot_heel_right",
            ]
        )
    # Optional: 68 face keypoints
    if whole_body["face"]:
        joint_names_2d.extend(
            [
                "face_jaw_right_1",
                "face_jaw_right_2",
                "face_jaw_right_3",
                "face_jaw_right_4",
                "face_jaw_right_5",
                "face_jaw_right_6",
                "face_jaw_right_7",
                "face_jaw_right_8",
                "face_jaw_middle",
                "face_jaw_left_1",
                "face_jaw_left_2",
                "face_jaw_left_3",
                "face_jaw_left_4",
                "face_jaw_left_5",
                "face_jaw_left_6",
                "face_jaw_left_7",
                "face_jaw_left_8",
                "face_eyebrow_right_1",
                "face_eyebrow_right_2",
                "face_eyebrow_right_3",
                "face_eyebrow_right_4",
                "face_eyebrow_right_5",
                "face_eyebrow_left_1",
                "face_eyebrow_left_2",
                "face_eyebrow_left_3",
                "face_eyebrow_left_4",
                "face_eyebrow_left_5",
                "face_nose_1",
                "face_nose_2",
                "face_nose_3",
                "face_nose_4",
                "face_nose_5",
                "face_nose_6",
                "face_nose_7",
                "face_nose_8",
                "face_nose_9",
                "face_eye_right_1",
                "face_eye_right_2",
                "face_eye_right_3",
                "face_eye_right_4",
                "face_eye_right_5",
                "face_eye_right_6",
                "face_eye_left_1",
                "face_eye_left_2",
                "face_eye_left_3",
                "face_eye_left_4",
                "face_eye_left_5",
                "face_eye_left_6",
                "face_mouth_1",
                "face_mouth_2",
                "face_mouth_3",
                "face_mouth_4",
                "face_mouth_5",
                "face_mouth_6",
                "face_mouth_7",
                "face_mouth_8",
                "face_mouth_9",
                "face_mouth_10",
                "face_mouth_11",
                "face_mouth_12",
                "face_mouth_13",
                "face_mouth_14",
                "face_mouth_15",
                "face_mouth_16",
                "face_mouth_17",
                "face_mouth_18",
                "face_mouth_19",
                "face_mouth_20",
            ]
        )
    # Optional: 42 hand keypoints (21 per hand)
    if whole_body["hands"]:
        joint_names_2d.extend(
            [
                "hand_wrist_left",
                "hand_finger_thumb_left_1",
                "hand_finger_thumb_left_2",
                "hand_finger_thumb_left_3",
                "hand_finger_thumb_left_4",
                "hand_finger_index_left_1",
                "hand_finger_index_left_2",
                "hand_finger_index_left_3",
                "hand_finger_index_left_4",
                "hand_finger_middle_left_1",
                "hand_finger_middle_left_2",
                "hand_finger_middle_left_3",
                "hand_finger_middle_left_4",
                "hand_finger_ring_left_1",
                "hand_finger_ring_left_2",
                "hand_finger_ring_left_3",
                "hand_finger_ring_left_4",
                "hand_finger_pinky_left_1",
                "hand_finger_pinky_left_2",
                "hand_finger_pinky_left_3",
                "hand_finger_pinky_left_4",
                "hand_wrist_right",
                "hand_finger_thumb_right_1",
                "hand_finger_thumb_right_2",
                "hand_finger_thumb_right_3",
                "hand_finger_thumb_right_4",
                "hand_finger_index_right_1",
                "hand_finger_index_right_2",
                "hand_finger_index_right_3",
                "hand_finger_index_right_4",
                "hand_finger_middle_right_1",
                "hand_finger_middle_right_2",
                "hand_finger_middle_right_3",
                "hand_finger_middle_right_4",
                "hand_finger_ring_right_1",
                "hand_finger_ring_right_2",
                "hand_finger_ring_right_3",
                "hand_finger_ring_right_4",
                "hand_finger_pinky_right_1",
                "hand_finger_pinky_right_2",
                "hand_finger_pinky_right_3",
                "hand_finger_pinky_right_4",
            ]
        )
    # Synthetic joints appended by the keypoint post-processing
    joint_names_2d.extend(
        [
            "hip_middle",
            "shoulder_middle",
            "head",
        ]
    )
    return joint_names_2d
Submodule skelda deleted from d48d65b961
+225
View File
@@ -0,0 +1,225 @@
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING
from ._core import (
AssociationReport,
AssociationStatus,
Camera,
CameraModel,
FinalPoseAssociationDebug,
TriangulationConfig,
TriangulationOptions,
TriangulationResult,
CoreProposalDebug,
FullProposalDebug,
GroupingDebug,
MergeDebug,
PairCandidate,
PreviousPoseFilterDebug,
PreviousPoseMatch,
ProposalGroupDebug,
TriangulationTrace,
build_pair_candidates as _build_pair_candidates,
filter_pairs_with_previous_poses as _filter_pairs_with_previous_poses,
make_camera as _make_camera,
triangulate_debug as _triangulate_debug,
triangulate_poses as _triangulate_poses,
triangulate_with_report as _triangulate_with_report,
)
if TYPE_CHECKING:
import numpy as np
import numpy.typing as npt
from ._helpers import CameraLike, CameraModelLike, Matrix3x3Like, PoseViewLike, VectorLike
PoseArray2D = npt.NDArray[np.float32]
PoseArray3D = npt.NDArray[np.float32]
PersonCountArray = npt.NDArray[np.uint32]
TrackIdArray = npt.NDArray[np.int64]
# Attach docstrings to the native classes re-exported from the _core extension.
Camera.__doc__ = """Immutable camera calibration with precomputed projection cache fields."""
TriangulationConfig.__doc__ = """Stable scene configuration used for triangulation."""
TriangulationOptions.__doc__ = """Score and grouping thresholds used by triangulation."""
TriangulationResult.__doc__ = """Tracked triangulation output containing poses and association metadata."""
AssociationReport.__doc__ = """Track-association summary for a tracked triangulation call."""
TriangulationTrace.__doc__ = """Full debug trace for triangulation, including pair, grouping, and association stages."""
def convert_cameras(cameras: "Sequence[CameraLike]") -> list[Camera]:
    """Convert mapping-like camera inputs into immutable bound `Camera` instances."""
    # Imported lazily so the heavy helper module is only loaded on first use.
    from . import _helpers

    return _helpers.convert_cameras(cameras)
def make_camera(
    name: str,
    K: "Matrix3x3Like",
    DC: "VectorLike",
    R: "Matrix3x3Like",
    T: "Sequence[Sequence[float]]",
    width: int,
    height: int,
    model: "CameraModel | CameraModelLike",
) -> Camera:
    """Create an immutable camera with precomputed projection cache fields.

    The model may be a `CameraModel` or its string name; the distortion
    coefficients are normalized before the native constructor runs.
    """
    from ._helpers import _coerce_camera_model, _coerce_distortion

    camera_model = _coerce_camera_model(model)
    distortion = _coerce_distortion(DC, camera_model)
    return _make_camera(name, K, distortion, R, T, width, height, camera_model)
def build_pair_candidates(
    poses_2d: "PoseArray2D",
    person_counts: "PersonCountArray",
) -> list[PairCandidate]:
    """List every cross-view person pair implied by the padded 2D pose batch."""
    candidates = _build_pair_candidates(poses_2d, person_counts)
    return candidates
def pack_poses_2d(
    views: "Sequence[PoseViewLike]", *, joint_count: int | None = None
) -> "tuple[npt.NDArray[np.float32], npt.NDArray[np.uint32]]":
    """Pack ragged per-view detections into the padded tensor pair used by the core API."""
    from . import _helpers

    return _helpers.pack_poses_2d(views, joint_count=joint_count)
def make_triangulation_config(
    cameras: "Sequence[CameraLike]",
    roomparams: "npt.NDArray[np.generic] | Sequence[Sequence[float]]",
    joint_names: "Sequence[str]",
    *,
    min_match_score: float = 0.95,
    min_group_size: int = 1,
) -> TriangulationConfig:
    """Assemble a triangulation config from cameras, room parameters, and joint names."""
    from . import _helpers

    return _helpers.make_triangulation_config(
        cameras,
        roomparams,
        joint_names,
        min_match_score=min_match_score,
        min_group_size=min_group_size,
    )
def filter_pairs_with_previous_poses(
    poses_2d: "PoseArray2D",
    person_counts: "PersonCountArray",
    config: TriangulationConfig,
    previous_poses_3d: "PoseArray3D",
    previous_track_ids: "TrackIdArray",
) -> PreviousPoseFilterDebug:
    """Filter raw cross-view pairs against caller-owned previous 3D tracks."""
    args = (poses_2d, person_counts, config, previous_poses_3d, previous_track_ids)
    return _filter_pairs_with_previous_poses(*args)
def triangulate_debug(
    poses_2d: "PoseArray2D",
    person_counts: "PersonCountArray",
    config: TriangulationConfig,
    previous_poses_3d: "PoseArray3D | None" = None,
    previous_track_ids: "TrackIdArray | None" = None,
) -> TriangulationTrace:
    """Run triangulation and return the full debug trace.

    When previous-frame 3D tracks are given, `previous_track_ids` must also be
    given as an aligned `int64` array of the same length.
    """
    if previous_poses_3d is not None and previous_track_ids is None:
        raise ValueError("previous_track_ids is required when previous_poses_3d is provided.")
    if previous_poses_3d is None:
        return _triangulate_debug(poses_2d, person_counts, config)
    return _triangulate_debug(poses_2d, person_counts, config, previous_poses_3d, previous_track_ids)
def triangulate_poses(
    poses_2d: "PoseArray2D",
    person_counts: "PersonCountArray",
    config: TriangulationConfig,
) -> "PoseArray3D":
    """Triangulate one frame into anonymous 3D poses (no track association)."""
    result = _triangulate_poses(poses_2d, person_counts, config)
    return result
def triangulate_with_report(
    poses_2d: "PoseArray2D",
    person_counts: "PersonCountArray",
    config: TriangulationConfig,
    previous_poses_3d: "PoseArray3D",
    previous_track_ids: "TrackIdArray",
) -> TriangulationResult:
    """Triangulate a frame and report caller-owned track associations.

    Previous-frame poses and ids are treated as immutable caller state. The
    returned report classifies each final pose as matched, new, or ambiguous,
    and lists unmatched previous tracks as deletion candidates.
    """
    return _triangulate_with_report(
        poses_2d, person_counts, config, previous_poses_3d, previous_track_ids
    )
# Public API: re-exported native core types plus the Python helper wrappers.
__all__ = [
    "Camera",
    "CameraModel",
    "AssociationReport",
    "AssociationStatus",
    "FinalPoseAssociationDebug",
    "TriangulationConfig",
    "TriangulationOptions",
    "TriangulationResult",
    "CoreProposalDebug",
    "FullProposalDebug",
    "GroupingDebug",
    "MergeDebug",
    "PairCandidate",
    "PreviousPoseFilterDebug",
    "PreviousPoseMatch",
    "ProposalGroupDebug",
    "TriangulationTrace",
    "build_pair_candidates",
    "convert_cameras",
    "filter_pairs_with_previous_poses",
    "make_camera",
    "make_triangulation_config",
    "pack_poses_2d",
    "triangulate_debug",
    "triangulate_poses",
    "triangulate_with_report",
]
+115
View File
@@ -0,0 +1,115 @@
from collections.abc import Sequence
from typing import TypeAlias, overload
import numpy as np
import numpy.typing as npt
from ._core import (
AssociationReport,
AssociationStatus,
Camera,
CameraModel,
CoreProposalDebug,
FinalPoseAssociationDebug,
FullProposalDebug,
GroupingDebug,
MergeDebug,
PairCandidate,
PreviousPoseFilterDebug,
PreviousPoseMatch,
ProposalGroupDebug,
TriangulationConfig,
TriangulationOptions,
TriangulationResult,
TriangulationTrace,
)
from ._helpers import CameraLike, CameraModelLike, Matrix3x3Like, PoseViewLike, RoomParamsLike, VectorLike
# Array aliases for the padded tensors exchanged with the native core.
PoseArray2D: TypeAlias = npt.NDArray[np.float32]
PoseArray3D: TypeAlias = npt.NDArray[np.float32]
PersonCountArray: TypeAlias = npt.NDArray[np.uint32]
TrackIdArray: TypeAlias = npt.NDArray[np.int64]

def convert_cameras(cameras: Sequence[CameraLike]) -> list[Camera]: ...
def make_camera(
    name: str,
    K: Matrix3x3Like,
    DC: VectorLike,
    R: Matrix3x3Like,
    T: Sequence[Sequence[float]],
    width: int,
    height: int,
    model: CameraModel | CameraModelLike,
) -> Camera: ...
def build_pair_candidates(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
) -> list[PairCandidate]: ...
def pack_poses_2d(
    views: Sequence[PoseViewLike],
    *,
    joint_count: int | None = None,
) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.uint32]]: ...
def make_triangulation_config(
    cameras: Sequence[CameraLike],
    roomparams: RoomParamsLike,
    joint_names: Sequence[str],
    *,
    min_match_score: float = 0.95,
    min_group_size: int = 1,
) -> TriangulationConfig: ...
def filter_pairs_with_previous_poses(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
    config: TriangulationConfig,
    previous_poses_3d: PoseArray3D,
    previous_track_ids: TrackIdArray,
) -> PreviousPoseFilterDebug: ...

# triangulate_debug: tracked overload requires both previous poses and ids.
@overload
def triangulate_debug(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
    config: TriangulationConfig,
) -> TriangulationTrace: ...
@overload
def triangulate_debug(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
    config: TriangulationConfig,
    previous_poses_3d: PoseArray3D,
    previous_track_ids: TrackIdArray,
) -> TriangulationTrace: ...
def triangulate_poses(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
    config: TriangulationConfig,
) -> PoseArray3D: ...
def triangulate_with_report(
    poses_2d: PoseArray2D,
    person_counts: PersonCountArray,
    config: TriangulationConfig,
    previous_poses_3d: PoseArray3D,
    previous_track_ids: TrackIdArray,
) -> TriangulationResult: ...

__all__: list[str]
+159
View File
@@ -0,0 +1,159 @@
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal, TypeAlias, TypedDict
import numpy as np
import numpy.typing as npt
from ._core import Camera, CameraModel, TriangulationConfig, TriangulationOptions, make_camera
# Accepted input shapes for camera matrices, vectors, room params, pose views.
Matrix3x3Like: TypeAlias = Sequence[Sequence[float]]
VectorLike: TypeAlias = Sequence[float]
RoomParamsLike: TypeAlias = npt.NDArray[np.generic] | Sequence[Sequence[float]]
PoseViewLike: TypeAlias = npt.NDArray[np.generic] | Sequence[Sequence[Sequence[float]]] | Sequence[Sequence[float]]


class CameraDict(TypedDict, total=False):
    # Mapping form of a camera calibration.
    # "type" appears to be an older alias of "model" (see the fallback in
    # convert_cameras) — NOTE(review): confirm before removing either key.
    name: str
    K: Matrix3x3Like
    DC: VectorLike
    R: Matrix3x3Like
    T: Sequence[Sequence[float]]
    width: int
    height: int
    type: Literal["pinhole", "fisheye"]
    model: Literal["pinhole", "fisheye"] | CameraModel


CameraModelLike: TypeAlias = CameraModel | Literal["pinhole", "fisheye"]
CameraLike = Camera | CameraDict
def _coerce_camera_model(model: CameraModelLike) -> CameraModel:
    """Map a `CameraModel` or its string name onto the bound `CameraModel` enum."""
    if isinstance(model, CameraModel):
        return model
    if model in ("pinhole", "fisheye"):
        return CameraModel.PINHOLE if model == "pinhole" else CameraModel.FISHEYE
    raise ValueError(f"Unsupported camera model: {model}")
def _coerce_distortion(distortion: VectorLike, camera_model: CameraModel) -> tuple[float, float, float, float, float]:
    """Normalize distortion coefficients to a tuple of exactly five floats.

    Pinhole cameras must supply five coefficients. Fisheye cameras may supply
    four (a trailing zero is appended) or five. Anything else raises
    ValueError.
    """
    coeffs = tuple(map(float, distortion))
    is_fisheye = camera_model is CameraModel.FISHEYE
    required = 4 if is_fisheye else 5
    if len(coeffs) != required and len(coeffs) != 5:
        suffix = " (or 5 with a trailing zero)." if is_fisheye else "."
        raise ValueError(
            f"{camera_model.name.lower()} cameras require {required} distortion coefficients" + suffix
        )
    if is_fisheye and len(coeffs) == 4:
        coeffs += (0.0,)
    # Defensive: the branches above should always leave exactly five values.
    if len(coeffs) != 5:
        raise ValueError("Distortion coefficients must normalize to exactly 5 values.")
    return coeffs
def convert_cameras(cameras: Sequence[CameraLike]) -> list[Camera]:
    """Normalize mappings or existing Camera objects into bound Camera instances."""
    result: list[Camera] = []
    for entry in cameras:
        if isinstance(entry, Camera):
            # Already a bound camera — pass it through untouched.
            result.append(entry)
        else:
            # "model" wins over the legacy "type" key; default is pinhole.
            model = _coerce_camera_model(entry.get("model", entry.get("type", "pinhole")))
            camera = make_camera(
                str(entry["name"]),
                entry["K"],
                _coerce_distortion(entry["DC"], model),
                entry["R"],
                entry["T"],
                int(entry["width"]),
                int(entry["height"]),
                model,
            )
            result.append(camera)
    return result
def pack_poses_2d(
    views: Sequence[PoseViewLike], *, joint_count: int | None = None
) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.uint32]]:
    """Pack ragged per-view pose detections into the padded tensor expected by the core API.

    Each view may be [persons, joints, 3], [joints, 3] (treated as a single
    person), or empty. Returns (packed, counts): packed has shape
    [views, max_persons, joints, 3] with zero padding past each view's person
    count, and counts holds the per-view person totals as uint32.
    """
    joints = joint_count
    arrays: list[npt.NDArray[np.float32]] = []
    # First pass: validate shapes and infer the joint count from non-empty views.
    for raw in views:
        view = np.asarray(raw, dtype=np.float32)
        if view.size == 0:
            arrays.append(np.zeros((0, 0, 3), dtype=np.float32))
            continue
        if view.ndim == 2:
            if view.shape[-1] != 3:
                raise ValueError("Single-person pose inputs must have shape [joints, 3].")
            view = view[np.newaxis]
        elif view.ndim != 3 or view.shape[-1] != 3:
            raise ValueError("Each view must have shape [persons, joints, 3] or [joints, 3].")
        if joints is None:
            joints = int(view.shape[1])
        elif view.shape[1] != joints:
            raise ValueError("All views must use the same joint count.")
        arrays.append(np.ascontiguousarray(view, dtype=np.float32))
    if joints is None:
        raise ValueError("joint_count is required when all views are empty.")
    # Second pass: give empty views the final joint count and find the widest view.
    padded_views: list[npt.NDArray[np.float32]] = []
    max_persons = 0
    for view in arrays:
        if view.size == 0:
            view = np.zeros((0, joints, 3), dtype=np.float32)
        elif view.shape[1] != joints:
            raise ValueError("All views must use the same joint count.")
        max_persons = max(max_persons, int(view.shape[0]))
        padded_views.append(view)
    packed = np.zeros((len(padded_views), max_persons, joints, 3), dtype=np.float32)
    counts = np.zeros(len(padded_views), dtype=np.uint32)
    for index, view in enumerate(padded_views):
        n_people = int(view.shape[0])
        counts[index] = n_people
        if n_people:
            packed[index, :n_people] = view
    return packed, counts
def make_triangulation_config(
    cameras: Sequence[CameraLike],
    roomparams: RoomParamsLike,
    joint_names: Sequence[str],
    *,
    min_match_score: float = 0.95,
    min_group_size: int = 1,
) -> TriangulationConfig:
    """Assemble a bound TriangulationConfig from Python-level inputs.

    cameras are normalized through convert_cameras; roomparams must coerce to a
    float32 array of shape [2, 3] or ValueError is raised; joint_names are
    stringified. min_match_score (default 0.95) and min_group_size (default 1)
    populate the embedded TriangulationOptions.
    """
    config = TriangulationConfig()
    config.cameras = convert_cameras(cameras)
    room = np.asarray(roomparams, dtype=np.float32)
    if room.shape != (2, 3):
        raise ValueError("roomparams must have shape [2, 3].")
    config.roomparams = room.tolist()
    config.joint_names = [str(name) for name in joint_names]
    options = TriangulationOptions()
    options.min_match_score = float(min_match_score)
    options.min_group_size = int(min_group_size)
    config.options = options
    return config
+1
View File
@@ -0,0 +1 @@
-14
View File
@@ -1,14 +0,0 @@
# Standard compile options for the C++ executable
FLAGS = -fPIC -O3 -march=native -Wall -Werror -flto=auto
# The Python interface through SWIG
PYTHON_VERSION = $(shell python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}");')
PYTHONI = -I/usr/include/python$(PYTHON_VERSION)/
PYTHONL = -Xlinker -export-dynamic
# Default super-target
all:
cd ../rpt/ && g++ $(FLAGS) -std=c++2a -c *.cpp ; cd ../swig/
swig -c++ -python -keyword -o rpt_wrap.cxx rpt.i
g++ $(FLAGS) $(PYTHONI) -c rpt_wrap.cxx -o rpt_wrap.o
g++ $(FLAGS) $(PYTHONL) -shared ../rpt/*.o rpt_wrap.o -o _rpt.so
-70
View File
@@ -1,70 +0,0 @@
%module rpt
%{
// Includes the header in the wrapper code
#include "../rpt/camera.hpp"
#include "../rpt/interface.hpp"
%}
// Some modules need extra imports beside the main .hpp file
%include "std_array.i"
%include "std_string.i"
%include "std_vector.i"
// Instantiate templates used by example
// If the template is too nested (>2), parts of it need to be declared as well
namespace std {
%template(FloatMatrix_3x3) array<array<float, 3>, 3>;
%template(VectorFloat) vector<float>;
%template(FloatMatrix_3x1) array<array<float, 1>, 3>;
%template(FloatMatrix_3x4) array<array<float, 3>, 4>;
%template(Matrix_Jx4) vector<array<float, 4>>;
%template(Matrix_NxJx4) vector<vector<array<float, 4>>>;
%template(Matrix_Jx3) vector<array<float, 3>>;
%template(Matrix_VxNxJx3) vector<vector<vector<array<float, 3>>>>;
%template(VectorCamera) vector<Camera>;
%template(FloatMatrix_2x3) array<array<float, 3>, 2>;
%template(VectorString) vector<std::string>;
}
// Convert vector to native (python) list
%naturalvar Camera::K;
%naturalvar Camera::DC;
%naturalvar Camera::R;
%naturalvar Camera::T;
// Improve printing of result objects
%extend Camera {
std::string __str__() const {
return $self->to_string();
}
}
// Ignore: Warning 503: Can't wrap 'operator <<' unless renamed to a valid identifier.
%warnfilter(503) Camera;
// Ignore: Warning 511: Can't use keyword arguments with overloaded functions.
// The warning is caused by enabling keyword arguments, which doesn't work for vectors.
#pragma SWIG nowarn=511
// Parse the header file to generate wrappers
%include "../rpt/camera.hpp"
%include "../rpt/interface.hpp"
// Add additional Python code to the module
%pythoncode %{
def convert_cameras(cameras):
"""Convert cameras from Python to C++."""
c_cameras = []
for cam in cameras:
camera = Camera()
camera.name = cam["name"]
camera.K = cam["K"]
camera.DC = cam["DC"]
camera.R = cam["R"]
camera.T = cam["T"]
camera.width = cam["width"]
camera.height = cam["height"]
camera.type = cam.get("type", "pinhole")
c_cameras.append(camera)
return c_cameras
%}
+4 -27
View File
@@ -2,33 +2,10 @@
Various module tests
### Triangulator
### Python Interface
```bash
cd /RapidPoseTriangulation/tests/ && python3 test_interface.py && cd ..
cd /RapidPoseTriangulation/
uv sync --group dev
uv run pytest tests/test_interface.py
```
### Onnx C++ Interface
```bash
cd /RapidPoseTriangulation/tests/
g++ -std=c++17 -O3 -march=native -Wall \
$(pkg-config --cflags opencv4) \
-I /onnxruntime/include \
-I /onnxruntime/include/onnxruntime/core/session \
-I /onnxruntime/include/onnxruntime/core/providers/tensorrt \
-L /onnxruntime/build/Linux/Release \
test_utils2d.cpp \
-o my_app.bin \
-Wl,--start-group \
-lonnxruntime_providers_tensorrt \
-lonnxruntime_providers_shared \
-lonnxruntime_providers_cuda \
-lonnxruntime \
-Wl,--end-group \
$(pkg-config --libs opencv4) \
-Wl,-rpath,/onnxruntime/build/Linux/Release
./my_app.bin
```
+257 -108
View File
@@ -1,132 +1,281 @@
import json
import sys
import time
from pathlib import Path
import numpy as np
import pytest
sys.path.append("../swig/")
import rpt
# ==================================================================================================
ROOT = Path(__file__).resolve().parents[1]
JOINT_NAMES = [
"nose",
"eye_left",
"eye_right",
"ear_left",
"ear_right",
"shoulder_left",
"shoulder_right",
"elbow_left",
"elbow_right",
"wrist_left",
"wrist_right",
"hip_left",
"hip_right",
"knee_left",
"knee_right",
"ankle_left",
"ankle_right",
"hip_middle",
"shoulder_middle",
"head",
]
def main():
print("")
def load_case(camera_path: str, pose_path: str):
with (ROOT / camera_path).open("r", encoding="utf-8") as file:
camera_data = json.load(file)
with (ROOT / pose_path).open("r", encoding="utf-8") as file:
pose_data = json.load(file)
# Test camera structure
camera = rpt.Camera()
camera.name = "Camera 1"
camera.K = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
camera.DC = [0, 0, 0, 0, 0]
camera.R = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
camera.T = [[1], [2], [3]]
camera.width = 640
camera.height = 480
print(camera)
print("")
poses_2d, person_counts = rpt.pack_poses_2d(pose_data["2D"], joint_count=len(JOINT_NAMES))
return poses_2d, person_counts, camera_data["cameras"]
# Load input data
roomparams = [[4.8, 6.0, 2.0], [0, 0, 1.0]]
joint_names = [
"nose",
"eye_left",
"eye_right",
"ear_left",
"ear_right",
"shoulder_left",
"shoulder_right",
"elbow_left",
"elbow_right",
"wrist_left",
"wrist_right",
"hip_left",
"hip_right",
"knee_left",
"knee_right",
"ankle_left",
"ankle_right",
"hip_middle",
"shoulder_middle",
"head",
]
cpath = "/RapidPoseTriangulation/data/h1/sample.json"
ppath = "/RapidPoseTriangulation/tests/poses_h1.json"
with open(cpath, "r") as file:
cdata = json.load(file)
with open(ppath, "r") as file:
pdata = json.load(file)
cams = cdata["cameras"]
poses_2d = pdata["2D"]
cameras = rpt.convert_cameras(cams)
# Run triangulation
triangulator = rpt.Triangulator(min_match_score=0.95)
stime = time.time()
poses_3d = triangulator.triangulate_poses(
poses_2d, cameras, roomparams, joint_names
def make_config(cameras, roomparams) -> rpt.TriangulationConfig:
return rpt.make_triangulation_config(
cameras,
np.asarray(roomparams, dtype=np.float32),
JOINT_NAMES,
)
print("3D time:", time.time() - stime)
print(np.array(poses_3d))
print("")
# Load input data
roomparams = [[5.6, 6.4, 2.4], [0, -0.5, 1.2]]
cpath = "/RapidPoseTriangulation/data/p1/sample.json"
ppath = "/RapidPoseTriangulation/tests/poses_p1.json"
with open(cpath, "r") as file:
cdata = json.load(file)
with open(ppath, "r") as file:
pdata = json.load(file)
cams = cdata["cameras"]
poses_2d = pdata["2D"]
cameras = rpt.convert_cameras(cams)
# Run triangulation
triangulator.reset()
stime = time.time()
poses_3d = triangulator.triangulate_poses(
poses_2d, cameras, roomparams, joint_names
def test_camera_structure_repr():
camera = rpt.make_camera(
"Camera 1",
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0, 0, 0, 0],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1], [2], [3]],
640,
480,
rpt.CameraModel.PINHOLE,
)
print("3D time:", time.time() - stime)
print(np.array(poses_3d))
print("")
# Run again to test last pose cache
stime = time.time()
poses_3d = triangulator.triangulate_poses(
poses_2d, cameras, roomparams, joint_names
rendered = repr(camera)
assert "Camera 1" in rendered
assert "pinhole" in rendered
@pytest.mark.parametrize(
("camera_path", "pose_path", "roomparams"),
[
("data/h1/sample.json", "tests/poses_h1.json", [[4.8, 6.0, 2.0], [0.0, 0.0, 1.0]]),
("data/p1/sample.json", "tests/poses_p1.json", [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]]),
("data/e1/sample.json", "tests/poses_e1.json", [[6.0, 5.0, 2.0], [1.5, 1.0, -0.5]]),
],
)
def test_triangulate_samples(camera_path: str, pose_path: str, roomparams):
poses_2d, person_counts, cameras = load_case(camera_path, pose_path)
config = make_config(cameras, roomparams)
poses_3d = rpt.triangulate_poses(poses_2d, person_counts, config)
assert isinstance(poses_3d, np.ndarray)
assert poses_3d.dtype == np.float32
assert poses_3d.ndim == 3
assert poses_3d.shape[1:] == (len(JOINT_NAMES), 4)
assert poses_3d.shape[0] > 0
assert np.isfinite(poses_3d).all()
def test_triangulate_repeatability():
poses_2d, person_counts, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
first = rpt.triangulate_poses(poses_2d, person_counts, config)
second = rpt.triangulate_poses(poses_2d, person_counts, config)
np.testing.assert_allclose(first, second, rtol=1e-5, atol=1e-5)
def test_build_pair_candidates_exposes_cartesian_view_pairs():
poses_2d, person_counts = rpt.pack_poses_2d(
[
np.zeros((2, 2, 3), dtype=np.float32),
np.zeros((1, 2, 3), dtype=np.float32),
np.zeros((3, 2, 3), dtype=np.float32),
],
joint_count=2,
)
print("3D time:", time.time() - stime)
print(np.array(poses_3d))
print("")
# Load input data
roomparams = [[6.0, 5.0, 2.0], [1.5, 1.0, -0.5]]
cpath = "/RapidPoseTriangulation/data/e1/sample.json"
ppath = "/RapidPoseTriangulation/tests/poses_e1.json"
with open(cpath, "r") as file:
cdata = json.load(file)
with open(ppath, "r") as file:
pdata = json.load(file)
cams = cdata["cameras"]
poses_2d = pdata["2D"]
cameras = rpt.convert_cameras(cams)
pairs = rpt.build_pair_candidates(poses_2d, person_counts)
# Run triangulation
triangulator.reset()
stime = time.time()
poses_3d = triangulator.triangulate_poses(
poses_2d, cameras, roomparams, joint_names
assert len(pairs) == (2 * 1) + (2 * 3) + (1 * 3)
assert (pairs[0].view1, pairs[0].view2, pairs[0].person1, pairs[0].person2) == (0, 1, 0, 0)
assert pairs[-1].global_person2 == 5
def test_triangulate_accepts_empty_previous_poses():
poses_2d, person_counts, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
empty_previous = np.zeros((0, len(JOINT_NAMES), 4), dtype=np.float32)
empty_previous_ids = np.zeros((0,), dtype=np.int64)
baseline = rpt.triangulate_poses(poses_2d, person_counts, config)
result = rpt.triangulate_with_report(
poses_2d,
person_counts,
config,
empty_previous,
empty_previous_ids,
)
print("3D time:", time.time() - stime)
print(np.array(poses_3d))
print("")
triangulator.print_stats()
print("")
np.testing.assert_allclose(result.poses_3d, baseline, rtol=1e-5, atol=1e-5)
assert result.association.unmatched_previous_track_ids == []
# ==================================================================================================
def test_triangulate_debug_matches_final_output():
poses_2d, person_counts, cameras = load_case("data/h1/sample.json", "tests/poses_h1.json")
config = make_config(cameras, [[4.8, 6.0, 2.0], [0.0, 0.0, 1.0]])
if __name__ == "__main__":
main()
final_poses = rpt.triangulate_poses(poses_2d, person_counts, config)
trace = rpt.triangulate_debug(poses_2d, person_counts, config)
np.testing.assert_allclose(trace.final_poses, final_poses, rtol=1e-5, atol=1e-5)
assert len(trace.pairs) >= len(trace.core_proposals)
assert trace.association.pose_previous_track_ids == []
for group in trace.grouping.groups:
assert all(0 <= index < len(trace.core_proposals) for index in group.proposal_indices)
for merge_indices in trace.merge.group_proposal_indices:
assert all(0 <= index < len(trace.core_proposals) for index in merge_indices)
def test_filter_pairs_with_previous_poses_returns_debug_matches():
poses_2d, person_counts, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
previous_poses = rpt.triangulate_poses(poses_2d, person_counts, config)
previous_track_ids = np.arange(previous_poses.shape[0], dtype=np.int64) + 100
debug = rpt.filter_pairs_with_previous_poses(
poses_2d,
person_counts,
config,
previous_poses,
previous_track_ids,
)
assert debug.used_previous_poses is True
assert len(debug.matches) == len(rpt.build_pair_candidates(poses_2d, person_counts))
assert len(debug.kept_pairs) == len(debug.kept_pair_indices)
assert any(match.matched_view1 or match.matched_view2 for match in debug.matches)
assert any(match.previous_track_id >= 100 for match in debug.matches if match.previous_pose_index >= 0)
def test_triangulate_with_report_resolves_previous_track_ids():
poses_2d, person_counts, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
previous_poses = rpt.triangulate_poses(poses_2d, person_counts, config)
previous_track_ids = np.arange(previous_poses.shape[0], dtype=np.int64) + 100
result = rpt.triangulate_with_report(
poses_2d,
person_counts,
config,
previous_poses,
previous_track_ids,
)
assert result.poses_3d.shape == previous_poses.shape
assert len(result.association.pose_previous_track_ids) == result.poses_3d.shape[0]
matched_track_ids = sorted(
track_id for track_id in result.association.pose_previous_track_ids if track_id >= 0
)
unmatched_track_ids = sorted(result.association.unmatched_previous_track_ids)
for pose_index in result.association.new_pose_indices:
assert result.association.pose_previous_track_ids[pose_index] == -1
for pose_index in result.association.ambiguous_pose_indices:
assert result.association.pose_previous_track_ids[pose_index] == -1
assert matched_track_ids == sorted(set(matched_track_ids))
assert sorted(matched_track_ids + unmatched_track_ids) == list(previous_track_ids)
def test_triangulate_with_report_rejects_duplicate_previous_track_ids():
poses_2d, person_counts, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
previous_poses = rpt.triangulate_poses(poses_2d, person_counts, config)
duplicate_ids = np.zeros((previous_poses.shape[0],), dtype=np.int64)
with pytest.raises(ValueError, match="unique"):
rpt.triangulate_with_report(
poses_2d,
person_counts,
config,
previous_poses,
duplicate_ids,
)
def test_triangulate_does_not_mutate_inputs():
poses_2d, person_counts, cameras = load_case("data/h1/sample.json", "tests/poses_h1.json")
config = make_config(cameras, [[4.8, 6.0, 2.0], [0.0, 0.0, 1.0]])
poses_before = poses_2d.copy()
counts_before = person_counts.copy()
rpt.triangulate_poses(poses_2d, person_counts, config)
np.testing.assert_array_equal(poses_2d, poses_before)
np.testing.assert_array_equal(person_counts, counts_before)
def test_pack_poses_2d_from_ragged_inputs():
packed, counts = rpt.pack_poses_2d(
[
[[[1, 2, 0.5], [3, 4, 0.6]]],
np.asarray(
[
[[5, 6, 0.7], [7, 8, 0.8]],
[[9, 10, 0.9], [11, 12, 1.0]],
],
dtype=np.float32,
),
[],
],
joint_count=2,
)
assert packed.shape == (3, 2, 2, 3)
assert packed.dtype == np.float32
np.testing.assert_array_equal(counts, np.asarray([1, 2, 0], dtype=np.uint32))
np.testing.assert_allclose(packed[0, 0], np.asarray([[1, 2, 0.5], [3, 4, 0.6]], dtype=np.float32))
np.testing.assert_allclose(packed[1, 1], np.asarray([[9, 10, 0.9], [11, 12, 1.0]], dtype=np.float32))
np.testing.assert_array_equal(packed[2], np.zeros((2, 2, 3), dtype=np.float32))
def test_pack_poses_2d_rejects_inconsistent_joint_count():
with pytest.raises(ValueError, match="joint count"):
rpt.pack_poses_2d(
[
[[[1, 2, 0.5], [3, 4, 0.6]]],
[[[5, 6, 0.7]]],
]
)
def test_pack_poses_2d_rejects_invalid_last_dimension():
with pytest.raises(ValueError, match="shape"):
rpt.pack_poses_2d([np.zeros((1, 2, 2), dtype=np.float32)])
def test_make_triangulation_config_builds_bound_struct():
_, _, cameras = load_case("data/p1/sample.json", "tests/poses_p1.json")
config = make_config(cameras, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
assert isinstance(config, rpt.TriangulationConfig)
assert len(config.cameras) > 0
np.testing.assert_allclose(config.roomparams, [[5.6, 6.4, 2.4], [0.0, -0.5, 1.2]])
assert config.joint_names == JOINT_NAMES
assert config.options.min_match_score == pytest.approx(0.95)
assert config.options.min_group_size == 1
-96
View File
@@ -1,96 +0,0 @@
#include <filesystem>
#include <vector>
#include <string>
#include <memory>
#include <opencv2/opencv.hpp>
#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp"
// =================================================================================================
// =================================================================================================
int main(int argc, char **argv)
{
using namespace utils_2d_pose;
std::string base_path = "/RapidPoseTriangulation/extras/mmdeploy/exports/";
std::string model_path1 = base_path + "rtmdet-nano_1x320x320x3_fp16_extra-steps.onnx";
std::string model_path2 = base_path + "rtmpose-m_1x384x288x3_fp16_extra-steps.onnx";
std::vector<std::string> img_paths = {
"/RapidPoseTriangulation/data/h1/54138969-img_003201.jpg",
"/RapidPoseTriangulation/data/h1/55011271-img_003201.jpg",
"/RapidPoseTriangulation/data/h1/58860488-img_003201.jpg",
"/RapidPoseTriangulation/data/h1/60457274-img_003201.jpg",
};
// {
// std::cout << "\nTesting RTMDet and RTMPose" << std::endl;
// RTMDet model1(model_path1, 0.3, 0.1 * 0.1, 30);
// RTMPose model2(model_path2, 30);
// for (size_t i = 0; i < img_paths.size(); i++)
// {
// cv::Mat img = cv::imread(img_paths[i]);
// cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
// auto outputs1 = model1.call(img);
// std::cout << "Model1 outputs: " << outputs1[0][0] << " " << outputs1[0][1] << " "
// << outputs1[0][2] << " " << outputs1[0][3] << " " << outputs1[0][4] << " "
// << std::endl;
// for (size_t j = 0; j < outputs1.size(); j++)
// {
// std::vector<std::array<float, 5>> bboxes = {outputs1[j]};
// auto outputs2 = model2.call(img, bboxes);
// std::cout << "Model2 outputs: " << outputs2[0][0][0] << " "
// << outputs2[0][0][1] << " " << outputs2[0][0][2] << " " << std::endl;
// }
// }
// }
// {
// std::cout << "\nTesting TopDown" << std::endl;
// TopDown model3(model_path1, model_path2, 0.3, 0.1 * 0.1, false, 30);
// for (size_t i = 0; i < img_paths.size(); i++)
// {
// cv::Mat img = cv::imread(img_paths[i]);
// cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
// auto outputs3 = model3.predict(img);
// std::cout << "Model3 outputs: " << outputs3[0][0][0] << " "
// << outputs3[0][0][1] << " " << outputs3[0][0][2] << " " << std::endl;
// }
// }
{
std::cout << "\nTesting PosePredictor 1" << std::endl;
PosePredictor model4(false, 0.3, 0.1 * 0.1, false);
std::vector<cv::Mat> images;
for (size_t i = 0; i < img_paths.size(); i++)
{
cv::Mat img = cv::imread(img_paths[i]);
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
images.push_back(img);
}
auto outputs4 = model4.predict(images);
std::cout << "Model4 outputs: " << outputs4[0][0][0][0] << " "
<< outputs4[0][0][0][1] << " " << outputs4[0][0][0][2] << " " << std::endl;
}
{
std::cout << "\nTesting PosePredictor 2" << std::endl;
PosePredictor model5(false, 0.3, 0.1 * 0.1, true);
std::vector<cv::Mat> images;
for (size_t i = 0; i < img_paths.size(); i++)
{
cv::Mat img = cv::imread(img_paths[i]);
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
images.push_back(img);
}
auto outputs5 = model5.predict(images);
std::cout << "Model5 outputs: " << outputs5[0][0][0][0] << " "
<< outputs5[0][0][0][1] << " " << outputs5[0][0][0][2] << " " << std::endl;
}
return 0;
}
Generated
+312
View File
@@ -0,0 +1,312 @@
version = 1
revision = 3
requires-python = ">=3.10"
resolution-markers = [
"python_full_version >= '3.11'",
"python_full_version < '3.11'",
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
[[package]]
name = "exceptiongroup"
version = "1.3.1"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
dependencies = [
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
]
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
]
[[package]]
name = "iniconfig"
version = "2.3.0"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]
[[package]]
name = "numpy"
version = "2.2.6"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
resolution-markers = [
"python_full_version < '3.11'",
]
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/21/7d2a95e4bba9dc13d043ee156a356c0a8f0c6309dff6b21b4d71a073b8a8/numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd", size = 20276440, upload-time = "2025-05-17T22:38:04.611Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/3e/ed6db5be21ce87955c0cbd3009f2803f59fa08df21b5df06862e2d8e2bdd/numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb", size = 21165245, upload-time = "2025-05-17T21:27:58.555Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/c2/4b9221495b2a132cc9d2eb862e21d42a009f5a60e45fc44b00118c174bff/numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90", size = 14360048, upload-time = "2025-05-17T21:28:21.406Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/77/dc2fcfc66943c6410e2bf598062f5959372735ffda175b39906d54f02349/numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163", size = 5340542, upload-time = "2025-05-17T21:28:30.931Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7a/4f/1cb5fdc353a5f5cc7feb692db9b8ec2c3d6405453f982435efc52561df58/numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf", size = 6878301, upload-time = "2025-05-17T21:28:41.613Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/17/96a3acd228cec142fcb8723bd3cc39c2a474f7dcf0a5d16731980bcafa95/numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83", size = 14297320, upload-time = "2025-05-17T21:29:02.78Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b4/63/3de6a34ad7ad6646ac7d2f55ebc6ad439dbbf9c4370017c50cf403fb19b5/numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915", size = 16801050, upload-time = "2025-05-17T21:29:27.675Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/b6/89d837eddef52b3d0cec5c6ba0456c1bf1b9ef6a6672fc2b7873c3ec4e2e/numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680", size = 15807034, upload-time = "2025-05-17T21:29:51.102Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/01/c8/dc6ae86e3c61cfec1f178e5c9f7858584049b6093f843bca541f94120920/numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289", size = 18614185, upload-time = "2025-05-17T21:30:18.703Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5b/c5/0064b1b7e7c89137b471ccec1fd2282fceaae0ab3a9550f2568782d80357/numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d", size = 6527149, upload-time = "2025-05-17T21:30:29.788Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a3/dd/4b822569d6b96c39d1215dbae0582fd99954dcbcf0c1a13c61783feaca3f/numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3", size = 12904620, upload-time = "2025-05-17T21:30:48.994Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/a8/4f83e2aa666a9fbf56d6118faaaf5f1974d456b1823fda0a176eff722839/numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae", size = 21176963, upload-time = "2025-05-17T21:31:19.36Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/2b/64e1affc7972decb74c9e29e5649fac940514910960ba25cd9af4488b66c/numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a", size = 14406743, upload-time = "2025-05-17T21:31:41.087Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/9f/0121e375000b5e50ffdd8b25bf78d8e1a5aa4cca3f185d41265198c7b834/numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42", size = 5352616, upload-time = "2025-05-17T21:31:50.072Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/0d/b48c405c91693635fbe2dcd7bc84a33a602add5f63286e024d3b6741411c/numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491", size = 6889579, upload-time = "2025-05-17T21:32:01.712Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/52/b8/7f0554d49b565d0171eab6e99001846882000883998e7b7d9f0d98b1f934/numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a", size = 14312005, upload-time = "2025-05-17T21:32:23.332Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf", size = 16821570, upload-time = "2025-05-17T21:32:47.991Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/83/6c/44d0325722cf644f191042bf47eedad61c1e6df2432ed65cbe28509d404e/numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1", size = 15818548, upload-time = "2025-05-17T21:33:11.728Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/9d/81e8216030ce66be25279098789b665d49ff19eef08bfa8cb96d4957f422/numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab", size = 18620521, upload-time = "2025-05-17T21:33:39.139Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/fd/e19617b9530b031db51b0926eed5345ce8ddc669bb3bc0044b23e275ebe8/numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47", size = 6525866, upload-time = "2025-05-17T21:33:50.273Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/0a/f354fb7176b81747d870f7991dc763e157a934c717b67b58456bc63da3df/numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303", size = 12907455, upload-time = "2025-05-17T21:34:09.135Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/5d/c00588b6cf18e1da539b45d3598d3557084990dcc4331960c15ee776ee41/numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff", size = 20875348, upload-time = "2025-05-17T21:34:39.648Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/66/ee/560deadcdde6c2f90200450d5938f63a34b37e27ebff162810f716f6a230/numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c", size = 14119362, upload-time = "2025-05-17T21:35:01.241Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/65/4baa99f1c53b30adf0acd9a5519078871ddde8d2339dc5a7fde80d9d87da/numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3", size = 5084103, upload-time = "2025-05-17T21:35:10.622Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cc/89/e5a34c071a0570cc40c9a54eb472d113eea6d002e9ae12bb3a8407fb912e/numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282", size = 6625382, upload-time = "2025-05-17T21:35:21.414Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/35/8c80729f1ff76b3921d5c9487c7ac3de9b2a103b1cd05e905b3090513510/numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87", size = 14018462, upload-time = "2025-05-17T21:35:42.174Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249", size = 16527618, upload-time = "2025-05-17T21:36:06.711Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/c6/03ed30992602c85aa3cd95b9070a514f8b3c33e31124694438d88809ae36/numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49", size = 15505511, upload-time = "2025-05-17T21:36:29.965Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/25/5761d832a81df431e260719ec45de696414266613c9ee268394dd5ad8236/numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de", size = 18313783, upload-time = "2025-05-17T21:36:56.883Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/0a/72d5a3527c5ebffcd47bde9162c39fae1f90138c961e5296491ce778e682/numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4", size = 6246506, upload-time = "2025-05-17T21:37:07.368Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/fa/8c9210162ca1b88529ab76b41ba02d433fd54fecaf6feb70ef9f124683f1/numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2", size = 12614190, upload-time = "2025-05-17T21:37:26.213Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/5c/6657823f4f594f72b5471f1db1ab12e26e890bb2e41897522d134d2a3e81/numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84", size = 20867828, upload-time = "2025-05-17T21:37:56.699Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dc/9e/14520dc3dadf3c803473bd07e9b2bd1b69bc583cb2497b47000fed2fa92f/numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b", size = 14143006, upload-time = "2025-05-17T21:38:18.291Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/06/7e96c57d90bebdce9918412087fc22ca9851cceaf5567a45c1f404480e9e/numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d", size = 5076765, upload-time = "2025-05-17T21:38:27.319Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/ed/63d920c23b4289fdac96ddbdd6132e9427790977d5457cd132f18e76eae0/numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566", size = 6617736, upload-time = "2025-05-17T21:38:38.141Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/85/c5/e19c8f99d83fd377ec8c7e0cf627a8049746da54afc24ef0a0cb73d5dfb5/numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f", size = 14010719, upload-time = "2025-05-17T21:38:58.433Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/49/4df9123aafa7b539317bf6d342cb6d227e49f7a35b99c287a6109b13dd93/numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f", size = 16526072, upload-time = "2025-05-17T21:39:22.638Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b2/6c/04b5f47f4f32f7c2b0e7260442a8cbcf8168b0e1a41ff1495da42f42a14f/numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868", size = 15503213, upload-time = "2025-05-17T21:39:45.865Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/0a/5cd92e352c1307640d5b6fec1b2ffb06cd0dabe7d7b8227f97933d378422/numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d", size = 18316632, upload-time = "2025-05-17T21:40:13.331Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/3b/5cba2b1d88760ef86596ad0f3d484b1cbff7c115ae2429678465057c5155/numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd", size = 6244532, upload-time = "2025-05-17T21:43:46.099Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/3b/d58c12eafcb298d4e6d0d40216866ab15f59e55d148a5658bb3132311fcf/numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c", size = 12610885, upload-time = "2025-05-17T21:44:05.145Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/9e/4bf918b818e516322db999ac25d00c75788ddfd2d2ade4fa66f1f38097e1/numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6", size = 20963467, upload-time = "2025-05-17T21:40:44Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/66/d2de6b291507517ff2e438e13ff7b1e2cdbdb7cb40b3ed475377aece69f9/numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda", size = 14225144, upload-time = "2025-05-17T21:41:05.695Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e4/25/480387655407ead912e28ba3a820bc69af9adf13bcbe40b299d454ec011f/numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40", size = 5200217, upload-time = "2025-05-17T21:41:15.903Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/4a/6e313b5108f53dcbf3aca0c0f3e9c92f4c10ce57a0a721851f9785872895/numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8", size = 6712014, upload-time = "2025-05-17T21:41:27.321Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/30/172c2d5c4be71fdf476e9de553443cf8e25feddbe185e0bd88b096915bcc/numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f", size = 14077935, upload-time = "2025-05-17T21:41:49.738Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/12/fb/9e743f8d4e4d3c710902cf87af3512082ae3d43b945d5d16563f26ec251d/numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa", size = 16600122, upload-time = "2025-05-17T21:42:14.046Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/12/75/ee20da0e58d3a66f204f38916757e01e33a9737d0b22373b3eb5a27358f9/numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571", size = 15586143, upload-time = "2025-05-17T21:42:37.464Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/95/bef5b37f29fc5e739947e9ce5179ad402875633308504a52d188302319c8/numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1", size = 18385260, upload-time = "2025-05-17T21:43:05.189Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/09/04/f2f83279d287407cf36a7a8053a5abe7be3622a4363337338f2585e4afda/numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff", size = 6377225, upload-time = "2025-05-17T21:43:16.254Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/3b/d94a75f4dbf1ef5d321523ecac21ef23a3cd2ac8b78ae2aac40873590229/numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d", size = 21040391, upload-time = "2025-05-17T21:44:35.948Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/f4/09b2fa1b58f0fb4f7c7963a1649c64c4d315752240377ed74d9cd878f7b5/numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db", size = 6786754, upload-time = "2025-05-17T21:44:47.446Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/30/feba75f143bdc868a1cc3f44ccfa6c4b9ec522b36458e738cd00f67b573f/numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543", size = 16643476, upload-time = "2025-05-17T21:45:11.871Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/37/48/ac2a9584402fb6c0cd5b5d1a91dcf176b15760130dd386bbafdbfe3640bf/numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00", size = 12812666, upload-time = "2025-05-17T21:45:31.426Z" },
]
[[package]]
name = "numpy"
version = "2.4.3"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
resolution-markers = [
"python_full_version >= '3.11'",
]
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/8b/c265f4823726ab832de836cdd184d0986dcf94480f81e8739692a7ac7af2/numpy-2.4.3.tar.gz", hash = "sha256:483a201202b73495f00dbc83796c6ae63137a9bdade074f7648b3e32613412dd", size = 20727743, upload-time = "2026-03-09T07:58:53.426Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/51/5093a2df15c4dc19da3f79d1021e891f5dcf1d9d1db6ba38891d5590f3fe/numpy-2.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:33b3bf58ee84b172c067f56aeadc7ee9ab6de69c5e800ab5b10295d54c581adb", size = 16957183, upload-time = "2026-03-09T07:55:57.774Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/7c/c061f3de0630941073d2598dc271ac2f6cbcf5c83c74a5870fea07488333/numpy-2.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ba7b51e71c05aa1f9bc3641463cd82308eab40ce0d5c7e1fd4038cbf9938147", size = 14968734, upload-time = "2026-03-09T07:56:00.494Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/27/d26c85cbcd86b26e4f125b0668e7a7c0542d19dd7d23ee12e87b550e95b5/numpy-2.4.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a1988292870c7cb9d0ebb4cc96b4d447513a9644801de54606dc7aabf2b7d920", size = 5475288, upload-time = "2026-03-09T07:56:02.857Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2b/09/3c4abbc1dcd8010bf1a611d174c7aa689fc505585ec806111b4406f6f1b1/numpy-2.4.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:23b46bb6d8ecb68b58c09944483c135ae5f0e9b8d8858ece5e4ead783771d2a9", size = 6805253, upload-time = "2026-03-09T07:56:04.53Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/21/bc/e7aa3f6817e40c3f517d407742337cbb8e6fc4b83ce0b55ab780c829243b/numpy-2.4.3-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a016db5c5dba78fa8fe9f5d80d6708f9c42ab087a739803c0ac83a43d686a470", size = 15969479, upload-time = "2026-03-09T07:56:06.638Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/51/9f5d7a41f0b51649ddf2f2320595e15e122a40610b233d51928dd6c92353/numpy-2.4.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:715de7f82e192e8cae5a507a347d97ad17598f8e026152ca97233e3666daaa71", size = 16901035, upload-time = "2026-03-09T07:56:09.405Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/64/6e/b221dd847d7181bc5ee4857bfb026182ef69499f9305eb1371cbb1aea626/numpy-2.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2ddb7919366ee468342b91dea2352824c25b55814a987847b6c52003a7c97f15", size = 17325657, upload-time = "2026-03-09T07:56:12.067Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/b8/8f3fd2da596e1063964b758b5e3c970aed1949a05200d7e3d46a9d46d643/numpy-2.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a315e5234d88067f2d97e1f2ef670a7569df445d55400f1e33d117418d008d52", size = 18635512, upload-time = "2026-03-09T07:56:14.629Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/24/2993b775c37e39d2f8ab4125b44337ab0b2ba106c100980b7c274a22bee7/numpy-2.4.3-cp311-cp311-win32.whl", hash = "sha256:2b3f8d2c4589b1a2028d2a770b0fc4d1f332fb5e01521f4de3199a896d158ddd", size = 6238100, upload-time = "2026-03-09T07:56:17.243Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/1d/edccf27adedb754db7c4511d5eac8b83f004ae948fe2d3509e8b78097d4c/numpy-2.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:77e76d932c49a75617c6d13464e41203cd410956614d0a0e999b25e9e8d27eec", size = 12609816, upload-time = "2026-03-09T07:56:19.089Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/82/190b99153480076c8dce85f4cfe7d53ea84444145ffa54cb58dcd460d66b/numpy-2.4.3-cp311-cp311-win_arm64.whl", hash = "sha256:eb610595dd91560905c132c709412b512135a60f1851ccbd2c959e136431ff67", size = 10485757, upload-time = "2026-03-09T07:56:21.753Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/ed/6388632536f9788cea23a3a1b629f25b43eaacd7d7377e5d6bc7b9deb69b/numpy-2.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:61b0cbabbb6126c8df63b9a3a0c4b1f44ebca5e12ff6997b80fcf267fb3150ef", size = 16669628, upload-time = "2026-03-09T07:56:24.252Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/1b/ee2abfc68e1ce728b2958b6ba831d65c62e1b13ce3017c13943f8f9b5b2e/numpy-2.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7395e69ff32526710748f92cd8c9849b361830968ea3e24a676f272653e8983e", size = 14696872, upload-time = "2026-03-09T07:56:26.991Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ba/d1/780400e915ff5638166f11ca9dc2c5815189f3d7cf6f8759a1685e586413/numpy-2.4.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:abdce0f71dcb4a00e4e77f3faf05e4616ceccfe72ccaa07f47ee79cda3b7b0f4", size = 5203489, upload-time = "2026-03-09T07:56:29.414Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/bb/baffa907e9da4cc34a6e556d6d90e032f6d7a75ea47968ea92b4858826c4/numpy-2.4.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:48da3a4ee1336454b07497ff7ec83903efa5505792c4e6d9bf83d99dc07a1e18", size = 6550814, upload-time = "2026-03-09T07:56:32.225Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/12/8c9f0c6c95f76aeb20fc4a699c33e9f827fa0d0f857747c73bb7b17af945/numpy-2.4.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32e3bef222ad6b052280311d1d60db8e259e4947052c3ae7dd6817451fc8a4c5", size = 15666601, upload-time = "2026-03-09T07:56:34.461Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bd/79/cc665495e4d57d0aa6fbcc0aa57aa82671dfc78fbf95fe733ed86d98f52a/numpy-2.4.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7dd01a46700b1967487141a66ac1a3cf0dd8ebf1f08db37d46389401512ca97", size = 16621358, upload-time = "2026-03-09T07:56:36.852Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a8/40/b4ecb7224af1065c3539f5ecfff879d090de09608ad1008f02c05c770cb3/numpy-2.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:76f0f283506c28b12bba319c0fab98217e9f9b54e6160e9c79e9f7348ba32e9c", size = 17016135, upload-time = "2026-03-09T07:56:39.337Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f7/b1/6a88e888052eed951afed7a142dcdf3b149a030ca59b4c71eef085858e43/numpy-2.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737f630a337364665aba3b5a77e56a68cc42d350edd010c345d65a3efa3addcc", size = 18345816, upload-time = "2026-03-09T07:56:42.31Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/8f/103a60c5f8c3d7fc678c19cd7b2476110da689ccb80bc18050efbaeae183/numpy-2.4.3-cp312-cp312-win32.whl", hash = "sha256:26952e18d82a1dbbc2f008d402021baa8d6fc8e84347a2072a25e08b46d698b9", size = 5960132, upload-time = "2026-03-09T07:56:44.851Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/7c/f5ee1bf6ed888494978046a809df2882aad35d414b622893322df7286879/numpy-2.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:65f3c2455188f09678355f5cae1f959a06b778bc66d535da07bf2ef20cd319d5", size = 12316144, upload-time = "2026-03-09T07:56:47.057Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/71/46/8d1cb3f7a00f2fb6394140e7e6623696e54c6318a9d9691bb4904672cf42/numpy-2.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:2abad5c7fef172b3377502bde47892439bae394a71bc329f31df0fd829b41a9e", size = 10220364, upload-time = "2026-03-09T07:56:49.849Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/d0/1fe47a98ce0df229238b77611340aff92d52691bcbc10583303181abf7fc/numpy-2.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b346845443716c8e542d54112966383b448f4a3ba5c66409771b8c0889485dd3", size = 16665297, upload-time = "2026-03-09T07:56:52.296Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/27/d9/4e7c3f0e68dfa91f21c6fb6cf839bc829ec920688b1ce7ec722b1a6202fb/numpy-2.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2629289168f4897a3c4e23dc98d6f1731f0fc0fe52fb9db19f974041e4cc12b9", size = 14691853, upload-time = "2026-03-09T07:56:54.992Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3a/66/bd096b13a87549683812b53ab211e6d413497f84e794fb3c39191948da97/numpy-2.4.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bb2e3cf95854233799013779216c57e153c1ee67a0bf92138acca0e429aefaee", size = 5198435, upload-time = "2026-03-09T07:56:57.184Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a2/2f/687722910b5a5601de2135c891108f51dfc873d8e43c8ed9f4ebb440b4a2/numpy-2.4.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:7f3408ff897f8ab07a07fbe2823d7aee6ff644c097cc1f90382511fe982f647f", size = 6546347, upload-time = "2026-03-09T07:56:59.531Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bf/ec/7971c4e98d86c564750393fab8d7d83d0a9432a9d78bb8a163a6dc59967a/numpy-2.4.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:decb0eb8a53c3b009b0962378065589685d66b23467ef5dac16cbe818afde27f", size = 15664626, upload-time = "2026-03-09T07:57:01.385Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/eb/7daecbea84ec935b7fc732e18f532073064a3816f0932a40a17f3349185f/numpy-2.4.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5f51900414fc9204a0e0da158ba2ac52b75656e7dce7e77fb9f84bfa343b4cc", size = 16608916, upload-time = "2026-03-09T07:57:04.008Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/58/2a2b4a817ffd7472dca4421d9f0776898b364154e30c95f42195041dc03b/numpy-2.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6bd06731541f89cdc01b261ba2c9e037f1543df7472517836b78dfb15bd6e476", size = 17015824, upload-time = "2026-03-09T07:57:06.347Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/ca/627a828d44e78a418c55f82dd4caea8ea4a8ef24e5144d9e71016e52fb40/numpy-2.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22654fe6be0e5206f553a9250762c653d3698e46686eee53b399ab90da59bd92", size = 18334581, upload-time = "2026-03-09T07:57:09.114Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cd/c0/76f93962fc79955fcba30a429b62304332345f22d4daec1cb33653425643/numpy-2.4.3-cp313-cp313-win32.whl", hash = "sha256:d71e379452a2f670ccb689ec801b1218cd3983e253105d6e83780967e899d687", size = 5958618, upload-time = "2026-03-09T07:57:11.432Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/3c/88af0040119209b9b5cb59485fa48b76f372c73068dbf9254784b975ac53/numpy-2.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:0a60e17a14d640f49146cb38e3f105f571318db7826d9b6fef7e4dce758faecd", size = 12312824, upload-time = "2026-03-09T07:57:13.586Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/ce/3d07743aced3d173f877c3ef6a454c2174ba42b584ab0b7e6d99374f51ed/numpy-2.4.3-cp313-cp313-win_arm64.whl", hash = "sha256:c9619741e9da2059cd9c3f206110b97583c7152c1dc9f8aafd4beb450ac1c89d", size = 10221218, upload-time = "2026-03-09T07:57:16.183Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/62/09/d96b02a91d09e9d97862f4fc8bfebf5400f567d8eb1fe4b0cc4795679c15/numpy-2.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7aa4e54f6469300ebca1d9eb80acd5253cdfa36f2c03d79a35883687da430875", size = 14819570, upload-time = "2026-03-09T07:57:18.564Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/ca/0b1aba3905fdfa3373d523b2b15b19029f4f3031c87f4066bd9d20ef6c6b/numpy-2.4.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d1b90d840b25874cf5cd20c219af10bac3667db3876d9a495609273ebe679070", size = 5326113, upload-time = "2026-03-09T07:57:21.052Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c0/63/406e0fd32fcaeb94180fd6a4c41e55736d676c54346b7efbce548b94a914/numpy-2.4.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a749547700de0a20a6718293396ec237bb38218049cfce788e08fcb716e8cf73", size = 6646370, upload-time = "2026-03-09T07:57:22.804Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/d0/10f7dc157d4b37af92720a196be6f54f889e90dcd30dce9dc657ed92c257/numpy-2.4.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f3c4a151a2e529adf49c1d54f0f57ff8f9b233ee4d44af623a81553ab86368", size = 15723499, upload-time = "2026-03-09T07:57:24.693Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/66/f1/d1c2bf1161396629701bc284d958dc1efa3a5a542aab83cf11ee6eb4cba5/numpy-2.4.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22c31dc07025123aedf7f2db9e91783df13f1776dc52c6b22c620870dc0fab22", size = 16657164, upload-time = "2026-03-09T07:57:27.676Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/be/cca19230b740af199ac47331a21c71e7a3d0ba59661350483c1600d28c37/numpy-2.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:148d59127ac95979d6f07e4d460f934ebdd6eed641db9c0db6c73026f2b2101a", size = 17081544, upload-time = "2026-03-09T07:57:30.664Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/c5/9602b0cbb703a0936fb40f8a95407e8171935b15846de2f0776e08af04c7/numpy-2.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a97cbf7e905c435865c2d939af3d93f99d18eaaa3cabe4256f4304fb51604349", size = 18380290, upload-time = "2026-03-09T07:57:33.763Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/81/9f24708953cd30be9ee36ec4778f4b112b45165812f2ada4cc5ea1c1f254/numpy-2.4.3-cp313-cp313t-win32.whl", hash = "sha256:be3b8487d725a77acccc9924f65fd8bce9af7fac8c9820df1049424a2115af6c", size = 6082814, upload-time = "2026-03-09T07:57:36.491Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/9e/52f6eaa13e1a799f0ab79066c17f7016a4a8ae0c1aefa58c82b4dab690b4/numpy-2.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1ec84fd7c8e652b0f4aaaf2e6e9cc8eaa9b1b80a537e06b2e3a2fb176eedcb26", size = 12452673, upload-time = "2026-03-09T07:57:38.281Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/04/b8cece6ead0b30c9fbd99bb835ad7ea0112ac5f39f069788c5558e3b1ab2/numpy-2.4.3-cp313-cp313t-win_arm64.whl", hash = "sha256:120df8c0a81ebbf5b9020c91439fccd85f5e018a927a39f624845be194a2be02", size = 10290907, upload-time = "2026-03-09T07:57:40.747Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/ae/3936f79adebf8caf81bd7a599b90a561334a658be4dcc7b6329ebf4ee8de/numpy-2.4.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:5884ce5c7acfae1e4e1b6fde43797d10aa506074d25b531b4f54bde33c0c31d4", size = 16664563, upload-time = "2026-03-09T07:57:43.817Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9b/62/760f2b55866b496bb1fa7da2a6db076bef908110e568b02fcfc1422e2a3a/numpy-2.4.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:297837823f5bc572c5f9379b0c9f3a3365f08492cbdc33bcc3af174372ebb168", size = 14702161, upload-time = "2026-03-09T07:57:46.169Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/32/af/a7a39464e2c0a21526fb4fb76e346fb172ebc92f6d1c7a07c2c139cc17b1/numpy-2.4.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:a111698b4a3f8dcbe54c64a7708f049355abd603e619013c346553c1fd4ca90b", size = 5208738, upload-time = "2026-03-09T07:57:48.506Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/29/8c/2a0cf86a59558fa078d83805589c2de490f29ed4fb336c14313a161d358a/numpy-2.4.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:4bd4741a6a676770e0e97fe9ab2e51de01183df3dcbcec591d26d331a40de950", size = 6543618, upload-time = "2026-03-09T07:57:50.591Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/b8/612ce010c0728b1c363fa4ea3aa4c22fe1c5da1de008486f8c2f5cb92fae/numpy-2.4.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54f29b877279d51e210e0c80709ee14ccbbad647810e8f3d375561c45ef613dd", size = 15680676, upload-time = "2026-03-09T07:57:52.34Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/7e/4f120ecc54ba26ddf3dc348eeb9eb063f421de65c05fc961941798feea18/numpy-2.4.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:679f2a834bae9020f81534671c56fd0cc76dd7e5182f57131478e23d0dc59e24", size = 16613492, upload-time = "2026-03-09T07:57:54.91Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/86/1b6020db73be330c4b45d5c6ee4295d59cfeef0e3ea323959d053e5a6909/numpy-2.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d84f0f881cb2225c2dfd7f78a10a5645d487a496c6668d6cc39f0f114164f3d0", size = 17031789, upload-time = "2026-03-09T07:57:57.641Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/3a/3b90463bf41ebc21d1b7e06079f03070334374208c0f9a1f05e4ae8455e7/numpy-2.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d213c7e6e8d211888cc359bab7199670a00f5b82c0978b9d1c75baf1eddbeac0", size = 18339941, upload-time = "2026-03-09T07:58:00.577Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a8/74/6d736c4cd962259fd8bae9be27363eb4883a2f9069763747347544c2a487/numpy-2.4.3-cp314-cp314-win32.whl", hash = "sha256:52077feedeff7c76ed7c9f1a0428558e50825347b7545bbb8523da2cd55c547a", size = 6007503, upload-time = "2026-03-09T07:58:03.331Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/48/39/c56ef87af669364356bb011922ef0734fc49dad51964568634c72a009488/numpy-2.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:0448e7f9caefb34b4b7dd2b77f21e8906e5d6f0365ad525f9f4f530b13df2afc", size = 12444915, upload-time = "2026-03-09T07:58:06.353Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/1f/ab8528e38d295fd349310807496fabb7cf9fe2e1f70b97bc20a483ea9d4a/numpy-2.4.3-cp314-cp314-win_arm64.whl", hash = "sha256:b44fd60341c4d9783039598efadd03617fa28d041fc37d22b62d08f2027fa0e7", size = 10494875, upload-time = "2026-03-09T07:58:08.734Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/ef/b7c35e4d5ef141b836658ab21a66d1a573e15b335b1d111d31f26c8ef80f/numpy-2.4.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a195f4216be9305a73c0e91c9b026a35f2161237cf1c6de9b681637772ea657", size = 14822225, upload-time = "2026-03-09T07:58:11.034Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cd/8d/7730fa9278cf6648639946cc816e7cc89f0d891602584697923375f801ed/numpy-2.4.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:cd32fbacb9fd1bf041bf8e89e4576b6f00b895f06d00914820ae06a616bdfef7", size = 5328769, upload-time = "2026-03-09T07:58:13.67Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/47/01/d2a137317c958b074d338807c1b6a383406cdf8b8e53b075d804cc3d211d/numpy-2.4.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:2e03c05abaee1f672e9d67bc858f300b5ccba1c21397211e8d77d98350972093", size = 6649461, upload-time = "2026-03-09T07:58:15.912Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/34/812ce12bc0f00272a4b0ec0d713cd237cb390666eb6206323d1cc9cedbb2/numpy-2.4.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d1ce23cce91fcea443320a9d0ece9b9305d4368875bab09538f7a5b4131938a", size = 15725809, upload-time = "2026-03-09T07:58:17.787Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/c0/2aed473a4823e905e765fee3dc2cbf504bd3e68ccb1150fbdabd5c39f527/numpy-2.4.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c59020932feb24ed49ffd03704fbab89f22aa9c0d4b180ff45542fe8918f5611", size = 16655242, upload-time = "2026-03-09T07:58:20.476Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f2/c8/7e052b2fc87aa0e86de23f20e2c42bd261c624748aa8efd2c78f7bb8d8c6/numpy-2.4.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9684823a78a6cd6ad7511fc5e25b07947d1d5b5e2812c93fe99d7d4195130720", size = 17080660, upload-time = "2026-03-09T07:58:23.067Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/3d/0876746044db2adcb11549f214d104f2e1be00f07a67edbb4e2812094847/numpy-2.4.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0200b25c687033316fb39f0ff4e3e690e8957a2c3c8d22499891ec58c37a3eb5", size = 18380384, upload-time = "2026-03-09T07:58:25.839Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/12/8160bea39da3335737b10308df4f484235fd297f556745f13092aa039d3b/numpy-2.4.3-cp314-cp314t-win32.whl", hash = "sha256:5e10da9e93247e554bb1d22f8edc51847ddd7dde52d85ce31024c1b4312bfba0", size = 6154547, upload-time = "2026-03-09T07:58:28.289Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/42/f3/76534f61f80d74cc9cdf2e570d3d4eeb92c2280a27c39b0aaf471eda7b48/numpy-2.4.3-cp314-cp314t-win_amd64.whl", hash = "sha256:45f003dbdffb997a03da2d1d0cb41fbd24a87507fb41605c0420a3db5bd4667b", size = 12633645, upload-time = "2026-03-09T07:58:30.384Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1f/b6/7c0d4334c15983cec7f92a69e8ce9b1e6f31857e5ee3a413ac424e6bd63d/numpy-2.4.3-cp314-cp314t-win_arm64.whl", hash = "sha256:4d382735cecd7bcf090172489a525cd7d4087bc331f7df9f60ddc9a296cf208e", size = 10565454, upload-time = "2026-03-09T07:58:33.031Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/64/e4/4dab9fb43c83719c29241c535d9e07be73bea4bc0c6686c5816d8e1b6689/numpy-2.4.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c6b124bfcafb9e8d3ed09130dbee44848c20b3e758b6bbf006e641778927c028", size = 16834892, upload-time = "2026-03-09T07:58:35.334Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c9/29/f8b6d4af90fed3dfda84ebc0df06c9833d38880c79ce954e5b661758aa31/numpy-2.4.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:76dbb9d4e43c16cf9aa711fcd8de1e2eeb27539dcefb60a1d5e9f12fae1d1ed8", size = 14893070, upload-time = "2026-03-09T07:58:37.7Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/04/a19b3c91dbec0a49269407f15d5753673a09832daed40c45e8150e6fa558/numpy-2.4.3-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:29363fbfa6f8ee855d7569c96ce524845e3d726d6c19b29eceec7dd555dab152", size = 5399609, upload-time = "2026-03-09T07:58:39.853Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/79/34/4d73603f5420eab89ea8a67097b31364bf7c30f811d4dd84b1659c7476d9/numpy-2.4.3-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:bc71942c789ef415a37f0d4eab90341425a00d538cd0642445d30b41023d3395", size = 6714355, upload-time = "2026-03-09T07:58:42.365Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/ad/1100d7229bb248394939a12a8074d485b655e8ed44207d328fdd7fcebc7b/numpy-2.4.3-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e58765ad74dcebd3ef0208a5078fba32dc8ec3578fe84a604432950cd043d79", size = 15800434, upload-time = "2026-03-09T07:58:44.837Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0c/fd/16d710c085d28ba4feaf29ac60c936c9d662e390344f94a6beaa2ac9899b/numpy-2.4.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e236dbda4e1d319d681afcbb136c0c4a8e0f1a5c58ceec2adebb547357fe857", size = 16729409, upload-time = "2026-03-09T07:58:47.972Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/a7/b35835e278c18b85206834b3aa3abe68e77a98769c59233d1f6300284781/numpy-2.4.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b42639cdde6d24e732ff823a3fa5b701d8acad89c4142bc1d0bd6dc85200ba5", size = 12504685, upload-time = "2026-03-09T07:58:50.525Z" },
]
[[package]]
name = "packaging"
version = "26.0"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
]
[[package]]
name = "pluggy"
version = "1.6.0"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
]
[[package]]
name = "pygments"
version = "2.19.2"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
]
[[package]]
name = "pytest"
version = "9.0.2"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
{ name = "iniconfig" },
{ name = "packaging" },
{ name = "pluggy" },
{ name = "pygments" },
{ name = "tomli", marker = "python_full_version < '3.11'" },
]
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]
[[package]]
name = "rapid-pose-triangulation"
version = "0.1.0"
source = { editable = "." }
dependencies = [
{ name = "numpy", version = "2.2.6", source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }, marker = "python_full_version < '3.11'" },
{ name = "numpy", version = "2.4.3", source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }, marker = "python_full_version >= '3.11'" },
]
[package.dev-dependencies]
dev = [
{ name = "pytest" },
]
[package.metadata]
requires-dist = [{ name = "numpy", specifier = ">=2.0" }]
[package.metadata.requires-dev]
dev = [{ name = "pytest", specifier = ">=8.3" }]
[[package]]
name = "tomli"
version = "2.4.0"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" },
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" },
]
[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" }
sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]